summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bitbake/lib/bb/fetch/hg.py9
-rw-r--r--meta-moblin/packages/json-glib/json-glib_git.bb2
-rw-r--r--meta-moblin/packages/libunique/libunique_1.0.6.bb8
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0001-drm-remove-define-for-non-linux-systems.patch48
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0002-i915-remove-settable-use_mi_batchbuffer_start.patch60
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0003-i915-Ignore-X-server-provided-mmio-address.patch41
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0004-i915-Use-more-consistent-names-for-regs-and-store.patch2746
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch424
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch46
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0007-i915-Initialize-hardware-status-page-at-device-load.patch137
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch572
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0009-drm-fix-sysfs-error-path.patch23
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0010-i915-separate-suspend-resume-functions.patch1079
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0011-drm-vblank-rework.patch1534
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0012-Export-shmem_file_setup-for-DRM-GEM.patch25
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch24
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch5483
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0015-i915-Add-chip-set-ID-param.patch35
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch205
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0017-i915-Make-use-of-sarea_priv-conditional.patch147
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch44
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch32
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch23
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch23
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch58
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0023-drm-clean-up-many-sparse-warnings-in-i915.patch192
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0024-fastboot-create-a-asynchronous-initlevel.patch133
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch59
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch51
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch37
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0028-fastboot-sync-the-async-execution-before-late_initc.patch92
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0029-fastboot-make-fastboot-a-config-option.patch53
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch64
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch41
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch41
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch32
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0034-fastboot-fix-typo-in-init-Kconfig-text.patch26
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch161
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0036-warning-fix-init-do_mounts_md-c.patch82
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0037-init-initramfs.c-unused-function-when-compiling-wit.patch37
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch38
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch177
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch91
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0041-r8169-8101e.patch940
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0042-intelfb-945gme.patch154
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/0043-superreadahead-patch.patch65
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow3137
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch33991
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/i915_split.patch1627
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/psb-driver.patch21564
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch486
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch191
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch58
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch53
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-netbook)1165
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi127
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow8
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst2316
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook52
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-menlow)543
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook (renamed from meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-netbook)2
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch128
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch11
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0002-fastboot-remove-wait-for-all-devices-before-mounti.patch)15
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch56
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0006-Revert-drm-i915-GEM-on-PAE-has-problems-disable.patch)10
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch208
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0005-fastboot-async-enable-default.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch20
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0003-fastboot-remove-duplicate-unpack_to_rootfs.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch285
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch40
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch92
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch28
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch32
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch57
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch83
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch336
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch21
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch38
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch28
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch37524
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0001-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0007-acer-error-msg.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch (renamed from meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0004-superreadahead-patch.patch)0
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch6095
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch130
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch69
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch139
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.27.bb59
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.28+2.6.29-rc2.bb24
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb46
-rw-r--r--meta-moblin/packages/mojito/mojito_git.bb7
-rw-r--r--meta-moblin/packages/pulseaudio/libatomics-ops/fedora/libatomic_ops-1.2-ppclwzfix.patch (renamed from meta-openmoko/packages/pulseaudio/libatomics-ops/fedora/libatomic_ops-1.2-ppclwzfix.patch)0
-rw-r--r--meta-moblin/packages/pulseaudio/libatomics-ops_1.2.bb (renamed from meta-openmoko/packages/pulseaudio/libatomics-ops_1.2.bb)0
-rw-r--r--meta-moblin/packages/pulseaudio/libcanberra/autoconf_version.patch38
-rw-r--r--meta-moblin/packages/pulseaudio/libcanberra_0.10.bb28
-rw-r--r--meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/2113.diff (renamed from meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/2113.diff)0
-rw-r--r--meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/2114.diff (renamed from meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/2114.diff)0
-rw-r--r--meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/autoconf_version.patch13
-rw-r--r--meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/buildfix.patch13
-rw-r--r--meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/gcc4-compile-fix.patch (renamed from meta-openmoko/packages/pulseaudio/files/gcc4-compile-fix.patch)0
-rw-r--r--meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/volatiles.04_pulse (renamed from meta-openmoko/packages/pulseaudio/files/volatiles.04_pulse)0
-rw-r--r--meta-moblin/packages/pulseaudio/pulseaudio.inc (renamed from meta-openmoko/packages/pulseaudio/pulse.inc)0
-rw-r--r--meta-moblin/packages/pulseaudio/pulseaudio_0.9.12.bb17
-rw-r--r--meta-moblin/packages/tasks/task-moblin-x11-netbook.bb4
-rw-r--r--meta-openmoko/packages/pulseaudio/files/disable-using-glibc-tls.patch25
-rw-r--r--meta-openmoko/packages/pulseaudio/files/fix-dbus-without-hal.patch15
-rw-r--r--meta-openmoko/packages/pulseaudio/files/fix-shm.patch20
-rw-r--r--meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/libpulsedsp-references-libpulsecore.patch24
-rw-r--r--meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/libtool2.patch48
-rw-r--r--meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/pa-drop-caps-returns-void.patch17
-rw-r--r--meta-openmoko/packages/pulseaudio/pulseaudio_0.9.10.bb21
-rw-r--r--meta/classes/base.bbclass11
-rw-r--r--meta/classes/package.bbclass4
-rw-r--r--meta/conf/bitbake.conf4
-rw-r--r--meta/conf/checksums.ini124
-rw-r--r--meta/conf/distro/include/poky-fixed-revisions.inc8
-rw-r--r--meta/conf/distro/include/poky-floating-revisions.inc2
-rw-r--r--meta/conf/distro/include/preferred-xorg-versions.inc10
-rw-r--r--meta/packages/clutter/clutter-mozembed/link-with-g++.patch21
-rw-r--r--meta/packages/clutter/clutter/enable_tests.patch30
-rw-r--r--meta/packages/dbus/dbus-native_1.2.4.bb2
-rw-r--r--meta/packages/dbus/dbus.inc2
-rw-r--r--meta/packages/dbus/dbus_1.2.4.bb3
-rw-r--r--meta/packages/drm/files/poulsbo.patch2516
-rw-r--r--meta/packages/drm/libdrm-2.4.7/installtests.patch39
-rw-r--r--meta/packages/drm/libdrm_2.4.4.bb9
-rw-r--r--meta/packages/drm/libdrm_2.4.7.bb12
-rw-r--r--meta/packages/e2fsprogs/e2fsprogs-native_1.41.2.bb25
-rw-r--r--meta/packages/initrdscripts/files/init-live.sh1
-rw-r--r--meta/packages/initrdscripts/initramfs-live-boot_1.0.bb2
-rw-r--r--meta/packages/libnl/libnl_1.0-pre8.bb (renamed from meta/packages/libnl/libnl_1.0-pre6.bb)6
-rw-r--r--meta/packages/libproxy/libproxy/asneededfix.patch13
-rw-r--r--meta/packages/libproxy/libproxy_0.2.3.bb16
-rw-r--r--meta/packages/libsoup/libsoup-2.4_2.25.91.bb14
-rw-r--r--meta/packages/mesa/mesa-7.0.2/fix-host-compile.patch30
-rw-r--r--meta/packages/mesa/mesa-7.0.2/mklib-rpath-link.patch23
-rw-r--r--meta/packages/mesa/mesa-dri_7.4.bb (renamed from meta/packages/mesa/mesa-dri_7.2.bb)0
-rw-r--r--meta/packages/mesa/mesa-dri_git.bb2
-rw-r--r--meta/packages/mesa/mesa-xlib_7.4.bb (renamed from meta/packages/mesa/mesa-xlib_7.2.bb)0
-rw-r--r--meta/packages/mozilla-headless/mozilla-headless/configurefix.patch203
-rw-r--r--meta/packages/mozilla-headless/mozilla-headless_git.bb10
-rw-r--r--meta/packages/mutter/mutter/crosscompile.patch (renamed from meta/packages/gnome/metacity-clutter/crosscompile.patch)0
-rw-r--r--meta/packages/mutter/mutter/fix_pkgconfig.patch (renamed from meta/packages/gnome/metacity-clutter/fix_pkgconfig.patch)0
-rw-r--r--meta/packages/mutter/mutter/nodocs.patch (renamed from meta/packages/gnome/metacity-clutter/nodocs.patch)0
-rw-r--r--meta/packages/mutter/mutter_git.bb (renamed from meta/packages/gnome/metacity-clutter_git.bb)6
-rw-r--r--meta/packages/sqlite/sqlite3_3.6.10.bb (renamed from meta/packages/sqlite/sqlite3_3.6.7.bb)0
-rw-r--r--meta/packages/tasks/task-poky-x11-netbook.bb4
-rw-r--r--meta/packages/xorg-driver/xf86-input-evdev_2.2.1.bb (renamed from meta/packages/xorg-driver/xf86-input-evdev_2.1.1.bb)0
-rw-r--r--meta/packages/xorg-driver/xf86-input-synaptics_1.1.0.bb (renamed from meta/packages/xorg-driver/xf86-input-synaptics_0.99.3.bb)0
-rw-r--r--meta/packages/xorg-driver/xf86-video-intel-dri2_git.bb27
-rw-r--r--meta/packages/xorg-driver/xf86-video-intel_2.7.0.bb (renamed from meta/packages/xorg-driver/xf86-video-intel_2.6.0.bb)0
-rw-r--r--meta/packages/xorg-lib/libice_1.0.4.bb2
-rw-r--r--meta/packages/xorg-lib/libsm_1.1.0.bb2
-rw-r--r--meta/packages/xorg-lib/libxt_1.0.5.bb2
-rw-r--r--meta/packages/xorg-xserver/xserver-xf86-dri-lite.inc2
157 files changed, 50514 insertions, 79251 deletions
diff --git a/bitbake/lib/bb/fetch/hg.py b/bitbake/lib/bb/fetch/hg.py
index b87fd0fbe..7643e159e 100644
--- a/bitbake/lib/bb/fetch/hg.py
+++ b/bitbake/lib/bb/fetch/hg.py
@@ -123,9 +123,6 @@ class Hg(Fetch):
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
runfetchcmd(updatecmd, d)
- updatecmd = self._buildhgcommand(ud, d, "update")
- bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
- runfetchcmd(updatecmd, d)
else:
fetchcmd = self._buildhgcommand(ud, d, "fetch")
bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
@@ -134,6 +131,12 @@ class Hg(Fetch):
os.chdir(ud.pkgdir)
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % fetchcmd)
runfetchcmd(fetchcmd, d)
+
+ # Even when we clone (fetch), we still need to update as hg's clone
+ # won't checkout the specified revision if its on a branch
+ updatecmd = self._buildhgcommand(ud, d, "update")
+ bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
+ runfetchcmd(updatecmd, d)
os.chdir(ud.pkgdir)
try:
diff --git a/meta-moblin/packages/json-glib/json-glib_git.bb b/meta-moblin/packages/json-glib/json-glib_git.bb
index fc84accaa..0033715d8 100644
--- a/meta-moblin/packages/json-glib/json-glib_git.bb
+++ b/meta-moblin/packages/json-glib/json-glib_git.bb
@@ -9,6 +9,8 @@ DEPENDS = "glib-2.0"
inherit autotools_stage
+acpaths = "-I ${S}/build/autotools "
+
do_configure_prepend () {
touch ${S}/gtk-doc.make
} \ No newline at end of file
diff --git a/meta-moblin/packages/libunique/libunique_1.0.6.bb b/meta-moblin/packages/libunique/libunique_1.0.6.bb
new file mode 100644
index 000000000..bd796fb46
--- /dev/null
+++ b/meta-moblin/packages/libunique/libunique_1.0.6.bb
@@ -0,0 +1,8 @@
+SRC_URI = "http://ftp.gnome.org/pub/GNOME/sources/libunique/1.0/libunique-1.0.6.tar.bz2"
+PR = "r0"
+
+DEPENDS = "glib-2.0 gtk+ dbus"
+
+S = "${WORKDIR}/unique-${PV}"
+
+inherit autotools_stage
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0001-drm-remove-define-for-non-linux-systems.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0001-drm-remove-define-for-non-linux-systems.patch
deleted file mode 100644
index 588c1af70..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0001-drm-remove-define-for-non-linux-systems.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-commit 2e6ec7cdc09f36be1cbe9aeaccfc45f307fc0060
-Author: Carlos R. Mafra <crmafra2@gmail.com>
-Date: Wed Jul 30 12:29:37 2008 -0700
-
- drm: remove #define's for non-linux systems
-
- There is no point in considering FreeBSD et al. in the linux kernel
- source code.
-
- Signed-off-by: Carlos R. Mafra <crmafra@gmail.com>
- Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/include/drm/drm.h b/include/drm/drm.h
-index 38d3c6b..0864c69 100644
---- a/include/drm/drm.h
-+++ b/include/drm/drm.h
-@@ -36,7 +36,6 @@
- #ifndef _DRM_H_
- #define _DRM_H_
-
--#if defined(__linux__)
- #if defined(__KERNEL__)
- #endif
- #include <asm/ioctl.h> /* For _IO* macros */
-@@ -46,22 +45,6 @@
- #define DRM_IOC_WRITE _IOC_WRITE
- #define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
- #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
--#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
--#if defined(__FreeBSD__) && defined(IN_MODULE)
--/* Prevent name collision when including sys/ioccom.h */
--#undef ioctl
--#include <sys/ioccom.h>
--#define ioctl(a,b,c) xf86ioctl(a,b,c)
--#else
--#include <sys/ioccom.h>
--#endif /* __FreeBSD__ && xf86ioctl */
--#define DRM_IOCTL_NR(n) ((n) & 0xff)
--#define DRM_IOC_VOID IOC_VOID
--#define DRM_IOC_READ IOC_OUT
--#define DRM_IOC_WRITE IOC_IN
--#define DRM_IOC_READWRITE IOC_INOUT
--#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
--#endif
-
- #define DRM_MAJOR 226
- #define DRM_MAX_MINOR 15
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0002-i915-remove-settable-use_mi_batchbuffer_start.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0002-i915-remove-settable-use_mi_batchbuffer_start.patch
deleted file mode 100644
index f3c41f7cb..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0002-i915-remove-settable-use_mi_batchbuffer_start.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-commit 91019197abbfde388d0b71b0fc8979a936c23fe3
-Author: Keith Packard <keithp@keithp.com>
-Date: Wed Jul 30 12:28:47 2008 -0700
-
- i915: remove settable use_mi_batchbuffer_start
-
- The driver can know what hardware requires MI_BATCH_BUFFER vs
- MI_BATCH_BUFFER_START; there's no reason to let user mode configure this.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 8897434..24adbde 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -159,13 +159,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-
-- /* We are using separate values as placeholders for mechanisms for
-- * private backbuffer/depthbuffer usage.
-- */
-- dev_priv->use_mi_batchbuffer_start = 0;
-- if (IS_I965G(dev)) /* 965 doesn't support older method */
-- dev_priv->use_mi_batchbuffer_start = 1;
--
- /* Allow hardware batchbuffers unless told otherwise.
- */
- dev_priv->allow_batchbuffer = 1;
-@@ -486,7 +479,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
- return ret;
- }
-
-- if (dev_priv->use_mi_batchbuffer_start) {
-+ if (!IS_I830(dev) && !IS_845G(dev)) {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
-@@ -697,8 +690,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
-
- switch (param->param) {
- case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-- if (!IS_I965G(dev))
-- dev_priv->use_mi_batchbuffer_start = param->value;
- break;
- case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- dev_priv->tex_lru_log_granularity = param->value;
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index d7326d9..2d441d3 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -99,7 +99,6 @@ typedef struct drm_i915_private {
- int front_offset;
- int current_page;
- int page_flipping;
-- int use_mi_batchbuffer_start;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0003-i915-Ignore-X-server-provided-mmio-address.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0003-i915-Ignore-X-server-provided-mmio-address.patch
deleted file mode 100644
index 9f7e0b4bc..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0003-i915-Ignore-X-server-provided-mmio-address.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-commit 20ae3cf7d4a9ae8d23bcffa67c9a34fc2640d217
-Author: Keith Packard <keithp@keithp.com>
-Date: Wed Jul 30 12:36:08 2008 -0700
-
- i915: Ignore X server provided mmio address
-
- It is already correctly detected by the kernel for use in suspend/resume.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 24adbde..01a869b 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -121,13 +121,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
- return -EINVAL;
- }
-
-- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
-- if (!dev_priv->mmio_map) {
-- i915_dma_cleanup(dev);
-- DRM_ERROR("can not find mmio map!\n");
-- return -EINVAL;
-- }
--
- dev_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
-
-@@ -194,11 +187,6 @@ static int i915_dma_resume(struct drm_device * dev)
- return -EINVAL;
- }
-
-- if (!dev_priv->mmio_map) {
-- DRM_ERROR("can not find mmio map!\n");
-- return -EINVAL;
-- }
--
- if (dev_priv->ring.map.handle == NULL) {
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0004-i915-Use-more-consistent-names-for-regs-and-store.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0004-i915-Use-more-consistent-names-for-regs-and-store.patch
deleted file mode 100644
index f7a310ea6..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0004-i915-Use-more-consistent-names-for-regs-and-store.patch
+++ /dev/null
@@ -1,2746 +0,0 @@
-commit 573e91575687018b4307f53a50f4da0084dbdf3d
-Author: Jesse Barnes <jbarnes@virtuousgeek.org>
-Date: Tue Jul 29 11:54:06 2008 -0700
-
- i915: Use more consistent names for regs, and store them in a separate file.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 01a869b..7be580b 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -40,11 +40,11 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-- u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-+ u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- int i;
-
- for (i = 0; i < 10000; i++) {
-- ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-@@ -67,8 +67,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-
-- ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-- ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
-+ ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-+ ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-@@ -98,13 +98,13 @@ static int i915_dma_cleanup(struct drm_device * dev)
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- /* Need to rewrite hardware status page */
-- I915_WRITE(0x02080, 0x1ffff000);
-+ I915_WRITE(HWS_PGA, 0x1ffff000);
- }
-
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
-- I915_WRITE(0x2080, 0x1ffff000);
-+ I915_WRITE(HWS_PGA, 0x1ffff000);
- }
-
- return 0;
-@@ -170,7 +170,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-- I915_WRITE(0x02080, dev_priv->dma_status_page);
-+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- }
- DRM_DEBUG("Enabled hardware status page\n");
- return 0;
-@@ -201,9 +201,9 @@ static int i915_dma_resume(struct drm_device * dev)
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
- if (dev_priv->status_gfx_addr != 0)
-- I915_WRITE(0x02080, dev_priv->status_gfx_addr);
-+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- else
-- I915_WRITE(0x02080, dev_priv->dma_status_page);
-+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
-
- return 0;
-@@ -402,8 +402,8 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
-
- BEGIN_LP_RING(4);
-- OUT_RING(CMD_STORE_DWORD_IDX);
-- OUT_RING(20);
-+ OUT_RING(MI_STORE_DWORD_INDEX);
-+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
-@@ -505,7 +505,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
- i915_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
-- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
-+ OUT_RING(MI_FLUSH | MI_READ_FLUSH);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
-@@ -530,8 +530,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
-
- BEGIN_LP_RING(4);
-- OUT_RING(CMD_STORE_DWORD_IDX);
-- OUT_RING(20);
-+ OUT_RING(MI_STORE_DWORD_INDEX);
-+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
-@@ -728,8 +728,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-- I915_WRITE(0x02080, dev_priv->status_gfx_addr);
-- DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
-+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-+ DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
- DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
- return 0;
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index 93aed1c..6c99aab 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -279,13 +279,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
- dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
-- dev_priv->saveDSPABASE = I915_READ(DSPABASE);
-+ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
- if (IS_I965G(dev)) {
- dev_priv->saveDSPASURF = I915_READ(DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
- }
- i915_save_palette(dev, PIPE_A);
-- dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
-+ dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
-
- /* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
-@@ -307,13 +307,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
- dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
-- dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
-+ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
- if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
- dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
- }
- i915_save_palette(dev, PIPE_B);
-- dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
-+ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
-
- /* CRT state */
- dev_priv->saveADPA = I915_READ(ADPA);
-@@ -328,9 +328,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
- dev_priv->saveLVDS = I915_READ(LVDS);
- if (!IS_I830(dev) && !IS_845G(dev))
- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-- dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
-- dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
-- dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
-+ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
-+ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
-+ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
-
- /* FIXME: save TV & SDVO state */
-
-@@ -341,19 +341,19 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-
- /* Interrupt state */
-- dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
-- dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
-- dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
-+ dev_priv->saveIIR = I915_READ(IIR);
-+ dev_priv->saveIER = I915_READ(IER);
-+ dev_priv->saveIMR = I915_READ(IMR);
-
- /* VGA state */
-- dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
-- dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
-- dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
-+ dev_priv->saveVGA0 = I915_READ(VGA0);
-+ dev_priv->saveVGA1 = I915_READ(VGA1);
-+ dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
-
- /* Clock gating state */
- dev_priv->saveD_STATE = I915_READ(D_STATE);
-- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
-+ dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
-
- /* Cache mode state */
- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-@@ -363,7 +363,7 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
-
- /* Scratch space */
- for (i = 0; i < 16; i++) {
-- dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
-+ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
- }
- for (i = 0; i < 3; i++)
-@@ -424,7 +424,7 @@ static int i915_resume(struct drm_device *dev)
- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
-- I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
-+ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
- if (IS_I965G(dev)) {
- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
-@@ -436,7 +436,7 @@ static int i915_resume(struct drm_device *dev)
- i915_restore_palette(dev, PIPE_A);
- /* Enable the plane */
- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
-- I915_WRITE(DSPABASE, I915_READ(DSPABASE));
-+ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
-
- /* Pipe & plane B info */
- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
-@@ -466,7 +466,7 @@ static int i915_resume(struct drm_device *dev)
- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
-- I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
-+ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
- if (IS_I965G(dev)) {
- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
-@@ -478,7 +478,7 @@ static int i915_resume(struct drm_device *dev)
- i915_restore_palette(dev, PIPE_B);
- /* Enable the plane */
- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
-- I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
-+ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
-
- /* CRT state */
- I915_WRITE(ADPA, dev_priv->saveADPA);
-@@ -493,9 +493,9 @@ static int i915_resume(struct drm_device *dev)
-
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-- I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
-- I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
-- I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
-+ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
-+ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
-+ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
-
- /* FIXME: restore TV & SDVO state */
-@@ -508,14 +508,14 @@ static int i915_resume(struct drm_device *dev)
-
- /* VGA state */
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
-- I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
-- I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
-- I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
-+ I915_WRITE(VGA0, dev_priv->saveVGA0);
-+ I915_WRITE(VGA1, dev_priv->saveVGA1);
-+ I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
- udelay(150);
-
- /* Clock gating state */
- I915_WRITE (D_STATE, dev_priv->saveD_STATE);
-- I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
-+ I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
-
- /* Cache mode state */
- I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
-@@ -524,7 +524,7 @@ static int i915_resume(struct drm_device *dev)
- I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
-
- for (i = 0; i < 16; i++) {
-- I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
-+ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
- }
- for (i = 0; i < 3; i++)
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 2d441d3..afb51a3 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -30,6 +30,8 @@
- #ifndef _I915_DRV_H_
- #define _I915_DRV_H_
-
-+#include "i915_reg.h"
-+
- /* General customization:
- */
-
-@@ -138,7 +140,7 @@ typedef struct drm_i915_private {
- u32 saveDSPASTRIDE;
- u32 saveDSPASIZE;
- u32 saveDSPAPOS;
-- u32 saveDSPABASE;
-+ u32 saveDSPAADDR;
- u32 saveDSPASURF;
- u32 saveDSPATILEOFF;
- u32 savePFIT_PGM_RATIOS;
-@@ -159,24 +161,24 @@ typedef struct drm_i915_private {
- u32 saveDSPBSTRIDE;
- u32 saveDSPBSIZE;
- u32 saveDSPBPOS;
-- u32 saveDSPBBASE;
-+ u32 saveDSPBADDR;
- u32 saveDSPBSURF;
- u32 saveDSPBTILEOFF;
-- u32 saveVCLK_DIVISOR_VGA0;
-- u32 saveVCLK_DIVISOR_VGA1;
-- u32 saveVCLK_POST_DIV;
-+ u32 saveVGA0;
-+ u32 saveVGA1;
-+ u32 saveVGA_PD;
- u32 saveVGACNTRL;
- u32 saveADPA;
- u32 saveLVDS;
-- u32 saveLVDSPP_ON;
-- u32 saveLVDSPP_OFF;
-+ u32 savePP_ON_DELAYS;
-+ u32 savePP_OFF_DELAYS;
- u32 saveDVOA;
- u32 saveDVOB;
- u32 saveDVOC;
- u32 savePP_ON;
- u32 savePP_OFF;
- u32 savePP_CONTROL;
-- u32 savePP_CYCLE;
-+ u32 savePP_DIVISOR;
- u32 savePFIT_CONTROL;
- u32 save_palette_a[256];
- u32 save_palette_b[256];
-@@ -189,7 +191,7 @@ typedef struct drm_i915_private {
- u32 saveIMR;
- u32 saveCACHE_MODE_0;
- u32 saveD_STATE;
-- u32 saveDSPCLK_GATE_D;
-+ u32 saveCG_2D_DIS;
- u32 saveMI_ARB_STATE;
- u32 saveSWF0[16];
- u32 saveSWF1[16];
-@@ -283,816 +285,26 @@ extern void i915_mem_release(struct drm_device * dev,
- if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
-- I915_WRITE(LP_RING + RING_TAIL, outring); \
-+ I915_WRITE(PRB0_TAIL, outring); \
- } while(0)
-
--extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
--
--/* Extended config space */
--#define LBB 0xf4
--
--/* VGA stuff */
--
--#define VGA_ST01_MDA 0x3ba
--#define VGA_ST01_CGA 0x3da
--
--#define VGA_MSR_WRITE 0x3c2
--#define VGA_MSR_READ 0x3cc
--#define VGA_MSR_MEM_EN (1<<1)
--#define VGA_MSR_CGA_MODE (1<<0)
--
--#define VGA_SR_INDEX 0x3c4
--#define VGA_SR_DATA 0x3c5
--
--#define VGA_AR_INDEX 0x3c0
--#define VGA_AR_VID_EN (1<<5)
--#define VGA_AR_DATA_WRITE 0x3c0
--#define VGA_AR_DATA_READ 0x3c1
--
--#define VGA_GR_INDEX 0x3ce
--#define VGA_GR_DATA 0x3cf
--/* GR05 */
--#define VGA_GR_MEM_READ_MODE_SHIFT 3
--#define VGA_GR_MEM_READ_MODE_PLANE 1
--/* GR06 */
--#define VGA_GR_MEM_MODE_MASK 0xc
--#define VGA_GR_MEM_MODE_SHIFT 2
--#define VGA_GR_MEM_A0000_AFFFF 0
--#define VGA_GR_MEM_A0000_BFFFF 1
--#define VGA_GR_MEM_B0000_B7FFF 2
--#define VGA_GR_MEM_B0000_BFFFF 3
--
--#define VGA_DACMASK 0x3c6
--#define VGA_DACRX 0x3c7
--#define VGA_DACWX 0x3c8
--#define VGA_DACDATA 0x3c9
--
--#define VGA_CR_INDEX_MDA 0x3b4
--#define VGA_CR_DATA_MDA 0x3b5
--#define VGA_CR_INDEX_CGA 0x3d4
--#define VGA_CR_DATA_CGA 0x3d5
--
--#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
--#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
--#define CMD_REPORT_HEAD (7<<23)
--#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
--#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
--
--#define INST_PARSER_CLIENT 0x00000000
--#define INST_OP_FLUSH 0x02000000
--#define INST_FLUSH_MAP_CACHE 0x00000001
--
--#define BB1_START_ADDR_MASK (~0x7)
--#define BB1_PROTECTED (1<<0)
--#define BB1_UNPROTECTED (0<<0)
--#define BB2_END_ADDR_MASK (~0x7)
--
--/* Framebuffer compression */
--#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
--#define FBC_LL_BASE 0x03204 /* 4k page aligned */
--#define FBC_CONTROL 0x03208
--#define FBC_CTL_EN (1<<31)
--#define FBC_CTL_PERIODIC (1<<30)
--#define FBC_CTL_INTERVAL_SHIFT (16)
--#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
--#define FBC_CTL_STRIDE_SHIFT (5)
--#define FBC_CTL_FENCENO (1<<0)
--#define FBC_COMMAND 0x0320c
--#define FBC_CMD_COMPRESS (1<<0)
--#define FBC_STATUS 0x03210
--#define FBC_STAT_COMPRESSING (1<<31)
--#define FBC_STAT_COMPRESSED (1<<30)
--#define FBC_STAT_MODIFIED (1<<29)
--#define FBC_STAT_CURRENT_LINE (1<<0)
--#define FBC_CONTROL2 0x03214
--#define FBC_CTL_FENCE_DBL (0<<4)
--#define FBC_CTL_IDLE_IMM (0<<2)
--#define FBC_CTL_IDLE_FULL (1<<2)
--#define FBC_CTL_IDLE_LINE (2<<2)
--#define FBC_CTL_IDLE_DEBUG (3<<2)
--#define FBC_CTL_CPU_FENCE (1<<1)
--#define FBC_CTL_PLANEA (0<<0)
--#define FBC_CTL_PLANEB (1<<0)
--#define FBC_FENCE_OFF 0x0321b
--
--#define FBC_LL_SIZE (1536)
--#define FBC_LL_PAD (32)
--
--/* Interrupt bits:
-- */
--#define USER_INT_FLAG (1<<1)
--#define VSYNC_PIPEB_FLAG (1<<5)
--#define VSYNC_PIPEA_FLAG (1<<7)
--#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
--
--#define I915REG_HWSTAM 0x02098
--#define I915REG_INT_IDENTITY_R 0x020a4
--#define I915REG_INT_MASK_R 0x020a8
--#define I915REG_INT_ENABLE_R 0x020a0
--
--#define I915REG_PIPEASTAT 0x70024
--#define I915REG_PIPEBSTAT 0x71024
--
--#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
--#define I915_VBLANK_CLEAR (1UL<<1)
--
--#define SRX_INDEX 0x3c4
--#define SRX_DATA 0x3c5
--#define SR01 1
--#define SR01_SCREEN_OFF (1<<5)
--
--#define PPCR 0x61204
--#define PPCR_ON (1<<0)
--
--#define DVOB 0x61140
--#define DVOB_ON (1<<31)
--#define DVOC 0x61160
--#define DVOC_ON (1<<31)
--#define LVDS 0x61180
--#define LVDS_ON (1<<31)
--
--#define ADPA 0x61100
--#define ADPA_DPMS_MASK (~(3<<10))
--#define ADPA_DPMS_ON (0<<10)
--#define ADPA_DPMS_SUSPEND (1<<10)
--#define ADPA_DPMS_STANDBY (2<<10)
--#define ADPA_DPMS_OFF (3<<10)
--
--#define NOPID 0x2094
--#define LP_RING 0x2030
--#define HP_RING 0x2040
--/* The binner has its own ring buffer:
-- */
--#define HWB_RING 0x2400
--
--#define RING_TAIL 0x00
--#define TAIL_ADDR 0x001FFFF8
--#define RING_HEAD 0x04
--#define HEAD_WRAP_COUNT 0xFFE00000
--#define HEAD_WRAP_ONE 0x00200000
--#define HEAD_ADDR 0x001FFFFC
--#define RING_START 0x08
--#define START_ADDR 0x0xFFFFF000
--#define RING_LEN 0x0C
--#define RING_NR_PAGES 0x001FF000
--#define RING_REPORT_MASK 0x00000006
--#define RING_REPORT_64K 0x00000002
--#define RING_REPORT_128K 0x00000004
--#define RING_NO_REPORT 0x00000000
--#define RING_VALID_MASK 0x00000001
--#define RING_VALID 0x00000001
--#define RING_INVALID 0x00000000
--
--/* Instruction parser error reg:
-- */
--#define IPEIR 0x2088
--
--/* Scratch pad debug 0 reg:
-- */
--#define SCPD0 0x209c
--
--/* Error status reg:
-- */
--#define ESR 0x20b8
--
--/* Secondary DMA fetch address debug reg:
-- */
--#define DMA_FADD_S 0x20d4
--
--/* Memory Interface Arbitration State
-- */
--#define MI_ARB_STATE 0x20e4
--
--/* Cache mode 0 reg.
-- * - Manipulating render cache behaviour is central
-- * to the concept of zone rendering, tuning this reg can help avoid
-- * unnecessary render cache reads and even writes (for z/stencil)
-- * at beginning and end of scene.
-- *
-- * - To change a bit, write to this reg with a mask bit set and the
-- * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
-- */
--#define Cache_Mode_0 0x2120
--#define CACHE_MODE_0 0x2120
--#define CM0_MASK_SHIFT 16
--#define CM0_IZ_OPT_DISABLE (1<<6)
--#define CM0_ZR_OPT_DISABLE (1<<5)
--#define CM0_DEPTH_EVICT_DISABLE (1<<4)
--#define CM0_COLOR_EVICT_DISABLE (1<<3)
--#define CM0_DEPTH_WRITE_DISABLE (1<<1)
--#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
--
--
--/* Graphics flush control. A CPU write flushes the GWB of all writes.
-- * The data is discarded.
-- */
--#define GFX_FLSH_CNTL 0x2170
--
--/* Binner control. Defines the location of the bin pointer list:
-- */
--#define BINCTL 0x2420
--#define BC_MASK (1 << 9)
--
--/* Binned scene info.
-- */
--#define BINSCENE 0x2428
--#define BS_OP_LOAD (1 << 8)
--#define BS_MASK (1 << 22)
--
--/* Bin command parser debug reg:
-- */
--#define BCPD 0x2480
--
--/* Bin memory control debug reg:
-- */
--#define BMCD 0x2484
--
--/* Bin data cache debug reg:
-- */
--#define BDCD 0x2488
--
--/* Binner pointer cache debug reg:
-- */
--#define BPCD 0x248c
--
--/* Binner scratch pad debug reg:
-- */
--#define BINSKPD 0x24f0
--
--/* HWB scratch pad debug reg:
-- */
--#define HWBSKPD 0x24f4
--
--/* Binner memory pool reg:
-- */
--#define BMP_BUFFER 0x2430
--#define BMP_PAGE_SIZE_4K (0 << 10)
--#define BMP_BUFFER_SIZE_SHIFT 1
--#define BMP_ENABLE (1 << 0)
--
--/* Get/put memory from the binner memory pool:
-- */
--#define BMP_GET 0x2438
--#define BMP_PUT 0x2440
--#define BMP_OFFSET_SHIFT 5
--
--/* 3D state packets:
-- */
--#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
--
--#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
--#define SC_UPDATE_SCISSOR (0x1<<1)
--#define SC_ENABLE_MASK (0x1<<0)
--#define SC_ENABLE (0x1<<0)
--
--#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
--
--#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
--#define SCI_YMIN_MASK (0xffff<<16)
--#define SCI_XMIN_MASK (0xffff<<0)
--#define SCI_YMAX_MASK (0xffff<<16)
--#define SCI_XMAX_MASK (0xffff<<0)
--
--#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
--#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
--#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
--#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
--#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
--#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
--#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
--
--#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
--
--#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
--#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
--#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
--#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
--#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
--#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
--
--#define MI_BATCH_BUFFER ((0x30<<23)|1)
--#define MI_BATCH_BUFFER_START (0x31<<23)
--#define MI_BATCH_BUFFER_END (0xA<<23)
--#define MI_BATCH_NON_SECURE (1)
--#define MI_BATCH_NON_SECURE_I965 (1<<8)
--
--#define MI_WAIT_FOR_EVENT ((0x3<<23))
--#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
--#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
--#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
--
--#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
--
--#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
--#define ASYNC_FLIP (1<<22)
--#define DISPLAY_PLANE_A (0<<20)
--#define DISPLAY_PLANE_B (1<<20)
--
--/* Display regs */
--#define DSPACNTR 0x70180
--#define DSPBCNTR 0x71180
--#define DISPPLANE_SEL_PIPE_MASK (1<<24)
--
--/* Define the region of interest for the binner:
-- */
--#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
--
--#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
--
--#define CMD_MI_FLUSH (0x04 << 23)
--#define MI_NO_WRITE_FLUSH (1 << 2)
--#define MI_READ_FLUSH (1 << 0)
--#define MI_EXE_FLUSH (1 << 1)
--#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
--#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
--
--#define BREADCRUMB_BITS 31
--#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
--
--#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
--#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
--
--#define BLC_PWM_CTL 0x61254
--#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
--
--#define BLC_PWM_CTL2 0x61250
- /**
-- * This is the most significant 15 bits of the number of backlight cycles in a
-- * complete cycle of the modulated backlight control.
-+ * Reads a dword out of the status page, which is written to from the command
-+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
-+ * MI_STORE_DATA_IMM.
- *
-- * The actual value is this field multiplied by two.
-- */
--#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
--#define BLM_LEGACY_MODE (1 << 16)
--/**
-- * This is the number of cycles out of the backlight modulation cycle for which
-- * the backlight is on.
-+ * The following dwords have a reserved meaning:
-+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
-+ * 4: ring 0 head pointer
-+ * 5: ring 1 head pointer (915-class)
-+ * 6: ring 2 head pointer (915-class)
- *
-- * This field must be no greater than the number of cycles in the complete
-- * backlight modulation cycle.
-- */
--#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
--#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
--
--#define I915_GCFGC 0xf0
--#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
--#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
--#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
--#define I915_DISPLAY_CLOCK_MASK (7 << 4)
--
--#define I855_HPLLCC 0xc0
--#define I855_CLOCK_CONTROL_MASK (3 << 0)
--#define I855_CLOCK_133_200 (0 << 0)
--#define I855_CLOCK_100_200 (1 << 0)
--#define I855_CLOCK_100_133 (2 << 0)
--#define I855_CLOCK_166_250 (3 << 0)
--
--/* p317, 319
-+ * The area from dword 0x10 to 0x3ff is available for driver usage.
- */
--#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
--#define VCLK2_VCO_N 0x600a
--#define VCLK2_VCO_DIV_SEL 0x6012
--
--#define VCLK_DIVISOR_VGA0 0x6000
--#define VCLK_DIVISOR_VGA1 0x6004
--#define VCLK_POST_DIV 0x6010
--/** Selects a post divisor of 4 instead of 2. */
--# define VGA1_PD_P2_DIV_4 (1 << 15)
--/** Overrides the p2 post divisor field */
--# define VGA1_PD_P1_DIV_2 (1 << 13)
--# define VGA1_PD_P1_SHIFT 8
--/** P1 value is 2 greater than this field */
--# define VGA1_PD_P1_MASK (0x1f << 8)
--/** Selects a post divisor of 4 instead of 2. */
--# define VGA0_PD_P2_DIV_4 (1 << 7)
--/** Overrides the p2 post divisor field */
--# define VGA0_PD_P1_DIV_2 (1 << 5)
--# define VGA0_PD_P1_SHIFT 0
--/** P1 value is 2 greater than this field */
--# define VGA0_PD_P1_MASK (0x1f << 0)
--
--/* PCI D state control register */
--#define D_STATE 0x6104
--#define DSPCLK_GATE_D 0x6200
--
--/* I830 CRTC registers */
--#define HTOTAL_A 0x60000
--#define HBLANK_A 0x60004
--#define HSYNC_A 0x60008
--#define VTOTAL_A 0x6000c
--#define VBLANK_A 0x60010
--#define VSYNC_A 0x60014
--#define PIPEASRC 0x6001c
--#define BCLRPAT_A 0x60020
--#define VSYNCSHIFT_A 0x60028
--
--#define HTOTAL_B 0x61000
--#define HBLANK_B 0x61004
--#define HSYNC_B 0x61008
--#define VTOTAL_B 0x6100c
--#define VBLANK_B 0x61010
--#define VSYNC_B 0x61014
--#define PIPEBSRC 0x6101c
--#define BCLRPAT_B 0x61020
--#define VSYNCSHIFT_B 0x61028
--
--#define PP_STATUS 0x61200
--# define PP_ON (1 << 31)
--/**
-- * Indicates that all dependencies of the panel are on:
-- *
-- * - PLL enabled
-- * - pipe enabled
-- * - LVDS/DVOB/DVOC on
-- */
--# define PP_READY (1 << 30)
--# define PP_SEQUENCE_NONE (0 << 28)
--# define PP_SEQUENCE_ON (1 << 28)
--# define PP_SEQUENCE_OFF (2 << 28)
--# define PP_SEQUENCE_MASK 0x30000000
--#define PP_CONTROL 0x61204
--# define POWER_TARGET_ON (1 << 0)
--
--#define LVDSPP_ON 0x61208
--#define LVDSPP_OFF 0x6120c
--#define PP_CYCLE 0x61210
--
--#define PFIT_CONTROL 0x61230
--# define PFIT_ENABLE (1 << 31)
--# define PFIT_PIPE_MASK (3 << 29)
--# define PFIT_PIPE_SHIFT 29
--# define VERT_INTERP_DISABLE (0 << 10)
--# define VERT_INTERP_BILINEAR (1 << 10)
--# define VERT_INTERP_MASK (3 << 10)
--# define VERT_AUTO_SCALE (1 << 9)
--# define HORIZ_INTERP_DISABLE (0 << 6)
--# define HORIZ_INTERP_BILINEAR (1 << 6)
--# define HORIZ_INTERP_MASK (3 << 6)
--# define HORIZ_AUTO_SCALE (1 << 5)
--# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
--
--#define PFIT_PGM_RATIOS 0x61234
--# define PFIT_VERT_SCALE_MASK 0xfff00000
--# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
--
--#define PFIT_AUTO_RATIOS 0x61238
--
--
--#define DPLL_A 0x06014
--#define DPLL_B 0x06018
--# define DPLL_VCO_ENABLE (1 << 31)
--# define DPLL_DVO_HIGH_SPEED (1 << 30)
--# define DPLL_SYNCLOCK_ENABLE (1 << 29)
--# define DPLL_VGA_MODE_DIS (1 << 28)
--# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
--# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
--# define DPLL_MODE_MASK (3 << 26)
--# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
--# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
--# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
--# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
--# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
--# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
--/**
-- * The i830 generation, in DAC/serial mode, defines p1 as two plus this
-- * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
-- */
--# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
--/**
-- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
-- * this field (only one bit may be set).
-- */
--# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
--# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
--# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
--# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
--# define PLL_REF_INPUT_DREFCLK (0 << 13)
--# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
--# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
--# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
--# define PLL_REF_INPUT_MASK (3 << 13)
--# define PLL_LOAD_PULSE_PHASE_SHIFT 9
--/*
-- * Parallel to Serial Load Pulse phase selection.
-- * Selects the phase for the 10X DPLL clock for the PCIe
-- * digital display port. The range is 4 to 13; 10 or more
-- * is just a flip delay. The default is 6
-- */
--# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
--# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
--
--/**
-- * SDVO multiplier for 945G/GM. Not used on 965.
-- *
-- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
-- */
--# define SDVO_MULTIPLIER_MASK 0x000000ff
--# define SDVO_MULTIPLIER_SHIFT_HIRES 4
--# define SDVO_MULTIPLIER_SHIFT_VGA 0
--
--/** @defgroup DPLL_MD
-- * @{
-- */
--/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
--#define DPLL_A_MD 0x0601c
--/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
--#define DPLL_B_MD 0x06020
--/**
-- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
-- *
-- * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
-- */
--# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
--# define DPLL_MD_UDI_DIVIDER_SHIFT 24
--/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
--# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
--# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
--/**
-- * SDVO/UDI pixel multiplier.
-- *
-- * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
-- * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
-- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
-- * dummy bytes in the datastream at an increased clock rate, with both sides of
-- * the link knowing how many bytes are fill.
-- *
-- * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
-- * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
-- * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
-- * through an SDVO command.
-- *
-- * This register field has values of multiplication factor minus 1, with
-- * a maximum multiplier of 5 for SDVO.
-- */
--# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
--# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
--/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
-- * This best be set to the default value (3) or the CRT won't work. No,
-- * I don't entirely understand what this does...
-- */
--# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
--# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
--/** @} */
--
--#define DPLL_TEST 0x606c
--# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
--# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
--# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
--# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
--# define DPLLB_TEST_N_BYPASS (1 << 19)
--# define DPLLB_TEST_M_BYPASS (1 << 18)
--# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
--# define DPLLA_TEST_N_BYPASS (1 << 3)
--# define DPLLA_TEST_M_BYPASS (1 << 2)
--# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
--
--#define ADPA 0x61100
--#define ADPA_DAC_ENABLE (1<<31)
--#define ADPA_DAC_DISABLE 0
--#define ADPA_PIPE_SELECT_MASK (1<<30)
--#define ADPA_PIPE_A_SELECT 0
--#define ADPA_PIPE_B_SELECT (1<<30)
--#define ADPA_USE_VGA_HVPOLARITY (1<<15)
--#define ADPA_SETS_HVPOLARITY 0
--#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
--#define ADPA_VSYNC_CNTL_ENABLE 0
--#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
--#define ADPA_HSYNC_CNTL_ENABLE 0
--#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
--#define ADPA_VSYNC_ACTIVE_LOW 0
--#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
--#define ADPA_HSYNC_ACTIVE_LOW 0
--
--#define FPA0 0x06040
--#define FPA1 0x06044
--#define FPB0 0x06048
--#define FPB1 0x0604c
--# define FP_N_DIV_MASK 0x003f0000
--# define FP_N_DIV_SHIFT 16
--# define FP_M1_DIV_MASK 0x00003f00
--# define FP_M1_DIV_SHIFT 8
--# define FP_M2_DIV_MASK 0x0000003f
--# define FP_M2_DIV_SHIFT 0
--
--
--#define PORT_HOTPLUG_EN 0x61110
--# define SDVOB_HOTPLUG_INT_EN (1 << 26)
--# define SDVOC_HOTPLUG_INT_EN (1 << 25)
--# define TV_HOTPLUG_INT_EN (1 << 18)
--# define CRT_HOTPLUG_INT_EN (1 << 9)
--# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
--
--#define PORT_HOTPLUG_STAT 0x61114
--# define CRT_HOTPLUG_INT_STATUS (1 << 11)
--# define TV_HOTPLUG_INT_STATUS (1 << 10)
--# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
--# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
--# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
--# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
--# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
--# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
--
--#define SDVOB 0x61140
--#define SDVOC 0x61160
--#define SDVO_ENABLE (1 << 31)
--#define SDVO_PIPE_B_SELECT (1 << 30)
--#define SDVO_STALL_SELECT (1 << 29)
--#define SDVO_INTERRUPT_ENABLE (1 << 26)
--/**
-- * 915G/GM SDVO pixel multiplier.
-- *
-- * Programmed value is multiplier - 1, up to 5x.
-- *
-- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
-- */
--#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
--#define SDVO_PORT_MULTIPLY_SHIFT 23
--#define SDVO_PHASE_SELECT_MASK (15 << 19)
--#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
--#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
--#define SDVOC_GANG_MODE (1 << 16)
--#define SDVO_BORDER_ENABLE (1 << 7)
--#define SDVOB_PCIE_CONCURRENCY (1 << 3)
--#define SDVO_DETECTED (1 << 2)
--/* Bits to be preserved when writing */
--#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
--#define SDVOC_PRESERVE_MASK (1 << 17)
--
--/** @defgroup LVDS
-- * @{
-- */
--/**
-- * This register controls the LVDS output enable, pipe selection, and data
-- * format selection.
-- *
-- * All of the clock/data pairs are force powered down by power sequencing.
-- */
--#define LVDS 0x61180
--/**
-- * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
-- * the DPLL semantics change when the LVDS is assigned to that pipe.
-- */
--# define LVDS_PORT_EN (1 << 31)
--/** Selects pipe B for LVDS data. Must be set on pre-965. */
--# define LVDS_PIPEB_SELECT (1 << 30)
--
--/**
-- * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
-- * pixel.
-- */
--# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
--# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
--# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
--/**
-- * Controls the A3 data pair, which contains the additional LSBs for 24 bit
-- * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
-- * on.
-- */
--# define LVDS_A3_POWER_MASK (3 << 6)
--# define LVDS_A3_POWER_DOWN (0 << 6)
--# define LVDS_A3_POWER_UP (3 << 6)
--/**
-- * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
-- * is set.
-- */
--# define LVDS_CLKB_POWER_MASK (3 << 4)
--# define LVDS_CLKB_POWER_DOWN (0 << 4)
--# define LVDS_CLKB_POWER_UP (3 << 4)
--
--/**
-- * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
-- * setting for whether we are in dual-channel mode. The B3 pair will
-- * additionally only be powered up when LVDS_A3_POWER_UP is set.
-- */
--# define LVDS_B0B3_POWER_MASK (3 << 2)
--# define LVDS_B0B3_POWER_DOWN (0 << 2)
--# define LVDS_B0B3_POWER_UP (3 << 2)
--
--#define PIPEACONF 0x70008
--#define PIPEACONF_ENABLE (1<<31)
--#define PIPEACONF_DISABLE 0
--#define PIPEACONF_DOUBLE_WIDE (1<<30)
--#define I965_PIPECONF_ACTIVE (1<<30)
--#define PIPEACONF_SINGLE_WIDE 0
--#define PIPEACONF_PIPE_UNLOCKED 0
--#define PIPEACONF_PIPE_LOCKED (1<<25)
--#define PIPEACONF_PALETTE 0
--#define PIPEACONF_GAMMA (1<<24)
--#define PIPECONF_FORCE_BORDER (1<<25)
--#define PIPECONF_PROGRESSIVE (0 << 21)
--#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
--#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
--
--#define DSPARB 0x70030
--#define DSPARB_CSTART_MASK (0x7f << 7)
--#define DSPARB_CSTART_SHIFT 7
--#define DSPARB_BSTART_MASK (0x7f)
--#define DSPARB_BSTART_SHIFT 0
--
--#define PIPEBCONF 0x71008
--#define PIPEBCONF_ENABLE (1<<31)
--#define PIPEBCONF_DISABLE 0
--#define PIPEBCONF_DOUBLE_WIDE (1<<30)
--#define PIPEBCONF_DISABLE 0
--#define PIPEBCONF_GAMMA (1<<24)
--#define PIPEBCONF_PALETTE 0
--
--#define PIPEBGCMAXRED 0x71010
--#define PIPEBGCMAXGREEN 0x71014
--#define PIPEBGCMAXBLUE 0x71018
--#define PIPEBSTAT 0x71024
--#define PIPEBFRAMEHIGH 0x71040
--#define PIPEBFRAMEPIXEL 0x71044
--
--#define DSPACNTR 0x70180
--#define DSPBCNTR 0x71180
--#define DISPLAY_PLANE_ENABLE (1<<31)
--#define DISPLAY_PLANE_DISABLE 0
--#define DISPPLANE_GAMMA_ENABLE (1<<30)
--#define DISPPLANE_GAMMA_DISABLE 0
--#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
--#define DISPPLANE_8BPP (0x2<<26)
--#define DISPPLANE_15_16BPP (0x4<<26)
--#define DISPPLANE_16BPP (0x5<<26)
--#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
--#define DISPPLANE_32BPP (0x7<<26)
--#define DISPPLANE_STEREO_ENABLE (1<<25)
--#define DISPPLANE_STEREO_DISABLE 0
--#define DISPPLANE_SEL_PIPE_MASK (1<<24)
--#define DISPPLANE_SEL_PIPE_A 0
--#define DISPPLANE_SEL_PIPE_B (1<<24)
--#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
--#define DISPPLANE_SRC_KEY_DISABLE 0
--#define DISPPLANE_LINE_DOUBLE (1<<20)
--#define DISPPLANE_NO_LINE_DOUBLE 0
--#define DISPPLANE_STEREO_POLARITY_FIRST 0
--#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
--/* plane B only */
--#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
--#define DISPPLANE_ALPHA_TRANS_DISABLE 0
--#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
--#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
--
--#define DSPABASE 0x70184
--#define DSPASTRIDE 0x70188
--
--#define DSPBBASE 0x71184
--#define DSPBADDR DSPBBASE
--#define DSPBSTRIDE 0x71188
--
--#define DSPAKEYVAL 0x70194
--#define DSPAKEYMASK 0x70198
--
--#define DSPAPOS 0x7018C /* reserved */
--#define DSPASIZE 0x70190
--#define DSPBPOS 0x7118C
--#define DSPBSIZE 0x71190
--
--#define DSPASURF 0x7019C
--#define DSPATILEOFF 0x701A4
--
--#define DSPBSURF 0x7119C
--#define DSPBTILEOFF 0x711A4
--
--#define VGACNTRL 0x71400
--# define VGA_DISP_DISABLE (1 << 31)
--# define VGA_2X_MODE (1 << 30)
--# define VGA_PIPE_B_SELECT (1 << 29)
--
--/*
-- * Some BIOS scratch area registers. The 845 (and 830?) store the amount
-- * of video memory available to the BIOS in SWF1.
-- */
--
--#define SWF0 0x71410
--
--/*
-- * 855 scratch registers.
-- */
--#define SWF10 0x70410
--
--#define SWF30 0x72414
--
--/*
-- * Overlay registers. These are overlay registers accessed via MMIO.
-- * Those loaded via the overlay register page are defined in i830_video.c.
-- */
--#define OVADD 0x30000
--
--#define DOVSTA 0x30008
--#define OC_BUF (0x3<<20)
-+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
-
--#define OGAMC5 0x30010
--#define OGAMC4 0x30014
--#define OGAMC3 0x30018
--#define OGAMC2 0x3001c
--#define OGAMC1 0x30020
--#define OGAMC0 0x30024
--/*
-- * Palette registers
-- */
--#define PALETTE_A 0x0a000
--#define PALETTE_B 0x0a800
-+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
- #define IS_I830(dev) ((dev)->pci_device == 0x3577)
- #define IS_845G(dev) ((dev)->pci_device == 0x2562)
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index df03611..4a2de78 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -31,10 +31,6 @@
- #include "i915_drm.h"
- #include "i915_drv.h"
-
--#define USER_INT_FLAG (1<<1)
--#define VSYNC_PIPEB_FLAG (1<<5)
--#define VSYNC_PIPEA_FLAG (1<<7)
--
- #define MAX_NOPID ((u32)~0)
-
- /**
-@@ -236,40 +232,43 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- u16 temp;
- u32 pipea_stats, pipeb_stats;
-
-- pipea_stats = I915_READ(I915REG_PIPEASTAT);
-- pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
-+ pipea_stats = I915_READ(PIPEASTAT);
-+ pipeb_stats = I915_READ(PIPEBSTAT);
-
-- temp = I915_READ16(I915REG_INT_IDENTITY_R);
-+ temp = I915_READ16(IIR);
-
-- temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
-+ temp &= (I915_USER_INTERRUPT |
-+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT);
-
- DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
-
- if (temp == 0)
- return IRQ_NONE;
-
-- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
-- (void) I915_READ16(I915REG_INT_IDENTITY_R);
-+ I915_WRITE16(IIR, temp);
-+ (void) I915_READ16(IIR);
- DRM_READMEMORYBARRIER();
-
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-- if (temp & USER_INT_FLAG)
-+ if (temp & I915_USER_INTERRUPT)
- DRM_WAKEUP(&dev_priv->irq_queue);
-
-- if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
-+ if (temp & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
- int vblank_pipe = dev_priv->vblank_pipe;
-
- if ((vblank_pipe &
- (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
- == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
-- if (temp & VSYNC_PIPEA_FLAG)
-+ if (temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
- atomic_inc(&dev->vbl_received);
-- if (temp & VSYNC_PIPEB_FLAG)
-+ if (temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
- atomic_inc(&dev->vbl_received2);
-- } else if (((temp & VSYNC_PIPEA_FLAG) &&
-+ } else if (((temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
- (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
-- ((temp & VSYNC_PIPEB_FLAG) &&
-+ ((temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
- (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
- atomic_inc(&dev->vbl_received);
-
-@@ -278,12 +277,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
-
- if (dev_priv->swaps_pending > 0)
- drm_locked_tasklet(dev, i915_vblank_tasklet);
-- I915_WRITE(I915REG_PIPEASTAT,
-+ I915_WRITE(PIPEASTAT,
- pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
-- I915_VBLANK_CLEAR);
-- I915_WRITE(I915REG_PIPEBSTAT,
-+ PIPE_VBLANK_INTERRUPT_STATUS);
-+ I915_WRITE(PIPEBSTAT,
- pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
-- I915_VBLANK_CLEAR);
-+ PIPE_VBLANK_INTERRUPT_STATUS);
- }
-
- return IRQ_HANDLED;
-@@ -304,12 +303,12 @@ static int i915_emit_irq(struct drm_device * dev)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
-
- BEGIN_LP_RING(6);
-- OUT_RING(CMD_STORE_DWORD_IDX);
-- OUT_RING(20);
-+ OUT_RING(MI_STORE_DWORD_INDEX);
-+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- OUT_RING(0);
-- OUT_RING(GFX_OP_USER_INTERRUPT);
-+ OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- return dev_priv->counter;
-@@ -421,11 +420,11 @@ static void i915_enable_interrupt (struct drm_device *dev)
-
- flag = 0;
- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-- flag |= VSYNC_PIPEA_FLAG;
-+ flag |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-- flag |= VSYNC_PIPEB_FLAG;
-+ flag |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-- I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
-+ I915_WRITE16(IER, I915_USER_INTERRUPT | flag);
- }
-
- /* Set the vblank monitor pipe
-@@ -465,11 +464,11 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- return -EINVAL;
- }
-
-- flag = I915_READ(I915REG_INT_ENABLE_R);
-+ flag = I915_READ(IER);
- pipe->pipe = 0;
-- if (flag & VSYNC_PIPEA_FLAG)
-+ if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
- pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
-- if (flag & VSYNC_PIPEB_FLAG)
-+ if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
- pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
-
- return 0;
-@@ -587,9 +586,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-- I915_WRITE16(I915REG_HWSTAM, 0xfffe);
-- I915_WRITE16(I915REG_INT_MASK_R, 0x0);
-- I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
-+ I915_WRITE16(HWSTAM, 0xfffe);
-+ I915_WRITE16(IMR, 0x0);
-+ I915_WRITE16(IER, 0x0);
- }
-
- void i915_driver_irq_postinstall(struct drm_device * dev)
-@@ -614,10 +613,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
- if (!dev_priv)
- return;
-
-- I915_WRITE16(I915REG_HWSTAM, 0xffff);
-- I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
-- I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
-+ I915_WRITE16(HWSTAM, 0xffff);
-+ I915_WRITE16(IMR, 0xffff);
-+ I915_WRITE16(IER, 0x0);
-
-- temp = I915_READ16(I915REG_INT_IDENTITY_R);
-- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
-+ temp = I915_READ16(IIR);
-+ I915_WRITE16(IIR, temp);
- }
-diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
-new file mode 100644
-index 0000000..477c64e
---- /dev/null
-+++ b/drivers/gpu/drm/i915/i915_reg.h
-@@ -0,0 +1,1405 @@
-+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
-+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef _I915_REG_H_
-+#define _I915_REG_H_
-+
-+/* MCH MMIO space */
-+/** 915-945 and GM965 MCH register controlling DRAM channel access */
-+#define DCC 0x200
-+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
-+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
-+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
-+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
-+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
-+
-+/** 965 MCH register controlling DRAM channel configuration */
-+#define CHDECMISC 0x111
-+#define CHDECMISC_FLEXMEMORY (1 << 1)
-+
-+/*
-+ * The Bridge device's PCI config space has information about the
-+ * fb aperture size and the amount of pre-reserved memory.
-+ */
-+#define INTEL_GMCH_CTRL 0x52
-+#define INTEL_GMCH_ENABLED 0x4
-+#define INTEL_GMCH_MEM_MASK 0x1
-+#define INTEL_GMCH_MEM_64M 0x1
-+#define INTEL_GMCH_MEM_128M 0
-+
-+#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
-+#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-+
-+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-+
-+/* PCI config space */
-+
-+#define HPLLCC 0xc0 /* 855 only */
-+#define GC_CLOCK_CONTROL_MASK (3 << 0)
-+#define GC_CLOCK_133_200 (0 << 0)
-+#define GC_CLOCK_100_200 (1 << 0)
-+#define GC_CLOCK_100_133 (2 << 0)
-+#define GC_CLOCK_166_250 (3 << 0)
-+#define GCFGC 0xf0 /* 915+ only */
-+#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
-+#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
-+#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
-+#define GC_DISPLAY_CLOCK_MASK (7 << 4)
-+#define LBB 0xf4
-+
-+/* VGA stuff */
-+
-+#define VGA_ST01_MDA 0x3ba
-+#define VGA_ST01_CGA 0x3da
-+
-+#define VGA_MSR_WRITE 0x3c2
-+#define VGA_MSR_READ 0x3cc
-+#define VGA_MSR_MEM_EN (1<<1)
-+#define VGA_MSR_CGA_MODE (1<<0)
-+
-+#define VGA_SR_INDEX 0x3c4
-+#define VGA_SR_DATA 0x3c5
-+
-+#define VGA_AR_INDEX 0x3c0
-+#define VGA_AR_VID_EN (1<<5)
-+#define VGA_AR_DATA_WRITE 0x3c0
-+#define VGA_AR_DATA_READ 0x3c1
-+
-+#define VGA_GR_INDEX 0x3ce
-+#define VGA_GR_DATA 0x3cf
-+/* GR05 */
-+#define VGA_GR_MEM_READ_MODE_SHIFT 3
-+#define VGA_GR_MEM_READ_MODE_PLANE 1
-+/* GR06 */
-+#define VGA_GR_MEM_MODE_MASK 0xc
-+#define VGA_GR_MEM_MODE_SHIFT 2
-+#define VGA_GR_MEM_A0000_AFFFF 0
-+#define VGA_GR_MEM_A0000_BFFFF 1
-+#define VGA_GR_MEM_B0000_B7FFF 2
-+#define VGA_GR_MEM_B0000_BFFFF 3
-+
-+#define VGA_DACMASK 0x3c6
-+#define VGA_DACRX 0x3c7
-+#define VGA_DACWX 0x3c8
-+#define VGA_DACDATA 0x3c9
-+
-+#define VGA_CR_INDEX_MDA 0x3b4
-+#define VGA_CR_DATA_MDA 0x3b5
-+#define VGA_CR_INDEX_CGA 0x3d4
-+#define VGA_CR_DATA_CGA 0x3d5
-+
-+/*
-+ * Memory interface instructions used by the kernel
-+ */
-+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
-+
-+#define MI_NOOP MI_INSTR(0, 0)
-+#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
-+#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
-+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
-+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-+#define MI_FLUSH MI_INSTR(0x04, 0)
-+#define MI_READ_FLUSH (1 << 0)
-+#define MI_EXE_FLUSH (1 << 1)
-+#define MI_NO_WRITE_FLUSH (1 << 2)
-+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
-+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
-+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
-+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
-+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
-+#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
-+#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
-+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
-+#define MI_STORE_DWORD_INDEX_SHIFT 2
-+#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
-+#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
-+#define MI_BATCH_NON_SECURE (1)
-+#define MI_BATCH_NON_SECURE_I965 (1<<8)
-+#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
-+
-+/*
-+ * 3D instructions used by the kernel
-+ */
-+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
-+
-+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
-+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-+#define SC_UPDATE_SCISSOR (0x1<<1)
-+#define SC_ENABLE_MASK (0x1<<0)
-+#define SC_ENABLE (0x1<<0)
-+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
-+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-+#define SCI_YMIN_MASK (0xffff<<16)
-+#define SCI_XMIN_MASK (0xffff<<0)
-+#define SCI_YMAX_MASK (0xffff<<16)
-+#define SCI_XMAX_MASK (0xffff<<0)
-+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
-+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
-+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-+#define BLT_DEPTH_8 (0<<24)
-+#define BLT_DEPTH_16_565 (1<<24)
-+#define BLT_DEPTH_16_1555 (2<<24)
-+#define BLT_DEPTH_32 (3<<24)
-+#define BLT_ROP_GXCOPY (0xcc<<16)
-+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
-+#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
-+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-+#define ASYNC_FLIP (1<<22)
-+#define DISPLAY_PLANE_A (0<<20)
-+#define DISPLAY_PLANE_B (1<<20)
-+
-+/*
-+ * Instruction and interrupt control regs
-+ */
-+
-+#define PRB0_TAIL 0x02030
-+#define PRB0_HEAD 0x02034
-+#define PRB0_START 0x02038
-+#define PRB0_CTL 0x0203c
-+#define TAIL_ADDR 0x001FFFF8
-+#define HEAD_WRAP_COUNT 0xFFE00000
-+#define HEAD_WRAP_ONE 0x00200000
-+#define HEAD_ADDR 0x001FFFFC
-+#define RING_NR_PAGES 0x001FF000
-+#define RING_REPORT_MASK 0x00000006
-+#define RING_REPORT_64K 0x00000002
-+#define RING_REPORT_128K 0x00000004
-+#define RING_NO_REPORT 0x00000000
-+#define RING_VALID_MASK 0x00000001
-+#define RING_VALID 0x00000001
-+#define RING_INVALID 0x00000000
-+#define PRB1_TAIL 0x02040 /* 915+ only */
-+#define PRB1_HEAD 0x02044 /* 915+ only */
-+#define PRB1_START 0x02048 /* 915+ only */
-+#define PRB1_CTL 0x0204c /* 915+ only */
-+#define ACTHD_I965 0x02074
-+#define HWS_PGA 0x02080
-+#define HWS_ADDRESS_MASK 0xfffff000
-+#define HWS_START_ADDRESS_SHIFT 4
-+#define IPEIR 0x02088
-+#define NOPID 0x02094
-+#define HWSTAM 0x02098
-+#define SCPD0 0x0209c /* 915+ only */
-+#define IER 0x020a0
-+#define IIR 0x020a4
-+#define IMR 0x020a8
-+#define ISR 0x020ac
-+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
-+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
-+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
-+#define I915_HWB_OOM_INTERRUPT (1<<13)
-+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
-+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
-+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
-+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
-+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
-+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
-+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
-+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
-+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
-+#define I915_DEBUG_INTERRUPT (1<<2)
-+#define I915_USER_INTERRUPT (1<<1)
-+#define I915_ASLE_INTERRUPT (1<<0)
-+#define EIR 0x020b0
-+#define EMR 0x020b4
-+#define ESR 0x020b8
-+#define INSTPM 0x020c0
-+#define ACTHD 0x020c8
-+#define FW_BLC 0x020d8
-+#define FW_BLC_SELF 0x020e0 /* 915+ only */
-+#define MI_ARB_STATE 0x020e4 /* 915+ only */
-+#define CACHE_MODE_0 0x02120 /* 915+ only */
-+#define CM0_MASK_SHIFT 16
-+#define CM0_IZ_OPT_DISABLE (1<<6)
-+#define CM0_ZR_OPT_DISABLE (1<<5)
-+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
-+#define CM0_COLOR_EVICT_DISABLE (1<<3)
-+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
-+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
-+#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
-+
-+/*
-+ * Framebuffer compression (915+ only)
-+ */
-+
-+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
-+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
-+#define FBC_CONTROL 0x03208
-+#define FBC_CTL_EN (1<<31)
-+#define FBC_CTL_PERIODIC (1<<30)
-+#define FBC_CTL_INTERVAL_SHIFT (16)
-+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-+#define FBC_CTL_STRIDE_SHIFT (5)
-+#define FBC_CTL_FENCENO (1<<0)
-+#define FBC_COMMAND 0x0320c
-+#define FBC_CMD_COMPRESS (1<<0)
-+#define FBC_STATUS 0x03210
-+#define FBC_STAT_COMPRESSING (1<<31)
-+#define FBC_STAT_COMPRESSED (1<<30)
-+#define FBC_STAT_MODIFIED (1<<29)
-+#define FBC_STAT_CURRENT_LINE (1<<0)
-+#define FBC_CONTROL2 0x03214
-+#define FBC_CTL_FENCE_DBL (0<<4)
-+#define FBC_CTL_IDLE_IMM (0<<2)
-+#define FBC_CTL_IDLE_FULL (1<<2)
-+#define FBC_CTL_IDLE_LINE (2<<2)
-+#define FBC_CTL_IDLE_DEBUG (3<<2)
-+#define FBC_CTL_CPU_FENCE (1<<1)
-+#define FBC_CTL_PLANEA (0<<0)
-+#define FBC_CTL_PLANEB (1<<0)
-+#define FBC_FENCE_OFF 0x0321b
-+
-+#define FBC_LL_SIZE (1536)
-+
-+/*
-+ * GPIO regs
-+ */
-+#define GPIOA 0x5010
-+#define GPIOB 0x5014
-+#define GPIOC 0x5018
-+#define GPIOD 0x501c
-+#define GPIOE 0x5020
-+#define GPIOF 0x5024
-+#define GPIOG 0x5028
-+#define GPIOH 0x502c
-+# define GPIO_CLOCK_DIR_MASK (1 << 0)
-+# define GPIO_CLOCK_DIR_IN (0 << 1)
-+# define GPIO_CLOCK_DIR_OUT (1 << 1)
-+# define GPIO_CLOCK_VAL_MASK (1 << 2)
-+# define GPIO_CLOCK_VAL_OUT (1 << 3)
-+# define GPIO_CLOCK_VAL_IN (1 << 4)
-+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
-+# define GPIO_DATA_DIR_MASK (1 << 8)
-+# define GPIO_DATA_DIR_IN (0 << 9)
-+# define GPIO_DATA_DIR_OUT (1 << 9)
-+# define GPIO_DATA_VAL_MASK (1 << 10)
-+# define GPIO_DATA_VAL_OUT (1 << 11)
-+# define GPIO_DATA_VAL_IN (1 << 12)
-+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-+
-+/*
-+ * Clock control & power management
-+ */
-+
-+#define VGA0 0x6000
-+#define VGA1 0x6004
-+#define VGA_PD 0x6010
-+#define VGA0_PD_P2_DIV_4 (1 << 7)
-+#define VGA0_PD_P1_DIV_2 (1 << 5)
-+#define VGA0_PD_P1_SHIFT 0
-+#define VGA0_PD_P1_MASK (0x1f << 0)
-+#define VGA1_PD_P2_DIV_4 (1 << 15)
-+#define VGA1_PD_P1_DIV_2 (1 << 13)
-+#define VGA1_PD_P1_SHIFT 8
-+#define VGA1_PD_P1_MASK (0x1f << 8)
-+#define DPLL_A 0x06014
-+#define DPLL_B 0x06018
-+#define DPLL_VCO_ENABLE (1 << 31)
-+#define DPLL_DVO_HIGH_SPEED (1 << 30)
-+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
-+#define DPLL_VGA_MODE_DIS (1 << 28)
-+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
-+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
-+#define DPLL_MODE_MASK (3 << 26)
-+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
-+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
-+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
-+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
-+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-+
-+#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
-+#define I915_CRC_ERROR_ENABLE (1UL<<29)
-+#define I915_CRC_DONE_ENABLE (1UL<<28)
-+#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
-+#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-+#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-+#define I915_DPST_EVENT_ENABLE (1UL<<23)
-+#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-+#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-+#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-+#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-+#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
-+#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-+#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-+#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
-+#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
-+#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-+#define I915_DPST_EVENT_STATUS (1UL<<7)
-+#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-+#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-+#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-+#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-+#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
-+#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
-+
-+#define SRX_INDEX 0x3c4
-+#define SRX_DATA 0x3c5
-+#define SR01 1
-+#define SR01_SCREEN_OFF (1<<5)
-+
-+#define PPCR 0x61204
-+#define PPCR_ON (1<<0)
-+
-+#define DVOB 0x61140
-+#define DVOB_ON (1<<31)
-+#define DVOC 0x61160
-+#define DVOC_ON (1<<31)
-+#define LVDS 0x61180
-+#define LVDS_ON (1<<31)
-+
-+#define ADPA 0x61100
-+#define ADPA_DPMS_MASK (~(3<<10))
-+#define ADPA_DPMS_ON (0<<10)
-+#define ADPA_DPMS_SUSPEND (1<<10)
-+#define ADPA_DPMS_STANDBY (2<<10)
-+#define ADPA_DPMS_OFF (3<<10)
-+
-+#define RING_TAIL 0x00
-+#define TAIL_ADDR 0x001FFFF8
-+#define RING_HEAD 0x04
-+#define HEAD_WRAP_COUNT 0xFFE00000
-+#define HEAD_WRAP_ONE 0x00200000
-+#define HEAD_ADDR 0x001FFFFC
-+#define RING_START 0x08
-+#define START_ADDR 0xFFFFF000
-+#define RING_LEN 0x0C
-+#define RING_NR_PAGES 0x001FF000
-+#define RING_REPORT_MASK 0x00000006
-+#define RING_REPORT_64K 0x00000002
-+#define RING_REPORT_128K 0x00000004
-+#define RING_NO_REPORT 0x00000000
-+#define RING_VALID_MASK 0x00000001
-+#define RING_VALID 0x00000001
-+#define RING_INVALID 0x00000000
-+
-+/* Scratch pad debug 0 reg:
-+ */
-+#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
-+/*
-+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
-+ * this field (only one bit may be set).
-+ */
-+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
-+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-+/* i830, required in DVO non-gang */
-+#define PLL_P2_DIVIDE_BY_4 (1 << 23)
-+#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
-+#define PLL_REF_INPUT_DREFCLK (0 << 13)
-+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
-+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
-+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
-+#define PLL_REF_INPUT_MASK (3 << 13)
-+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
-+/*
-+ * Parallel to Serial Load Pulse phase selection.
-+ * Selects the phase for the 10X DPLL clock for the PCIe
-+ * digital display port. The range is 4 to 13; 10 or more
-+ * is just a flip delay. The default is 6
-+ */
-+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
-+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
-+/*
-+ * SDVO multiplier for 945G/GM. Not used on 965.
-+ */
-+#define SDVO_MULTIPLIER_MASK 0x000000ff
-+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
-+#define SDVO_MULTIPLIER_SHIFT_VGA 0
-+#define DPLL_A_MD 0x0601c /* 965+ only */
-+/*
-+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
-+ *
-+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
-+ */
-+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
-+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
-+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
-+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
-+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
-+/*
-+ * SDVO/UDI pixel multiplier.
-+ *
-+ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
-+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
-+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
-+ * dummy bytes in the datastream at an increased clock rate, with both sides of
-+ * the link knowing how many bytes are fill.
-+ *
-+ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
-+ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
-+ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
-+ * through an SDVO command.
-+ *
-+ * This register field has values of multiplication factor minus 1, with
-+ * a maximum multiplier of 5 for SDVO.
-+ */
-+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
-+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
-+/*
-+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
-+ * This best be set to the default value (3) or the CRT won't work. No,
-+ * I don't entirely understand what this does...
-+ */
-+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
-+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-+#define DPLL_B_MD 0x06020 /* 965+ only */
-+#define FPA0 0x06040
-+#define FPA1 0x06044
-+#define FPB0 0x06048
-+#define FPB1 0x0604c
-+#define FP_N_DIV_MASK 0x003f0000
-+#define FP_N_DIV_SHIFT 16
-+#define FP_M1_DIV_MASK 0x00003f00
-+#define FP_M1_DIV_SHIFT 8
-+#define FP_M2_DIV_MASK 0x0000003f
-+#define FP_M2_DIV_SHIFT 0
-+#define DPLL_TEST 0x606c
-+#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
-+#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
-+#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
-+#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
-+#define DPLLB_TEST_N_BYPASS (1 << 19)
-+#define DPLLB_TEST_M_BYPASS (1 << 18)
-+#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
-+#define DPLLA_TEST_N_BYPASS (1 << 3)
-+#define DPLLA_TEST_M_BYPASS (1 << 2)
-+#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
-+#define D_STATE 0x6104
-+#define CG_2D_DIS 0x6200
-+#define CG_3D_DIS 0x6204
-+
-+/*
-+ * Palette regs
-+ */
-+
-+#define PALETTE_A 0x0a000
-+#define PALETTE_B 0x0a800
-+
-+/*
-+ * Overlay regs
-+ */
-+
-+#define OVADD 0x30000
-+#define DOVSTA 0x30008
-+#define OC_BUF (0x3<<20)
-+#define OGAMC5 0x30010
-+#define OGAMC4 0x30014
-+#define OGAMC3 0x30018
-+#define OGAMC2 0x3001c
-+#define OGAMC1 0x30020
-+#define OGAMC0 0x30024
-+
-+/*
-+ * Display engine regs
-+ */
-+
-+/* Pipe A timing regs */
-+#define HTOTAL_A 0x60000
-+#define HBLANK_A 0x60004
-+#define HSYNC_A 0x60008
-+#define VTOTAL_A 0x6000c
-+#define VBLANK_A 0x60010
-+#define VSYNC_A 0x60014
-+#define PIPEASRC 0x6001c
-+#define BCLRPAT_A 0x60020
-+
-+/* Pipe B timing regs */
-+#define HTOTAL_B 0x61000
-+#define HBLANK_B 0x61004
-+#define HSYNC_B 0x61008
-+#define VTOTAL_B 0x6100c
-+#define VBLANK_B 0x61010
-+#define VSYNC_B 0x61014
-+#define PIPEBSRC 0x6101c
-+#define BCLRPAT_B 0x61020
-+
-+/* VGA port control */
-+#define ADPA 0x61100
-+#define ADPA_DAC_ENABLE (1<<31)
-+#define ADPA_DAC_DISABLE 0
-+#define ADPA_PIPE_SELECT_MASK (1<<30)
-+#define ADPA_PIPE_A_SELECT 0
-+#define ADPA_PIPE_B_SELECT (1<<30)
-+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
-+#define ADPA_SETS_HVPOLARITY 0
-+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
-+#define ADPA_VSYNC_CNTL_ENABLE 0
-+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
-+#define ADPA_HSYNC_CNTL_ENABLE 0
-+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
-+#define ADPA_VSYNC_ACTIVE_LOW 0
-+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
-+#define ADPA_HSYNC_ACTIVE_LOW 0
-+#define ADPA_DPMS_MASK (~(3<<10))
-+#define ADPA_DPMS_ON (0<<10)
-+#define ADPA_DPMS_SUSPEND (1<<10)
-+#define ADPA_DPMS_STANDBY (2<<10)
-+#define ADPA_DPMS_OFF (3<<10)
-+
-+/* Hotplug control (945+ only) */
-+#define PORT_HOTPLUG_EN 0x61110
-+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
-+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
-+#define TV_HOTPLUG_INT_EN (1 << 18)
-+#define CRT_HOTPLUG_INT_EN (1 << 9)
-+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
-+
-+#define PORT_HOTPLUG_STAT 0x61114
-+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
-+#define TV_HOTPLUG_INT_STATUS (1 << 10)
-+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
-+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
-+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
-+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-+#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
-+#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
-+
-+/* SDVO port control */
-+#define SDVOB 0x61140
-+#define SDVOC 0x61160
-+#define SDVO_ENABLE (1 << 31)
-+#define SDVO_PIPE_B_SELECT (1 << 30)
-+#define SDVO_STALL_SELECT (1 << 29)
-+#define SDVO_INTERRUPT_ENABLE (1 << 26)
-+/**
-+ * 915G/GM SDVO pixel multiplier.
-+ *
-+ * Programmed value is multiplier - 1, up to 5x.
-+ *
-+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
-+ */
-+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
-+#define SDVO_PORT_MULTIPLY_SHIFT 23
-+#define SDVO_PHASE_SELECT_MASK (15 << 19)
-+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
-+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
-+#define SDVOC_GANG_MODE (1 << 16)
-+#define SDVO_BORDER_ENABLE (1 << 7)
-+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
-+#define SDVO_DETECTED (1 << 2)
-+/* Bits to be preserved when writing */
-+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
-+#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
-+
-+/* DVO port control */
-+#define DVOA 0x61120
-+#define DVOB 0x61140
-+#define DVOC 0x61160
-+#define DVO_ENABLE (1 << 31)
-+#define DVO_PIPE_B_SELECT (1 << 30)
-+#define DVO_PIPE_STALL_UNUSED (0 << 28)
-+#define DVO_PIPE_STALL (1 << 28)
-+#define DVO_PIPE_STALL_TV (2 << 28)
-+#define DVO_PIPE_STALL_MASK (3 << 28)
-+#define DVO_USE_VGA_SYNC (1 << 15)
-+#define DVO_DATA_ORDER_I740 (0 << 14)
-+#define DVO_DATA_ORDER_FP (1 << 14)
-+#define DVO_VSYNC_DISABLE (1 << 11)
-+#define DVO_HSYNC_DISABLE (1 << 10)
-+#define DVO_VSYNC_TRISTATE (1 << 9)
-+#define DVO_HSYNC_TRISTATE (1 << 8)
-+#define DVO_BORDER_ENABLE (1 << 7)
-+#define DVO_DATA_ORDER_GBRG (1 << 6)
-+#define DVO_DATA_ORDER_RGGB (0 << 6)
-+#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6)
-+#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6)
-+#define DVO_VSYNC_ACTIVE_HIGH (1 << 4)
-+#define DVO_HSYNC_ACTIVE_HIGH (1 << 3)
-+#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
-+#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
-+#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
-+#define DVO_PRESERVE_MASK (0x7<<24)
-+#define DVOA_SRCDIM 0x61124
-+#define DVOB_SRCDIM 0x61144
-+#define DVOC_SRCDIM 0x61164
-+#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
-+#define DVO_SRCDIM_VERTICAL_SHIFT 0
-+
-+/* LVDS port control */
-+#define LVDS 0x61180
-+/*
-+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
-+ * the DPLL semantics change when the LVDS is assigned to that pipe.
-+ */
-+#define LVDS_PORT_EN (1 << 31)
-+/* Selects pipe B for LVDS data. Must be set on pre-965. */
-+#define LVDS_PIPEB_SELECT (1 << 30)
-+/*
-+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
-+ * pixel.
-+ */
-+#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
-+#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
-+#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
-+/*
-+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
-+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
-+ * on.
-+ */
-+#define LVDS_A3_POWER_MASK (3 << 6)
-+#define LVDS_A3_POWER_DOWN (0 << 6)
-+#define LVDS_A3_POWER_UP (3 << 6)
-+/*
-+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
-+ * is set.
-+ */
-+#define LVDS_CLKB_POWER_MASK (3 << 4)
-+#define LVDS_CLKB_POWER_DOWN (0 << 4)
-+#define LVDS_CLKB_POWER_UP (3 << 4)
-+/*
-+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
-+ * setting for whether we are in dual-channel mode. The B3 pair will
-+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
-+ */
-+#define LVDS_B0B3_POWER_MASK (3 << 2)
-+#define LVDS_B0B3_POWER_DOWN (0 << 2)
-+#define LVDS_B0B3_POWER_UP (3 << 2)
-+
-+/* Panel power sequencing */
-+#define PP_STATUS 0x61200
-+#define PP_ON (1 << 31)
-+/*
-+ * Indicates that all dependencies of the panel are on:
-+ *
-+ * - PLL enabled
-+ * - pipe enabled
-+ * - LVDS/DVOB/DVOC on
-+ */
-+#define PP_READY (1 << 30)
-+#define PP_SEQUENCE_NONE (0 << 28)
-+#define PP_SEQUENCE_ON (1 << 28)
-+#define PP_SEQUENCE_OFF (2 << 28)
-+#define PP_SEQUENCE_MASK 0x30000000
-+#define PP_CONTROL 0x61204
-+#define POWER_TARGET_ON (1 << 0)
-+#define PP_ON_DELAYS 0x61208
-+#define PP_OFF_DELAYS 0x6120c
-+#define PP_DIVISOR 0x61210
-+
-+/* Panel fitting */
-+#define PFIT_CONTROL 0x61230
-+#define PFIT_ENABLE (1 << 31)
-+#define PFIT_PIPE_MASK (3 << 29)
-+#define PFIT_PIPE_SHIFT 29
-+#define VERT_INTERP_DISABLE (0 << 10)
-+#define VERT_INTERP_BILINEAR (1 << 10)
-+#define VERT_INTERP_MASK (3 << 10)
-+#define VERT_AUTO_SCALE (1 << 9)
-+#define HORIZ_INTERP_DISABLE (0 << 6)
-+#define HORIZ_INTERP_BILINEAR (1 << 6)
-+#define HORIZ_INTERP_MASK (3 << 6)
-+#define HORIZ_AUTO_SCALE (1 << 5)
-+#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
-+#define PFIT_PGM_RATIOS 0x61234
-+#define PFIT_VERT_SCALE_MASK 0xfff00000
-+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
-+#define PFIT_AUTO_RATIOS 0x61238
-+
-+/* Backlight control */
-+#define BLC_PWM_CTL 0x61254
-+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
-+#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-+/*
-+ * This is the most significant 15 bits of the number of backlight cycles in a
-+ * complete cycle of the modulated backlight control.
-+ *
-+ * The actual value is this field multiplied by two.
-+ */
-+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-+#define BLM_LEGACY_MODE (1 << 16)
-+/*
-+ * This is the number of cycles out of the backlight modulation cycle for which
-+ * the backlight is on.
-+ *
-+ * This field must be no greater than the number of cycles in the complete
-+ * backlight modulation cycle.
-+ */
-+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
-+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
-+
-+/* TV port control */
-+#define TV_CTL 0x68000
-+/** Enables the TV encoder */
-+# define TV_ENC_ENABLE (1 << 31)
-+/** Sources the TV encoder input from pipe B instead of A. */
-+# define TV_ENC_PIPEB_SELECT (1 << 30)
-+/** Outputs composite video (DAC A only) */
-+# define TV_ENC_OUTPUT_COMPOSITE (0 << 28)
-+/** Outputs SVideo video (DAC B/C) */
-+# define TV_ENC_OUTPUT_SVIDEO (1 << 28)
-+/** Outputs Component video (DAC A/B/C) */
-+# define TV_ENC_OUTPUT_COMPONENT (2 << 28)
-+/** Outputs Composite and SVideo (DAC A/B/C) */
-+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28)
-+# define TV_TRILEVEL_SYNC (1 << 21)
-+/** Enables slow sync generation (945GM only) */
-+# define TV_SLOW_SYNC (1 << 20)
-+/** Selects 4x oversampling for 480i and 576p */
-+# define TV_OVERSAMPLE_4X (0 << 18)
-+/** Selects 2x oversampling for 720p and 1080i */
-+# define TV_OVERSAMPLE_2X (1 << 18)
-+/** Selects no oversampling for 1080p */
-+# define TV_OVERSAMPLE_NONE (2 << 18)
-+/** Selects 8x oversampling */
-+# define TV_OVERSAMPLE_8X (3 << 18)
-+/** Selects progressive mode rather than interlaced */
-+# define TV_PROGRESSIVE (1 << 17)
-+/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */
-+# define TV_PAL_BURST (1 << 16)
-+/** Field for setting delay of Y compared to C */
-+# define TV_YC_SKEW_MASK (7 << 12)
-+/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
-+# define TV_ENC_SDP_FIX (1 << 11)
-+/**
-+ * Enables a fix for the 915GM only.
-+ *
-+ * Not sure what it does.
-+ */
-+# define TV_ENC_C0_FIX (1 << 10)
-+/** Bits that must be preserved by software */
-+# define TV_CTL_SAVE ((3 << 8) | (3 << 6))
-+# define TV_FUSE_STATE_MASK (3 << 4)
-+/** Read-only state that reports all features enabled */
-+# define TV_FUSE_STATE_ENABLED (0 << 4)
-+/** Read-only state that reports that Macrovision is disabled in hardware*/
-+# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
-+/** Read-only state that reports that TV-out is disabled in hardware. */
-+# define TV_FUSE_STATE_DISABLED (2 << 4)
-+/** Normal operation */
-+# define TV_TEST_MODE_NORMAL (0 << 0)
-+/** Encoder test pattern 1 - combo pattern */
-+# define TV_TEST_MODE_PATTERN_1 (1 << 0)
-+/** Encoder test pattern 2 - full screen vertical 75% color bars */
-+# define TV_TEST_MODE_PATTERN_2 (2 << 0)
-+/** Encoder test pattern 3 - full screen horizontal 75% color bars */
-+# define TV_TEST_MODE_PATTERN_3 (3 << 0)
-+/** Encoder test pattern 4 - random noise */
-+# define TV_TEST_MODE_PATTERN_4 (4 << 0)
-+/** Encoder test pattern 5 - linear color ramps */
-+# define TV_TEST_MODE_PATTERN_5 (5 << 0)
-+/**
-+ * This test mode forces the DACs to 50% of full output.
-+ *
-+ * This is used for load detection in combination with TVDAC_SENSE_MASK
-+ */
-+# define TV_TEST_MODE_MONITOR_DETECT (7 << 0)
-+# define TV_TEST_MODE_MASK (7 << 0)
-+
-+#define TV_DAC 0x68004
-+/**
-+ * Reports that DAC state change logic has reported change (RO).
-+ *
-+ * This gets cleared when TV_DAC_STATE_EN is cleared
-+*/
-+# define TVDAC_STATE_CHG (1 << 31)
-+# define TVDAC_SENSE_MASK (7 << 28)
-+/** Reports that DAC A voltage is above the detect threshold */
-+# define TVDAC_A_SENSE (1 << 30)
-+/** Reports that DAC B voltage is above the detect threshold */
-+# define TVDAC_B_SENSE (1 << 29)
-+/** Reports that DAC C voltage is above the detect threshold */
-+# define TVDAC_C_SENSE (1 << 28)
-+/**
-+ * Enables DAC state detection logic, for load-based TV detection.
-+ *
-+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
-+ * to off, for load detection to work.
-+ */
-+# define TVDAC_STATE_CHG_EN (1 << 27)
-+/** Sets the DAC A sense value to high */
-+# define TVDAC_A_SENSE_CTL (1 << 26)
-+/** Sets the DAC B sense value to high */
-+# define TVDAC_B_SENSE_CTL (1 << 25)
-+/** Sets the DAC C sense value to high */
-+# define TVDAC_C_SENSE_CTL (1 << 24)
-+/** Overrides the ENC_ENABLE and DAC voltage levels */
-+# define DAC_CTL_OVERRIDE (1 << 7)
-+/** Sets the slew rate. Must be preserved in software */
-+# define ENC_TVDAC_SLEW_FAST (1 << 6)
-+# define DAC_A_1_3_V (0 << 4)
-+# define DAC_A_1_1_V (1 << 4)
-+# define DAC_A_0_7_V (2 << 4)
-+# define DAC_A_OFF (3 << 4)
-+# define DAC_B_1_3_V (0 << 2)
-+# define DAC_B_1_1_V (1 << 2)
-+# define DAC_B_0_7_V (2 << 2)
-+# define DAC_B_OFF (3 << 2)
-+# define DAC_C_1_3_V (0 << 0)
-+# define DAC_C_1_1_V (1 << 0)
-+# define DAC_C_0_7_V (2 << 0)
-+# define DAC_C_OFF (3 << 0)
-+
-+/**
-+ * CSC coefficients are stored in a floating point format with 9 bits of
-+ * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n,
-+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
-+ * -1 (0x3) being the only legal negative value.
-+ */
-+#define TV_CSC_Y 0x68010
-+# define TV_RY_MASK 0x07ff0000
-+# define TV_RY_SHIFT 16
-+# define TV_GY_MASK 0x00000fff
-+# define TV_GY_SHIFT 0
-+
-+#define TV_CSC_Y2 0x68014
-+# define TV_BY_MASK 0x07ff0000
-+# define TV_BY_SHIFT 16
-+/**
-+ * Y attenuation for component video.
-+ *
-+ * Stored in 1.9 fixed point.
-+ */
-+# define TV_AY_MASK 0x000003ff
-+# define TV_AY_SHIFT 0
-+
-+#define TV_CSC_U 0x68018
-+# define TV_RU_MASK 0x07ff0000
-+# define TV_RU_SHIFT 16
-+# define TV_GU_MASK 0x000007ff
-+# define TV_GU_SHIFT 0
-+
-+#define TV_CSC_U2 0x6801c
-+# define TV_BU_MASK 0x07ff0000
-+# define TV_BU_SHIFT 16
-+/**
-+ * U attenuation for component video.
-+ *
-+ * Stored in 1.9 fixed point.
-+ */
-+# define TV_AU_MASK 0x000003ff
-+# define TV_AU_SHIFT 0
-+
-+#define TV_CSC_V 0x68020
-+# define TV_RV_MASK 0x0fff0000
-+# define TV_RV_SHIFT 16
-+# define TV_GV_MASK 0x000007ff
-+# define TV_GV_SHIFT 0
-+
-+#define TV_CSC_V2 0x68024
-+# define TV_BV_MASK 0x07ff0000
-+# define TV_BV_SHIFT 16
-+/**
-+ * V attenuation for component video.
-+ *
-+ * Stored in 1.9 fixed point.
-+ */
-+# define TV_AV_MASK 0x000007ff
-+# define TV_AV_SHIFT 0
-+
-+#define TV_CLR_KNOBS 0x68028
-+/** 2s-complement brightness adjustment */
-+# define TV_BRIGHTNESS_MASK 0xff000000
-+# define TV_BRIGHTNESS_SHIFT 24
-+/** Contrast adjustment, as a 2.6 unsigned floating point number */
-+# define TV_CONTRAST_MASK 0x00ff0000
-+# define TV_CONTRAST_SHIFT 16
-+/** Saturation adjustment, as a 2.6 unsigned floating point number */
-+# define TV_SATURATION_MASK 0x0000ff00
-+# define TV_SATURATION_SHIFT 8
-+/** Hue adjustment, as an integer phase angle in degrees */
-+# define TV_HUE_MASK 0x000000ff
-+# define TV_HUE_SHIFT 0
-+
-+#define TV_CLR_LEVEL 0x6802c
-+/** Controls the DAC level for black */
-+# define TV_BLACK_LEVEL_MASK 0x01ff0000
-+# define TV_BLACK_LEVEL_SHIFT 16
-+/** Controls the DAC level for blanking */
-+# define TV_BLANK_LEVEL_MASK 0x000001ff
-+# define TV_BLANK_LEVEL_SHIFT 0
-+
-+#define TV_H_CTL_1 0x68030
-+/** Number of pixels in the hsync. */
-+# define TV_HSYNC_END_MASK 0x1fff0000
-+# define TV_HSYNC_END_SHIFT 16
-+/** Total number of pixels minus one in the line (display and blanking). */
-+# define TV_HTOTAL_MASK 0x00001fff
-+# define TV_HTOTAL_SHIFT 0
-+
-+#define TV_H_CTL_2 0x68034
-+/** Enables the colorburst (needed for non-component color) */
-+# define TV_BURST_ENA (1 << 31)
-+/** Offset of the colorburst from the start of hsync, in pixels minus one. */
-+# define TV_HBURST_START_SHIFT 16
-+# define TV_HBURST_START_MASK 0x1fff0000
-+/** Length of the colorburst */
-+# define TV_HBURST_LEN_SHIFT 0
-+# define TV_HBURST_LEN_MASK 0x0001fff
-+
-+#define TV_H_CTL_3 0x68038
-+/** End of hblank, measured in pixels minus one from start of hsync */
-+# define TV_HBLANK_END_SHIFT 16
-+# define TV_HBLANK_END_MASK 0x1fff0000
-+/** Start of hblank, measured in pixels minus one from start of hsync */
-+# define TV_HBLANK_START_SHIFT 0
-+# define TV_HBLANK_START_MASK 0x0001fff
-+
-+#define TV_V_CTL_1 0x6803c
-+/** XXX */
-+# define TV_NBR_END_SHIFT 16
-+# define TV_NBR_END_MASK 0x07ff0000
-+/** XXX */
-+# define TV_VI_END_F1_SHIFT 8
-+# define TV_VI_END_F1_MASK 0x00003f00
-+/** XXX */
-+# define TV_VI_END_F2_SHIFT 0
-+# define TV_VI_END_F2_MASK 0x0000003f
-+
-+#define TV_V_CTL_2 0x68040
-+/** Length of vsync, in half lines */
-+# define TV_VSYNC_LEN_MASK 0x07ff0000
-+# define TV_VSYNC_LEN_SHIFT 16
-+/** Offset of the start of vsync in field 1, measured in one less than the
-+ * number of half lines.
-+ */
-+# define TV_VSYNC_START_F1_MASK 0x00007f00
-+# define TV_VSYNC_START_F1_SHIFT 8
-+/**
-+ * Offset of the start of vsync in field 2, measured in one less than the
-+ * number of half lines.
-+ */
-+# define TV_VSYNC_START_F2_MASK 0x0000007f
-+# define TV_VSYNC_START_F2_SHIFT 0
-+
-+#define TV_V_CTL_3 0x68044
-+/** Enables generation of the equalization signal */
-+# define TV_EQUAL_ENA (1 << 31)
-+/** Length of vsync, in half lines */
-+# define TV_VEQ_LEN_MASK 0x007f0000
-+# define TV_VEQ_LEN_SHIFT 16
-+/** Offset of the start of equalization in field 1, measured in one less than
-+ * the number of half lines.
-+ */
-+# define TV_VEQ_START_F1_MASK 0x0007f00
-+# define TV_VEQ_START_F1_SHIFT 8
-+/**
-+ * Offset of the start of equalization in field 2, measured in one less than
-+ * the number of half lines.
-+ */
-+# define TV_VEQ_START_F2_MASK 0x000007f
-+# define TV_VEQ_START_F2_SHIFT 0
-+
-+#define TV_V_CTL_4 0x68048
-+/**
-+ * Offset to start of vertical colorburst, measured in one less than the
-+ * number of lines from vertical start.
-+ */
-+# define TV_VBURST_START_F1_MASK 0x003f0000
-+# define TV_VBURST_START_F1_SHIFT 16
-+/**
-+ * Offset to the end of vertical colorburst, measured in one less than the
-+ * number of lines from the start of NBR.
-+ */
-+# define TV_VBURST_END_F1_MASK 0x000000ff
-+# define TV_VBURST_END_F1_SHIFT 0
-+
-+#define TV_V_CTL_5 0x6804c
-+/**
-+ * Offset to start of vertical colorburst, measured in one less than the
-+ * number of lines from vertical start.
-+ */
-+# define TV_VBURST_START_F2_MASK 0x003f0000
-+# define TV_VBURST_START_F2_SHIFT 16
-+/**
-+ * Offset to the end of vertical colorburst, measured in one less than the
-+ * number of lines from the start of NBR.
-+ */
-+# define TV_VBURST_END_F2_MASK 0x000000ff
-+# define TV_VBURST_END_F2_SHIFT 0
-+
-+#define TV_V_CTL_6 0x68050
-+/**
-+ * Offset to start of vertical colorburst, measured in one less than the
-+ * number of lines from vertical start.
-+ */
-+# define TV_VBURST_START_F3_MASK 0x003f0000
-+# define TV_VBURST_START_F3_SHIFT 16
-+/**
-+ * Offset to the end of vertical colorburst, measured in one less than the
-+ * number of lines from the start of NBR.
-+ */
-+# define TV_VBURST_END_F3_MASK 0x000000ff
-+# define TV_VBURST_END_F3_SHIFT 0
-+
-+#define TV_V_CTL_7 0x68054
-+/**
-+ * Offset to start of vertical colorburst, measured in one less than the
-+ * number of lines from vertical start.
-+ */
-+# define TV_VBURST_START_F4_MASK 0x003f0000
-+# define TV_VBURST_START_F4_SHIFT 16
-+/**
-+ * Offset to the end of vertical colorburst, measured in one less than the
-+ * number of lines from the start of NBR.
-+ */
-+# define TV_VBURST_END_F4_MASK 0x000000ff
-+# define TV_VBURST_END_F4_SHIFT 0
-+
-+#define TV_SC_CTL_1 0x68060
-+/** Turns on the first subcarrier phase generation DDA */
-+# define TV_SC_DDA1_EN (1 << 31)
-+/** Turns on the first subcarrier phase generation DDA */
-+# define TV_SC_DDA2_EN (1 << 30)
-+/** Turns on the first subcarrier phase generation DDA */
-+# define TV_SC_DDA3_EN (1 << 29)
-+/** Sets the subcarrier DDA to reset frequency every other field */
-+# define TV_SC_RESET_EVERY_2 (0 << 24)
-+/** Sets the subcarrier DDA to reset frequency every fourth field */
-+# define TV_SC_RESET_EVERY_4 (1 << 24)
-+/** Sets the subcarrier DDA to reset frequency every eighth field */
-+# define TV_SC_RESET_EVERY_8 (2 << 24)
-+/** Sets the subcarrier DDA to never reset the frequency */
-+# define TV_SC_RESET_NEVER (3 << 24)
-+/** Sets the peak amplitude of the colorburst.*/
-+# define TV_BURST_LEVEL_MASK 0x00ff0000
-+# define TV_BURST_LEVEL_SHIFT 16
-+/** Sets the increment of the first subcarrier phase generation DDA */
-+# define TV_SCDDA1_INC_MASK 0x00000fff
-+# define TV_SCDDA1_INC_SHIFT 0
-+
-+#define TV_SC_CTL_2 0x68064
-+/** Sets the rollover for the second subcarrier phase generation DDA */
-+# define TV_SCDDA2_SIZE_MASK 0x7fff0000
-+# define TV_SCDDA2_SIZE_SHIFT 16
-+/** Sets the increent of the second subcarrier phase generation DDA */
-+# define TV_SCDDA2_INC_MASK 0x00007fff
-+# define TV_SCDDA2_INC_SHIFT 0
-+
-+#define TV_SC_CTL_3 0x68068
-+/** Sets the rollover for the third subcarrier phase generation DDA */
-+# define TV_SCDDA3_SIZE_MASK 0x7fff0000
-+# define TV_SCDDA3_SIZE_SHIFT 16
-+/** Sets the increent of the third subcarrier phase generation DDA */
-+# define TV_SCDDA3_INC_MASK 0x00007fff
-+# define TV_SCDDA3_INC_SHIFT 0
-+
-+#define TV_WIN_POS 0x68070
-+/** X coordinate of the display from the start of horizontal active */
-+# define TV_XPOS_MASK 0x1fff0000
-+# define TV_XPOS_SHIFT 16
-+/** Y coordinate of the display from the start of vertical active (NBR) */
-+# define TV_YPOS_MASK 0x00000fff
-+# define TV_YPOS_SHIFT 0
-+
-+#define TV_WIN_SIZE 0x68074
-+/** Horizontal size of the display window, measured in pixels*/
-+# define TV_XSIZE_MASK 0x1fff0000
-+# define TV_XSIZE_SHIFT 16
-+/**
-+ * Vertical size of the display window, measured in pixels.
-+ *
-+ * Must be even for interlaced modes.
-+ */
-+# define TV_YSIZE_MASK 0x00000fff
-+# define TV_YSIZE_SHIFT 0
-+
-+#define TV_FILTER_CTL_1 0x68080
-+/**
-+ * Enables automatic scaling calculation.
-+ *
-+ * If set, the rest of the registers are ignored, and the calculated values can
-+ * be read back from the register.
-+ */
-+# define TV_AUTO_SCALE (1 << 31)
-+/**
-+ * Disables the vertical filter.
-+ *
-+ * This is required on modes more than 1024 pixels wide */
-+# define TV_V_FILTER_BYPASS (1 << 29)
-+/** Enables adaptive vertical filtering */
-+# define TV_VADAPT (1 << 28)
-+# define TV_VADAPT_MODE_MASK (3 << 26)
-+/** Selects the least adaptive vertical filtering mode */
-+# define TV_VADAPT_MODE_LEAST (0 << 26)
-+/** Selects the moderately adaptive vertical filtering mode */
-+# define TV_VADAPT_MODE_MODERATE (1 << 26)
-+/** Selects the most adaptive vertical filtering mode */
-+# define TV_VADAPT_MODE_MOST (3 << 26)
-+/**
-+ * Sets the horizontal scaling factor.
-+ *
-+ * This should be the fractional part of the horizontal scaling factor divided
-+ * by the oversampling rate. TV_HSCALE should be less than 1, and set to:
-+ *
-+ * (src width - 1) / ((oversample * dest width) - 1)
-+ */
-+# define TV_HSCALE_FRAC_MASK 0x00003fff
-+# define TV_HSCALE_FRAC_SHIFT 0
-+
-+#define TV_FILTER_CTL_2 0x68084
-+/**
-+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
-+ *
-+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
-+ */
-+# define TV_VSCALE_INT_MASK 0x00038000
-+# define TV_VSCALE_INT_SHIFT 15
-+/**
-+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
-+ *
-+ * \sa TV_VSCALE_INT_MASK
-+ */
-+# define TV_VSCALE_FRAC_MASK 0x00007fff
-+# define TV_VSCALE_FRAC_SHIFT 0
-+
-+#define TV_FILTER_CTL_3 0x68088
-+/**
-+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
-+ *
-+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
-+ *
-+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
-+ */
-+# define TV_VSCALE_IP_INT_MASK 0x00038000
-+# define TV_VSCALE_IP_INT_SHIFT 15
-+/**
-+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
-+ *
-+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
-+ *
-+ * \sa TV_VSCALE_IP_INT_MASK
-+ */
-+# define TV_VSCALE_IP_FRAC_MASK 0x00007fff
-+# define TV_VSCALE_IP_FRAC_SHIFT 0
-+
-+#define TV_CC_CONTROL 0x68090
-+# define TV_CC_ENABLE (1 << 31)
-+/**
-+ * Specifies which field to send the CC data in.
-+ *
-+ * CC data is usually sent in field 0.
-+ */
-+# define TV_CC_FID_MASK (1 << 27)
-+# define TV_CC_FID_SHIFT 27
-+/** Sets the horizontal position of the CC data. Usually 135. */
-+# define TV_CC_HOFF_MASK 0x03ff0000
-+# define TV_CC_HOFF_SHIFT 16
-+/** Sets the vertical position of the CC data. Usually 21 */
-+# define TV_CC_LINE_MASK 0x0000003f
-+# define TV_CC_LINE_SHIFT 0
-+
-+#define TV_CC_DATA 0x68094
-+# define TV_CC_RDY (1 << 31)
-+/** Second word of CC data to be transmitted. */
-+# define TV_CC_DATA_2_MASK 0x007f0000
-+# define TV_CC_DATA_2_SHIFT 16
-+/** First word of CC data to be transmitted. */
-+# define TV_CC_DATA_1_MASK 0x0000007f
-+# define TV_CC_DATA_1_SHIFT 0
-+
-+#define TV_H_LUMA_0 0x68100
-+#define TV_H_LUMA_59 0x681ec
-+#define TV_H_CHROMA_0 0x68200
-+#define TV_H_CHROMA_59 0x682ec
-+#define TV_V_LUMA_0 0x68300
-+#define TV_V_LUMA_42 0x683a8
-+#define TV_V_CHROMA_0 0x68400
-+#define TV_V_CHROMA_42 0x684a8
-+
-+/* Display & cursor control */
-+
-+/* Pipe A */
-+#define PIPEADSL 0x70000
-+#define PIPEACONF 0x70008
-+#define PIPEACONF_ENABLE (1<<31)
-+#define PIPEACONF_DISABLE 0
-+#define PIPEACONF_DOUBLE_WIDE (1<<30)
-+#define I965_PIPECONF_ACTIVE (1<<30)
-+#define PIPEACONF_SINGLE_WIDE 0
-+#define PIPEACONF_PIPE_UNLOCKED 0
-+#define PIPEACONF_PIPE_LOCKED (1<<25)
-+#define PIPEACONF_PALETTE 0
-+#define PIPEACONF_GAMMA (1<<24)
-+#define PIPECONF_FORCE_BORDER (1<<25)
-+#define PIPECONF_PROGRESSIVE (0 << 21)
-+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
-+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
-+#define PIPEASTAT 0x70024
-+#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
-+#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
-+#define PIPE_CRC_DONE_ENABLE (1UL<<28)
-+#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
-+#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
-+#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-+#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-+#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-+#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-+#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-+#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
-+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-+#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
-+#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-+#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-+#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
-+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
-+#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
-+#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-+#define PIPE_DPST_EVENT_STATUS (1UL<<7)
-+#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-+#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-+#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-+#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */
-+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
-+#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
-+
-+#define DSPARB 0x70030
-+#define DSPARB_CSTART_MASK (0x7f << 7)
-+#define DSPARB_CSTART_SHIFT 7
-+#define DSPARB_BSTART_MASK (0x7f)
-+#define DSPARB_BSTART_SHIFT 0
-+/*
-+ * The two pipe frame counter registers are not synchronized, so
-+ * reading a stable value is somewhat tricky. The following code
-+ * should work:
-+ *
-+ * do {
-+ * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
-+ * PIPE_FRAME_HIGH_SHIFT;
-+ * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
-+ * PIPE_FRAME_LOW_SHIFT);
-+ * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
-+ * PIPE_FRAME_HIGH_SHIFT);
-+ * } while (high1 != high2);
-+ * frame = (high1 << 8) | low1;
-+ */
-+#define PIPEAFRAMEHIGH 0x70040
-+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
-+#define PIPE_FRAME_HIGH_SHIFT 0
-+#define PIPEAFRAMEPIXEL 0x70044
-+#define PIPE_FRAME_LOW_MASK 0xff000000
-+#define PIPE_FRAME_LOW_SHIFT 24
-+#define PIPE_PIXEL_MASK 0x00ffffff
-+#define PIPE_PIXEL_SHIFT 0
-+
-+/* Cursor A & B regs */
-+#define CURACNTR 0x70080
-+#define CURSOR_MODE_DISABLE 0x00
-+#define CURSOR_MODE_64_32B_AX 0x07
-+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
-+#define MCURSOR_GAMMA_ENABLE (1 << 26)
-+#define CURABASE 0x70084
-+#define CURAPOS 0x70088
-+#define CURSOR_POS_MASK 0x007FF
-+#define CURSOR_POS_SIGN 0x8000
-+#define CURSOR_X_SHIFT 0
-+#define CURSOR_Y_SHIFT 16
-+#define CURBCNTR 0x700c0
-+#define CURBBASE 0x700c4
-+#define CURBPOS 0x700c8
-+
-+/* Display A control */
-+#define DSPACNTR 0x70180
-+#define DISPLAY_PLANE_ENABLE (1<<31)
-+#define DISPLAY_PLANE_DISABLE 0
-+#define DISPPLANE_GAMMA_ENABLE (1<<30)
-+#define DISPPLANE_GAMMA_DISABLE 0
-+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-+#define DISPPLANE_8BPP (0x2<<26)
-+#define DISPPLANE_15_16BPP (0x4<<26)
-+#define DISPPLANE_16BPP (0x5<<26)
-+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-+#define DISPPLANE_32BPP (0x7<<26)
-+#define DISPPLANE_STEREO_ENABLE (1<<25)
-+#define DISPPLANE_STEREO_DISABLE 0
-+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
-+#define DISPPLANE_SEL_PIPE_A 0
-+#define DISPPLANE_SEL_PIPE_B (1<<24)
-+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
-+#define DISPPLANE_SRC_KEY_DISABLE 0
-+#define DISPPLANE_LINE_DOUBLE (1<<20)
-+#define DISPPLANE_NO_LINE_DOUBLE 0
-+#define DISPPLANE_STEREO_POLARITY_FIRST 0
-+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-+#define DSPAADDR 0x70184
-+#define DSPASTRIDE 0x70188
-+#define DSPAPOS 0x7018C /* reserved */
-+#define DSPASIZE 0x70190
-+#define DSPASURF 0x7019C /* 965+ only */
-+#define DSPATILEOFF 0x701A4 /* 965+ only */
-+
-+/* VBIOS flags */
-+#define SWF00 0x71410
-+#define SWF01 0x71414
-+#define SWF02 0x71418
-+#define SWF03 0x7141c
-+#define SWF04 0x71420
-+#define SWF05 0x71424
-+#define SWF06 0x71428
-+#define SWF10 0x70410
-+#define SWF11 0x70414
-+#define SWF14 0x71420
-+#define SWF30 0x72414
-+#define SWF31 0x72418
-+#define SWF32 0x7241c
-+
-+/* Pipe B */
-+#define PIPEBDSL 0x71000
-+#define PIPEBCONF 0x71008
-+#define PIPEBSTAT 0x71024
-+#define PIPEBFRAMEHIGH 0x71040
-+#define PIPEBFRAMEPIXEL 0x71044
-+
-+/* Display B control */
-+#define DSPBCNTR 0x71180
-+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
-+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
-+#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
-+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-+#define DSPBADDR 0x71184
-+#define DSPBSTRIDE 0x71188
-+#define DSPBPOS 0x7118C
-+#define DSPBSIZE 0x71190
-+#define DSPBSURF 0x7119C
-+#define DSPBTILEOFF 0x711A4
-+
-+/* VBIOS regs */
-+#define VGACNTRL 0x71400
-+# define VGA_DISP_DISABLE (1 << 31)
-+# define VGA_2X_MODE (1 << 30)
-+# define VGA_PIPE_B_SELECT (1 << 29)
-+
-+#endif /* _I915_REG_H_ */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch
deleted file mode 100644
index 9337475c3..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch
+++ /dev/null
@@ -1,424 +0,0 @@
-commit 4f99970852559935b27bc634318f34c18c5fd143
-Author: Eric Anholt <eric@anholt.net>
-Date: Tue Jul 29 12:10:39 2008 -0700
-
- i915: Add support for MSI and interrupt mitigation.
-
- Previous attempts at interrupt mitigation had been foiled by i915_wait_irq's
- failure to update the sarea seqno value when the status page indicated that
- the seqno had already been passed. MSI support has been seen to cut CPU
- costs by up to 40% in some workloads by avoiding other expensive interrupt
- handlers for frequent graphics interrupts.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
-index 53f0e5a..61ed515 100644
---- a/drivers/gpu/drm/drm_irq.c
-+++ b/drivers/gpu/drm/drm_irq.c
-@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
-
-- p->irq = dev->irq;
-+ p->irq = dev->pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
-@@ -89,7 +89,7 @@ static int drm_irq_install(struct drm_device * dev)
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
-
-- if (dev->irq == 0)
-+ if (dev->pdev->irq == 0)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
-@@ -107,7 +107,7 @@ static int drm_irq_install(struct drm_device * dev)
- dev->irq_enabled = 1;
- mutex_unlock(&dev->struct_mutex);
-
-- DRM_DEBUG("irq=%d\n", dev->irq);
-+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
-
- if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
- init_waitqueue_head(&dev->vbl_queue);
-@@ -127,8 +127,12 @@ static int drm_irq_install(struct drm_device * dev)
- if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
- sh_flags = IRQF_SHARED;
-
-- ret = request_irq(dev->irq, dev->driver->irq_handler,
-+ ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
- sh_flags, dev->devname, dev);
-+ /* Expose the device irq number to drivers that want to export it for
-+ * whatever reason.
-+ */
-+ dev->irq = dev->pdev->irq;
- if (ret < 0) {
- mutex_lock(&dev->struct_mutex);
- dev->irq_enabled = 0;
-@@ -164,11 +168,11 @@ int drm_irq_uninstall(struct drm_device * dev)
- if (!irq_enabled)
- return -EINVAL;
-
-- DRM_DEBUG("irq=%d\n", dev->irq);
-+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
-
- dev->driver->irq_uninstall(dev);
-
-- free_irq(dev->irq, dev);
-+ free_irq(dev->pdev->irq, dev);
-
- dev->locked_tasklet_func = NULL;
-
-@@ -201,7 +205,7 @@ int drm_control(struct drm_device *dev, void *data,
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return 0;
- if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-- ctl->irq != dev->irq)
-+ ctl->irq != dev->pdev->irq)
- return -EINVAL;
- return drm_irq_install(dev);
- case DRM_UNINST_HANDLER:
-@@ -239,7 +243,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
- int ret = 0;
- unsigned int flags, seq;
-
-- if ((!dev->irq) || (!dev->irq_enabled))
-+ if ((!dev->pdev->irq) || (!dev->irq_enabled))
- return -EINVAL;
-
- if (vblwait->request.type &
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 7be580b..10bfb0c 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -84,7 +84,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
-- if (dev->irq)
-+ if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- if (dev_priv->ring.virtual_start) {
-@@ -644,7 +644,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
-
- switch (param->param) {
- case I915_PARAM_IRQ_ACTIVE:
-- value = dev->irq ? 1 : 0;
-+ value = dev->irq_enabled;
- break;
- case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->allow_batchbuffer ? 1 : 0;
-@@ -763,6 +763,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
- _DRM_KERNEL | _DRM_DRIVER,
- &dev_priv->mmio_map);
-+
-+
-+ /* On the 945G/GM, the chipset reports the MSI capability on the
-+ * integrated graphics even though the support isn't actually there
-+ * according to the published specs. It doesn't appear to function
-+ * correctly in testing on 945G.
-+ * This may be a side effect of MSI having been made available for PEG
-+ * and the registers being closely associated.
-+ */
-+ if (!IS_I945G(dev) && !IS_I945GM(dev))
-+ pci_enable_msi(dev->pdev);
-+
-+ spin_lock_init(&dev_priv->user_irq_lock);
-+
- return ret;
- }
-
-@@ -770,6 +784,9 @@ int i915_driver_unload(struct drm_device *dev)
- {
- struct drm_i915_private *dev_priv = dev->dev_private;
-
-+ if (dev->pdev->msi_enabled)
-+ pci_disable_msi(dev->pdev);
-+
- if (dev_priv->mmio_map)
- drm_rmmap(dev, dev_priv->mmio_map);
-
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index afb51a3..8daf0d8 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -105,6 +105,12 @@ typedef struct drm_i915_private {
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
-+ /** Protects user_irq_refcount and irq_mask_reg */
-+ spinlock_t user_irq_lock;
-+ /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-+ int user_irq_refcount;
-+ /** Cached value of IMR to avoid reads in updating the bitfield */
-+ u32 irq_mask_reg;
-
- int tex_lru_log_granularity;
- int allow_batchbuffer;
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index 4a2de78..24d11ed 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -33,6 +33,31 @@
-
- #define MAX_NOPID ((u32)~0)
-
-+/** These are the interrupts used by the driver */
-+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
-+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
-+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
-+
-+static inline void
-+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-+{
-+ if ((dev_priv->irq_mask_reg & mask) != 0) {
-+ dev_priv->irq_mask_reg &= ~mask;
-+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
-+ (void) I915_READ(IMR);
-+ }
-+}
-+
-+static inline void
-+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-+{
-+ if ((dev_priv->irq_mask_reg & mask) != mask) {
-+ dev_priv->irq_mask_reg |= mask;
-+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
-+ (void) I915_READ(IMR);
-+ }
-+}
-+
- /**
- * Emit blits for scheduled buffer swaps.
- *
-@@ -229,46 +254,50 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- {
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-- u16 temp;
- u32 pipea_stats, pipeb_stats;
-+ u32 iir;
-
- pipea_stats = I915_READ(PIPEASTAT);
- pipeb_stats = I915_READ(PIPEBSTAT);
-
-- temp = I915_READ16(IIR);
--
-- temp &= (I915_USER_INTERRUPT |
-- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT);
-+ if (dev->pdev->msi_enabled)
-+ I915_WRITE(IMR, ~0);
-+ iir = I915_READ(IIR);
-
-- DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
-+ DRM_DEBUG("iir=%08x\n", iir);
-
-- if (temp == 0)
-+ if (iir == 0) {
-+ if (dev->pdev->msi_enabled) {
-+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
-+ (void) I915_READ(IMR);
-+ }
- return IRQ_NONE;
-+ }
-
-- I915_WRITE16(IIR, temp);
-- (void) I915_READ16(IIR);
-- DRM_READMEMORYBARRIER();
-+ I915_WRITE(IIR, iir);
-+ if (dev->pdev->msi_enabled)
-+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
-+ (void) I915_READ(IIR); /* Flush posted writes */
-
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-- if (temp & I915_USER_INTERRUPT)
-+ if (iir & I915_USER_INTERRUPT)
- DRM_WAKEUP(&dev_priv->irq_queue);
-
-- if (temp & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
-+ if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
- int vblank_pipe = dev_priv->vblank_pipe;
-
- if ((vblank_pipe &
- (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
- == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
-- if (temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
-+ if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
- atomic_inc(&dev->vbl_received);
-- if (temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
-+ if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
- atomic_inc(&dev->vbl_received2);
-- } else if (((temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
-+ } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
- (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
-- ((temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
-+ ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
- (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
- atomic_inc(&dev->vbl_received);
-
-@@ -314,6 +343,27 @@ static int i915_emit_irq(struct drm_device * dev)
- return dev_priv->counter;
- }
-
-+static void i915_user_irq_get(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+
-+ spin_lock(&dev_priv->user_irq_lock);
-+ if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
-+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-+ spin_unlock(&dev_priv->user_irq_lock);
-+}
-+
-+static void i915_user_irq_put(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+
-+ spin_lock(&dev_priv->user_irq_lock);
-+ BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
-+ if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
-+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-+ spin_unlock(&dev_priv->user_irq_lock);
-+}
-+
- static int i915_wait_irq(struct drm_device * dev, int irq_nr)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-@@ -322,13 +372,17 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
- DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
-
-- if (READ_BREADCRUMB(dev_priv) >= irq_nr)
-+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
-+ }
-
- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
-+ i915_user_irq_get(dev);
- DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
-+ i915_user_irq_put(dev);
-
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-@@ -413,20 +467,6 @@ int i915_irq_wait(struct drm_device *dev, void *data,
- return i915_wait_irq(dev, irqwait->irq_seq);
- }
-
--static void i915_enable_interrupt (struct drm_device *dev)
--{
-- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-- u16 flag;
--
-- flag = 0;
-- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-- flag |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-- flag |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
--
-- I915_WRITE16(IER, I915_USER_INTERRUPT | flag);
--}
--
- /* Set the vblank monitor pipe
- */
- int i915_vblank_pipe_set(struct drm_device *dev, void *data,
-@@ -434,6 +474,7 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
-+ u32 enable_mask = 0, disable_mask = 0;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
-@@ -445,9 +486,20 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- return -EINVAL;
- }
-
-- dev_priv->vblank_pipe = pipe->pipe;
-+ if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
-+ enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-+ else
-+ disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-+
-+ if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
-+ enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-+ else
-+ disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-- i915_enable_interrupt (dev);
-+ i915_enable_irq(dev_priv, enable_mask);
-+ i915_disable_irq(dev_priv, disable_mask);
-+
-+ dev_priv->vblank_pipe = pipe->pipe;
-
- return 0;
- }
-@@ -464,7 +516,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- return -EINVAL;
- }
-
-- flag = I915_READ(IER);
-+ flag = I915_READ(IMR);
- pipe->pipe = 0;
- if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
- pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
-@@ -586,9 +638,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-- I915_WRITE16(HWSTAM, 0xfffe);
-- I915_WRITE16(IMR, 0x0);
-- I915_WRITE16(IER, 0x0);
-+ I915_WRITE(HWSTAM, 0xfffe);
-+ I915_WRITE(IMR, 0x0);
-+ I915_WRITE(IER, 0x0);
- }
-
- void i915_driver_irq_postinstall(struct drm_device * dev)
-@@ -601,7 +653,18 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
-
- if (!dev_priv->vblank_pipe)
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
-- i915_enable_interrupt(dev);
-+
-+ /* Set initial unmasked IRQs to just the selected vblank pipes. */
-+ dev_priv->irq_mask_reg = ~0;
-+ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-+ if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-+
-+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
-+ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
-+ (void) I915_READ(IER);
-+
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
- }
-
-@@ -613,10 +676,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
- if (!dev_priv)
- return;
-
-- I915_WRITE16(HWSTAM, 0xffff);
-- I915_WRITE16(IMR, 0xffff);
-- I915_WRITE16(IER, 0x0);
-+ I915_WRITE(HWSTAM, 0xffff);
-+ I915_WRITE(IMR, 0xffff);
-+ I915_WRITE(IER, 0x0);
-
-- temp = I915_READ16(IIR);
-- I915_WRITE16(IIR, temp);
-+ temp = I915_READ(IIR);
-+ I915_WRITE(IIR, temp);
- }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch
deleted file mode 100644
index 8736250f0..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-commit 1236e8610ab9c6f9f8297e60530bedb2640c7224
-Author: Keith Packard <keithp@keithp.com>
-Date: Wed Jul 30 12:21:20 2008 -0700
-
- i915: Track progress inside of batchbuffers for determining wedgedness.
-
- This avoids early termination for long-running commands.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 10bfb0c..4c72a01 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -40,11 +40,15 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-+ u32 last_acthd = I915_READ(acthd_reg);
-+ u32 acthd;
- u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- int i;
-
-- for (i = 0; i < 10000; i++) {
-+ for (i = 0; i < 100000; i++) {
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-+ acthd = I915_READ(acthd_reg);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-@@ -55,8 +59,13 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
-
- if (ring->head != last_head)
- i = 0;
-+ if (acthd != last_acthd)
-+ i = 0;
-
- last_head = ring->head;
-+ last_acthd = acthd;
-+ msleep_interruptible(10);
-+
- }
-
- return -EBUSY;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0007-i915-Initialize-hardware-status-page-at-device-load.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0007-i915-Initialize-hardware-status-page-at-device-load.patch
deleted file mode 100644
index 79f068f42..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0007-i915-Initialize-hardware-status-page-at-device-load.patch
+++ /dev/null
@@ -1,137 +0,0 @@
-commit 75fed4ae8454aa975c274b2585ec2287dd15773d
-Author: Keith Packard <keithp@keithp.com>
-Date: Wed Jul 30 13:03:43 2008 -0700
-
- i915: Initialize hardware status page at device load when possible.
-
- Some chips were unstable with repeated setup/teardown of the hardware status
- page.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 4c72a01..b3c4ac9 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -71,6 +71,52 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
- return -EBUSY;
- }
-
-+/**
-+ * Sets up the hardware status page for devices that need a physical address
-+ * in the register.
-+ */
-+int i915_init_phys_hws(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ /* Program Hardware Status Page */
-+ dev_priv->status_page_dmah =
-+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-+
-+ if (!dev_priv->status_page_dmah) {
-+ DRM_ERROR("Can not allocate hardware status page\n");
-+ return -ENOMEM;
-+ }
-+ dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
-+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-+
-+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-+
-+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-+ DRM_DEBUG("Enabled hardware status page\n");
-+ return 0;
-+}
-+
-+/**
-+ * Frees the hardware status page, whether it's a physical address or a virtual
-+ * address set up by the X Server.
-+ */
-+void i915_free_hws(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ if (dev_priv->status_page_dmah) {
-+ drm_pci_free(dev, dev_priv->status_page_dmah);
-+ dev_priv->status_page_dmah = NULL;
-+ }
-+
-+ if (dev_priv->status_gfx_addr) {
-+ dev_priv->status_gfx_addr = 0;
-+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
-+ }
-+
-+ /* Need to rewrite hardware status page */
-+ I915_WRITE(HWS_PGA, 0x1ffff000);
-+}
-+
- void i915_kernel_lost_context(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-@@ -103,18 +149,9 @@ static int i915_dma_cleanup(struct drm_device * dev)
- dev_priv->ring.map.size = 0;
- }
-
-- if (dev_priv->status_page_dmah) {
-- drm_pci_free(dev, dev_priv->status_page_dmah);
-- dev_priv->status_page_dmah = NULL;
-- /* Need to rewrite hardware status page */
-- I915_WRITE(HWS_PGA, 0x1ffff000);
-- }
--
-- if (dev_priv->status_gfx_addr) {
-- dev_priv->status_gfx_addr = 0;
-- drm_core_ioremapfree(&dev_priv->hws_map, dev);
-- I915_WRITE(HWS_PGA, 0x1ffff000);
-- }
-+ /* Clear the HWS virtual address at teardown */
-+ if (I915_NEED_GFX_HWS(dev))
-+ i915_free_hws(dev);
-
- return 0;
- }
-@@ -165,23 +202,6 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
- */
- dev_priv->allow_batchbuffer = 1;
-
-- /* Program Hardware Status Page */
-- if (!I915_NEED_GFX_HWS(dev)) {
-- dev_priv->status_page_dmah =
-- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
--
-- if (!dev_priv->status_page_dmah) {
-- i915_dma_cleanup(dev);
-- DRM_ERROR("Can not allocate hardware status page\n");
-- return -ENOMEM;
-- }
-- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
-- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
--
-- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-- }
-- DRM_DEBUG("Enabled hardware status page\n");
- return 0;
- }
-
-@@ -773,6 +793,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- _DRM_KERNEL | _DRM_DRIVER,
- &dev_priv->mmio_map);
-
-+ /* Init HWS */
-+ if (!I915_NEED_GFX_HWS(dev)) {
-+ ret = i915_init_phys_hws(dev);
-+ if (ret != 0)
-+ return ret;
-+ }
-
- /* On the 945G/GM, the chipset reports the MSI capability on the
- * integrated graphics even though the support isn't actually there
-@@ -796,6 +822,8 @@ int i915_driver_unload(struct drm_device *dev)
- if (dev->pdev->msi_enabled)
- pci_disable_msi(dev->pdev);
-
-+ i915_free_hws(dev);
-+
- if (dev_priv->mmio_map)
- drm_rmmap(dev, dev_priv->mmio_map);
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch
deleted file mode 100644
index afa6f9634..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0008-Add-Intel-ACPI-IGD-OpRegion-support.patch
+++ /dev/null
@@ -1,572 +0,0 @@
-commit 91c2ebb8e78aa64f4807399b506ec0090ae5f3d6
-Author: Matthew Garrett <mjg59@srcf.ucam.org>
-Date: Tue Aug 5 19:37:25 2008 +0100
-
- Add Intel ACPI IGD OpRegion support
-
- This adds the support necessary for allowing ACPI backlight control to
- work on some newer Intel-based graphics systems. Tested on Thinkpad T61
- and HP 2510p hardware.
-
- Signed-off-by: Matthew Garrett <mjg@redhat.com>
- Signed-off-by: Dave Airlie <airlied@linux.ie>
-
-diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
-index a9e6046..b032808 100644
---- a/drivers/gpu/drm/i915/Makefile
-+++ b/drivers/gpu/drm/i915/Makefile
-@@ -3,7 +3,7 @@
- # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
- ccflags-y := -Iinclude/drm
--i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
-+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o
-
- i915-$(CONFIG_COMPAT) += i915_ioc32.o
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index b3c4ac9..cead62f 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -810,6 +810,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- if (!IS_I945G(dev) && !IS_I945GM(dev))
- pci_enable_msi(dev->pdev);
-
-+ intel_opregion_init(dev);
-+
- spin_lock_init(&dev_priv->user_irq_lock);
-
- return ret;
-@@ -827,6 +829,8 @@ int i915_driver_unload(struct drm_device *dev)
- if (dev_priv->mmio_map)
- drm_rmmap(dev, dev_priv->mmio_map);
-
-+ intel_opregion_free(dev);
-+
- drm_free(dev->dev_private, sizeof(drm_i915_private_t),
- DRM_MEM_DRIVER);
-
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index 6c99aab..d95eca2 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -371,6 +371,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
-
- i915_save_vga(dev);
-
-+ intel_opregion_free(dev);
-+
- if (state.event == PM_EVENT_SUSPEND) {
- /* Shut down the device */
- pci_disable_device(dev->pdev);
-@@ -532,6 +534,8 @@ static int i915_resume(struct drm_device *dev)
-
- i915_restore_vga(dev);
-
-+ intel_opregion_init(dev);
-+
- return 0;
- }
-
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 8daf0d8..e4bd01c 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -82,6 +82,14 @@ typedef struct _drm_i915_vbl_swap {
- unsigned int sequence;
- } drm_i915_vbl_swap_t;
-
-+struct intel_opregion {
-+ struct opregion_header *header;
-+ struct opregion_acpi *acpi;
-+ struct opregion_swsci *swsci;
-+ struct opregion_asle *asle;
-+ int enabled;
-+};
-+
- typedef struct drm_i915_private {
- drm_local_map_t *sarea;
- drm_local_map_t *mmio_map;
-@@ -122,6 +130,8 @@ typedef struct drm_i915_private {
- drm_i915_vbl_swap_t vbl_swaps;
- unsigned int swaps_pending;
-
-+ struct intel_opregion opregion;
-+
- /* Register state */
- u8 saveLBB;
- u32 saveDSPACNTR;
-@@ -244,6 +254,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-+extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-
- /* i915_mem.c */
- extern int i915_mem_alloc(struct drm_device *dev, void *data,
-@@ -258,6 +269,12 @@ extern void i915_mem_takedown(struct mem_block **heap);
- extern void i915_mem_release(struct drm_device * dev,
- struct drm_file *file_priv, struct mem_block *heap);
-
-+/* i915_opregion.c */
-+extern int intel_opregion_init(struct drm_device *dev);
-+extern void intel_opregion_free(struct drm_device *dev);
-+extern void opregion_asle_intr(struct drm_device *dev);
-+extern void opregion_enable_asle(struct drm_device *dev);
-+
- #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
- #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
- #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index 24d11ed..ae7d3a8 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -36,9 +36,11 @@
- /** These are the interrupts used by the driver */
- #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
-- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
-+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \
-+ I915_ASLE_INTERRUPT | \
-+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
-
--static inline void
-+void
- i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
- {
- if ((dev_priv->irq_mask_reg & mask) != 0) {
-@@ -274,6 +276,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- return IRQ_NONE;
- }
-
-+ I915_WRITE(PIPEASTAT, pipea_stats);
-+ I915_WRITE(PIPEBSTAT, pipeb_stats);
-+
- I915_WRITE(IIR, iir);
- if (dev->pdev->msi_enabled)
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
-@@ -306,14 +311,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
-
- if (dev_priv->swaps_pending > 0)
- drm_locked_tasklet(dev, i915_vblank_tasklet);
-- I915_WRITE(PIPEASTAT,
-- pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
-- PIPE_VBLANK_INTERRUPT_STATUS);
-- I915_WRITE(PIPEBSTAT,
-- pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
-- PIPE_VBLANK_INTERRUPT_STATUS);
- }
-
-+ if (iir & I915_ASLE_INTERRUPT)
-+ opregion_asle_intr(dev);
-+
-+ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
-+ opregion_asle_intr(dev);
-+
- return IRQ_HANDLED;
- }
-
-@@ -661,10 +666,14 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-+ dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
-+
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
- (void) I915_READ(IER);
-
-+ opregion_enable_asle(dev);
-+
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
- }
-
-diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
-new file mode 100644
-index 0000000..1787a0c
---- /dev/null
-+++ b/drivers/gpu/drm/i915/i915_opregion.c
-@@ -0,0 +1,371 @@
-+/*
-+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
-+ * Copyright 2008 Red Hat <mjg@redhat.com>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining
-+ * a copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial
-+ * portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
-+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/acpi.h>
-+
-+#include "drmP.h"
-+#include "i915_drm.h"
-+#include "i915_drv.h"
-+
-+#define PCI_ASLE 0xe4
-+#define PCI_LBPC 0xf4
-+#define PCI_ASLS 0xfc
-+
-+#define OPREGION_SZ (8*1024)
-+#define OPREGION_HEADER_OFFSET 0
-+#define OPREGION_ACPI_OFFSET 0x100
-+#define OPREGION_SWSCI_OFFSET 0x200
-+#define OPREGION_ASLE_OFFSET 0x300
-+#define OPREGION_VBT_OFFSET 0x1000
-+
-+#define OPREGION_SIGNATURE "IntelGraphicsMem"
-+#define MBOX_ACPI (1<<0)
-+#define MBOX_SWSCI (1<<1)
-+#define MBOX_ASLE (1<<2)
-+
-+struct opregion_header {
-+ u8 signature[16];
-+ u32 size;
-+ u32 opregion_ver;
-+ u8 bios_ver[32];
-+ u8 vbios_ver[16];
-+ u8 driver_ver[16];
-+ u32 mboxes;
-+ u8 reserved[164];
-+} __attribute__((packed));
-+
-+/* OpRegion mailbox #1: public ACPI methods */
-+struct opregion_acpi {
-+ u32 drdy; /* driver readiness */
-+ u32 csts; /* notification status */
-+ u32 cevt; /* current event */
-+ u8 rsvd1[20];
-+ u32 didl[8]; /* supported display devices ID list */
-+ u32 cpdl[8]; /* currently presented display list */
-+ u32 cadl[8]; /* currently active display list */
-+ u32 nadl[8]; /* next active devices list */
-+ u32 aslp; /* ASL sleep time-out */
-+ u32 tidx; /* toggle table index */
-+ u32 chpd; /* current hotplug enable indicator */
-+ u32 clid; /* current lid state*/
-+ u32 cdck; /* current docking state */
-+ u32 sxsw; /* Sx state resume */
-+ u32 evts; /* ASL supported events */
-+ u32 cnot; /* current OS notification */
-+ u32 nrdy; /* driver status */
-+ u8 rsvd2[60];
-+} __attribute__((packed));
-+
-+/* OpRegion mailbox #2: SWSCI */
-+struct opregion_swsci {
-+ u32 scic; /* SWSCI command|status|data */
-+ u32 parm; /* command parameters */
-+ u32 dslp; /* driver sleep time-out */
-+ u8 rsvd[244];
-+} __attribute__((packed));
-+
-+/* OpRegion mailbox #3: ASLE */
-+struct opregion_asle {
-+ u32 ardy; /* driver readiness */
-+ u32 aslc; /* ASLE interrupt command */
-+ u32 tche; /* technology enabled indicator */
-+ u32 alsi; /* current ALS illuminance reading */
-+ u32 bclp; /* backlight brightness to set */
-+ u32 pfit; /* panel fitting state */
-+ u32 cblv; /* current brightness level */
-+ u16 bclm[20]; /* backlight level duty cycle mapping table */
-+ u32 cpfm; /* current panel fitting mode */
-+ u32 epfm; /* enabled panel fitting modes */
-+ u8 plut[74]; /* panel LUT and identifier */
-+ u32 pfmb; /* PWM freq and min brightness */
-+ u8 rsvd[102];
-+} __attribute__((packed));
-+
-+/* ASLE irq request bits */
-+#define ASLE_SET_ALS_ILLUM (1 << 0)
-+#define ASLE_SET_BACKLIGHT (1 << 1)
-+#define ASLE_SET_PFIT (1 << 2)
-+#define ASLE_SET_PWM_FREQ (1 << 3)
-+#define ASLE_REQ_MSK 0xf
-+
-+/* response bits of ASLE irq request */
-+#define ASLE_ALS_ILLUM_FAIL (2<<10)
-+#define ASLE_BACKLIGHT_FAIL (2<<12)
-+#define ASLE_PFIT_FAIL (2<<14)
-+#define ASLE_PWM_FREQ_FAIL (2<<16)
-+
-+/* ASLE backlight brightness to set */
-+#define ASLE_BCLP_VALID (1<<31)
-+#define ASLE_BCLP_MSK (~(1<<31))
-+
-+/* ASLE panel fitting request */
-+#define ASLE_PFIT_VALID (1<<31)
-+#define ASLE_PFIT_CENTER (1<<0)
-+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
-+#define ASLE_PFIT_STRETCH_GFX (1<<2)
-+
-+/* PWM frequency and minimum brightness */
-+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
-+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
-+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
-+#define ASLE_PFMB_PWM_VALID (1<<31)
-+
-+#define ASLE_CBLV_VALID (1<<31)
-+
-+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct opregion_asle *asle = dev_priv->opregion.asle;
-+ u32 blc_pwm_ctl, blc_pwm_ctl2;
-+
-+ if (!(bclp & ASLE_BCLP_VALID))
-+ return ASLE_BACKLIGHT_FAIL;
-+
-+ bclp &= ASLE_BCLP_MSK;
-+ if (bclp < 0 || bclp > 255)
-+ return ASLE_BACKLIGHT_FAIL;
-+
-+ blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
-+ blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-+ blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
-+
-+ if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
-+ pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
-+ else
-+ I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
-+
-+ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-+
-+ return 0;
-+}
-+
-+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
-+{
-+ /* alsi is the current ALS reading in lux. 0 indicates below sensor
-+ range, 0xffff indicates above sensor range. 1-0xfffe are valid */
-+ return 0;
-+}
-+
-+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ if (pfmb & ASLE_PFMB_PWM_VALID) {
-+ u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
-+ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
-+ blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
-+ pwm = pwm >> 9;
-+ /* FIXME - what do we do with the PWM? */
-+ }
-+ return 0;
-+}
-+
-+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
-+{
-+ /* Panel fitting is currently controlled by the X code, so this is a
-+ noop until modesetting support works fully */
-+ if (!(pfit & ASLE_PFIT_VALID))
-+ return ASLE_PFIT_FAIL;
-+ return 0;
-+}
-+
-+void opregion_asle_intr(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct opregion_asle *asle = dev_priv->opregion.asle;
-+ u32 asle_stat = 0;
-+ u32 asle_req;
-+
-+ if (!asle)
-+ return;
-+
-+ asle_req = asle->aslc & ASLE_REQ_MSK;
-+
-+ if (!asle_req) {
-+ DRM_DEBUG("non asle set request??\n");
-+ return;
-+ }
-+
-+ if (asle_req & ASLE_SET_ALS_ILLUM)
-+ asle_stat |= asle_set_als_illum(dev, asle->alsi);
-+
-+ if (asle_req & ASLE_SET_BACKLIGHT)
-+ asle_stat |= asle_set_backlight(dev, asle->bclp);
-+
-+ if (asle_req & ASLE_SET_PFIT)
-+ asle_stat |= asle_set_pfit(dev, asle->pfit);
-+
-+ if (asle_req & ASLE_SET_PWM_FREQ)
-+ asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
-+
-+ asle->aslc = asle_stat;
-+}
-+
-+#define ASLE_ALS_EN (1<<0)
-+#define ASLE_BLC_EN (1<<1)
-+#define ASLE_PFIT_EN (1<<2)
-+#define ASLE_PFMB_EN (1<<3)
-+
-+void opregion_enable_asle(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct opregion_asle *asle = dev_priv->opregion.asle;
-+
-+ if (asle) {
-+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
-+ if (IS_MOBILE(dev)) {
-+ /* Many devices trigger events with a write to the
-+ legacy backlight controller, so we need to ensure
-+ that it's able to generate interrupts */
-+ I915_WRITE(PIPEBSTAT, pipeb_stats |=
-+ I915_LEGACY_BLC_EVENT_ENABLE);
-+ i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT |
-+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
-+ } else
-+ i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
-+
-+ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
-+ ASLE_PFMB_EN;
-+ asle->ardy = 1;
-+ }
-+}
-+
-+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
-+#define ACPI_EV_LID (1<<1)
-+#define ACPI_EV_DOCK (1<<2)
-+
-+static struct intel_opregion *system_opregion;
-+
-+int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
-+ void *data)
-+{
-+ /* The only video events relevant to opregion are 0x80. These indicate
-+ either a docking event, lid switch or display switch request. In
-+ Linux, these are handled by the dock, button and video drivers.
-+ We might want to fix the video driver to be opregion-aware in
-+ future, but right now we just indicate to the firmware that the
-+ request has been handled */
-+
-+ struct opregion_acpi *acpi;
-+
-+ if (!system_opregion)
-+ return NOTIFY_DONE;
-+
-+ acpi = system_opregion->acpi;
-+ acpi->csts = 0;
-+
-+ return NOTIFY_OK;
-+}
-+
-+static struct notifier_block intel_opregion_notifier = {
-+ .notifier_call = intel_opregion_video_event,
-+};
-+
-+int intel_opregion_init(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct intel_opregion *opregion = &dev_priv->opregion;
-+ void *base;
-+ u32 asls, mboxes;
-+ int err = 0;
-+
-+ pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
-+ DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
-+ if (asls == 0) {
-+ DRM_DEBUG("ACPI OpRegion not supported!\n");
-+ return -ENOTSUPP;
-+ }
-+
-+ base = ioremap(asls, OPREGION_SZ);
-+ if (!base)
-+ return -ENOMEM;
-+
-+ opregion->header = base;
-+ if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
-+ DRM_DEBUG("opregion signature mismatch\n");
-+ err = -EINVAL;
-+ goto err_out;
-+ }
-+
-+ mboxes = opregion->header->mboxes;
-+ if (mboxes & MBOX_ACPI) {
-+ DRM_DEBUG("Public ACPI methods supported\n");
-+ opregion->acpi = base + OPREGION_ACPI_OFFSET;
-+ } else {
-+ DRM_DEBUG("Public ACPI methods not supported\n");
-+ err = -ENOTSUPP;
-+ goto err_out;
-+ }
-+ opregion->enabled = 1;
-+
-+ if (mboxes & MBOX_SWSCI) {
-+ DRM_DEBUG("SWSCI supported\n");
-+ opregion->swsci = base + OPREGION_SWSCI_OFFSET;
-+ }
-+ if (mboxes & MBOX_ASLE) {
-+ DRM_DEBUG("ASLE supported\n");
-+ opregion->asle = base + OPREGION_ASLE_OFFSET;
-+ }
-+
-+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
-+ * Right now, all the events are handled by the ACPI video module.
-+ * We don't actually need to do anything with them. */
-+ opregion->acpi->csts = 0;
-+ opregion->acpi->drdy = 1;
-+
-+ system_opregion = opregion;
-+ register_acpi_notifier(&intel_opregion_notifier);
-+
-+ return 0;
-+
-+err_out:
-+ iounmap(opregion->header);
-+ opregion->header = NULL;
-+ return err;
-+}
-+
-+void intel_opregion_free(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct intel_opregion *opregion = &dev_priv->opregion;
-+
-+ if (!opregion->enabled)
-+ return;
-+
-+ opregion->acpi->drdy = 0;
-+
-+ system_opregion = NULL;
-+ unregister_acpi_notifier(&intel_opregion_notifier);
-+
-+ /* just clear all opregion memory pointers now */
-+ iounmap(opregion->header);
-+ opregion->header = NULL;
-+ opregion->acpi = NULL;
-+ opregion->swsci = NULL;
-+ opregion->asle = NULL;
-+
-+ opregion->enabled = 0;
-+}
-diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
-index 477c64e..43ad2cb 100644
---- a/drivers/gpu/drm/i915/i915_reg.h
-+++ b/drivers/gpu/drm/i915/i915_reg.h
-@@ -740,6 +740,7 @@
- #define BLC_PWM_CTL 0x61254
- #define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
- #define BLC_PWM_CTL2 0x61250 /* 965+ only */
-+#define BLM_COMBINATION_MODE (1 << 30)
- /*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0009-drm-fix-sysfs-error-path.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0009-drm-fix-sysfs-error-path.patch
deleted file mode 100644
index 8dea82480..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0009-drm-fix-sysfs-error-path.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-commit 2e9c9eedfe0be777c051a2198dddf459adcc407b
-Author: Dave Airlie <airlied@redhat.com>
-Date: Tue Sep 2 10:06:06 2008 +1000
-
- drm: fix sysfs error path.
-
- Pointed out by Roel Kluin on dri-devel.
-
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
-index af211a0..1611b9b 100644
---- a/drivers/gpu/drm/drm_sysfs.c
-+++ b/drivers/gpu/drm/drm_sysfs.c
-@@ -184,7 +184,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
- err_out_files:
- if (i > 0)
- for (j = 0; j < i; j++)
-- device_remove_file(&minor->kdev, &device_attrs[i]);
-+ device_remove_file(&minor->kdev, &device_attrs[j]);
- device_unregister(&minor->kdev);
- err_out:
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0010-i915-separate-suspend-resume-functions.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0010-i915-separate-suspend-resume-functions.patch
deleted file mode 100644
index 897d50c39..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0010-i915-separate-suspend-resume-functions.patch
+++ /dev/null
@@ -1,1079 +0,0 @@
-commit a850828c640735fb410c782717c9eb7f8474e356
-Author: Jesse Barnes <jbarnes@virtuousgeek.org>
-Date: Mon Aug 25 15:11:06 2008 -0700
-
- separate i915 suspend/resume functions into their own file
-
- [Patch against drm-next. Consider this a trial balloon for our new Linux
- development model.]
-
- This is a big chunk of code. Separating it out makes it easier to change
- without churn on the main i915_drv.c file (and there will be churn as we
- fix bugs and add things like kernel mode setting). Also makes it easier
- to share this file with BSD.
-
- Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
-index b032808..c4bbda6 100644
---- a/drivers/gpu/drm/i915/Makefile
-+++ b/drivers/gpu/drm/i915/Makefile
-@@ -3,7 +3,8 @@
- # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
- ccflags-y := -Iinclude/drm
--i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o
-+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
-+ i915_suspend.o
-
- i915-$(CONFIG_COMPAT) += i915_ioc32.o
-
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index d95eca2..eff66ed 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -38,211 +38,9 @@ static struct pci_device_id pciidlist[] = {
- i915_PCI_IDS
- };
-
--enum pipe {
-- PIPE_A = 0,
-- PIPE_B,
--};
--
--static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
--{
-- struct drm_i915_private *dev_priv = dev->dev_private;
--
-- if (pipe == PIPE_A)
-- return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
-- else
-- return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
--}
--
--static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
--{
-- struct drm_i915_private *dev_priv = dev->dev_private;
-- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-- u32 *array;
-- int i;
--
-- if (!i915_pipe_enabled(dev, pipe))
-- return;
--
-- if (pipe == PIPE_A)
-- array = dev_priv->save_palette_a;
-- else
-- array = dev_priv->save_palette_b;
--
-- for(i = 0; i < 256; i++)
-- array[i] = I915_READ(reg + (i << 2));
--}
--
--static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
--{
-- struct drm_i915_private *dev_priv = dev->dev_private;
-- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-- u32 *array;
-- int i;
--
-- if (!i915_pipe_enabled(dev, pipe))
-- return;
--
-- if (pipe == PIPE_A)
-- array = dev_priv->save_palette_a;
-- else
-- array = dev_priv->save_palette_b;
--
-- for(i = 0; i < 256; i++)
-- I915_WRITE(reg + (i << 2), array[i]);
--}
--
--static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
--{
-- outb(reg, index_port);
-- return inb(data_port);
--}
--
--static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
--{
-- inb(st01);
-- outb(palette_enable | reg, VGA_AR_INDEX);
-- return inb(VGA_AR_DATA_READ);
--}
--
--static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
--{
-- inb(st01);
-- outb(palette_enable | reg, VGA_AR_INDEX);
-- outb(val, VGA_AR_DATA_WRITE);
--}
--
--static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
--{
-- outb(reg, index_port);
-- outb(val, data_port);
--}
--
--static void i915_save_vga(struct drm_device *dev)
--{
-- struct drm_i915_private *dev_priv = dev->dev_private;
-- int i;
-- u16 cr_index, cr_data, st01;
--
-- /* VGA color palette registers */
-- dev_priv->saveDACMASK = inb(VGA_DACMASK);
-- /* DACCRX automatically increments during read */
-- outb(0, VGA_DACRX);
-- /* Read 3 bytes of color data from each index */
-- for (i = 0; i < 256 * 3; i++)
-- dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
--
-- /* MSR bits */
-- dev_priv->saveMSR = inb(VGA_MSR_READ);
-- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-- cr_index = VGA_CR_INDEX_CGA;
-- cr_data = VGA_CR_DATA_CGA;
-- st01 = VGA_ST01_CGA;
-- } else {
-- cr_index = VGA_CR_INDEX_MDA;
-- cr_data = VGA_CR_DATA_MDA;
-- st01 = VGA_ST01_MDA;
-- }
--
-- /* CRT controller regs */
-- i915_write_indexed(cr_index, cr_data, 0x11,
-- i915_read_indexed(cr_index, cr_data, 0x11) &
-- (~0x80));
-- for (i = 0; i <= 0x24; i++)
-- dev_priv->saveCR[i] =
-- i915_read_indexed(cr_index, cr_data, i);
-- /* Make sure we don't turn off CR group 0 writes */
-- dev_priv->saveCR[0x11] &= ~0x80;
--
-- /* Attribute controller registers */
-- inb(st01);
-- dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
-- for (i = 0; i <= 0x14; i++)
-- dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
-- inb(st01);
-- outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
-- inb(st01);
--
-- /* Graphics controller registers */
-- for (i = 0; i < 9; i++)
-- dev_priv->saveGR[i] =
-- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
--
-- dev_priv->saveGR[0x10] =
-- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
-- dev_priv->saveGR[0x11] =
-- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
-- dev_priv->saveGR[0x18] =
-- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
--
-- /* Sequencer registers */
-- for (i = 0; i < 8; i++)
-- dev_priv->saveSR[i] =
-- i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
--}
--
--static void i915_restore_vga(struct drm_device *dev)
--{
-- struct drm_i915_private *dev_priv = dev->dev_private;
-- int i;
-- u16 cr_index, cr_data, st01;
--
-- /* MSR bits */
-- outb(dev_priv->saveMSR, VGA_MSR_WRITE);
-- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-- cr_index = VGA_CR_INDEX_CGA;
-- cr_data = VGA_CR_DATA_CGA;
-- st01 = VGA_ST01_CGA;
-- } else {
-- cr_index = VGA_CR_INDEX_MDA;
-- cr_data = VGA_CR_DATA_MDA;
-- st01 = VGA_ST01_MDA;
-- }
--
-- /* Sequencer registers, don't write SR07 */
-- for (i = 0; i < 7; i++)
-- i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
-- dev_priv->saveSR[i]);
--
-- /* CRT controller regs */
-- /* Enable CR group 0 writes */
-- i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
-- for (i = 0; i <= 0x24; i++)
-- i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
--
-- /* Graphics controller regs */
-- for (i = 0; i < 9; i++)
-- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
-- dev_priv->saveGR[i]);
--
-- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
-- dev_priv->saveGR[0x10]);
-- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
-- dev_priv->saveGR[0x11]);
-- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
-- dev_priv->saveGR[0x18]);
--
-- /* Attribute controller registers */
-- inb(st01);
-- for (i = 0; i <= 0x14; i++)
-- i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
-- inb(st01); /* switch back to index mode */
-- outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
-- inb(st01);
--
-- /* VGA color palette registers */
-- outb(dev_priv->saveDACMASK, VGA_DACMASK);
-- /* DACCRX automatically increments during read */
-- outb(0, VGA_DACWX);
-- /* Read 3 bytes of color data from each index */
-- for (i = 0; i < 256 * 3; i++)
-- outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
--
--}
--
- static int i915_suspend(struct drm_device *dev, pm_message_t state)
- {
- struct drm_i915_private *dev_priv = dev->dev_private;
-- int i;
-
- if (!dev || !dev_priv) {
- printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
-@@ -254,122 +52,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
- return 0;
-
- pci_save_state(dev->pdev);
-- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
--
-- /* Display arbitration control */
-- dev_priv->saveDSPARB = I915_READ(DSPARB);
--
-- /* Pipe & plane A info */
-- dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
-- dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-- dev_priv->saveFPA0 = I915_READ(FPA0);
-- dev_priv->saveFPA1 = I915_READ(FPA1);
-- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
-- if (IS_I965G(dev))
-- dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
-- dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
-- dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
-- dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
-- dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
-- dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
-- dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
--
-- dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
-- dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
-- dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
-- dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
-- dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
-- if (IS_I965G(dev)) {
-- dev_priv->saveDSPASURF = I915_READ(DSPASURF);
-- dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
-- }
-- i915_save_palette(dev, PIPE_A);
-- dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
--
-- /* Pipe & plane B info */
-- dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
-- dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-- dev_priv->saveFPB0 = I915_READ(FPB0);
-- dev_priv->saveFPB1 = I915_READ(FPB1);
-- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
-- if (IS_I965G(dev))
-- dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
-- dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
-- dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
-- dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
-- dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
-- dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
-- dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
--
-- dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
-- dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
-- dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
-- dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
-- dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
-- if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
-- dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
-- dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
-- }
-- i915_save_palette(dev, PIPE_B);
-- dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
--
-- /* CRT state */
-- dev_priv->saveADPA = I915_READ(ADPA);
-
-- /* LVDS state */
-- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-- if (IS_I965G(dev))
-- dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
-- if (IS_MOBILE(dev) && !IS_I830(dev))
-- dev_priv->saveLVDS = I915_READ(LVDS);
-- if (!IS_I830(dev) && !IS_845G(dev))
-- dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
-- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
-- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
--
-- /* FIXME: save TV & SDVO state */
--
-- /* FBC state */
-- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
--
-- /* Interrupt state */
-- dev_priv->saveIIR = I915_READ(IIR);
-- dev_priv->saveIER = I915_READ(IER);
-- dev_priv->saveIMR = I915_READ(IMR);
--
-- /* VGA state */
-- dev_priv->saveVGA0 = I915_READ(VGA0);
-- dev_priv->saveVGA1 = I915_READ(VGA1);
-- dev_priv->saveVGA_PD = I915_READ(VGA_PD);
-- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
--
-- /* Clock gating state */
-- dev_priv->saveD_STATE = I915_READ(D_STATE);
-- dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
--
-- /* Cache mode state */
-- dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
--
-- /* Memory Arbitration state */
-- dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
--
-- /* Scratch space */
-- for (i = 0; i < 16; i++) {
-- dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
-- dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
-- }
-- for (i = 0; i < 3; i++)
-- dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
--
-- i915_save_vga(dev);
-+ i915_save_state(dev);
-
- intel_opregion_free(dev);
-
-@@ -384,155 +68,13 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
-
- static int i915_resume(struct drm_device *dev)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-- int i;
--
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev))
- return -1;
- pci_set_master(dev->pdev);
-
-- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
--
-- I915_WRITE(DSPARB, dev_priv->saveDSPARB);
--
-- /* Pipe & plane A info */
-- /* Prime the clock */
-- if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
-- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
-- ~DPLL_VCO_ENABLE);
-- udelay(150);
-- }
-- I915_WRITE(FPA0, dev_priv->saveFPA0);
-- I915_WRITE(FPA1, dev_priv->saveFPA1);
-- /* Actually enable it */
-- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
-- udelay(150);
-- if (IS_I965G(dev))
-- I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
-- udelay(150);
--
-- /* Restore mode */
-- I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
-- I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
-- I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
-- I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
-- I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
-- I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
--
-- /* Restore plane info */
-- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
-- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
-- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
-- I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
-- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
-- if (IS_I965G(dev)) {
-- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
-- I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
-- }
--
-- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
--
-- i915_restore_palette(dev, PIPE_A);
-- /* Enable the plane */
-- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
-- I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
--
-- /* Pipe & plane B info */
-- if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
-- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
-- ~DPLL_VCO_ENABLE);
-- udelay(150);
-- }
-- I915_WRITE(FPB0, dev_priv->saveFPB0);
-- I915_WRITE(FPB1, dev_priv->saveFPB1);
-- /* Actually enable it */
-- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
-- udelay(150);
-- if (IS_I965G(dev))
-- I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
-- udelay(150);
--
-- /* Restore mode */
-- I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
-- I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
-- I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
-- I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
-- I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
-- I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
--
-- /* Restore plane info */
-- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
-- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
-- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
-- I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
-- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
-- if (IS_I965G(dev)) {
-- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
-- I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
-- }
--
-- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
--
-- i915_restore_palette(dev, PIPE_B);
-- /* Enable the plane */
-- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
-- I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
--
-- /* CRT state */
-- I915_WRITE(ADPA, dev_priv->saveADPA);
--
-- /* LVDS state */
-- if (IS_I965G(dev))
-- I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
-- if (IS_MOBILE(dev) && !IS_I830(dev))
-- I915_WRITE(LVDS, dev_priv->saveLVDS);
-- if (!IS_I830(dev) && !IS_845G(dev))
-- I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
--
-- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
-- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
-- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
-- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
-- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
--
-- /* FIXME: restore TV & SDVO state */
--
-- /* FBC info */
-- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
--
-- /* VGA state */
-- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
-- I915_WRITE(VGA0, dev_priv->saveVGA0);
-- I915_WRITE(VGA1, dev_priv->saveVGA1);
-- I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
-- udelay(150);
--
-- /* Clock gating state */
-- I915_WRITE (D_STATE, dev_priv->saveD_STATE);
-- I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
--
-- /* Cache mode state */
-- I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
--
-- /* Memory arbitration state */
-- I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
--
-- for (i = 0; i < 16; i++) {
-- I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
-- I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
-- }
-- for (i = 0; i < 3; i++)
-- I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
--
-- i915_restore_vga(dev);
-+ i915_restore_state(dev);
-
- intel_opregion_init(dev);
-
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index e4bd01c..a82b487 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -41,6 +41,11 @@
- #define DRIVER_DESC "Intel Graphics"
- #define DRIVER_DATE "20060119"
-
-+enum pipe {
-+ PIPE_A = 0,
-+ PIPE_B,
-+};
-+
- /* Interface history:
- *
- * 1.1: Original.
-@@ -269,6 +274,10 @@ extern void i915_mem_takedown(struct mem_block **heap);
- extern void i915_mem_release(struct drm_device * dev,
- struct drm_file *file_priv, struct mem_block *heap);
-
-+/* i915_suspend.c */
-+extern int i915_save_state(struct drm_device *dev);
-+extern int i915_restore_state(struct drm_device *dev);
-+
- /* i915_opregion.c */
- extern int intel_opregion_init(struct drm_device *dev);
- extern void intel_opregion_free(struct drm_device *dev);
-@@ -279,6 +288,8 @@ extern void opregion_enable_asle(struct drm_device *dev);
- #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
- #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
- #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
-+#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg))
-+#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
-
- #define I915_VERBOSE 0
-
-diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
-new file mode 100644
-index 0000000..e0c1fe4
---- /dev/null
-+++ b/drivers/gpu/drm/i915/i915_suspend.c
-@@ -0,0 +1,509 @@
-+/*
-+ *
-+ * Copyright 2008 (c) Intel Corporation
-+ * Jesse Barnes <jbarnes@virtuousgeek.org>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
-+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "i915_drm.h"
-+#include "i915_drv.h"
-+
-+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+ if (pipe == PIPE_A)
-+ return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
-+ else
-+ return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
-+}
-+
-+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-+ u32 *array;
-+ int i;
-+
-+ if (!i915_pipe_enabled(dev, pipe))
-+ return;
-+
-+ if (pipe == PIPE_A)
-+ array = dev_priv->save_palette_a;
-+ else
-+ array = dev_priv->save_palette_b;
-+
-+ for(i = 0; i < 256; i++)
-+ array[i] = I915_READ(reg + (i << 2));
-+}
-+
-+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-+ u32 *array;
-+ int i;
-+
-+ if (!i915_pipe_enabled(dev, pipe))
-+ return;
-+
-+ if (pipe == PIPE_A)
-+ array = dev_priv->save_palette_a;
-+ else
-+ array = dev_priv->save_palette_b;
-+
-+ for(i = 0; i < 256; i++)
-+ I915_WRITE(reg + (i << 2), array[i]);
-+}
-+
-+static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+ I915_WRITE8(index_port, reg);
-+ return I915_READ8(data_port);
-+}
-+
-+static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+ I915_READ8(st01);
-+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
-+ return I915_READ8(VGA_AR_DATA_READ);
-+}
-+
-+static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+ I915_READ8(st01);
-+ I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
-+ I915_WRITE8(VGA_AR_DATA_WRITE, val);
-+}
-+
-+static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+
-+ I915_WRITE8(index_port, reg);
-+ I915_WRITE8(data_port, val);
-+}
-+
-+static void i915_save_vga(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ int i;
-+ u16 cr_index, cr_data, st01;
-+
-+ /* VGA color palette registers */
-+ dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
-+ /* DACCRX automatically increments during read */
-+ I915_WRITE8(VGA_DACRX, 0);
-+ /* Read 3 bytes of color data from each index */
-+ for (i = 0; i < 256 * 3; i++)
-+ dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
-+
-+ /* MSR bits */
-+ dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
-+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-+ cr_index = VGA_CR_INDEX_CGA;
-+ cr_data = VGA_CR_DATA_CGA;
-+ st01 = VGA_ST01_CGA;
-+ } else {
-+ cr_index = VGA_CR_INDEX_MDA;
-+ cr_data = VGA_CR_DATA_MDA;
-+ st01 = VGA_ST01_MDA;
-+ }
-+
-+ /* CRT controller regs */
-+ i915_write_indexed(dev, cr_index, cr_data, 0x11,
-+ i915_read_indexed(dev, cr_index, cr_data, 0x11) &
-+ (~0x80));
-+ for (i = 0; i <= 0x24; i++)
-+ dev_priv->saveCR[i] =
-+ i915_read_indexed(dev, cr_index, cr_data, i);
-+ /* Make sure we don't turn off CR group 0 writes */
-+ dev_priv->saveCR[0x11] &= ~0x80;
-+
-+ /* Attribute controller registers */
-+ I915_READ8(st01);
-+ dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
-+ for (i = 0; i <= 0x14; i++)
-+ dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
-+ I915_READ8(st01);
-+ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
-+ I915_READ8(st01);
-+
-+ /* Graphics controller registers */
-+ for (i = 0; i < 9; i++)
-+ dev_priv->saveGR[i] =
-+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
-+
-+ dev_priv->saveGR[0x10] =
-+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
-+ dev_priv->saveGR[0x11] =
-+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
-+ dev_priv->saveGR[0x18] =
-+ i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-+
-+ /* Sequencer registers */
-+ for (i = 0; i < 8; i++)
-+ dev_priv->saveSR[i] =
-+ i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
-+}
-+
-+static void i915_restore_vga(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ int i;
-+ u16 cr_index, cr_data, st01;
-+
-+ /* MSR bits */
-+ I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
-+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-+ cr_index = VGA_CR_INDEX_CGA;
-+ cr_data = VGA_CR_DATA_CGA;
-+ st01 = VGA_ST01_CGA;
-+ } else {
-+ cr_index = VGA_CR_INDEX_MDA;
-+ cr_data = VGA_CR_DATA_MDA;
-+ st01 = VGA_ST01_MDA;
-+ }
-+
-+ /* Sequencer registers, don't write SR07 */
-+ for (i = 0; i < 7; i++)
-+ i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
-+ dev_priv->saveSR[i]);
-+
-+ /* CRT controller regs */
-+ /* Enable CR group 0 writes */
-+ i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
-+ for (i = 0; i <= 0x24; i++)
-+ i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
-+
-+ /* Graphics controller regs */
-+ for (i = 0; i < 9; i++)
-+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
-+ dev_priv->saveGR[i]);
-+
-+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
-+ dev_priv->saveGR[0x10]);
-+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
-+ dev_priv->saveGR[0x11]);
-+ i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
-+ dev_priv->saveGR[0x18]);
-+
-+ /* Attribute controller registers */
-+ I915_READ8(st01); /* switch back to index mode */
-+ for (i = 0; i <= 0x14; i++)
-+ i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
-+ I915_READ8(st01); /* switch back to index mode */
-+ I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
-+ I915_READ8(st01);
-+
-+ /* VGA color palette registers */
-+ I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
-+ /* DACCRX automatically increments during read */
-+ I915_WRITE8(VGA_DACWX, 0);
-+ /* Read 3 bytes of color data from each index */
-+ for (i = 0; i < 256 * 3; i++)
-+ I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
-+
-+}
-+
-+int i915_save_state(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ int i;
-+
-+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
-+
-+ /* Display arbitration control */
-+ dev_priv->saveDSPARB = I915_READ(DSPARB);
-+
-+ /* Pipe & plane A info */
-+ dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
-+ dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-+ dev_priv->saveFPA0 = I915_READ(FPA0);
-+ dev_priv->saveFPA1 = I915_READ(FPA1);
-+ dev_priv->saveDPLL_A = I915_READ(DPLL_A);
-+ if (IS_I965G(dev))
-+ dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
-+ dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
-+ dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
-+ dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
-+ dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
-+ dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
-+ dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-+ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
-+
-+ dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
-+ dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
-+ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
-+ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
-+ dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
-+ if (IS_I965G(dev)) {
-+ dev_priv->saveDSPASURF = I915_READ(DSPASURF);
-+ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
-+ }
-+ i915_save_palette(dev, PIPE_A);
-+ dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
-+
-+ /* Pipe & plane B info */
-+ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
-+ dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-+ dev_priv->saveFPB0 = I915_READ(FPB0);
-+ dev_priv->saveFPB1 = I915_READ(FPB1);
-+ dev_priv->saveDPLL_B = I915_READ(DPLL_B);
-+ if (IS_I965G(dev))
-+ dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
-+ dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
-+ dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
-+ dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
-+ dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
-+ dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
-+ dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-+ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
-+
-+ dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
-+ dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
-+ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
-+ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
-+ dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
-+ if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
-+ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
-+ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
-+ }
-+ i915_save_palette(dev, PIPE_B);
-+ dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
-+
-+ /* CRT state */
-+ dev_priv->saveADPA = I915_READ(ADPA);
-+
-+ /* LVDS state */
-+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-+ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-+ if (IS_I965G(dev))
-+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
-+ if (IS_MOBILE(dev) && !IS_I830(dev))
-+ dev_priv->saveLVDS = I915_READ(LVDS);
-+ if (!IS_I830(dev) && !IS_845G(dev))
-+ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-+ dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
-+ dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
-+ dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
-+
-+ /* FIXME: save TV & SDVO state */
-+
-+ /* FBC state */
-+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-+
-+ /* Interrupt state */
-+ dev_priv->saveIIR = I915_READ(IIR);
-+ dev_priv->saveIER = I915_READ(IER);
-+ dev_priv->saveIMR = I915_READ(IMR);
-+
-+ /* VGA state */
-+ dev_priv->saveVGA0 = I915_READ(VGA0);
-+ dev_priv->saveVGA1 = I915_READ(VGA1);
-+ dev_priv->saveVGA_PD = I915_READ(VGA_PD);
-+ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
-+
-+ /* Clock gating state */
-+ dev_priv->saveD_STATE = I915_READ(D_STATE);
-+ dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
-+
-+ /* Cache mode state */
-+ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-+
-+ /* Memory Arbitration state */
-+ dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-+
-+ /* Scratch space */
-+ for (i = 0; i < 16; i++) {
-+ dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
-+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
-+ }
-+ for (i = 0; i < 3; i++)
-+ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
-+
-+ i915_save_vga(dev);
-+
-+ return 0;
-+}
-+
-+int i915_restore_state(struct drm_device *dev)
-+{
-+ struct drm_i915_private *dev_priv = dev->dev_private;
-+ int i;
-+
-+ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
-+
-+ I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-+
-+ /* Pipe & plane A info */
-+ /* Prime the clock */
-+ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
-+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
-+ ~DPLL_VCO_ENABLE);
-+ DRM_UDELAY(150);
-+ }
-+ I915_WRITE(FPA0, dev_priv->saveFPA0);
-+ I915_WRITE(FPA1, dev_priv->saveFPA1);
-+ /* Actually enable it */
-+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
-+ DRM_UDELAY(150);
-+ if (IS_I965G(dev))
-+ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
-+ DRM_UDELAY(150);
-+
-+ /* Restore mode */
-+ I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
-+ I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
-+ I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
-+ I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
-+ I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
-+ I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-+ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-+
-+ /* Restore plane info */
-+ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
-+ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
-+ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
-+ I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
-+ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
-+ if (IS_I965G(dev)) {
-+ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
-+ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
-+ }
-+
-+ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
-+
-+ i915_restore_palette(dev, PIPE_A);
-+ /* Enable the plane */
-+ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
-+ I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
-+
-+ /* Pipe & plane B info */
-+ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
-+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
-+ ~DPLL_VCO_ENABLE);
-+ DRM_UDELAY(150);
-+ }
-+ I915_WRITE(FPB0, dev_priv->saveFPB0);
-+ I915_WRITE(FPB1, dev_priv->saveFPB1);
-+ /* Actually enable it */
-+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
-+ DRM_UDELAY(150);
-+ if (IS_I965G(dev))
-+ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
-+ DRM_UDELAY(150);
-+
-+ /* Restore mode */
-+ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
-+ I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
-+ I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
-+ I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
-+ I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
-+ I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-+ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
-+
-+ /* Restore plane info */
-+ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
-+ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
-+ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
-+ I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
-+ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
-+ if (IS_I965G(dev)) {
-+ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
-+ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
-+ }
-+
-+ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
-+
-+ i915_restore_palette(dev, PIPE_B);
-+ /* Enable the plane */
-+ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
-+ I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
-+
-+ /* CRT state */
-+ I915_WRITE(ADPA, dev_priv->saveADPA);
-+
-+ /* LVDS state */
-+ if (IS_I965G(dev))
-+ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
-+ if (IS_MOBILE(dev) && !IS_I830(dev))
-+ I915_WRITE(LVDS, dev_priv->saveLVDS);
-+ if (!IS_I830(dev) && !IS_845G(dev))
-+ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
-+
-+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
-+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-+ I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
-+ I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
-+ I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
-+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
-+
-+ /* FIXME: restore TV & SDVO state */
-+
-+ /* FBC info */
-+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
-+
-+ /* VGA state */
-+ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
-+ I915_WRITE(VGA0, dev_priv->saveVGA0);
-+ I915_WRITE(VGA1, dev_priv->saveVGA1);
-+ I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
-+ DRM_UDELAY(150);
-+
-+ /* Clock gating state */
-+ I915_WRITE (D_STATE, dev_priv->saveD_STATE);
-+ I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
-+
-+ /* Cache mode state */
-+ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
-+
-+ /* Memory arbitration state */
-+ I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
-+
-+ for (i = 0; i < 16; i++) {
-+ I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
-+ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
-+ }
-+ for (i = 0; i < 3; i++)
-+ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
-+
-+ i915_restore_vga(dev);
-+
-+ return 0;
-+}
-+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0011-drm-vblank-rework.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0011-drm-vblank-rework.patch
deleted file mode 100644
index 6161a71f0..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0011-drm-vblank-rework.patch
+++ /dev/null
@@ -1,1534 +0,0 @@
-commit 2aebb4e4e62d09b4a95be7be7c24a7f6528385b7
-Author: Jesse Barnes <jbarnes@virtuousgeek.org>
-Date: Tue Sep 30 12:14:26 2008 -0700
-
- drm: Rework vblank-wait handling to allow interrupt reduction.
-
- Previously, drivers supporting vblank interrupt waits would run the interrupt
- all the time, or all the time that any 3d client was running, preventing the
- CPU from sleeping for long when the system was otherwise idle. Now, interrupts
- are disabled any time that no client is waiting on a vblank event. The new
- method uses vblank counters on the chipsets when the interrupts are turned
- off, rather than counting interrupts, so that we can continue to present
- accurate vblank numbers.
-
- Co-author: Michel Dänzer <michel@tungstengraphics.com>
- Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
- Signed-off-by: Eric Anholt <eric@anholt.net>
- Signed-off-by: Dave Airlie <airlied@redhat.com>
-
-diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
-index 452c2d8..fb45fe7 100644
---- a/drivers/gpu/drm/drm_drv.c
-+++ b/drivers/gpu/drm/drm_drv.c
-@@ -116,6 +116,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
-
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-+
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- };
-
-diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
-index 61ed515..d0c13d9 100644
---- a/drivers/gpu/drm/drm_irq.c
-+++ b/drivers/gpu/drm/drm_irq.c
-@@ -71,19 +71,131 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
- return 0;
- }
-
-+static void vblank_disable_fn(unsigned long arg)
-+{
-+ struct drm_device *dev = (struct drm_device *)arg;
-+ unsigned long irqflags;
-+ int i;
-+
-+ if (!dev->vblank_disable_allowed)
-+ return;
-+
-+ for (i = 0; i < dev->num_crtcs; i++) {
-+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
-+ if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
-+ dev->vblank_enabled[i]) {
-+ DRM_DEBUG("disabling vblank on crtc %d\n", i);
-+ dev->last_vblank[i] =
-+ dev->driver->get_vblank_counter(dev, i);
-+ dev->driver->disable_vblank(dev, i);
-+ dev->vblank_enabled[i] = 0;
-+ }
-+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-+ }
-+}
-+
-+static void drm_vblank_cleanup(struct drm_device *dev)
-+{
-+ /* Bail if the driver didn't call drm_vblank_init() */
-+ if (dev->num_crtcs == 0)
-+ return;
-+
-+ del_timer(&dev->vblank_disable_timer);
-+
-+ vblank_disable_fn((unsigned long)dev);
-+
-+ drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
-+ DRM_MEM_DRIVER);
-+ drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
-+ DRM_MEM_DRIVER);
-+ drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
-+ dev->num_crtcs, DRM_MEM_DRIVER);
-+ drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
-+ dev->num_crtcs, DRM_MEM_DRIVER);
-+ drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
-+ dev->num_crtcs, DRM_MEM_DRIVER);
-+ drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
-+ DRM_MEM_DRIVER);
-+ drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
-+ dev->num_crtcs, DRM_MEM_DRIVER);
-+
-+ dev->num_crtcs = 0;
-+}
-+
-+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
-+{
-+ int i, ret = -ENOMEM;
-+
-+ setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
-+ (unsigned long)dev);
-+ spin_lock_init(&dev->vbl_lock);
-+ atomic_set(&dev->vbl_signal_pending, 0);
-+ dev->num_crtcs = num_crtcs;
-+
-+ dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
-+ DRM_MEM_DRIVER);
-+ if (!dev->vbl_queue)
-+ goto err;
-+
-+ dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
-+ DRM_MEM_DRIVER);
-+ if (!dev->vbl_sigs)
-+ goto err;
-+
-+ dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
-+ DRM_MEM_DRIVER);
-+ if (!dev->_vblank_count)
-+ goto err;
-+
-+ dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
-+ DRM_MEM_DRIVER);
-+ if (!dev->vblank_refcount)
-+ goto err;
-+
-+ dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
-+ DRM_MEM_DRIVER);
-+ if (!dev->vblank_enabled)
-+ goto err;
-+
-+ dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
-+ if (!dev->last_vblank)
-+ goto err;
-+
-+ dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
-+ DRM_MEM_DRIVER);
-+ if (!dev->vblank_inmodeset)
-+ goto err;
-+
-+ /* Zero per-crtc vblank stuff */
-+ for (i = 0; i < num_crtcs; i++) {
-+ init_waitqueue_head(&dev->vbl_queue[i]);
-+ INIT_LIST_HEAD(&dev->vbl_sigs[i]);
-+ atomic_set(&dev->_vblank_count[i], 0);
-+ atomic_set(&dev->vblank_refcount[i], 0);
-+ }
-+
-+ dev->vblank_disable_allowed = 0;
-+
-+ return 0;
-+
-+err:
-+ drm_vblank_cleanup(dev);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_vblank_init);
-+
- /**
- * Install IRQ handler.
- *
- * \param dev DRM device.
-- * \param irq IRQ number.
- *
-- * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
-+ * Initializes the IRQ related data. Installs the handler, calling the driver
- * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
- * before and after the installation.
- */
--static int drm_irq_install(struct drm_device * dev)
-+int drm_irq_install(struct drm_device *dev)
- {
-- int ret;
-+ int ret = 0;
- unsigned long sh_flags = 0;
-
- if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
-@@ -109,17 +221,6 @@ static int drm_irq_install(struct drm_device * dev)
-
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
-
-- if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
-- init_waitqueue_head(&dev->vbl_queue);
--
-- spin_lock_init(&dev->vbl_lock);
--
-- INIT_LIST_HEAD(&dev->vbl_sigs);
-- INIT_LIST_HEAD(&dev->vbl_sigs2);
--
-- dev->vbl_pending = 0;
-- }
--
- /* Before installing handler */
- dev->driver->irq_preinstall(dev);
-
-@@ -141,10 +242,16 @@ static int drm_irq_install(struct drm_device * dev)
- }
-
- /* After installing handler */
-- dev->driver->irq_postinstall(dev);
-+ ret = dev->driver->irq_postinstall(dev);
-+ if (ret < 0) {
-+ mutex_lock(&dev->struct_mutex);
-+ dev->irq_enabled = 0;
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-
-- return 0;
-+ return ret;
- }
-+EXPORT_SYMBOL(drm_irq_install);
-
- /**
- * Uninstall the IRQ handler.
-@@ -174,11 +281,12 @@ int drm_irq_uninstall(struct drm_device * dev)
-
- free_irq(dev->pdev->irq, dev);
-
-+ drm_vblank_cleanup(dev);
-+
- dev->locked_tasklet_func = NULL;
-
- return 0;
- }
--
- EXPORT_SYMBOL(drm_irq_uninstall);
-
- /**
-@@ -218,6 +326,174 @@ int drm_control(struct drm_device *dev, void *data,
- }
-
- /**
-+ * drm_vblank_count - retrieve "cooked" vblank counter value
-+ * @dev: DRM device
-+ * @crtc: which counter to retrieve
-+ *
-+ * Fetches the "cooked" vblank count value that represents the number of
-+ * vblank events since the system was booted, including lost events due to
-+ * modesetting activity.
-+ */
-+u32 drm_vblank_count(struct drm_device *dev, int crtc)
-+{
-+ return atomic_read(&dev->_vblank_count[crtc]);
-+}
-+EXPORT_SYMBOL(drm_vblank_count);
-+
-+/**
-+ * drm_update_vblank_count - update the master vblank counter
-+ * @dev: DRM device
-+ * @crtc: counter to update
-+ *
-+ * Call back into the driver to update the appropriate vblank counter
-+ * (specified by @crtc). Deal with wraparound, if it occurred, and
-+ * update the last read value so we can deal with wraparound on the next
-+ * call if necessary.
-+ *
-+ * Only necessary when going from off->on, to account for frames we
-+ * didn't get an interrupt for.
-+ *
-+ * Note: caller must hold dev->vbl_lock since this reads & writes
-+ * device vblank fields.
-+ */
-+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
-+{
-+ u32 cur_vblank, diff;
-+
-+ /*
-+ * Interrupts were disabled prior to this call, so deal with counter
-+ * wrap if needed.
-+ * NOTE! It's possible we lost a full dev->max_vblank_count events
-+ * here if the register is small or we had vblank interrupts off for
-+ * a long time.
-+ */
-+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
-+ diff = cur_vblank - dev->last_vblank[crtc];
-+ if (cur_vblank < dev->last_vblank[crtc]) {
-+ diff += dev->max_vblank_count;
-+
-+ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-+ crtc, dev->last_vblank[crtc], cur_vblank, diff);
-+ }
-+
-+ DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
-+ crtc, diff);
-+
-+ atomic_add(diff, &dev->_vblank_count[crtc]);
-+}
-+
-+/**
-+ * drm_vblank_get - get a reference count on vblank events
-+ * @dev: DRM device
-+ * @crtc: which CRTC to own
-+ *
-+ * Acquire a reference count on vblank events to avoid having them disabled
-+ * while in use.
-+ *
-+ * RETURNS
-+ * Zero on success, nonzero on failure.
-+ */
-+int drm_vblank_get(struct drm_device *dev, int crtc)
-+{
-+ unsigned long irqflags;
-+ int ret = 0;
-+
-+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
-+ /* Going from 0->1 means we have to enable interrupts again */
-+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
-+ !dev->vblank_enabled[crtc]) {
-+ ret = dev->driver->enable_vblank(dev, crtc);
-+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
-+ if (ret)
-+ atomic_dec(&dev->vblank_refcount[crtc]);
-+ else {
-+ dev->vblank_enabled[crtc] = 1;
-+ drm_update_vblank_count(dev, crtc);
-+ }
-+ }
-+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_vblank_get);
-+
-+/**
-+ * drm_vblank_put - give up ownership of vblank events
-+ * @dev: DRM device
-+ * @crtc: which counter to give up
-+ *
-+ * Release ownership of a given vblank counter, turning off interrupts
-+ * if possible.
-+ */
-+void drm_vblank_put(struct drm_device *dev, int crtc)
-+{
-+ /* Last user schedules interrupt disable */
-+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
-+ mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
-+}
-+EXPORT_SYMBOL(drm_vblank_put);
-+
-+/**
-+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
-+ * @DRM_IOCTL_ARGS: standard ioctl arguments
-+ *
-+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
-+ * ioctls around modesetting so that any lost vblank events are accounted for.
-+ *
-+ * Generally the counter will reset across mode sets. If interrupts are
-+ * enabled around this call, we don't have to do anything since the counter
-+ * will have already been incremented.
-+ */
-+int drm_modeset_ctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_modeset_ctl *modeset = data;
-+ unsigned long irqflags;
-+ int crtc, ret = 0;
-+
-+ /* If drm_vblank_init() hasn't been called yet, just no-op */
-+ if (!dev->num_crtcs)
-+ goto out;
-+
-+ crtc = modeset->crtc;
-+ if (crtc >= dev->num_crtcs) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ /*
-+ * To avoid all the problems that might happen if interrupts
-+ * were enabled/disabled around or between these calls, we just
-+ * have the kernel take a reference on the CRTC (just once though
-+ * to avoid corrupting the count if multiple, mismatch calls occur),
-+ * so that interrupts remain enabled in the interim.
-+ */
-+ switch (modeset->cmd) {
-+ case _DRM_PRE_MODESET:
-+ if (!dev->vblank_inmodeset[crtc]) {
-+ dev->vblank_inmodeset[crtc] = 1;
-+ drm_vblank_get(dev, crtc);
-+ }
-+ break;
-+ case _DRM_POST_MODESET:
-+ if (dev->vblank_inmodeset[crtc]) {
-+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
-+ dev->vblank_disable_allowed = 1;
-+ dev->vblank_inmodeset[crtc] = 0;
-+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-+ drm_vblank_put(dev, crtc);
-+ }
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+out:
-+ return ret;
-+}
-+
-+/**
- * Wait for VBLANK.
- *
- * \param inode device inode.
-@@ -236,12 +512,12 @@ int drm_control(struct drm_device *dev, void *data,
- *
- * If a signal is not requested, then calls vblank_wait().
- */
--int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+int drm_wait_vblank(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
- {
- union drm_wait_vblank *vblwait = data;
-- struct timeval now;
- int ret = 0;
-- unsigned int flags, seq;
-+ unsigned int flags, seq, crtc;
-
- if ((!dev->pdev->irq) || (!dev->irq_enabled))
- return -EINVAL;
-@@ -255,13 +531,17 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
- }
-
- flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
-+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-
-- if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
-- DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
-+ if (crtc >= dev->num_crtcs)
- return -EINVAL;
-
-- seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
-- : &dev->vbl_received);
-+ ret = drm_vblank_get(dev, crtc);
-+ if (ret) {
-+ DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
-+ return ret;
-+ }
-+ seq = drm_vblank_count(dev, crtc);
-
- switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
- case _DRM_VBLANK_RELATIVE:
-@@ -270,7 +550,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
- case _DRM_VBLANK_ABSOLUTE:
- break;
- default:
-- return -EINVAL;
-+ ret = -EINVAL;
-+ goto done;
- }
-
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
-@@ -280,8 +561,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
-
- if (flags & _DRM_VBLANK_SIGNAL) {
- unsigned long irqflags;
-- struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
-- ? &dev->vbl_sigs2 : &dev->vbl_sigs;
-+ struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
- struct drm_vbl_sig *vbl_sig;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
-@@ -302,22 +582,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
- }
- }
-
-- if (dev->vbl_pending >= 100) {
-+ if (atomic_read(&dev->vbl_signal_pending) >= 100) {
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-- return -EBUSY;
-+ ret = -EBUSY;
-+ goto done;
- }
-
-- dev->vbl_pending++;
--
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
-- if (!
-- (vbl_sig =
-- drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
-- return -ENOMEM;
-+ vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
-+ DRM_MEM_DRIVER);
-+ if (!vbl_sig) {
-+ ret = -ENOMEM;
-+ goto done;
-+ }
-+
-+ ret = drm_vblank_get(dev, crtc);
-+ if (ret) {
-+ drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
-+ DRM_MEM_DRIVER);
-+ return ret;
- }
-
-- memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
-+ atomic_inc(&dev->vbl_signal_pending);
-
- vbl_sig->sequence = vblwait->request.sequence;
- vbl_sig->info.si_signo = vblwait->request.signal;
-@@ -331,20 +618,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
-
- vblwait->reply.sequence = seq;
- } else {
-- if (flags & _DRM_VBLANK_SECONDARY) {
-- if (dev->driver->vblank_wait2)
-- ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
-- } else if (dev->driver->vblank_wait)
-- ret =
-- dev->driver->vblank_wait(dev,
-- &vblwait->request.sequence);
--
-- do_gettimeofday(&now);
-- vblwait->reply.tval_sec = now.tv_sec;
-- vblwait->reply.tval_usec = now.tv_usec;
-+ DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
-+ vblwait->request.sequence, crtc);
-+ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
-+ ((drm_vblank_count(dev, crtc)
-+ - vblwait->request.sequence) <= (1 << 23)));
-+
-+ if (ret != -EINTR) {
-+ struct timeval now;
-+
-+ do_gettimeofday(&now);
-+
-+ vblwait->reply.tval_sec = now.tv_sec;
-+ vblwait->reply.tval_usec = now.tv_usec;
-+ vblwait->reply.sequence = drm_vblank_count(dev, crtc);
-+ DRM_DEBUG("returning %d to client\n",
-+ vblwait->reply.sequence);
-+ } else {
-+ DRM_DEBUG("vblank wait interrupted by signal\n");
-+ }
- }
-
-- done:
-+done:
-+ drm_vblank_put(dev, crtc);
- return ret;
- }
-
-@@ -352,44 +648,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
- * Send the VBLANK signals.
- *
- * \param dev DRM device.
-+ * \param crtc CRTC where the vblank event occurred
- *
- * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
- *
- * If a signal is not requested, then calls vblank_wait().
- */
--void drm_vbl_send_signals(struct drm_device * dev)
-+static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
- {
-+ struct drm_vbl_sig *vbl_sig, *tmp;
-+ struct list_head *vbl_sigs;
-+ unsigned int vbl_seq;
- unsigned long flags;
-- int i;
-
- spin_lock_irqsave(&dev->vbl_lock, flags);
-
-- for (i = 0; i < 2; i++) {
-- struct drm_vbl_sig *vbl_sig, *tmp;
-- struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
-- unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
-- &dev->vbl_received);
-+ vbl_sigs = &dev->vbl_sigs[crtc];
-+ vbl_seq = drm_vblank_count(dev, crtc);
-
-- list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
-- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-- vbl_sig->info.si_code = vbl_seq;
-- send_sig_info(vbl_sig->info.si_signo,
-- &vbl_sig->info, vbl_sig->task);
-+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
-+ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-+ vbl_sig->info.si_code = vbl_seq;
-+ send_sig_info(vbl_sig->info.si_signo,
-+ &vbl_sig->info, vbl_sig->task);
-
-- list_del(&vbl_sig->head);
--
-- drm_free(vbl_sig, sizeof(*vbl_sig),
-- DRM_MEM_DRIVER);
-+ list_del(&vbl_sig->head);
-
-- dev->vbl_pending--;
-- }
-- }
-+ drm_free(vbl_sig, sizeof(*vbl_sig),
-+ DRM_MEM_DRIVER);
-+ atomic_dec(&dev->vbl_signal_pending);
-+ drm_vblank_put(dev, crtc);
-+ }
- }
-
- spin_unlock_irqrestore(&dev->vbl_lock, flags);
- }
-
--EXPORT_SYMBOL(drm_vbl_send_signals);
-+/**
-+ * drm_handle_vblank - handle a vblank event
-+ * @dev: DRM device
-+ * @crtc: where this event occurred
-+ *
-+ * Drivers should call this routine in their vblank interrupt handlers to
-+ * update the vblank counter and send any signals that may be pending.
-+ */
-+void drm_handle_vblank(struct drm_device *dev, int crtc)
-+{
-+ atomic_inc(&dev->_vblank_count[crtc]);
-+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
-+ drm_vbl_send_signals(dev, crtc);
-+}
-+EXPORT_SYMBOL(drm_handle_vblank);
-
- /**
- * Tasklet wrapper function.
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index cead62f..8609ec2 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -673,7 +673,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
-
- switch (param->param) {
- case I915_PARAM_IRQ_ACTIVE:
-- value = dev->irq_enabled;
-+ value = dev->pdev->irq ? 1 : 0;
- break;
- case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->allow_batchbuffer ? 1 : 0;
-@@ -808,7 +808,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- * and the registers being closely associated.
- */
- if (!IS_I945G(dev) && !IS_I945GM(dev))
-- pci_enable_msi(dev->pdev);
-+ if (pci_enable_msi(dev->pdev))
-+ DRM_ERROR("failed to enable MSI\n");
-
- intel_opregion_init(dev);
-
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index eff66ed..37af03f 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -85,10 +85,8 @@ static struct drm_driver driver = {
- /* don't use mtrr's here, the Xserver or user space app should
- * deal with them for intel hardware.
- */
-- .driver_features =
-- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
-- DRIVER_IRQ_VBL2,
-+ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
-+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
- .load = i915_driver_load,
- .unload = i915_driver_unload,
- .lastclose = i915_driver_lastclose,
-@@ -96,8 +94,9 @@ static struct drm_driver driver = {
- .suspend = i915_suspend,
- .resume = i915_resume,
- .device_is_agp = i915_driver_device_is_agp,
-- .vblank_wait = i915_driver_vblank_wait,
-- .vblank_wait2 = i915_driver_vblank_wait2,
-+ .get_vblank_counter = i915_get_vblank_counter,
-+ .enable_vblank = i915_enable_vblank,
-+ .disable_vblank = i915_disable_vblank,
- .irq_preinstall = i915_driver_irq_preinstall,
- .irq_postinstall = i915_driver_irq_postinstall,
- .irq_uninstall = i915_driver_irq_uninstall,
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 71326ca..d1a02be 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -83,10 +83,15 @@ struct mem_block {
- typedef struct _drm_i915_vbl_swap {
- struct list_head head;
- drm_drawable_t drw_id;
-- unsigned int pipe;
-+ unsigned int plane;
- unsigned int sequence;
- } drm_i915_vbl_swap_t;
-
-+struct opregion_header;
-+struct opregion_acpi;
-+struct opregion_swsci;
-+struct opregion_asle;
-+
- struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
-@@ -105,7 +110,7 @@ typedef struct drm_i915_private {
- drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
- dma_addr_t dma_status_page;
-- unsigned long counter;
-+ uint32_t counter;
- unsigned int status_gfx_addr;
- drm_local_map_t hws_map;
-
-@@ -247,16 +252,17 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
- extern int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
--extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
--extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
- extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
- extern void i915_driver_irq_preinstall(struct drm_device * dev);
--extern void i915_driver_irq_postinstall(struct drm_device * dev);
-+extern int i915_driver_irq_postinstall(struct drm_device *dev);
- extern void i915_driver_irq_uninstall(struct drm_device * dev);
- extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-+extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-+extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-+extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
- extern int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-@@ -278,6 +284,10 @@ extern void i915_mem_release(struct drm_device * dev,
- extern int i915_save_state(struct drm_device *dev);
- extern int i915_restore_state(struct drm_device *dev);
-
-+/* i915_suspend.c */
-+extern int i915_save_state(struct drm_device *dev);
-+extern int i915_restore_state(struct drm_device *dev);
-+
- /* i915_opregion.c */
- extern int intel_opregion_init(struct drm_device *dev);
- extern void intel_opregion_free(struct drm_device *dev);
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index ae7d3a8..f875959 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -35,9 +35,8 @@
-
- /** These are the interrupts used by the driver */
- #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
-- I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
-- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \
- I915_ASLE_INTERRUPT | \
-+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
-
- void
-@@ -61,6 +60,64 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
- }
-
- /**
-+ * i915_get_pipe - return the the pipe associated with a given plane
-+ * @dev: DRM device
-+ * @plane: plane to look for
-+ *
-+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
-+ * rather than a pipe number, since they may not always be equal. This routine
-+ * maps the given @plane back to a pipe number.
-+ */
-+static int
-+i915_get_pipe(struct drm_device *dev, int plane)
-+{
-+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ u32 dspcntr;
-+
-+ dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
-+
-+ return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
-+}
-+
-+/**
-+ * i915_get_plane - return the the plane associated with a given pipe
-+ * @dev: DRM device
-+ * @pipe: pipe to look for
-+ *
-+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
-+ * rather than a plane number, since they may not always be equal. This routine
-+ * maps the given @pipe back to a plane number.
-+ */
-+static int
-+i915_get_plane(struct drm_device *dev, int pipe)
-+{
-+ if (i915_get_pipe(dev, 0) == pipe)
-+ return 0;
-+ return 1;
-+}
-+
-+/**
-+ * i915_pipe_enabled - check if a pipe is enabled
-+ * @dev: DRM device
-+ * @pipe: pipe to check
-+ *
-+ * Reading certain registers when the pipe is disabled can hang the chip.
-+ * Use this routine to make sure the PLL is running and the pipe is active
-+ * before reading such registers if unsure.
-+ */
-+static int
-+i915_pipe_enabled(struct drm_device *dev, int pipe)
-+{
-+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-+
-+ if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/**
- * Emit blits for scheduled buffer swaps.
- *
- * This function will be called with the HW lock held.
-@@ -71,8 +128,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
- unsigned long irqflags;
- struct list_head *list, *tmp, hits, *hit;
- int nhits, nrects, slice[2], upper[2], lower[2], i;
-- unsigned counter[2] = { atomic_read(&dev->vbl_received),
-- atomic_read(&dev->vbl_received2) };
-+ unsigned counter[2];
- struct drm_drawable_info *drw;
- drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
- u32 cpp = dev_priv->cpp;
-@@ -94,6 +150,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
- src_pitch >>= 2;
- }
-
-+ counter[0] = drm_vblank_count(dev, 0);
-+ counter[1] = drm_vblank_count(dev, 1);
-+
- DRM_DEBUG("\n");
-
- INIT_LIST_HEAD(&hits);
-@@ -106,12 +165,14 @@ static void i915_vblank_tasklet(struct drm_device *dev)
- list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
- drm_i915_vbl_swap_t *vbl_swap =
- list_entry(list, drm_i915_vbl_swap_t, head);
-+ int pipe = i915_get_pipe(dev, vbl_swap->plane);
-
-- if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
-+ if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
- continue;
-
- list_del(list);
- dev_priv->swaps_pending--;
-+ drm_vblank_put(dev, pipe);
-
- spin_unlock(&dev_priv->swaps_lock);
- spin_lock(&dev->drw_lock);
-@@ -204,7 +265,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
- drm_i915_vbl_swap_t *swap_hit =
- list_entry(hit, drm_i915_vbl_swap_t, head);
- struct drm_clip_rect *rect;
-- int num_rects, pipe;
-+ int num_rects, plane;
- unsigned short top, bottom;
-
- drw = drm_get_drawable_info(dev, swap_hit->drw_id);
-@@ -213,9 +274,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
- continue;
-
- rect = drw->rects;
-- pipe = swap_hit->pipe;
-- top = upper[pipe];
-- bottom = lower[pipe];
-+ plane = swap_hit->plane;
-+ top = upper[plane];
-+ bottom = lower[plane];
-
- for (num_rects = drw->num_rects; num_rects--; rect++) {
- int y1 = max(rect->y1, top);
-@@ -252,22 +313,54 @@ static void i915_vblank_tasklet(struct drm_device *dev)
- }
- }
-
-+u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
-+{
-+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ unsigned long high_frame;
-+ unsigned long low_frame;
-+ u32 high1, high2, low, count;
-+ int pipe;
-+
-+ pipe = i915_get_pipe(dev, plane);
-+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
-+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
-+
-+ if (!i915_pipe_enabled(dev, pipe)) {
-+ DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
-+ return 0;
-+ }
-+
-+ /*
-+ * High & low register fields aren't synchronized, so make sure
-+ * we get a low value that's stable across two reads of the high
-+ * register.
-+ */
-+ do {
-+ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-+ PIPE_FRAME_HIGH_SHIFT);
-+ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
-+ PIPE_FRAME_LOW_SHIFT);
-+ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-+ PIPE_FRAME_HIGH_SHIFT);
-+ } while (high1 != high2);
-+
-+ count = (high1 << 8) | low;
-+
-+ return count;
-+}
-+
- irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- {
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-- u32 pipea_stats, pipeb_stats;
- u32 iir;
--
-- pipea_stats = I915_READ(PIPEASTAT);
-- pipeb_stats = I915_READ(PIPEBSTAT);
-+ u32 pipea_stats, pipeb_stats;
-+ int vblank = 0;
-
- if (dev->pdev->msi_enabled)
- I915_WRITE(IMR, ~0);
- iir = I915_READ(IIR);
-
-- DRM_DEBUG("iir=%08x\n", iir);
--
- if (iir == 0) {
- if (dev->pdev->msi_enabled) {
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
-@@ -276,48 +369,56 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- return IRQ_NONE;
- }
-
-- I915_WRITE(PIPEASTAT, pipea_stats);
-- I915_WRITE(PIPEBSTAT, pipeb_stats);
--
-- I915_WRITE(IIR, iir);
-- if (dev->pdev->msi_enabled)
-- I915_WRITE(IMR, dev_priv->irq_mask_reg);
-- (void) I915_READ(IIR); /* Flush posted writes */
--
-- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
--
-- if (iir & I915_USER_INTERRUPT)
-- DRM_WAKEUP(&dev_priv->irq_queue);
--
-- if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-- I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
-- int vblank_pipe = dev_priv->vblank_pipe;
--
-- if ((vblank_pipe &
-- (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
-- == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
-- if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
-- atomic_inc(&dev->vbl_received);
-- if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
-- atomic_inc(&dev->vbl_received2);
-- } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
-- (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
-- ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
-- (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
-- atomic_inc(&dev->vbl_received);
-+ /*
-+ * Clear the PIPE(A|B)STAT regs before the IIR otherwise
-+ * we may get extra interrupts.
-+ */
-+ if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
-+ pipea_stats = I915_READ(PIPEASTAT);
-+ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
-+ pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
-+ PIPE_VBLANK_INTERRUPT_ENABLE);
-+ else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
-+ PIPE_VBLANK_INTERRUPT_STATUS)) {
-+ vblank++;
-+ drm_handle_vblank(dev, i915_get_plane(dev, 0));
-+ }
-
-- DRM_WAKEUP(&dev->vbl_queue);
-- drm_vbl_send_signals(dev);
-+ I915_WRITE(PIPEASTAT, pipea_stats);
-+ }
-+ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
-+ pipeb_stats = I915_READ(PIPEBSTAT);
-+ /* Ack the event */
-+ I915_WRITE(PIPEBSTAT, pipeb_stats);
-+
-+ /* The vblank interrupt gets enabled even if we didn't ask for
-+ it, so make sure it's shut down again */
-+ if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
-+ pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
-+ PIPE_VBLANK_INTERRUPT_ENABLE);
-+ else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
-+ PIPE_VBLANK_INTERRUPT_STATUS)) {
-+ vblank++;
-+ drm_handle_vblank(dev, i915_get_plane(dev, 1));
-+ }
-
-- if (dev_priv->swaps_pending > 0)
-- drm_locked_tasklet(dev, i915_vblank_tasklet);
-+ if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
-+ opregion_asle_intr(dev);
-+ I915_WRITE(PIPEBSTAT, pipeb_stats);
- }
-
- if (iir & I915_ASLE_INTERRUPT)
- opregion_asle_intr(dev);
-
-- if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
-- opregion_asle_intr(dev);
-+ dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-+
-+ if (dev->pdev->msi_enabled)
-+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
-+ I915_WRITE(IIR, iir);
-+ (void) I915_READ(IIR);
-+
-+ if (vblank && dev_priv->swaps_pending > 0)
-+ drm_locked_tasklet(dev, i915_vblank_tasklet);
-
- return IRQ_HANDLED;
- }
-@@ -358,7 +459,7 @@ static void i915_user_irq_get(struct drm_device *dev)
- spin_unlock(&dev_priv->user_irq_lock);
- }
-
--static void i915_user_irq_put(struct drm_device *dev)
-+void i915_user_irq_put(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-@@ -395,41 +496,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
- }
-
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-- return ret;
--}
--
--static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
-- atomic_t *counter)
--{
-- drm_i915_private_t *dev_priv = dev->dev_private;
-- unsigned int cur_vblank;
-- int ret = 0;
--
-- if (!dev_priv) {
-- DRM_ERROR("called with no initialization\n");
-- return -EINVAL;
-- }
--
-- DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-- (((cur_vblank = atomic_read(counter))
-- - *sequence) <= (1<<23)));
--
-- *sequence = cur_vblank;
-
- return ret;
- }
-
--
--int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
--{
-- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
--}
--
--int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
--{
-- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
--}
--
- /* Needs the lock as it touches the ring.
- */
- int i915_irq_emit(struct drm_device *dev, void *data,
-@@ -472,40 +542,88 @@ int i915_irq_wait(struct drm_device *dev, void *data,
- return i915_wait_irq(dev, irqwait->irq_seq);
- }
-
-+int i915_enable_vblank(struct drm_device *dev, int plane)
-+{
-+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ int pipe = i915_get_pipe(dev, plane);
-+ u32 pipestat_reg = 0;
-+ u32 pipestat;
-+
-+ switch (pipe) {
-+ case 0:
-+ pipestat_reg = PIPEASTAT;
-+ i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
-+ break;
-+ case 1:
-+ pipestat_reg = PIPEBSTAT;
-+ i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
-+ break;
-+ default:
-+ DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
-+ pipe);
-+ break;
-+ }
-+
-+ if (pipestat_reg) {
-+ pipestat = I915_READ(pipestat_reg);
-+ if (IS_I965G(dev))
-+ pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
-+ else
-+ pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
-+ /* Clear any stale interrupt status */
-+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
-+ PIPE_VBLANK_INTERRUPT_STATUS);
-+ I915_WRITE(pipestat_reg, pipestat);
-+ }
-+
-+ return 0;
-+}
-+
-+void i915_disable_vblank(struct drm_device *dev, int plane)
-+{
-+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ int pipe = i915_get_pipe(dev, plane);
-+ u32 pipestat_reg = 0;
-+ u32 pipestat;
-+
-+ switch (pipe) {
-+ case 0:
-+ pipestat_reg = PIPEASTAT;
-+ i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
-+ break;
-+ case 1:
-+ pipestat_reg = PIPEBSTAT;
-+ i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
-+ break;
-+ default:
-+ DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
-+ pipe);
-+ break;
-+ }
-+
-+ if (pipestat_reg) {
-+ pipestat = I915_READ(pipestat_reg);
-+ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
-+ PIPE_VBLANK_INTERRUPT_ENABLE);
-+ /* Clear any stale interrupt status */
-+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
-+ PIPE_VBLANK_INTERRUPT_STATUS);
-+ I915_WRITE(pipestat_reg, pipestat);
-+ }
-+}
-+
- /* Set the vblank monitor pipe
- */
- int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-- drm_i915_vblank_pipe_t *pipe = data;
-- u32 enable_mask = 0, disable_mask = 0;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
-- if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
-- DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
-- return -EINVAL;
-- }
--
-- if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
-- enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-- else
-- disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
--
-- if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
-- enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-- else
-- disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
--
-- i915_enable_irq(dev_priv, enable_mask);
-- i915_disable_irq(dev_priv, disable_mask);
--
-- dev_priv->vblank_pipe = pipe->pipe;
--
- return 0;
- }
-
-@@ -514,19 +632,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
-- u16 flag;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
-- flag = I915_READ(IMR);
-- pipe->pipe = 0;
-- if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
-- pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
-- if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
-- pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
-+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- return 0;
- }
-@@ -540,9 +652,10 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_swap_t *swap = data;
- drm_i915_vbl_swap_t *vbl_swap;
-- unsigned int pipe, seqtype, curseq;
-+ unsigned int pipe, seqtype, curseq, plane;
- unsigned long irqflags;
- struct list_head *list;
-+ int ret;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
-@@ -560,7 +673,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
- return -EINVAL;
- }
-
-- pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
-+ plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
-+ pipe = i915_get_pipe(dev, plane);
-
- seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
-
-@@ -579,7 +693,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
-- curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
-+ /*
-+ * We take the ref here and put it when the swap actually completes
-+ * in the tasklet.
-+ */
-+ ret = drm_vblank_get(dev, pipe);
-+ if (ret)
-+ return ret;
-+ curseq = drm_vblank_count(dev, pipe);
-
- if (seqtype == _DRM_VBLANK_RELATIVE)
- swap->sequence += curseq;
-@@ -589,6 +710,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
- swap->sequence = curseq + 1;
- } else {
- DRM_DEBUG("Missed target sequence\n");
-+ drm_vblank_put(dev, pipe);
- return -EINVAL;
- }
- }
-@@ -599,7 +721,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
- vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
-
- if (vbl_swap->drw_id == swap->drawable &&
-- vbl_swap->pipe == pipe &&
-+ vbl_swap->plane == plane &&
- vbl_swap->sequence == swap->sequence) {
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
- DRM_DEBUG("Already scheduled\n");
-@@ -611,6 +733,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
-
- if (dev_priv->swaps_pending >= 100) {
- DRM_DEBUG("Too many swaps queued\n");
-+ drm_vblank_put(dev, pipe);
- return -EBUSY;
- }
-
-@@ -618,13 +741,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
-
- if (!vbl_swap) {
- DRM_ERROR("Failed to allocate memory to queue swap\n");
-+ drm_vblank_put(dev, pipe);
- return -ENOMEM;
- }
-
- DRM_DEBUG("\n");
-
- vbl_swap->drw_id = swap->drawable;
-- vbl_swap->pipe = pipe;
-+ vbl_swap->plane = plane;
- vbl_swap->sequence = swap->sequence;
-
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-@@ -643,28 +767,32 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-- I915_WRITE(HWSTAM, 0xfffe);
-- I915_WRITE(IMR, 0x0);
-+ I915_WRITE(HWSTAM, 0xeffe);
-+ I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
- }
-
--void i915_driver_irq_postinstall(struct drm_device * dev)
-+int i915_driver_irq_postinstall(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ int ret, num_pipes = 2;
-
- spin_lock_init(&dev_priv->swaps_lock);
- INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
- dev_priv->swaps_pending = 0;
-
-- if (!dev_priv->vblank_pipe)
-- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
--
- /* Set initial unmasked IRQs to just the selected vblank pipes. */
- dev_priv->irq_mask_reg = ~0;
-- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-- if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-+
-+ ret = drm_vblank_init(dev, num_pipes);
-+ if (ret)
-+ return ret;
-+
-+ dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-+
-+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-
- dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
-
-@@ -673,22 +801,29 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
- (void) I915_READ(IER);
-
- opregion_enable_asle(dev);
--
- DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
-+
-+ return 0;
- }
-
- void i915_driver_irq_uninstall(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-- u16 temp;
-+ u32 temp;
-
- if (!dev_priv)
- return;
-
-- I915_WRITE(HWSTAM, 0xffff);
-- I915_WRITE(IMR, 0xffff);
-+ dev_priv->vblank_pipe = 0;
-+
-+ I915_WRITE(HWSTAM, 0xffffffff);
-+ I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
-
-+ temp = I915_READ(PIPEASTAT);
-+ I915_WRITE(PIPEASTAT, temp);
-+ temp = I915_READ(PIPEBSTAT);
-+ I915_WRITE(PIPEBSTAT, temp);
- temp = I915_READ(IIR);
- I915_WRITE(IIR, temp);
- }
-diff --git a/include/drm/drm.h b/include/drm/drm.h
-index 0864c69..15e5503 100644
---- a/include/drm/drm.h
-+++ b/include/drm/drm.h
-@@ -454,6 +454,7 @@ struct drm_irq_busid {
- enum drm_vblank_seq_type {
- _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
- _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
-+ _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
- _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
- _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
- _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
-@@ -486,6 +487,19 @@ union drm_wait_vblank {
- struct drm_wait_vblank_reply reply;
- };
-
-+#define _DRM_PRE_MODESET 1
-+#define _DRM_POST_MODESET 2
-+
-+/**
-+ * DRM_IOCTL_MODESET_CTL ioctl argument type
-+ *
-+ * \sa drmModesetCtl().
-+ */
-+struct drm_modeset_ctl {
-+ uint32_t crtc;
-+ uint32_t cmd;
-+};
-+
- /**
- * DRM_IOCTL_AGP_ENABLE ioctl argument type.
- *
-@@ -570,6 +584,7 @@ struct drm_set_version {
- #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
- #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
- #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
-+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
-
- #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
- #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
-diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index 1c1b13e..e79ce07 100644
---- a/include/drm/drmP.h
-+++ b/include/drm/drmP.h
-@@ -580,11 +580,54 @@ struct drm_driver {
- int (*kernel_context_switch) (struct drm_device *dev, int old,
- int new);
- void (*kernel_context_switch_unlock) (struct drm_device *dev);
-- int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
-- int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
- int (*dri_library_name) (struct drm_device *dev, char *buf);
-
- /**
-+ * get_vblank_counter - get raw hardware vblank counter
-+ * @dev: DRM device
-+ * @crtc: counter to fetch
-+ *
-+ * Driver callback for fetching a raw hardware vblank counter
-+ * for @crtc. If a device doesn't have a hardware counter, the
-+ * driver can simply return the value of drm_vblank_count and
-+ * make the enable_vblank() and disable_vblank() hooks into no-ops,
-+ * leaving interrupts enabled at all times.
-+ *
-+ * Wraparound handling and loss of events due to modesetting is dealt
-+ * with in the DRM core code.
-+ *
-+ * RETURNS
-+ * Raw vblank counter value.
-+ */
-+ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
-+
-+ /**
-+ * enable_vblank - enable vblank interrupt events
-+ * @dev: DRM device
-+ * @crtc: which irq to enable
-+ *
-+ * Enable vblank interrupts for @crtc. If the device doesn't have
-+ * a hardware vblank counter, this routine should be a no-op, since
-+ * interrupts will have to stay on to keep the count accurate.
-+ *
-+ * RETURNS
-+ * Zero on success, appropriate errno if the given @crtc's vblank
-+ * interrupt cannot be enabled.
-+ */
-+ int (*enable_vblank) (struct drm_device *dev, int crtc);
-+
-+ /**
-+ * disable_vblank - disable vblank interrupt events
-+ * @dev: DRM device
-+ * @crtc: which irq to enable
-+ *
-+ * Disable vblank interrupts for @crtc. If the device doesn't have
-+ * a hardware vblank counter, this routine should be a no-op, since
-+ * interrupts will have to stay on to keep the count accurate.
-+ */
-+ void (*disable_vblank) (struct drm_device *dev, int crtc);
-+
-+ /**
- * Called by \c drm_device_is_agp. Typically used to determine if a
- * card is really attached to AGP or not.
- *
-@@ -601,7 +644,7 @@ struct drm_driver {
-
- irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
- void (*irq_preinstall) (struct drm_device *dev);
-- void (*irq_postinstall) (struct drm_device *dev);
-+ int (*irq_postinstall) (struct drm_device *dev);
- void (*irq_uninstall) (struct drm_device *dev);
- void (*reclaim_buffers) (struct drm_device *dev,
- struct drm_file * file_priv);
-@@ -730,13 +773,28 @@ struct drm_device {
- /** \name VBLANK IRQ support */
- /*@{ */
-
-- wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
-- atomic_t vbl_received;
-- atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
-+ /*
-+ * At load time, disabling the vblank interrupt won't be allowed since
-+ * old clients may not call the modeset ioctl and therefore misbehave.
-+ * Once the modeset ioctl *has* been called though, we can safely
-+ * disable them when unused.
-+ */
-+ int vblank_disable_allowed;
-+
-+ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
-+ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
- spinlock_t vbl_lock;
-- struct list_head vbl_sigs; /**< signal list to send on VBLANK */
-- struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
-- unsigned int vbl_pending;
-+ struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
-+ atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
-+ atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */
-+ u32 *last_vblank; /* protected by dev->vbl_lock, used */
-+ /* for wraparound handling */
-+ int *vblank_enabled; /* so we don't call enable more than
-+ once per disable */
-+ int *vblank_inmodeset; /* Display driver is setting mode */
-+ struct timer_list vblank_disable_timer;
-+
-+ u32 max_vblank_count; /**< size of vblank counter register */
- spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
- void (*locked_tasklet_func)(struct drm_device *dev);
-
-@@ -757,6 +815,7 @@ struct drm_device {
- struct pci_controller *hose;
- #endif
- struct drm_sg_mem *sg; /**< Scatter gather memory */
-+ int num_crtcs; /**< Number of CRTCs on this device */
- void *dev_private; /**< device private data */
- struct drm_sigdata sigdata; /**< For block_all_signals */
- sigset_t sigmask;
-@@ -990,10 +1049,19 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev);
- extern void drm_driver_irq_postinstall(struct drm_device *dev);
- extern void drm_driver_irq_uninstall(struct drm_device *dev);
-
-+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
- extern int drm_wait_vblank(struct drm_device *dev, void *data,
-- struct drm_file *file_priv);
-+ struct drm_file *filp);
- extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
--extern void drm_vbl_send_signals(struct drm_device *dev);
-+extern void drm_locked_tasklet(struct drm_device *dev,
-+ void(*func)(struct drm_device *));
-+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
-+extern void drm_handle_vblank(struct drm_device *dev, int crtc);
-+extern int drm_vblank_get(struct drm_device *dev, int crtc);
-+extern void drm_vblank_put(struct drm_device *dev, int crtc);
-+/* Modesetting support */
-+extern int drm_modeset_ctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
- extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
-
- /* AGP/GART support (drm_agpsupport.h) */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0012-Export-shmem_file_setup-for-DRM-GEM.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0012-Export-shmem_file_setup-for-DRM-GEM.patch
deleted file mode 100644
index 642d89ba7..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0012-Export-shmem_file_setup-for-DRM-GEM.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-commit 48e13db26a25ebaf61f1fc28f612d6b35ddf1965
-Author: Keith Packard <keithp@keithp.com>
-Date: Fri Jun 20 00:08:06 2008 -0700
-
- Export shmem_file_setup for DRM-GEM
-
- GEM needs to create shmem files to back buffer objects. Though currently
- creation of files for objects could have been driven from userland, the
- modesetting work will require allocation of buffer objects before userland
- is running, for boot-time message display.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/mm/shmem.c b/mm/shmem.c
-index 04fb4f1..515909d 100644
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -2582,6 +2582,7 @@ put_memory:
- shmem_unacct_size(flags, size);
- return ERR_PTR(error);
- }
-+EXPORT_SYMBOL(shmem_file_setup);
-
- /**
- * shmem_zero_setup - setup a shared anonymous mapping
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch
deleted file mode 100644
index cc90d4626..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-commit 25eaa97fc74b225e13cf11ed8d770192ddc9355d
-Author: Eric Anholt <eric@anholt.net>
-Date: Thu Aug 21 12:53:33 2008 -0700
-
- Export kmap_atomic_pfn for DRM-GEM.
-
- The driver would like to map IO space directly for copying data in when
- appropriate, to avoid CPU cache flushing for streaming writes.
- kmap_atomic_pfn lets us avoid IPIs associated with ioremap for this process.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
-index 165c871..d52e91d 100644
---- a/arch/x86/mm/highmem_32.c
-+++ b/arch/x86/mm/highmem_32.c
-@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-
- return (void*) vaddr;
- }
-+EXPORT_SYMBOL(kmap_atomic_pfn);
-
- struct page *kmap_atomic_to_page(void *ptr)
- {
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch
deleted file mode 100644
index 95cca5d0c..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch
+++ /dev/null
@@ -1,5483 +0,0 @@
-commit c97398223c6a505fac2c783a624dc80e0aa5d5d0
-Author: Eric Anholt <eric@anholt.net>
-Date: Wed Jul 30 12:06:12 2008 -0700
-
- drm: Add GEM ("graphics execution manager") to i915 driver.
-
- GEM allows the creation of persistent buffer objects accessible by the
- graphics device through new ioctls for managing execution of commands on the
- device. The userland API is almost entirely driver-specific to ensure that
- any driver building on this model can easily map the interface to individual
- driver requirements.
-
- GEM is used by the 2d driver for managing its internal state allocations and
- will be used for pixmap storage to reduce memory consumption and enable
- zero-copy GLX_EXT_texture_from_pixmap, and in the 3d driver is used to enable
- GL_EXT_framebuffer_object and GL_ARB_pixel_buffer_object.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
-index e9f9a97..74da994 100644
---- a/drivers/gpu/drm/Makefile
-+++ b/drivers/gpu/drm/Makefile
-@@ -4,8 +4,9 @@
-
- ccflags-y := -Iinclude/drm
-
--drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
-- drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
-+drm-y := drm_auth.o drm_bufs.o drm_cache.o \
-+ drm_context.o drm_dma.o drm_drawable.o \
-+ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
-diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
-index aefa5ac..2639be2 100644
---- a/drivers/gpu/drm/drm_agpsupport.c
-+++ b/drivers/gpu/drm/drm_agpsupport.c
-@@ -33,6 +33,7 @@
-
- #include "drmP.h"
- #include <linux/module.h>
-+#include <asm/agp.h>
-
- #if __OS_HAS_AGP
-
-@@ -452,4 +453,52 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
- return agp_unbind_memory(handle);
- }
-
--#endif /* __OS_HAS_AGP */
-+/**
-+ * Binds a collection of pages into AGP memory at the given offset, returning
-+ * the AGP memory structure containing them.
-+ *
-+ * No reference is held on the pages during this time -- it is up to the
-+ * caller to handle that.
-+ */
-+DRM_AGP_MEM *
-+drm_agp_bind_pages(struct drm_device *dev,
-+ struct page **pages,
-+ unsigned long num_pages,
-+ uint32_t gtt_offset)
-+{
-+ DRM_AGP_MEM *mem;
-+ int ret, i;
-+
-+ DRM_DEBUG("\n");
-+
-+ mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
-+ AGP_USER_MEMORY);
-+ if (mem == NULL) {
-+ DRM_ERROR("Failed to allocate memory for %ld pages\n",
-+ num_pages);
-+ return NULL;
-+ }
-+
-+ for (i = 0; i < num_pages; i++)
-+ mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
-+ mem->page_count = num_pages;
-+
-+ mem->is_flushed = true;
-+ ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
-+ if (ret != 0) {
-+ DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
-+ agp_free_memory(mem);
-+ return NULL;
-+ }
-+
-+ return mem;
-+}
-+EXPORT_SYMBOL(drm_agp_bind_pages);
-+
-+void drm_agp_chipset_flush(struct drm_device *dev)
-+{
-+ agp_flush_chipset(dev->agp->bridge);
-+}
-+EXPORT_SYMBOL(drm_agp_chipset_flush);
-+
-+#endif /* __OS_HAS_AGP */
-diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
-new file mode 100644
-index 0000000..9475f7d
---- /dev/null
-+++ b/drivers/gpu/drm/drm_cache.c
-@@ -0,0 +1,76 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+#if defined(CONFIG_X86)
-+static void
-+drm_clflush_page(struct page *page)
-+{
-+ uint8_t *page_virtual;
-+ unsigned int i;
-+
-+ if (unlikely(page == NULL))
-+ return;
-+
-+ page_virtual = kmap_atomic(page, KM_USER0);
-+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-+ clflush(page_virtual + i);
-+ kunmap_atomic(page_virtual, KM_USER0);
-+}
-+#endif
-+
-+static void
-+drm_clflush_ipi_handler(void *null)
-+{
-+ wbinvd();
-+}
-+
-+void
-+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
-+{
-+
-+#if defined(CONFIG_X86)
-+ if (cpu_has_clflush) {
-+ unsigned long i;
-+
-+ mb();
-+ for (i = 0; i < num_pages; ++i)
-+ drm_clflush_page(*pages++);
-+ mb();
-+
-+ return;
-+ }
-+#endif
-+
-+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
-+ DRM_ERROR("Timed out waiting for cache flush.\n");
-+}
-+EXPORT_SYMBOL(drm_clflush_pages);
-diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
-index fb45fe7..96f416a 100644
---- a/drivers/gpu/drm/drm_drv.c
-+++ b/drivers/gpu/drm/drm_drv.c
-@@ -119,6 +119,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+
-+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
-+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
- };
-
- #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
-index dcf8b4d..0d46627 100644
---- a/drivers/gpu/drm/drm_fops.c
-+++ b/drivers/gpu/drm/drm_fops.c
-@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
-
- INIT_LIST_HEAD(&priv->lhead);
-
-+ if (dev->driver->driver_features & DRIVER_GEM)
-+ drm_gem_open(dev, priv);
-+
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
- if (ret < 0)
-@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
- dev->driver->reclaim_buffers(dev, file_priv);
- }
-
-+ if (dev->driver->driver_features & DRIVER_GEM)
-+ drm_gem_release(dev, file_priv);
-+
- drm_fasync(-1, filp, 0);
-
- mutex_lock(&dev->ctxlist_mutex);
-diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
-new file mode 100644
-index 0000000..434155b
---- /dev/null
-+++ b/drivers/gpu/drm/drm_gem.c
-@@ -0,0 +1,420 @@
-+/*
-+ * Copyright © 2008 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ *
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <linux/mm.h>
-+#include <linux/uaccess.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/module.h>
-+#include <linux/mman.h>
-+#include <linux/pagemap.h>
-+#include "drmP.h"
-+
-+/** @file drm_gem.c
-+ *
-+ * This file provides some of the base ioctls and library routines for
-+ * the graphics memory manager implemented by each device driver.
-+ *
-+ * Because various devices have different requirements in terms of
-+ * synchronization and migration strategies, implementing that is left up to
-+ * the driver, and all that the general API provides should be generic --
-+ * allocating objects, reading/writing data with the cpu, freeing objects.
-+ * Even there, platform-dependent optimizations for reading/writing data with
-+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
-+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
-+ *
-+ * The goal was to have swap-backed object allocation managed through
-+ * struct file. However, file descriptors as handles to a struct file have
-+ * two major failings:
-+ * - Process limits prevent more than 1024 or so being used at a time by
-+ * default.
-+ * - Inability to allocate high fds will aggravate the X Server's select()
-+ * handling, and likely that of many GL client applications as well.
-+ *
-+ * This led to a plan of using our own integer IDs (called handles, following
-+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
-+ * ioctls. The objects themselves will still include the struct file so
-+ * that we can transition to fds if the required kernel infrastructure shows
-+ * up at a later date, and as our interface with shmfs for memory allocation.
-+ */
-+
-+/**
-+ * Initialize the GEM device fields
-+ */
-+
-+int
-+drm_gem_init(struct drm_device *dev)
-+{
-+ spin_lock_init(&dev->object_name_lock);
-+ idr_init(&dev->object_name_idr);
-+ atomic_set(&dev->object_count, 0);
-+ atomic_set(&dev->object_memory, 0);
-+ atomic_set(&dev->pin_count, 0);
-+ atomic_set(&dev->pin_memory, 0);
-+ atomic_set(&dev->gtt_count, 0);
-+ atomic_set(&dev->gtt_memory, 0);
-+ return 0;
-+}
-+
-+/**
-+ * Allocate a GEM object of the specified size with shmfs backing store
-+ */
-+struct drm_gem_object *
-+drm_gem_object_alloc(struct drm_device *dev, size_t size)
-+{
-+ struct drm_gem_object *obj;
-+
-+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
-+
-+ obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
-+
-+ obj->dev = dev;
-+ obj->filp = shmem_file_setup("drm mm object", size, 0);
-+ if (IS_ERR(obj->filp)) {
-+ kfree(obj);
-+ return NULL;
-+ }
-+
-+ kref_init(&obj->refcount);
-+ kref_init(&obj->handlecount);
-+ obj->size = size;
-+ if (dev->driver->gem_init_object != NULL &&
-+ dev->driver->gem_init_object(obj) != 0) {
-+ fput(obj->filp);
-+ kfree(obj);
-+ return NULL;
-+ }
-+ atomic_inc(&dev->object_count);
-+ atomic_add(obj->size, &dev->object_memory);
-+ return obj;
-+}
-+EXPORT_SYMBOL(drm_gem_object_alloc);
-+
-+/**
-+ * Removes the mapping from handle to filp for this object.
-+ */
-+static int
-+drm_gem_handle_delete(struct drm_file *filp, int handle)
-+{
-+ struct drm_device *dev;
-+ struct drm_gem_object *obj;
-+
-+ /* This is gross. The idr system doesn't let us try a delete and
-+ * return an error code. It just spews if you fail at deleting.
-+ * So, we have to grab a lock around finding the object and then
-+ * doing the delete on it and dropping the refcount, or the user
-+ * could race us to double-decrement the refcount and cause a
-+ * use-after-free later. Given the frequency of our handle lookups,
-+ * we may want to use ida for number allocation and a hash table
-+ * for the pointers, anyway.
-+ */
-+ spin_lock(&filp->table_lock);
-+
-+ /* Check if we currently have a reference on the object */
-+ obj = idr_find(&filp->object_idr, handle);
-+ if (obj == NULL) {
-+ spin_unlock(&filp->table_lock);
-+ return -EINVAL;
-+ }
-+ dev = obj->dev;
-+
-+ /* Release reference and decrement refcount. */
-+ idr_remove(&filp->object_idr, handle);
-+ spin_unlock(&filp->table_lock);
-+
-+ mutex_lock(&dev->struct_mutex);
-+ drm_gem_object_handle_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+}
-+
-+/**
-+ * Create a handle for this object. This adds a handle reference
-+ * to the object, which includes a regular reference count. Callers
-+ * will likely want to dereference the object afterwards.
-+ */
-+int
-+drm_gem_handle_create(struct drm_file *file_priv,
-+ struct drm_gem_object *obj,
-+ int *handlep)
-+{
-+ int ret;
-+
-+ /*
-+ * Get the user-visible handle using idr.
-+ */
-+again:
-+ /* ensure there is space available to allocate a handle */
-+ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
-+ return -ENOMEM;
-+
-+ /* do the allocation under our spinlock */
-+ spin_lock(&file_priv->table_lock);
-+ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
-+ spin_unlock(&file_priv->table_lock);
-+ if (ret == -EAGAIN)
-+ goto again;
-+
-+ if (ret != 0)
-+ return ret;
-+
-+ drm_gem_object_handle_reference(obj);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_gem_handle_create);
-+
-+/** Returns a reference to the object named by the handle. */
-+struct drm_gem_object *
-+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
-+ int handle)
-+{
-+ struct drm_gem_object *obj;
-+
-+ spin_lock(&filp->table_lock);
-+
-+ /* Check if we currently have a reference on the object */
-+ obj = idr_find(&filp->object_idr, handle);
-+ if (obj == NULL) {
-+ spin_unlock(&filp->table_lock);
-+ return NULL;
-+ }
-+
-+ drm_gem_object_reference(obj);
-+
-+ spin_unlock(&filp->table_lock);
-+
-+ return obj;
-+}
-+EXPORT_SYMBOL(drm_gem_object_lookup);
-+
-+/**
-+ * Releases the handle to an mm object.
-+ */
-+int
-+drm_gem_close_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_gem_close *args = data;
-+ int ret;
-+
-+ if (!(dev->driver->driver_features & DRIVER_GEM))
-+ return -ENODEV;
-+
-+ ret = drm_gem_handle_delete(file_priv, args->handle);
-+
-+ return ret;
-+}
-+
-+/**
-+ * Create a global name for an object, returning the name.
-+ *
-+ * Note that the name does not hold a reference; when the object
-+ * is freed, the name goes away.
-+ */
-+int
-+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_gem_flink *args = data;
-+ struct drm_gem_object *obj;
-+ int ret;
-+
-+ if (!(dev->driver->driver_features & DRIVER_GEM))
-+ return -ENODEV;
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL)
-+ return -EINVAL;
-+
-+again:
-+ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
-+ return -ENOMEM;
-+
-+ spin_lock(&dev->object_name_lock);
-+ if (obj->name) {
-+ spin_unlock(&dev->object_name_lock);
-+ return -EEXIST;
-+ }
-+ ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-+ &obj->name);
-+ spin_unlock(&dev->object_name_lock);
-+ if (ret == -EAGAIN)
-+ goto again;
-+
-+ if (ret != 0) {
-+ mutex_lock(&dev->struct_mutex);
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+ }
-+
-+ /*
-+ * Leave the reference from the lookup around as the
-+ * name table now holds one
-+ */
-+ args->name = (uint64_t) obj->name;
-+
-+ return 0;
-+}
-+
-+/**
-+ * Open an object using the global name, returning a handle and the size.
-+ *
-+ * This handle (of course) holds a reference to the object, so the object
-+ * will not go away until the handle is deleted.
-+ */
-+int
-+drm_gem_open_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_gem_open *args = data;
-+ struct drm_gem_object *obj;
-+ int ret;
-+ int handle;
-+
-+ if (!(dev->driver->driver_features & DRIVER_GEM))
-+ return -ENODEV;
-+
-+ spin_lock(&dev->object_name_lock);
-+ obj = idr_find(&dev->object_name_idr, (int) args->name);
-+ if (obj)
-+ drm_gem_object_reference(obj);
-+ spin_unlock(&dev->object_name_lock);
-+ if (!obj)
-+ return -ENOENT;
-+
-+ ret = drm_gem_handle_create(file_priv, obj, &handle);
-+ mutex_lock(&dev->struct_mutex);
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (ret)
-+ return ret;
-+
-+ args->handle = handle;
-+ args->size = obj->size;
-+
-+ return 0;
-+}
-+
-+/**
-+ * Called at device open time, sets up the structure for handling refcounting
-+ * of mm objects.
-+ */
-+void
-+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
-+{
-+ idr_init(&file_private->object_idr);
-+ spin_lock_init(&file_private->table_lock);
-+}
-+
-+/**
-+ * Called at device close to release the file's
-+ * handle references on objects.
-+ */
-+static int
-+drm_gem_object_release_handle(int id, void *ptr, void *data)
-+{
-+ struct drm_gem_object *obj = ptr;
-+
-+ drm_gem_object_handle_unreference(obj);
-+
-+ return 0;
-+}
-+
-+/**
-+ * Called at close time when the filp is going away.
-+ *
-+ * Releases any remaining references on objects by this filp.
-+ */
-+void
-+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
-+{
-+ mutex_lock(&dev->struct_mutex);
-+ idr_for_each(&file_private->object_idr,
-+ &drm_gem_object_release_handle, NULL);
-+
-+ idr_destroy(&file_private->object_idr);
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
-+/**
-+ * Called after the last reference to the object has been lost.
-+ *
-+ * Frees the object
-+ */
-+void
-+drm_gem_object_free(struct kref *kref)
-+{
-+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-+ struct drm_device *dev = obj->dev;
-+
-+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-+
-+ if (dev->driver->gem_free_object != NULL)
-+ dev->driver->gem_free_object(obj);
-+
-+ fput(obj->filp);
-+ atomic_dec(&dev->object_count);
-+ atomic_sub(obj->size, &dev->object_memory);
-+ kfree(obj);
-+}
-+EXPORT_SYMBOL(drm_gem_object_free);
-+
-+/**
-+ * Called after the last handle to the object has been closed
-+ *
-+ * Removes any name for the object. Note that this must be
-+ * called before drm_gem_object_free or we'll be touching
-+ * freed memory
-+ */
-+void
-+drm_gem_object_handle_free(struct kref *kref)
-+{
-+ struct drm_gem_object *obj = container_of(kref,
-+ struct drm_gem_object,
-+ handlecount);
-+ struct drm_device *dev = obj->dev;
-+
-+ /* Remove any name for this object */
-+ spin_lock(&dev->object_name_lock);
-+ if (obj->name) {
-+ idr_remove(&dev->object_name_idr, obj->name);
-+ spin_unlock(&dev->object_name_lock);
-+ /*
-+ * The object name held a reference to this object, drop
-+ * that now.
-+ */
-+ drm_gem_object_unreference(obj);
-+ } else
-+ spin_unlock(&dev->object_name_lock);
-+
-+}
-+EXPORT_SYMBOL(drm_gem_object_handle_free);
-+
-diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
-index 0177012..803bc9e 100644
---- a/drivers/gpu/drm/drm_memory.c
-+++ b/drivers/gpu/drm/drm_memory.c
-@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
- {
- return drm_agp_free_memory(handle) ? 0 : -EINVAL;
- }
-+EXPORT_SYMBOL(drm_free_agp);
-
- /** Wrapper around agp_bind_memory() */
- int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
-@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
- {
- return drm_agp_unbind_memory(handle);
- }
-+EXPORT_SYMBOL(drm_unbind_agp);
-
- #else /* __OS_HAS_AGP */
- static inline void *agp_remap(unsigned long offset, unsigned long size,
-diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
-index dcff9e9..217ad7d 100644
---- a/drivers/gpu/drm/drm_mm.c
-+++ b/drivers/gpu/drm/drm_mm.c
-@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
-
- return child;
- }
-+EXPORT_SYMBOL(drm_mm_get_block);
-
- /*
- * Put a block. Merge with the previous and / or next block if they are free.
-@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
- drm_free(cur, sizeof(*cur), DRM_MEM_MM);
- }
- }
-+EXPORT_SYMBOL(drm_mm_put_block);
-
- struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
- unsigned long size,
-@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
-
- return (head->next->next == head);
- }
-+EXPORT_SYMBOL(drm_mm_search_free);
-
- int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
- {
-@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
-
- return drm_mm_create_tail_node(mm, start, size);
- }
--
-+EXPORT_SYMBOL(drm_mm_init);
-
- void drm_mm_takedown(struct drm_mm * mm)
- {
-diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
-index 93b1e04..d490db4 100644
---- a/drivers/gpu/drm/drm_proc.c
-+++ b/drivers/gpu/drm/drm_proc.c
-@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
- static int drm_bufs_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-+static int drm_gem_name_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data);
-+static int drm_gem_object_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data);
- #if DRM_DEBUG_CODE
- static int drm_vma_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data);
-@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
- static struct drm_proc_list {
- const char *name; /**< file name */
- int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
-+ u32 driver_features; /**< Required driver features for this entry */
- } drm_proc_list[] = {
-- {"name", drm_name_info},
-- {"mem", drm_mem_info},
-- {"vm", drm_vm_info},
-- {"clients", drm_clients_info},
-- {"queues", drm_queues_info},
-- {"bufs", drm_bufs_info},
-+ {"name", drm_name_info, 0},
-+ {"mem", drm_mem_info, 0},
-+ {"vm", drm_vm_info, 0},
-+ {"clients", drm_clients_info, 0},
-+ {"queues", drm_queues_info, 0},
-+ {"bufs", drm_bufs_info, 0},
-+ {"gem_names", drm_gem_name_info, DRIVER_GEM},
-+ {"gem_objects", drm_gem_object_info, DRIVER_GEM},
- #if DRM_DEBUG_CODE
- {"vma", drm_vma_info},
- #endif
-@@ -90,8 +97,9 @@ static struct drm_proc_list {
- int drm_proc_init(struct drm_minor *minor, int minor_id,
- struct proc_dir_entry *root)
- {
-+ struct drm_device *dev = minor->dev;
- struct proc_dir_entry *ent;
-- int i, j;
-+ int i, j, ret;
- char name[64];
-
- sprintf(name, "%d", minor_id);
-@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
- }
-
- for (i = 0; i < DRM_PROC_ENTRIES; i++) {
-+ u32 features = drm_proc_list[i].driver_features;
-+
-+ if (features != 0 &&
-+ (dev->driver->driver_features & features) != features)
-+ continue;
-+
- ent = create_proc_entry(drm_proc_list[i].name,
- S_IFREG | S_IRUGO, minor->dev_root);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
- name, drm_proc_list[i].name);
-- for (j = 0; j < i; j++)
-- remove_proc_entry(drm_proc_list[i].name,
-- minor->dev_root);
-- remove_proc_entry(name, root);
-- minor->dev_root = NULL;
-- return -1;
-+ ret = -1;
-+ goto fail;
- }
- ent->read_proc = drm_proc_list[i].f;
- ent->data = minor;
- }
-
-+ if (dev->driver->proc_init) {
-+ ret = dev->driver->proc_init(minor);
-+ if (ret) {
-+ DRM_ERROR("DRM: Driver failed to initialize "
-+ "/proc/dri.\n");
-+ goto fail;
-+ }
-+ }
-+
- return 0;
-+ fail:
-+
-+ for (j = 0; j < i; j++)
-+ remove_proc_entry(drm_proc_list[i].name,
-+ minor->dev_root);
-+ remove_proc_entry(name, root);
-+ minor->dev_root = NULL;
-+ return ret;
- }
-
- /**
-@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
- */
- int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
- {
-+ struct drm_device *dev = minor->dev;
- int i;
- char name[64];
-
- if (!root || !minor->dev_root)
- return 0;
-
-+ if (dev->driver->proc_cleanup)
-+ dev->driver->proc_cleanup(minor);
-+
- for (i = 0; i < DRM_PROC_ENTRIES; i++)
- remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
- sprintf(name, "%d", minor->index);
-@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
- return ret;
- }
-
-+struct drm_gem_name_info_data {
-+ int len;
-+ char *buf;
-+ int eof;
-+};
-+
-+static int drm_gem_one_name_info(int id, void *ptr, void *data)
-+{
-+ struct drm_gem_object *obj = ptr;
-+ struct drm_gem_name_info_data *nid = data;
-+
-+ DRM_INFO("name %d size %d\n", obj->name, obj->size);
-+ if (nid->eof)
-+ return 0;
-+
-+ nid->len += sprintf(&nid->buf[nid->len],
-+ "%6d%9d%8d%9d\n",
-+ obj->name, obj->size,
-+ atomic_read(&obj->handlecount.refcount),
-+ atomic_read(&obj->refcount.refcount));
-+ if (nid->len > DRM_PROC_LIMIT) {
-+ nid->eof = 1;
-+ return 0;
-+ }
-+ return 0;
-+}
-+
-+static int drm_gem_name_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data)
-+{
-+ struct drm_minor *minor = (struct drm_minor *) data;
-+ struct drm_device *dev = minor->dev;
-+ struct drm_gem_name_info_data nid;
-+
-+ if (offset > DRM_PROC_LIMIT) {
-+ *eof = 1;
-+ return 0;
-+ }
-+
-+ nid.len = sprintf(buf, " name size handles refcount\n");
-+ nid.buf = buf;
-+ nid.eof = 0;
-+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
-+
-+ *start = &buf[offset];
-+ *eof = 0;
-+ if (nid.len > request + offset)
-+ return request;
-+ *eof = 1;
-+ return nid.len - offset;
-+}
-+
-+static int drm_gem_object_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data)
-+{
-+ struct drm_minor *minor = (struct drm_minor *) data;
-+ struct drm_device *dev = minor->dev;
-+ int len = 0;
-+
-+ if (offset > DRM_PROC_LIMIT) {
-+ *eof = 1;
-+ return 0;
-+ }
-+
-+ *start = &buf[offset];
-+ *eof = 0;
-+ DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
-+ DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
-+ DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
-+ DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
-+ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
-+ DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
-+ if (len > request + offset)
-+ return request;
-+ *eof = 1;
-+ return len - offset;
-+}
-+
- #if DRM_DEBUG_CODE
-
- static int drm__vma_info(char *buf, char **start, off_t offset, int request,
-diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
-index c2f584f..82f4657 100644
---- a/drivers/gpu/drm/drm_stub.c
-+++ b/drivers/gpu/drm/drm_stub.c
-@@ -152,6 +152,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
- goto error_out_unreg;
- }
-
-+ if (driver->driver_features & DRIVER_GEM) {
-+ retcode = drm_gem_init(dev);
-+ if (retcode) {
-+ DRM_ERROR("Cannot initialize graphics execution "
-+ "manager (GEM)\n");
-+ goto error_out_unreg;
-+ }
-+ }
-+
- return 0;
-
- error_out_unreg:
-@@ -317,6 +326,7 @@ int drm_put_dev(struct drm_device * dev)
- int drm_put_minor(struct drm_minor **minor_p)
- {
- struct drm_minor *minor = *minor_p;
-+
- DRM_DEBUG("release secondary minor %d\n", minor->index);
-
- if (minor->type == DRM_MINOR_LEGACY)
-diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
-index c4bbda6..5ba78e4 100644
---- a/drivers/gpu/drm/i915/Makefile
-+++ b/drivers/gpu/drm/i915/Makefile
-@@ -4,7 +4,11 @@
-
- ccflags-y := -Iinclude/drm
- i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
-- i915_suspend.o
-+ i915_suspend.o \
-+ i915_gem.o \
-+ i915_gem_debug.o \
-+ i915_gem_proc.o \
-+ i915_gem_tiling.o
-
- i915-$(CONFIG_COMPAT) += i915_ioc32.o
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 8609ec2..3b5aa74 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -170,24 +170,31 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
- dev_priv->sarea_priv = (drm_i915_sarea_t *)
- ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
-
-- dev_priv->ring.Start = init->ring_start;
-- dev_priv->ring.End = init->ring_end;
-- dev_priv->ring.Size = init->ring_size;
-- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-+ if (init->ring_size != 0) {
-+ if (dev_priv->ring.ring_obj != NULL) {
-+ i915_dma_cleanup(dev);
-+ DRM_ERROR("Client tried to initialize ringbuffer in "
-+ "GEM mode\n");
-+ return -EINVAL;
-+ }
-
-- dev_priv->ring.map.offset = init->ring_start;
-- dev_priv->ring.map.size = init->ring_size;
-- dev_priv->ring.map.type = 0;
-- dev_priv->ring.map.flags = 0;
-- dev_priv->ring.map.mtrr = 0;
-+ dev_priv->ring.Size = init->ring_size;
-+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
-- drm_core_ioremap(&dev_priv->ring.map, dev);
-+ dev_priv->ring.map.offset = init->ring_start;
-+ dev_priv->ring.map.size = init->ring_size;
-+ dev_priv->ring.map.type = 0;
-+ dev_priv->ring.map.flags = 0;
-+ dev_priv->ring.map.mtrr = 0;
-
-- if (dev_priv->ring.map.handle == NULL) {
-- i915_dma_cleanup(dev);
-- DRM_ERROR("can not ioremap virtual address for"
-- " ring buffer\n");
-- return -ENOMEM;
-+ drm_core_ioremap(&dev_priv->ring.map, dev);
-+
-+ if (dev_priv->ring.map.handle == NULL) {
-+ i915_dma_cleanup(dev);
-+ DRM_ERROR("can not ioremap virtual address for"
-+ " ring buffer\n");
-+ return -ENOMEM;
-+ }
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-@@ -377,9 +384,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
- return 0;
- }
-
--static int i915_emit_box(struct drm_device * dev,
-- struct drm_clip_rect __user * boxes,
-- int i, int DR1, int DR4)
-+int
-+i915_emit_box(struct drm_device *dev,
-+ struct drm_clip_rect __user *boxes,
-+ int i, int DR1, int DR4)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect box;
-@@ -681,6 +689,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
- case I915_PARAM_LAST_DISPATCH:
- value = READ_BREADCRUMB(dev_priv);
- break;
-+ case I915_PARAM_HAS_GEM:
-+ value = 1;
-+ break;
- default:
- DRM_ERROR("Unknown parameter %d\n", param->param);
- return -EINVAL;
-@@ -784,6 +795,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- memset(dev_priv, 0, sizeof(drm_i915_private_t));
-
- dev->dev_private = (void *)dev_priv;
-+ dev_priv->dev = dev;
-
- /* Add register map (needed for suspend/resume) */
- base = drm_get_resource_start(dev, mmio_bar);
-@@ -793,6 +805,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- _DRM_KERNEL | _DRM_DRIVER,
- &dev_priv->mmio_map);
-
-+ i915_gem_load(dev);
-+
- /* Init HWS */
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_phys_hws(dev);
-@@ -838,6 +852,25 @@ int i915_driver_unload(struct drm_device *dev)
- return 0;
- }
-
-+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
-+{
-+ struct drm_i915_file_private *i915_file_priv;
-+
-+ DRM_DEBUG("\n");
-+ i915_file_priv = (struct drm_i915_file_private *)
-+ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
-+
-+ if (!i915_file_priv)
-+ return -ENOMEM;
-+
-+ file_priv->driver_priv = i915_file_priv;
-+
-+ i915_file_priv->mm.last_gem_seqno = 0;
-+ i915_file_priv->mm.last_gem_throttle_seqno = 0;
-+
-+ return 0;
-+}
-+
- void i915_driver_lastclose(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-@@ -845,6 +878,8 @@ void i915_driver_lastclose(struct drm_device * dev)
- if (!dev_priv)
- return;
-
-+ i915_gem_lastclose(dev);
-+
- if (dev_priv->agp_heap)
- i915_mem_takedown(&(dev_priv->agp_heap));
-
-@@ -857,6 +892,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
- i915_mem_release(dev, file_priv, dev_priv->agp_heap);
- }
-
-+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
-+{
-+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-+
-+ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
-+}
-+
- struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
-@@ -875,6 +917,22 @@ struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
- DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
-+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
- };
-
- int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
-diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
-index 37af03f..a80ead2 100644
---- a/drivers/gpu/drm/i915/i915_drv.c
-+++ b/drivers/gpu/drm/i915/i915_drv.c
-@@ -85,12 +85,15 @@ static struct drm_driver driver = {
- /* don't use mtrr's here, the Xserver or user space app should
- * deal with them for intel hardware.
- */
-- .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
-- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
-+ .driver_features =
-+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
- .load = i915_driver_load,
- .unload = i915_driver_unload,
-+ .open = i915_driver_open,
- .lastclose = i915_driver_lastclose,
- .preclose = i915_driver_preclose,
-+ .postclose = i915_driver_postclose,
- .suspend = i915_suspend,
- .resume = i915_resume,
- .device_is_agp = i915_driver_device_is_agp,
-@@ -104,6 +107,10 @@ static struct drm_driver driver = {
- .reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
-+ .proc_init = i915_gem_proc_init,
-+ .proc_cleanup = i915_gem_proc_cleanup,
-+ .gem_init_object = i915_gem_init_object,
-+ .gem_free_object = i915_gem_free_object,
- .ioctls = i915_ioctls,
- .fops = {
- .owner = THIS_MODULE,
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index d1a02be..87b071a 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -39,7 +39,7 @@
-
- #define DRIVER_NAME "i915"
- #define DRIVER_DESC "Intel Graphics"
--#define DRIVER_DATE "20060119"
-+#define DRIVER_DATE "20080730"
-
- enum pipe {
- PIPE_A = 0,
-@@ -60,16 +60,23 @@ enum pipe {
- #define DRIVER_MINOR 6
- #define DRIVER_PATCHLEVEL 0
-
-+#define WATCH_COHERENCY 0
-+#define WATCH_BUF 0
-+#define WATCH_EXEC 0
-+#define WATCH_LRU 0
-+#define WATCH_RELOC 0
-+#define WATCH_INACTIVE 0
-+#define WATCH_PWRITE 0
-+
- typedef struct _drm_i915_ring_buffer {
- int tail_mask;
-- unsigned long Start;
-- unsigned long End;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
-+ struct drm_gem_object *ring_obj;
- } drm_i915_ring_buffer_t;
-
- struct mem_block {
-@@ -101,6 +108,8 @@ struct intel_opregion {
- };
-
- typedef struct drm_i915_private {
-+ struct drm_device *dev;
-+
- drm_local_map_t *sarea;
- drm_local_map_t *mmio_map;
-
-@@ -113,6 +122,7 @@ typedef struct drm_i915_private {
- uint32_t counter;
- unsigned int status_gfx_addr;
- drm_local_map_t hws_map;
-+ struct drm_gem_object *hws_obj;
-
- unsigned int cpp;
- int back_offset;
-@@ -122,7 +132,6 @@ typedef struct drm_i915_private {
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
-- atomic_t irq_emitted;
- /** Protects user_irq_refcount and irq_mask_reg */
- spinlock_t user_irq_lock;
- /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-@@ -230,8 +239,174 @@ typedef struct drm_i915_private {
- u8 saveDACMASK;
- u8 saveDACDATA[256*3]; /* 256 3-byte colors */
- u8 saveCR[37];
-+
-+ struct {
-+ struct drm_mm gtt_space;
-+
-+ /**
-+ * List of objects currently involved in rendering from the
-+ * ringbuffer.
-+ *
-+ * A reference is held on the buffer while on this list.
-+ */
-+ struct list_head active_list;
-+
-+ /**
-+ * List of objects which are not in the ringbuffer but which
-+ * still have a write_domain which needs to be flushed before
-+ * unbinding.
-+ *
-+ * A reference is held on the buffer while on this list.
-+ */
-+ struct list_head flushing_list;
-+
-+ /**
-+ * LRU list of objects which are not in the ringbuffer and
-+ * are ready to unbind, but are still in the GTT.
-+ *
-+ * A reference is not held on the buffer while on this list,
-+ * as merely being GTT-bound shouldn't prevent its being
-+ * freed, and we'll pull it off the list in the free path.
-+ */
-+ struct list_head inactive_list;
-+
-+ /**
-+ * List of breadcrumbs associated with GPU requests currently
-+ * outstanding.
-+ */
-+ struct list_head request_list;
-+
-+ /**
-+ * We leave the user IRQ off as much as possible,
-+ * but this means that requests will finish and never
-+ * be retired once the system goes idle. Set a timer to
-+ * fire periodically while the ring is running. When it
-+ * fires, go retire requests.
-+ */
-+ struct delayed_work retire_work;
-+
-+ uint32_t next_gem_seqno;
-+
-+ /**
-+ * Waiting sequence number, if any
-+ */
-+ uint32_t waiting_gem_seqno;
-+
-+ /**
-+ * Last seq seen at irq time
-+ */
-+ uint32_t irq_gem_seqno;
-+
-+ /**
-+ * Flag if the X Server, and thus DRM, is not currently in
-+ * control of the device.
-+ *
-+ * This is set between LeaveVT and EnterVT. It needs to be
-+ * replaced with a semaphore. It also needs to be
-+ * transitioned away from for kernel modesetting.
-+ */
-+ int suspended;
-+
-+ /**
-+ * Flag if the hardware appears to be wedged.
-+ *
-+ * This is set when attempts to idle the device timeout.
-+ * It prevents command submission from occuring and makes
-+ * every pending request fail
-+ */
-+ int wedged;
-+
-+ /** Bit 6 swizzling required for X tiling */
-+ uint32_t bit_6_swizzle_x;
-+ /** Bit 6 swizzling required for Y tiling */
-+ uint32_t bit_6_swizzle_y;
-+ } mm;
- } drm_i915_private_t;
-
-+/** driver private structure attached to each drm_gem_object */
-+struct drm_i915_gem_object {
-+ struct drm_gem_object *obj;
-+
-+ /** Current space allocated to this object in the GTT, if any. */
-+ struct drm_mm_node *gtt_space;
-+
-+ /** This object's place on the active/flushing/inactive lists */
-+ struct list_head list;
-+
-+ /**
-+ * This is set if the object is on the active or flushing lists
-+ * (has pending rendering), and is not set if it's on inactive (ready
-+ * to be unbound).
-+ */
-+ int active;
-+
-+ /**
-+ * This is set if the object has been written to since last bound
-+ * to the GTT
-+ */
-+ int dirty;
-+
-+ /** AGP memory structure for our GTT binding. */
-+ DRM_AGP_MEM *agp_mem;
-+
-+ struct page **page_list;
-+
-+ /**
-+ * Current offset of the object in GTT space.
-+ *
-+ * This is the same as gtt_space->start
-+ */
-+ uint32_t gtt_offset;
-+
-+ /** Boolean whether this object has a valid gtt offset. */
-+ int gtt_bound;
-+
-+ /** How many users have pinned this object in GTT space */
-+ int pin_count;
-+
-+ /** Breadcrumb of last rendering to the buffer. */
-+ uint32_t last_rendering_seqno;
-+
-+ /** Current tiling mode for the object. */
-+ uint32_t tiling_mode;
-+
-+ /**
-+ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
-+ * GEM_DOMAIN_CPU is not in the object's read domain.
-+ */
-+ uint8_t *page_cpu_valid;
-+};
-+
-+/**
-+ * Request queue structure.
-+ *
-+ * The request queue allows us to note sequence numbers that have been emitted
-+ * and may be associated with active buffers to be retired.
-+ *
-+ * By keeping this list, we can avoid having to do questionable
-+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
-+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
-+ */
-+struct drm_i915_gem_request {
-+ /** GEM sequence number associated with this request. */
-+ uint32_t seqno;
-+
-+ /** Time at which this request was emitted, in jiffies. */
-+ unsigned long emitted_jiffies;
-+
-+ /** Cache domains that were flushed at the start of the request. */
-+ uint32_t flush_domains;
-+
-+ struct list_head list;
-+};
-+
-+struct drm_i915_file_private {
-+ struct {
-+ uint32_t last_gem_seqno;
-+ uint32_t last_gem_throttle_seqno;
-+ } mm;
-+};
-+
- extern struct drm_ioctl_desc i915_ioctls[];
- extern int i915_max_ioctl;
-
-@@ -239,18 +414,26 @@ extern int i915_max_ioctl;
- extern void i915_kernel_lost_context(struct drm_device * dev);
- extern int i915_driver_load(struct drm_device *, unsigned long flags);
- extern int i915_driver_unload(struct drm_device *);
-+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
- extern void i915_driver_lastclose(struct drm_device * dev);
- extern void i915_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
-+extern void i915_driver_postclose(struct drm_device *dev,
-+ struct drm_file *file_priv);
- extern int i915_driver_device_is_agp(struct drm_device * dev);
- extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
-+extern int i915_emit_box(struct drm_device *dev,
-+ struct drm_clip_rect __user *boxes,
-+ int i, int DR1, int DR4);
-
- /* i915_irq.c */
- extern int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-+void i915_user_irq_get(struct drm_device *dev);
-+void i915_user_irq_put(struct drm_device *dev);
-
- extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
- extern void i915_driver_irq_preinstall(struct drm_device * dev);
-@@ -279,6 +462,67 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
- extern void i915_mem_takedown(struct mem_block **heap);
- extern void i915_mem_release(struct drm_device * dev,
- struct drm_file *file_priv, struct mem_block *heap);
-+/* i915_gem.c */
-+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_execbuffer(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_set_tiling(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int i915_gem_get_tiling(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+void i915_gem_load(struct drm_device *dev);
-+int i915_gem_proc_init(struct drm_minor *minor);
-+void i915_gem_proc_cleanup(struct drm_minor *minor);
-+int i915_gem_init_object(struct drm_gem_object *obj);
-+void i915_gem_free_object(struct drm_gem_object *obj);
-+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-+void i915_gem_object_unpin(struct drm_gem_object *obj);
-+void i915_gem_lastclose(struct drm_device *dev);
-+uint32_t i915_get_gem_seqno(struct drm_device *dev);
-+void i915_gem_retire_requests(struct drm_device *dev);
-+void i915_gem_retire_work_handler(struct work_struct *work);
-+void i915_gem_clflush_object(struct drm_gem_object *obj);
-+
-+/* i915_gem_tiling.c */
-+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-+
-+/* i915_gem_debug.c */
-+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
-+ const char *where, uint32_t mark);
-+#if WATCH_INACTIVE
-+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
-+#else
-+#define i915_verify_inactive(dev, file, line)
-+#endif
-+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
-+ const char *where, uint32_t mark);
-+void i915_dump_lru(struct drm_device *dev, const char *where);
-
- /* i915_suspend.c */
- extern int i915_save_state(struct drm_device *dev);
-@@ -347,6 +591,7 @@ extern void opregion_enable_asle(struct drm_device *dev);
- */
- #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
- #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
-+#define I915_GEM_HWS_INDEX 0x10
-
- extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-new file mode 100644
-index 0000000..90ae8a0
---- /dev/null
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -0,0 +1,2497 @@
-+/*
-+ * Copyright © 2008 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ *
-+ */
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "i915_drm.h"
-+#include "i915_drv.h"
-+#include <linux/swap.h>
-+
-+static int
-+i915_gem_object_set_domain(struct drm_gem_object *obj,
-+ uint32_t read_domains,
-+ uint32_t write_domain);
-+static int
-+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-+ uint64_t offset,
-+ uint64_t size,
-+ uint32_t read_domains,
-+ uint32_t write_domain);
-+static int
-+i915_gem_set_domain(struct drm_gem_object *obj,
-+ struct drm_file *file_priv,
-+ uint32_t read_domains,
-+ uint32_t write_domain);
-+static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-+static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
-+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
-+
-+int
-+i915_gem_init_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_init *args = data;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (args->gtt_start >= args->gtt_end ||
-+ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
-+ (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EINVAL;
-+ }
-+
-+ drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
-+ args->gtt_end - args->gtt_start);
-+
-+ dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * Creates a new mm object and returns a handle to it.
-+ */
-+int
-+i915_gem_create_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_create *args = data;
-+ struct drm_gem_object *obj;
-+ int handle, ret;
-+
-+ args->size = roundup(args->size, PAGE_SIZE);
-+
-+ /* Allocate the new object */
-+ obj = drm_gem_object_alloc(dev, args->size);
-+ if (obj == NULL)
-+ return -ENOMEM;
-+
-+ ret = drm_gem_handle_create(file_priv, obj, &handle);
-+ mutex_lock(&dev->struct_mutex);
-+ drm_gem_object_handle_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (ret)
-+ return ret;
-+
-+ args->handle = handle;
-+
-+ return 0;
-+}
-+
-+/**
-+ * Reads data from the object referenced by handle.
-+ *
-+ * On error, the contents of *data are undefined.
-+ */
-+int
-+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_pread *args = data;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ ssize_t read;
-+ loff_t offset;
-+ int ret;
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL)
-+ return -EBADF;
-+ obj_priv = obj->driver_private;
-+
-+ /* Bounds check source.
-+ *
-+ * XXX: This could use review for overflow issues...
-+ */
-+ if (args->offset > obj->size || args->size > obj->size ||
-+ args->offset + args->size > obj->size) {
-+ drm_gem_object_unreference(obj);
-+ return -EINVAL;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
-+ I915_GEM_DOMAIN_CPU, 0);
-+ if (ret != 0) {
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+
-+ offset = args->offset;
-+
-+ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
-+ args->size, &offset);
-+ if (read != args->size) {
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (read < 0)
-+ return read;
-+ else
-+ return -EINVAL;
-+ }
-+
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+}
-+
-+static int
-+i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-+ struct drm_i915_gem_pwrite *args,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ ssize_t remain;
-+ loff_t offset;
-+ char __user *user_data;
-+ char *vaddr;
-+ int i, o, l;
-+ int ret = 0;
-+ unsigned long pfn;
-+ unsigned long unwritten;
-+
-+ user_data = (char __user *) (uintptr_t) args->data_ptr;
-+ remain = args->size;
-+ if (!access_ok(VERIFY_READ, user_data, remain))
-+ return -EFAULT;
-+
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = i915_gem_object_pin(obj, 0);
-+ if (ret) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+ }
-+ ret = i915_gem_set_domain(obj, file_priv,
-+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-+ if (ret)
-+ goto fail;
-+
-+ obj_priv = obj->driver_private;
-+ offset = obj_priv->gtt_offset + args->offset;
-+ obj_priv->dirty = 1;
-+
-+ while (remain > 0) {
-+ /* Operation in this page
-+ *
-+ * i = page number
-+ * o = offset within page
-+ * l = bytes to copy
-+ */
-+ i = offset >> PAGE_SHIFT;
-+ o = offset & (PAGE_SIZE-1);
-+ l = remain;
-+ if ((o + l) > PAGE_SIZE)
-+ l = PAGE_SIZE - o;
-+
-+ pfn = (dev->agp->base >> PAGE_SHIFT) + i;
-+
-+#ifdef CONFIG_HIGHMEM
-+ /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
-+ */
-+ vaddr = kmap_atomic_pfn(pfn, KM_USER0);
-+#if WATCH_PWRITE
-+ DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
-+ i, o, l, pfn, vaddr);
-+#endif
-+ unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
-+ user_data, l);
-+ kunmap_atomic(vaddr, KM_USER0);
-+
-+ if (unwritten)
-+#endif /* CONFIG_HIGHMEM */
-+ {
-+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-+#if WATCH_PWRITE
-+ DRM_INFO("pwrite slow i %d o %d l %d "
-+ "pfn %ld vaddr %p\n",
-+ i, o, l, pfn, vaddr);
-+#endif
-+ if (vaddr == NULL) {
-+ ret = -EFAULT;
-+ goto fail;
-+ }
-+ unwritten = __copy_from_user(vaddr + o, user_data, l);
-+#if WATCH_PWRITE
-+ DRM_INFO("unwritten %ld\n", unwritten);
-+#endif
-+ iounmap(vaddr);
-+ if (unwritten) {
-+ ret = -EFAULT;
-+ goto fail;
-+ }
-+ }
-+
-+ remain -= l;
-+ user_data += l;
-+ offset += l;
-+ }
-+#if WATCH_PWRITE && 1
-+ i915_gem_clflush_object(obj);
-+ i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
-+ i915_gem_clflush_object(obj);
-+#endif
-+
-+fail:
-+ i915_gem_object_unpin(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return ret;
-+}
-+
-+int
-+i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
-+ struct drm_i915_gem_pwrite *args,
-+ struct drm_file *file_priv)
-+{
-+ int ret;
-+ loff_t offset;
-+ ssize_t written;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ ret = i915_gem_set_domain(obj, file_priv,
-+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
-+ if (ret) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+ }
-+
-+ offset = args->offset;
-+
-+ written = vfs_write(obj->filp,
-+ (char __user *)(uintptr_t) args->data_ptr,
-+ args->size, &offset);
-+ if (written != args->size) {
-+ mutex_unlock(&dev->struct_mutex);
-+ if (written < 0)
-+ return written;
-+ else
-+ return -EINVAL;
-+ }
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+}
-+
-+/**
-+ * Writes data to the object referenced by handle.
-+ *
-+ * On error, the contents of the buffer that were to be modified are undefined.
-+ */
-+int
-+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_pwrite *args = data;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ int ret = 0;
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL)
-+ return -EBADF;
-+ obj_priv = obj->driver_private;
-+
-+ /* Bounds check destination.
-+ *
-+ * XXX: This could use review for overflow issues...
-+ */
-+ if (args->offset > obj->size || args->size > obj->size ||
-+ args->offset + args->size > obj->size) {
-+ drm_gem_object_unreference(obj);
-+ return -EINVAL;
-+ }
-+
-+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
-+ * it would end up going through the fenced access, and we'll get
-+ * different detiling behavior between reading and writing.
-+ * pread/pwrite currently are reading and writing from the CPU
-+ * perspective, requiring manual detiling by the client.
-+ */
-+ if (obj_priv->tiling_mode == I915_TILING_NONE &&
-+ dev->gtt_total != 0)
-+ ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
-+ else
-+ ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
-+
-+#if WATCH_PWRITE
-+ if (ret)
-+ DRM_INFO("pwrite failed %d\n", ret);
-+#endif
-+
-+ drm_gem_object_unreference(obj);
-+
-+ return ret;
-+}
-+
-+/**
-+ * Called when user space prepares to use an object
-+ */
-+int
-+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_set_domain *args = data;
-+ struct drm_gem_object *obj;
-+ int ret;
-+
-+ if (!(dev->driver->driver_features & DRIVER_GEM))
-+ return -ENODEV;
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL)
-+ return -EBADF;
-+
-+ mutex_lock(&dev->struct_mutex);
-+#if WATCH_BUF
-+ DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
-+ obj, obj->size, args->read_domains, args->write_domain);
-+#endif
-+ ret = i915_gem_set_domain(obj, file_priv,
-+ args->read_domains, args->write_domain);
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+/**
-+ * Called when user space has done writes to this buffer
-+ */
-+int
-+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_sw_finish *args = data;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ int ret = 0;
-+
-+ if (!(dev->driver->driver_features & DRIVER_GEM))
-+ return -ENODEV;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EBADF;
-+ }
-+
-+#if WATCH_BUF
-+ DRM_INFO("%s: sw_finish %d (%p %d)\n",
-+ __func__, args->handle, obj, obj->size);
-+#endif
-+ obj_priv = obj->driver_private;
-+
-+ /* Pinned buffers may be scanout, so flush the cache */
-+ if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
-+ i915_gem_clflush_object(obj);
-+ drm_agp_chipset_flush(dev);
-+ }
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+/**
-+ * Maps the contents of an object, returning the address it is mapped
-+ * into.
-+ *
-+ * While the mapping holds a reference on the contents of the object, it doesn't
-+ * imply a ref on the object itself.
-+ */
-+int
-+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_mmap *args = data;
-+ struct drm_gem_object *obj;
-+ loff_t offset;
-+ unsigned long addr;
-+
-+ if (!(dev->driver->driver_features & DRIVER_GEM))
-+ return -ENODEV;
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL)
-+ return -EBADF;
-+
-+ offset = args->offset;
-+
-+ down_write(&current->mm->mmap_sem);
-+ addr = do_mmap(obj->filp, 0, args->size,
-+ PROT_READ | PROT_WRITE, MAP_SHARED,
-+ args->offset);
-+ up_write(&current->mm->mmap_sem);
-+ mutex_lock(&dev->struct_mutex);
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (IS_ERR((void *)addr))
-+ return addr;
-+
-+ args->addr_ptr = (uint64_t) addr;
-+
-+ return 0;
-+}
-+
-+static void
-+i915_gem_object_free_page_list(struct drm_gem_object *obj)
-+{
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int page_count = obj->size / PAGE_SIZE;
-+ int i;
-+
-+ if (obj_priv->page_list == NULL)
-+ return;
-+
-+
-+ for (i = 0; i < page_count; i++)
-+ if (obj_priv->page_list[i] != NULL) {
-+ if (obj_priv->dirty)
-+ set_page_dirty(obj_priv->page_list[i]);
-+ mark_page_accessed(obj_priv->page_list[i]);
-+ page_cache_release(obj_priv->page_list[i]);
-+ }
-+ obj_priv->dirty = 0;
-+
-+ drm_free(obj_priv->page_list,
-+ page_count * sizeof(struct page *),
-+ DRM_MEM_DRIVER);
-+ obj_priv->page_list = NULL;
-+}
-+
-+static void
-+i915_gem_object_move_to_active(struct drm_gem_object *obj)
-+{
-+ struct drm_device *dev = obj->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+
-+ /* Add a reference if we're newly entering the active list. */
-+ if (!obj_priv->active) {
-+ drm_gem_object_reference(obj);
-+ obj_priv->active = 1;
-+ }
-+ /* Move from whatever list we were on to the tail of execution. */
-+ list_move_tail(&obj_priv->list,
-+ &dev_priv->mm.active_list);
-+}
-+
-+
-+static void
-+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
-+{
-+ struct drm_device *dev = obj->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+ if (obj_priv->pin_count != 0)
-+ list_del_init(&obj_priv->list);
-+ else
-+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-+
-+ if (obj_priv->active) {
-+ obj_priv->active = 0;
-+ drm_gem_object_unreference(obj);
-+ }
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+}
-+
-+/**
-+ * Creates a new sequence number, emitting a write of it to the status page
-+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
-+ *
-+ * Must be called with struct_lock held.
-+ *
-+ * Returned sequence numbers are nonzero on success.
-+ */
-+static uint32_t
-+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_request *request;
-+ uint32_t seqno;
-+ int was_empty;
-+ RING_LOCALS;
-+
-+ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
-+ if (request == NULL)
-+ return 0;
-+
-+ /* Grab the seqno we're going to make this request be, and bump the
-+ * next (skipping 0 so it can be the reserved no-seqno value).
-+ */
-+ seqno = dev_priv->mm.next_gem_seqno;
-+ dev_priv->mm.next_gem_seqno++;
-+ if (dev_priv->mm.next_gem_seqno == 0)
-+ dev_priv->mm.next_gem_seqno++;
-+
-+ BEGIN_LP_RING(4);
-+ OUT_RING(MI_STORE_DWORD_INDEX);
-+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-+ OUT_RING(seqno);
-+
-+ OUT_RING(MI_USER_INTERRUPT);
-+ ADVANCE_LP_RING();
-+
-+ DRM_DEBUG("%d\n", seqno);
-+
-+ request->seqno = seqno;
-+ request->emitted_jiffies = jiffies;
-+ request->flush_domains = flush_domains;
-+ was_empty = list_empty(&dev_priv->mm.request_list);
-+ list_add_tail(&request->list, &dev_priv->mm.request_list);
-+
-+ if (was_empty)
-+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
-+ return seqno;
-+}
-+
-+/**
-+ * Command execution barrier
-+ *
-+ * Ensures that all commands in the ring are finished
-+ * before signalling the CPU
-+ */
-+uint32_t
-+i915_retire_commands(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-+ uint32_t flush_domains = 0;
-+ RING_LOCALS;
-+
-+ /* The sampler always gets flushed on i965 (sigh) */
-+ if (IS_I965G(dev))
-+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-+ BEGIN_LP_RING(2);
-+ OUT_RING(cmd);
-+ OUT_RING(0); /* noop */
-+ ADVANCE_LP_RING();
-+ return flush_domains;
-+}
-+
-+/**
-+ * Moves buffers associated only with the given active seqno from the active
-+ * to inactive list, potentially freeing them.
-+ */
-+static void
-+i915_gem_retire_request(struct drm_device *dev,
-+ struct drm_i915_gem_request *request)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ /* Move any buffers on the active list that are no longer referenced
-+ * by the ringbuffer to the flushing/inactive lists as appropriate.
-+ */
-+ while (!list_empty(&dev_priv->mm.active_list)) {
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+
-+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
-+ struct drm_i915_gem_object,
-+ list);
-+ obj = obj_priv->obj;
-+
-+ /* If the seqno being retired doesn't match the oldest in the
-+ * list, then the oldest in the list must still be newer than
-+ * this seqno.
-+ */
-+ if (obj_priv->last_rendering_seqno != request->seqno)
-+ return;
-+#if WATCH_LRU
-+ DRM_INFO("%s: retire %d moves to inactive list %p\n",
-+ __func__, request->seqno, obj);
-+#endif
-+
-+ if (obj->write_domain != 0) {
-+ list_move_tail(&obj_priv->list,
-+ &dev_priv->mm.flushing_list);
-+ } else {
-+ i915_gem_object_move_to_inactive(obj);
-+ }
-+ }
-+
-+ if (request->flush_domains != 0) {
-+ struct drm_i915_gem_object *obj_priv, *next;
-+
-+ /* Clear the write domain and activity from any buffers
-+ * that are just waiting for a flush matching the one retired.
-+ */
-+ list_for_each_entry_safe(obj_priv, next,
-+ &dev_priv->mm.flushing_list, list) {
-+ struct drm_gem_object *obj = obj_priv->obj;
-+
-+ if (obj->write_domain & request->flush_domains) {
-+ obj->write_domain = 0;
-+ i915_gem_object_move_to_inactive(obj);
-+ }
-+ }
-+
-+ }
-+}
-+
-+/**
-+ * Returns true if seq1 is later than seq2.
-+ */
-+static int
-+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-+{
-+ return (int32_t)(seq1 - seq2) >= 0;
-+}
-+
-+uint32_t
-+i915_get_gem_seqno(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
-+}
-+
-+/**
-+ * This function clears the request list as sequence numbers are passed.
-+ */
-+void
-+i915_gem_retire_requests(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ uint32_t seqno;
-+
-+ seqno = i915_get_gem_seqno(dev);
-+
-+ while (!list_empty(&dev_priv->mm.request_list)) {
-+ struct drm_i915_gem_request *request;
-+ uint32_t retiring_seqno;
-+
-+ request = list_first_entry(&dev_priv->mm.request_list,
-+ struct drm_i915_gem_request,
-+ list);
-+ retiring_seqno = request->seqno;
-+
-+ if (i915_seqno_passed(seqno, retiring_seqno) ||
-+ dev_priv->mm.wedged) {
-+ i915_gem_retire_request(dev, request);
-+
-+ list_del(&request->list);
-+ drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
-+ } else
-+ break;
-+ }
-+}
-+
-+void
-+i915_gem_retire_work_handler(struct work_struct *work)
-+{
-+ drm_i915_private_t *dev_priv;
-+ struct drm_device *dev;
-+
-+ dev_priv = container_of(work, drm_i915_private_t,
-+ mm.retire_work.work);
-+ dev = dev_priv->dev;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ i915_gem_retire_requests(dev);
-+ if (!list_empty(&dev_priv->mm.request_list))
-+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
-+/**
-+ * Waits for a sequence number to be signaled, and cleans up the
-+ * request and object lists appropriately for that event.
-+ */
-+int
-+i915_wait_request(struct drm_device *dev, uint32_t seqno)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ int ret = 0;
-+
-+ BUG_ON(seqno == 0);
-+
-+ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-+ dev_priv->mm.waiting_gem_seqno = seqno;
-+ i915_user_irq_get(dev);
-+ ret = wait_event_interruptible(dev_priv->irq_queue,
-+ i915_seqno_passed(i915_get_gem_seqno(dev),
-+ seqno) ||
-+ dev_priv->mm.wedged);
-+ i915_user_irq_put(dev);
-+ dev_priv->mm.waiting_gem_seqno = 0;
-+ }
-+ if (dev_priv->mm.wedged)
-+ ret = -EIO;
-+
-+ if (ret && ret != -ERESTARTSYS)
-+ DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-+ __func__, ret, seqno, i915_get_gem_seqno(dev));
-+
-+ /* Directly dispatch request retiring. While we have the work queue
-+ * to handle this, the waiter on a request often wants an associated
-+ * buffer to have made it to the inactive list, and we would need
-+ * a separate wait queue to handle that.
-+ */
-+ if (ret == 0)
-+ i915_gem_retire_requests(dev);
-+
-+ return ret;
-+}
-+
-+static void
-+i915_gem_flush(struct drm_device *dev,
-+ uint32_t invalidate_domains,
-+ uint32_t flush_domains)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ uint32_t cmd;
-+ RING_LOCALS;
-+
-+#if WATCH_EXEC
-+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
-+ invalidate_domains, flush_domains);
-+#endif
-+
-+ if (flush_domains & I915_GEM_DOMAIN_CPU)
-+ drm_agp_chipset_flush(dev);
-+
-+ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
-+ I915_GEM_DOMAIN_GTT)) {
-+ /*
-+ * read/write caches:
-+ *
-+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
-+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
-+ * also flushed at 2d versus 3d pipeline switches.
-+ *
-+ * read-only caches:
-+ *
-+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
-+ * MI_READ_FLUSH is set, and is always flushed on 965.
-+ *
-+ * I915_GEM_DOMAIN_COMMAND may not exist?
-+ *
-+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
-+ * invalidated when MI_EXE_FLUSH is set.
-+ *
-+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
-+ * invalidated with every MI_FLUSH.
-+ *
-+ * TLBs:
-+ *
-+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-+ * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
-+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
-+ * are flushed at any MI_FLUSH.
-+ */
-+
-+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-+ if ((invalidate_domains|flush_domains) &
-+ I915_GEM_DOMAIN_RENDER)
-+ cmd &= ~MI_NO_WRITE_FLUSH;
-+ if (!IS_I965G(dev)) {
-+ /*
-+ * On the 965, the sampler cache always gets flushed
-+ * and this bit is reserved.
-+ */
-+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
-+ cmd |= MI_READ_FLUSH;
-+ }
-+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
-+ cmd |= MI_EXE_FLUSH;
-+
-+#if WATCH_EXEC
-+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-+#endif
-+ BEGIN_LP_RING(2);
-+ OUT_RING(cmd);
-+ OUT_RING(0); /* noop */
-+ ADVANCE_LP_RING();
-+ }
-+}
-+
-+/**
-+ * Ensures that all rendering to the object has completed and the object is
-+ * safe to unbind from the GTT or access from the CPU.
-+ */
-+static int
-+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
-+{
-+ struct drm_device *dev = obj->dev;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int ret;
-+
-+ /* If there are writes queued to the buffer, flush and
-+ * create a new seqno to wait for.
-+ */
-+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-+ uint32_t write_domain = obj->write_domain;
-+#if WATCH_BUF
-+ DRM_INFO("%s: flushing object %p from write domain %08x\n",
-+ __func__, obj, write_domain);
-+#endif
-+ i915_gem_flush(dev, 0, write_domain);
-+
-+ i915_gem_object_move_to_active(obj);
-+ obj_priv->last_rendering_seqno = i915_add_request(dev,
-+ write_domain);
-+ BUG_ON(obj_priv->last_rendering_seqno == 0);
-+#if WATCH_LRU
-+ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-+#endif
-+ }
-+
-+ /* If there is rendering queued on the buffer being evicted, wait for
-+ * it.
-+ */
-+ if (obj_priv->active) {
-+#if WATCH_BUF
-+ DRM_INFO("%s: object %p wait for seqno %08x\n",
-+ __func__, obj, obj_priv->last_rendering_seqno);
-+#endif
-+ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
-+ if (ret != 0)
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * Unbinds an object from the GTT aperture.
-+ */
-+static int
-+i915_gem_object_unbind(struct drm_gem_object *obj)
-+{
-+ struct drm_device *dev = obj->dev;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int ret = 0;
-+
-+#if WATCH_BUF
-+ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
-+ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-+#endif
-+ if (obj_priv->gtt_space == NULL)
-+ return 0;
-+
-+ if (obj_priv->pin_count != 0) {
-+ DRM_ERROR("Attempting to unbind pinned buffer\n");
-+ return -EINVAL;
-+ }
-+
-+ /* Wait for any rendering to complete
-+ */
-+ ret = i915_gem_object_wait_rendering(obj);
-+ if (ret) {
-+ DRM_ERROR("wait_rendering failed: %d\n", ret);
-+ return ret;
-+ }
-+
-+ /* Move the object to the CPU domain to ensure that
-+ * any possible CPU writes while it's not in the GTT
-+ * are flushed when we go to remap it. This will
-+ * also ensure that all pending GPU writes are finished
-+ * before we unbind.
-+ */
-+ ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
-+ I915_GEM_DOMAIN_CPU);
-+ if (ret) {
-+ DRM_ERROR("set_domain failed: %d\n", ret);
-+ return ret;
-+ }
-+
-+ if (obj_priv->agp_mem != NULL) {
-+ drm_unbind_agp(obj_priv->agp_mem);
-+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
-+ obj_priv->agp_mem = NULL;
-+ }
-+
-+ BUG_ON(obj_priv->active);
-+
-+ i915_gem_object_free_page_list(obj);
-+
-+ if (obj_priv->gtt_space) {
-+ atomic_dec(&dev->gtt_count);
-+ atomic_sub(obj->size, &dev->gtt_memory);
-+
-+ drm_mm_put_block(obj_priv->gtt_space);
-+ obj_priv->gtt_space = NULL;
-+ }
-+
-+ /* Remove ourselves from the LRU list if present. */
-+ if (!list_empty(&obj_priv->list))
-+ list_del_init(&obj_priv->list);
-+
-+ return 0;
-+}
-+
-+static int
-+i915_gem_evict_something(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ int ret = 0;
-+
-+ for (;;) {
-+ /* If there's an inactive buffer available now, grab it
-+ * and be done.
-+ */
-+ if (!list_empty(&dev_priv->mm.inactive_list)) {
-+ obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
-+ struct drm_i915_gem_object,
-+ list);
-+ obj = obj_priv->obj;
-+ BUG_ON(obj_priv->pin_count != 0);
-+#if WATCH_LRU
-+ DRM_INFO("%s: evicting %p\n", __func__, obj);
-+#endif
-+ BUG_ON(obj_priv->active);
-+
-+ /* Wait on the rendering and unbind the buffer. */
-+ ret = i915_gem_object_unbind(obj);
-+ break;
-+ }
-+
-+ /* If we didn't get anything, but the ring is still processing
-+ * things, wait for one of those things to finish and hopefully
-+ * leave us a buffer to evict.
-+ */
-+ if (!list_empty(&dev_priv->mm.request_list)) {
-+ struct drm_i915_gem_request *request;
-+
-+ request = list_first_entry(&dev_priv->mm.request_list,
-+ struct drm_i915_gem_request,
-+ list);
-+
-+ ret = i915_wait_request(dev, request->seqno);
-+ if (ret)
-+ break;
-+
-+ /* if waiting caused an object to become inactive,
-+ * then loop around and wait for it. Otherwise, we
-+ * assume that waiting freed and unbound something,
-+ * so there should now be some space in the GTT
-+ */
-+ if (!list_empty(&dev_priv->mm.inactive_list))
-+ continue;
-+ break;
-+ }
-+
-+ /* If we didn't have anything on the request list but there
-+ * are buffers awaiting a flush, emit one and try again.
-+ * When we wait on it, those buffers waiting for that flush
-+ * will get moved to inactive.
-+ */
-+ if (!list_empty(&dev_priv->mm.flushing_list)) {
-+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-+ struct drm_i915_gem_object,
-+ list);
-+ obj = obj_priv->obj;
-+
-+ i915_gem_flush(dev,
-+ obj->write_domain,
-+ obj->write_domain);
-+ i915_add_request(dev, obj->write_domain);
-+
-+ obj = NULL;
-+ continue;
-+ }
-+
-+ DRM_ERROR("inactive empty %d request empty %d "
-+ "flushing empty %d\n",
-+ list_empty(&dev_priv->mm.inactive_list),
-+ list_empty(&dev_priv->mm.request_list),
-+ list_empty(&dev_priv->mm.flushing_list));
-+ /* If we didn't do any of the above, there's nothing to be done
-+ * and we just can't fit it in.
-+ */
-+ return -ENOMEM;
-+ }
-+ return ret;
-+}
-+
-+static int
-+i915_gem_object_get_page_list(struct drm_gem_object *obj)
-+{
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int page_count, i;
-+ struct address_space *mapping;
-+ struct inode *inode;
-+ struct page *page;
-+ int ret;
-+
-+ if (obj_priv->page_list)
-+ return 0;
-+
-+ /* Get the list of pages out of our struct file. They'll be pinned
-+ * at this point until we release them.
-+ */
-+ page_count = obj->size / PAGE_SIZE;
-+ BUG_ON(obj_priv->page_list != NULL);
-+ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
-+ DRM_MEM_DRIVER);
-+ if (obj_priv->page_list == NULL) {
-+ DRM_ERROR("Faled to allocate page list\n");
-+ return -ENOMEM;
-+ }
-+
-+ inode = obj->filp->f_path.dentry->d_inode;
-+ mapping = inode->i_mapping;
-+ for (i = 0; i < page_count; i++) {
-+ page = read_mapping_page(mapping, i, NULL);
-+ if (IS_ERR(page)) {
-+ ret = PTR_ERR(page);
-+ DRM_ERROR("read_mapping_page failed: %d\n", ret);
-+ i915_gem_object_free_page_list(obj);
-+ return ret;
-+ }
-+ obj_priv->page_list[i] = page;
-+ }
-+ return 0;
-+}
-+
-+/**
-+ * Finds free space in the GTT aperture and binds the object there.
-+ */
-+static int
-+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
-+{
-+ struct drm_device *dev = obj->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ struct drm_mm_node *free_space;
-+ int page_count, ret;
-+
-+ if (alignment == 0)
-+ alignment = PAGE_SIZE;
-+ if (alignment & (PAGE_SIZE - 1)) {
-+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
-+ return -EINVAL;
-+ }
-+
-+ search_free:
-+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
-+ obj->size, alignment, 0);
-+ if (free_space != NULL) {
-+ obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
-+ alignment);
-+ if (obj_priv->gtt_space != NULL) {
-+ obj_priv->gtt_space->private = obj;
-+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
-+ }
-+ }
-+ if (obj_priv->gtt_space == NULL) {
-+ /* If the gtt is empty and we're still having trouble
-+ * fitting our object in, we're out of memory.
-+ */
-+#if WATCH_LRU
-+ DRM_INFO("%s: GTT full, evicting something\n", __func__);
-+#endif
-+ if (list_empty(&dev_priv->mm.inactive_list) &&
-+ list_empty(&dev_priv->mm.flushing_list) &&
-+ list_empty(&dev_priv->mm.active_list)) {
-+ DRM_ERROR("GTT full, but LRU list empty\n");
-+ return -ENOMEM;
-+ }
-+
-+ ret = i915_gem_evict_something(dev);
-+ if (ret != 0) {
-+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
-+ return ret;
-+ }
-+ goto search_free;
-+ }
-+
-+#if WATCH_BUF
-+ DRM_INFO("Binding object of size %d at 0x%08x\n",
-+ obj->size, obj_priv->gtt_offset);
-+#endif
-+ ret = i915_gem_object_get_page_list(obj);
-+ if (ret) {
-+ drm_mm_put_block(obj_priv->gtt_space);
-+ obj_priv->gtt_space = NULL;
-+ return ret;
-+ }
-+
-+ page_count = obj->size / PAGE_SIZE;
-+ /* Create an AGP memory structure pointing at our pages, and bind it
-+ * into the GTT.
-+ */
-+ obj_priv->agp_mem = drm_agp_bind_pages(dev,
-+ obj_priv->page_list,
-+ page_count,
-+ obj_priv->gtt_offset);
-+ if (obj_priv->agp_mem == NULL) {
-+ i915_gem_object_free_page_list(obj);
-+ drm_mm_put_block(obj_priv->gtt_space);
-+ obj_priv->gtt_space = NULL;
-+ return -ENOMEM;
-+ }
-+ atomic_inc(&dev->gtt_count);
-+ atomic_add(obj->size, &dev->gtt_memory);
-+
-+ /* Assert that the object is not currently in any GPU domain. As it
-+ * wasn't in the GTT, there shouldn't be any way it could have been in
-+ * a GPU cache
-+ */
-+ BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-+ BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-+
-+ return 0;
-+}
-+
-+void
-+i915_gem_clflush_object(struct drm_gem_object *obj)
-+{
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+
-+ /* If we don't have a page list set up, then we're not pinned
-+ * to GPU, and we can ignore the cache flush because it'll happen
-+ * again at bind time.
-+ */
-+ if (obj_priv->page_list == NULL)
-+ return;
-+
-+ drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
-+}
-+
-+/*
-+ * Set the next domain for the specified object. This
-+ * may not actually perform the necessary flushing/invaliding though,
-+ * as that may want to be batched with other set_domain operations
-+ *
-+ * This is (we hope) the only really tricky part of gem. The goal
-+ * is fairly simple -- track which caches hold bits of the object
-+ * and make sure they remain coherent. A few concrete examples may
-+ * help to explain how it works. For shorthand, we use the notation
-+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
-+ * a pair of read and write domain masks.
-+ *
-+ * Case 1: the batch buffer
-+ *
-+ * 1. Allocated
-+ * 2. Written by CPU
-+ * 3. Mapped to GTT
-+ * 4. Read by GPU
-+ * 5. Unmapped from GTT
-+ * 6. Freed
-+ *
-+ * Let's take these a step at a time
-+ *
-+ * 1. Allocated
-+ * Pages allocated from the kernel may still have
-+ * cache contents, so we set them to (CPU, CPU) always.
-+ * 2. Written by CPU (using pwrite)
-+ * The pwrite function calls set_domain (CPU, CPU) and
-+ * this function does nothing (as nothing changes)
-+ * 3. Mapped by GTT
-+ * This function asserts that the object is not
-+ * currently in any GPU-based read or write domains
-+ * 4. Read by GPU
-+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
-+ * As write_domain is zero, this function adds in the
-+ * current read domains (CPU+COMMAND, 0).
-+ * flush_domains is set to CPU.
-+ * invalidate_domains is set to COMMAND
-+ * clflush is run to get data out of the CPU caches
-+ * then i915_dev_set_domain calls i915_gem_flush to
-+ * emit an MI_FLUSH and drm_agp_chipset_flush
-+ * 5. Unmapped from GTT
-+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
-+ * flush_domains and invalidate_domains end up both zero
-+ * so no flushing/invalidating happens
-+ * 6. Freed
-+ * yay, done
-+ *
-+ * Case 2: The shared render buffer
-+ *
-+ * 1. Allocated
-+ * 2. Mapped to GTT
-+ * 3. Read/written by GPU
-+ * 4. set_domain to (CPU,CPU)
-+ * 5. Read/written by CPU
-+ * 6. Read/written by GPU
-+ *
-+ * 1. Allocated
-+ * Same as last example, (CPU, CPU)
-+ * 2. Mapped to GTT
-+ * Nothing changes (assertions find that it is not in the GPU)
-+ * 3. Read/written by GPU
-+ * execbuffer calls set_domain (RENDER, RENDER)
-+ * flush_domains gets CPU
-+ * invalidate_domains gets GPU
-+ * clflush (obj)
-+ * MI_FLUSH and drm_agp_chipset_flush
-+ * 4. set_domain (CPU, CPU)
-+ * flush_domains gets GPU
-+ * invalidate_domains gets CPU
-+ * wait_rendering (obj) to make sure all drawing is complete.
-+ * This will include an MI_FLUSH to get the data from GPU
-+ * to memory
-+ * clflush (obj) to invalidate the CPU cache
-+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
-+ * 5. Read/written by CPU
-+ * cache lines are loaded and dirtied
-+ * 6. Read written by GPU
-+ * Same as last GPU access
-+ *
-+ * Case 3: The constant buffer
-+ *
-+ * 1. Allocated
-+ * 2. Written by CPU
-+ * 3. Read by GPU
-+ * 4. Updated (written) by CPU again
-+ * 5. Read by GPU
-+ *
-+ * 1. Allocated
-+ * (CPU, CPU)
-+ * 2. Written by CPU
-+ * (CPU, CPU)
-+ * 3. Read by GPU
-+ * (CPU+RENDER, 0)
-+ * flush_domains = CPU
-+ * invalidate_domains = RENDER
-+ * clflush (obj)
-+ * MI_FLUSH
-+ * drm_agp_chipset_flush
-+ * 4. Updated (written) by CPU again
-+ * (CPU, CPU)
-+ * flush_domains = 0 (no previous write domain)
-+ * invalidate_domains = 0 (no new read domains)
-+ * 5. Read by GPU
-+ * (CPU+RENDER, 0)
-+ * flush_domains = CPU
-+ * invalidate_domains = RENDER
-+ * clflush (obj)
-+ * MI_FLUSH
-+ * drm_agp_chipset_flush
-+ */
-+static int
-+i915_gem_object_set_domain(struct drm_gem_object *obj,
-+ uint32_t read_domains,
-+ uint32_t write_domain)
-+{
-+ struct drm_device *dev = obj->dev;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ uint32_t invalidate_domains = 0;
-+ uint32_t flush_domains = 0;
-+ int ret;
-+
-+#if WATCH_BUF
-+ DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
-+ __func__, obj,
-+ obj->read_domains, read_domains,
-+ obj->write_domain, write_domain);
-+#endif
-+ /*
-+ * If the object isn't moving to a new write domain,
-+ * let the object stay in multiple read domains
-+ */
-+ if (write_domain == 0)
-+ read_domains |= obj->read_domains;
-+ else
-+ obj_priv->dirty = 1;
-+
-+ /*
-+ * Flush the current write domain if
-+ * the new read domains don't match. Invalidate
-+ * any read domains which differ from the old
-+ * write domain
-+ */
-+ if (obj->write_domain && obj->write_domain != read_domains) {
-+ flush_domains |= obj->write_domain;
-+ invalidate_domains |= read_domains & ~obj->write_domain;
-+ }
-+ /*
-+ * Invalidate any read caches which may have
-+ * stale data. That is, any new read domains.
-+ */
-+ invalidate_domains |= read_domains & ~obj->read_domains;
-+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-+#if WATCH_BUF
-+ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
-+ __func__, flush_domains, invalidate_domains);
-+#endif
-+ /*
-+ * If we're invaliding the CPU cache and flushing a GPU cache,
-+ * then pause for rendering so that the GPU caches will be
-+ * flushed before the cpu cache is invalidated
-+ */
-+ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
-+ (flush_domains & ~(I915_GEM_DOMAIN_CPU |
-+ I915_GEM_DOMAIN_GTT))) {
-+ ret = i915_gem_object_wait_rendering(obj);
-+ if (ret)
-+ return ret;
-+ }
-+ i915_gem_clflush_object(obj);
-+ }
-+
-+ if ((write_domain | flush_domains) != 0)
-+ obj->write_domain = write_domain;
-+
-+ /* If we're invalidating the CPU domain, clear the per-page CPU
-+ * domain list as well.
-+ */
-+ if (obj_priv->page_cpu_valid != NULL &&
-+ (write_domain != 0 ||
-+ read_domains & I915_GEM_DOMAIN_CPU)) {
-+ drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
-+ DRM_MEM_DRIVER);
-+ obj_priv->page_cpu_valid = NULL;
-+ }
-+ obj->read_domains = read_domains;
-+
-+ dev->invalidate_domains |= invalidate_domains;
-+ dev->flush_domains |= flush_domains;
-+#if WATCH_BUF
-+ DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
-+ __func__,
-+ obj->read_domains, obj->write_domain,
-+ dev->invalidate_domains, dev->flush_domains);
-+#endif
-+ return 0;
-+}
-+
-+/**
-+ * Set the read/write domain on a range of the object.
-+ *
-+ * Currently only implemented for CPU reads, otherwise drops to normal
-+ * i915_gem_object_set_domain().
-+ */
-+static int
-+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
-+ uint64_t offset,
-+ uint64_t size,
-+ uint32_t read_domains,
-+ uint32_t write_domain)
-+{
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int ret, i;
-+
-+ if (obj->read_domains & I915_GEM_DOMAIN_CPU)
-+ return 0;
-+
-+ if (read_domains != I915_GEM_DOMAIN_CPU ||
-+ write_domain != 0)
-+ return i915_gem_object_set_domain(obj,
-+ read_domains, write_domain);
-+
-+ /* Wait on any GPU rendering to the object to be flushed. */
-+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
-+ ret = i915_gem_object_wait_rendering(obj);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ if (obj_priv->page_cpu_valid == NULL) {
-+ obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
-+ DRM_MEM_DRIVER);
-+ }
-+
-+ /* Flush the cache on any pages that are still invalid from the CPU's
-+ * perspective.
-+ */
-+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
-+ if (obj_priv->page_cpu_valid[i])
-+ continue;
-+
-+ drm_clflush_pages(obj_priv->page_list + i, 1);
-+
-+ obj_priv->page_cpu_valid[i] = 1;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * Once all of the objects have been set in the proper domain,
-+ * perform the necessary flush and invalidate operations.
-+ *
-+ * Returns the write domains flushed, for use in flush tracking.
-+ */
-+static uint32_t
-+i915_gem_dev_set_domain(struct drm_device *dev)
-+{
-+ uint32_t flush_domains = dev->flush_domains;
-+
-+ /*
-+ * Now that all the buffers are synced to the proper domains,
-+ * flush and invalidate the collected domains
-+ */
-+ if (dev->invalidate_domains | dev->flush_domains) {
-+#if WATCH_EXEC
-+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-+ __func__,
-+ dev->invalidate_domains,
-+ dev->flush_domains);
-+#endif
-+ i915_gem_flush(dev,
-+ dev->invalidate_domains,
-+ dev->flush_domains);
-+ dev->invalidate_domains = 0;
-+ dev->flush_domains = 0;
-+ }
-+
-+ return flush_domains;
-+}
-+
-+/**
-+ * Pin an object to the GTT and evaluate the relocations landing in it.
-+ */
-+static int
-+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
-+ struct drm_file *file_priv,
-+ struct drm_i915_gem_exec_object *entry)
-+{
-+ struct drm_device *dev = obj->dev;
-+ struct drm_i915_gem_relocation_entry reloc;
-+ struct drm_i915_gem_relocation_entry __user *relocs;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int i, ret;
-+ uint32_t last_reloc_offset = -1;
-+ void *reloc_page = NULL;
-+
-+ /* Choose the GTT offset for our buffer and put it there. */
-+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
-+ if (ret)
-+ return ret;
-+
-+ entry->offset = obj_priv->gtt_offset;
-+
-+ relocs = (struct drm_i915_gem_relocation_entry __user *)
-+ (uintptr_t) entry->relocs_ptr;
-+ /* Apply the relocations, using the GTT aperture to avoid cache
-+ * flushing requirements.
-+ */
-+ for (i = 0; i < entry->relocation_count; i++) {
-+ struct drm_gem_object *target_obj;
-+ struct drm_i915_gem_object *target_obj_priv;
-+ uint32_t reloc_val, reloc_offset, *reloc_entry;
-+ int ret;
-+
-+ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
-+ if (ret != 0) {
-+ i915_gem_object_unpin(obj);
-+ return ret;
-+ }
-+
-+ target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-+ reloc.target_handle);
-+ if (target_obj == NULL) {
-+ i915_gem_object_unpin(obj);
-+ return -EBADF;
-+ }
-+ target_obj_priv = target_obj->driver_private;
-+
-+ /* The target buffer should have appeared before us in the
-+ * exec_object list, so it should have a GTT space bound by now.
-+ */
-+ if (target_obj_priv->gtt_space == NULL) {
-+ DRM_ERROR("No GTT space found for object %d\n",
-+ reloc.target_handle);
-+ drm_gem_object_unreference(target_obj);
-+ i915_gem_object_unpin(obj);
-+ return -EINVAL;
-+ }
-+
-+ if (reloc.offset > obj->size - 4) {
-+ DRM_ERROR("Relocation beyond object bounds: "
-+ "obj %p target %d offset %d size %d.\n",
-+ obj, reloc.target_handle,
-+ (int) reloc.offset, (int) obj->size);
-+ drm_gem_object_unreference(target_obj);
-+ i915_gem_object_unpin(obj);
-+ return -EINVAL;
-+ }
-+ if (reloc.offset & 3) {
-+ DRM_ERROR("Relocation not 4-byte aligned: "
-+ "obj %p target %d offset %d.\n",
-+ obj, reloc.target_handle,
-+ (int) reloc.offset);
-+ drm_gem_object_unreference(target_obj);
-+ i915_gem_object_unpin(obj);
-+ return -EINVAL;
-+ }
-+
-+ if (reloc.write_domain && target_obj->pending_write_domain &&
-+ reloc.write_domain != target_obj->pending_write_domain) {
-+ DRM_ERROR("Write domain conflict: "
-+ "obj %p target %d offset %d "
-+ "new %08x old %08x\n",
-+ obj, reloc.target_handle,
-+ (int) reloc.offset,
-+ reloc.write_domain,
-+ target_obj->pending_write_domain);
-+ drm_gem_object_unreference(target_obj);
-+ i915_gem_object_unpin(obj);
-+ return -EINVAL;
-+ }
-+
-+#if WATCH_RELOC
-+ DRM_INFO("%s: obj %p offset %08x target %d "
-+ "read %08x write %08x gtt %08x "
-+ "presumed %08x delta %08x\n",
-+ __func__,
-+ obj,
-+ (int) reloc.offset,
-+ (int) reloc.target_handle,
-+ (int) reloc.read_domains,
-+ (int) reloc.write_domain,
-+ (int) target_obj_priv->gtt_offset,
-+ (int) reloc.presumed_offset,
-+ reloc.delta);
-+#endif
-+
-+ target_obj->pending_read_domains |= reloc.read_domains;
-+ target_obj->pending_write_domain |= reloc.write_domain;
-+
-+ /* If the relocation already has the right value in it, no
-+ * more work needs to be done.
-+ */
-+ if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
-+ drm_gem_object_unreference(target_obj);
-+ continue;
-+ }
-+
-+ /* Now that we're going to actually write some data in,
-+ * make sure that any rendering using this buffer's contents
-+ * is completed.
-+ */
-+ i915_gem_object_wait_rendering(obj);
-+
-+ /* As we're writing through the gtt, flush
-+ * any CPU writes before we write the relocations
-+ */
-+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-+ i915_gem_clflush_object(obj);
-+ drm_agp_chipset_flush(dev);
-+ obj->write_domain = 0;
-+ }
-+
-+ /* Map the page containing the relocation we're going to
-+ * perform.
-+ */
-+ reloc_offset = obj_priv->gtt_offset + reloc.offset;
-+ if (reloc_page == NULL ||
-+ (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
-+ (reloc_offset & ~(PAGE_SIZE - 1))) {
-+ if (reloc_page != NULL)
-+ iounmap(reloc_page);
-+
-+ reloc_page = ioremap(dev->agp->base +
-+ (reloc_offset & ~(PAGE_SIZE - 1)),
-+ PAGE_SIZE);
-+ last_reloc_offset = reloc_offset;
-+ if (reloc_page == NULL) {
-+ drm_gem_object_unreference(target_obj);
-+ i915_gem_object_unpin(obj);
-+ return -ENOMEM;
-+ }
-+ }
-+
-+ reloc_entry = (uint32_t *)((char *)reloc_page +
-+ (reloc_offset & (PAGE_SIZE - 1)));
-+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
-+
-+#if WATCH_BUF
-+ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-+ obj, (unsigned int) reloc.offset,
-+ readl(reloc_entry), reloc_val);
-+#endif
-+ writel(reloc_val, reloc_entry);
-+
-+ /* Write the updated presumed offset for this entry back out
-+ * to the user.
-+ */
-+ reloc.presumed_offset = target_obj_priv->gtt_offset;
-+ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
-+ if (ret != 0) {
-+ drm_gem_object_unreference(target_obj);
-+ i915_gem_object_unpin(obj);
-+ return ret;
-+ }
-+
-+ drm_gem_object_unreference(target_obj);
-+ }
-+
-+ if (reloc_page != NULL)
-+ iounmap(reloc_page);
-+
-+#if WATCH_BUF
-+ if (0)
-+ i915_gem_dump_object(obj, 128, __func__, ~0);
-+#endif
-+ return 0;
-+}
-+
-+/** Dispatch a batchbuffer to the ring
-+ */
-+static int
-+i915_dispatch_gem_execbuffer(struct drm_device *dev,
-+ struct drm_i915_gem_execbuffer *exec,
-+ uint64_t exec_offset)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
-+ (uintptr_t) exec->cliprects_ptr;
-+ int nbox = exec->num_cliprects;
-+ int i = 0, count;
-+ uint32_t exec_start, exec_len;
-+ RING_LOCALS;
-+
-+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-+ exec_len = (uint32_t) exec->batch_len;
-+
-+ if ((exec_start | exec_len) & 0x7) {
-+ DRM_ERROR("alignment\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!exec_start)
-+ return -EINVAL;
-+
-+ count = nbox ? nbox : 1;
-+
-+ for (i = 0; i < count; i++) {
-+ if (i < nbox) {
-+ int ret = i915_emit_box(dev, boxes, i,
-+ exec->DR1, exec->DR4);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ if (IS_I830(dev) || IS_845G(dev)) {
-+ BEGIN_LP_RING(4);
-+ OUT_RING(MI_BATCH_BUFFER);
-+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-+ OUT_RING(exec_start + exec_len - 4);
-+ OUT_RING(0);
-+ ADVANCE_LP_RING();
-+ } else {
-+ BEGIN_LP_RING(2);
-+ if (IS_I965G(dev)) {
-+ OUT_RING(MI_BATCH_BUFFER_START |
-+ (2 << 6) |
-+ MI_BATCH_NON_SECURE_I965);
-+ OUT_RING(exec_start);
-+ } else {
-+ OUT_RING(MI_BATCH_BUFFER_START |
-+ (2 << 6));
-+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-+ }
-+ ADVANCE_LP_RING();
-+ }
-+ }
-+
-+ /* XXX breadcrumb */
-+ return 0;
-+}
-+
-+/* Throttle our rendering by waiting until the ring has completed our requests
-+ * emitted over 20 msec ago.
-+ *
-+ * This should get us reasonable parallelism between CPU and GPU but also
-+ * relatively low latency when blocking on a particular request to finish.
-+ */
-+static int
-+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
-+{
-+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-+ int ret = 0;
-+ uint32_t seqno;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ seqno = i915_file_priv->mm.last_gem_throttle_seqno;
-+ i915_file_priv->mm.last_gem_throttle_seqno =
-+ i915_file_priv->mm.last_gem_seqno;
-+ if (seqno)
-+ ret = i915_wait_request(dev, seqno);
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+int
-+i915_gem_execbuffer(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-+ struct drm_i915_gem_execbuffer *args = data;
-+ struct drm_i915_gem_exec_object *exec_list = NULL;
-+ struct drm_gem_object **object_list = NULL;
-+ struct drm_gem_object *batch_obj;
-+ int ret, i, pinned = 0;
-+ uint64_t exec_offset;
-+ uint32_t seqno, flush_domains;
-+
-+#if WATCH_EXEC
-+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-+#endif
-+
-+ /* Copy in the exec list from userland */
-+ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
-+ DRM_MEM_DRIVER);
-+ object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
-+ DRM_MEM_DRIVER);
-+ if (exec_list == NULL || object_list == NULL) {
-+ DRM_ERROR("Failed to allocate exec or object list "
-+ "for %d buffers\n",
-+ args->buffer_count);
-+ ret = -ENOMEM;
-+ goto pre_mutex_err;
-+ }
-+ ret = copy_from_user(exec_list,
-+ (struct drm_i915_relocation_entry __user *)
-+ (uintptr_t) args->buffers_ptr,
-+ sizeof(*exec_list) * args->buffer_count);
-+ if (ret != 0) {
-+ DRM_ERROR("copy %d exec entries failed %d\n",
-+ args->buffer_count, ret);
-+ goto pre_mutex_err;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+
-+ if (dev_priv->mm.wedged) {
-+ DRM_ERROR("Execbuf while wedged\n");
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EIO;
-+ }
-+
-+ if (dev_priv->mm.suspended) {
-+ DRM_ERROR("Execbuf while VT-switched.\n");
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EBUSY;
-+ }
-+
-+ /* Zero the gloabl flush/invalidate flags. These
-+ * will be modified as each object is bound to the
-+ * gtt
-+ */
-+ dev->invalidate_domains = 0;
-+ dev->flush_domains = 0;
-+
-+ /* Look up object handles and perform the relocations */
-+ for (i = 0; i < args->buffer_count; i++) {
-+ object_list[i] = drm_gem_object_lookup(dev, file_priv,
-+ exec_list[i].handle);
-+ if (object_list[i] == NULL) {
-+ DRM_ERROR("Invalid object handle %d at index %d\n",
-+ exec_list[i].handle, i);
-+ ret = -EBADF;
-+ goto err;
-+ }
-+
-+ object_list[i]->pending_read_domains = 0;
-+ object_list[i]->pending_write_domain = 0;
-+ ret = i915_gem_object_pin_and_relocate(object_list[i],
-+ file_priv,
-+ &exec_list[i]);
-+ if (ret) {
-+ DRM_ERROR("object bind and relocate failed %d\n", ret);
-+ goto err;
-+ }
-+ pinned = i + 1;
-+ }
-+
-+ /* Set the pending read domains for the batch buffer to COMMAND */
-+ batch_obj = object_list[args->buffer_count-1];
-+ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
-+ batch_obj->pending_write_domain = 0;
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+
-+ for (i = 0; i < args->buffer_count; i++) {
-+ struct drm_gem_object *obj = object_list[i];
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+
-+ if (obj_priv->gtt_space == NULL) {
-+ /* We evicted the buffer in the process of validating
-+ * our set of buffers in. We could try to recover by
-+ * kicking them everything out and trying again from
-+ * the start.
-+ */
-+ ret = -ENOMEM;
-+ goto err;
-+ }
-+
-+ /* make sure all previous memory operations have passed */
-+ ret = i915_gem_object_set_domain(obj,
-+ obj->pending_read_domains,
-+ obj->pending_write_domain);
-+ if (ret)
-+ goto err;
-+ }
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+
-+ /* Flush/invalidate caches and chipset buffer */
-+ flush_domains = i915_gem_dev_set_domain(dev);
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+
-+#if WATCH_COHERENCY
-+ for (i = 0; i < args->buffer_count; i++) {
-+ i915_gem_object_check_coherency(object_list[i],
-+ exec_list[i].handle);
-+ }
-+#endif
-+
-+ exec_offset = exec_list[args->buffer_count - 1].offset;
-+
-+#if WATCH_EXEC
-+ i915_gem_dump_object(object_list[args->buffer_count - 1],
-+ args->batch_len,
-+ __func__,
-+ ~0);
-+#endif
-+
-+ (void)i915_add_request(dev, flush_domains);
-+
-+ /* Exec the batchbuffer */
-+ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
-+ if (ret) {
-+ DRM_ERROR("dispatch failed %d\n", ret);
-+ goto err;
-+ }
-+
-+ /*
-+ * Ensure that the commands in the batch buffer are
-+ * finished before the interrupt fires
-+ */
-+ flush_domains = i915_retire_commands(dev);
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+
-+ /*
-+ * Get a seqno representing the execution of the current buffer,
-+ * which we can wait on. We would like to mitigate these interrupts,
-+ * likely by only creating seqnos occasionally (so that we have
-+ * *some* interrupts representing completion of buffers that we can
-+ * wait on when trying to clear up gtt space).
-+ */
-+ seqno = i915_add_request(dev, flush_domains);
-+ BUG_ON(seqno == 0);
-+ i915_file_priv->mm.last_gem_seqno = seqno;
-+ for (i = 0; i < args->buffer_count; i++) {
-+ struct drm_gem_object *obj = object_list[i];
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+
-+ i915_gem_object_move_to_active(obj);
-+ obj_priv->last_rendering_seqno = seqno;
-+#if WATCH_LRU
-+ DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-+#endif
-+ }
-+#if WATCH_LRU
-+ i915_dump_lru(dev, __func__);
-+#endif
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+
-+ /* Copy the new buffer offsets back to the user's exec list. */
-+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-+ (uintptr_t) args->buffers_ptr,
-+ exec_list,
-+ sizeof(*exec_list) * args->buffer_count);
-+ if (ret)
-+ DRM_ERROR("failed to copy %d exec entries "
-+ "back to user (%d)\n",
-+ args->buffer_count, ret);
-+err:
-+ if (object_list != NULL) {
-+ for (i = 0; i < pinned; i++)
-+ i915_gem_object_unpin(object_list[i]);
-+
-+ for (i = 0; i < args->buffer_count; i++)
-+ drm_gem_object_unreference(object_list[i]);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+pre_mutex_err:
-+ drm_free(object_list, sizeof(*object_list) * args->buffer_count,
-+ DRM_MEM_DRIVER);
-+ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
-+ DRM_MEM_DRIVER);
-+
-+ return ret;
-+}
-+
-+int
-+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
-+{
-+ struct drm_device *dev = obj->dev;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int ret;
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+ if (obj_priv->gtt_space == NULL) {
-+ ret = i915_gem_object_bind_to_gtt(obj, alignment);
-+ if (ret != 0) {
-+ DRM_ERROR("Failure to bind: %d", ret);
-+ return ret;
-+ }
-+ }
-+ obj_priv->pin_count++;
-+
-+ /* If the object is not active and not pending a flush,
-+ * remove it from the inactive list
-+ */
-+ if (obj_priv->pin_count == 1) {
-+ atomic_inc(&dev->pin_count);
-+ atomic_add(obj->size, &dev->pin_memory);
-+ if (!obj_priv->active &&
-+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-+ I915_GEM_DOMAIN_GTT)) == 0 &&
-+ !list_empty(&obj_priv->list))
-+ list_del_init(&obj_priv->list);
-+ }
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+
-+ return 0;
-+}
-+
-+void
-+i915_gem_object_unpin(struct drm_gem_object *obj)
-+{
-+ struct drm_device *dev = obj->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+ obj_priv->pin_count--;
-+ BUG_ON(obj_priv->pin_count < 0);
-+ BUG_ON(obj_priv->gtt_space == NULL);
-+
-+ /* If the object is no longer pinned, and is
-+ * neither active nor being flushed, then stick it on
-+ * the inactive list
-+ */
-+ if (obj_priv->pin_count == 0) {
-+ if (!obj_priv->active &&
-+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-+ I915_GEM_DOMAIN_GTT)) == 0)
-+ list_move_tail(&obj_priv->list,
-+ &dev_priv->mm.inactive_list);
-+ atomic_dec(&dev->pin_count);
-+ atomic_sub(obj->size, &dev->pin_memory);
-+ }
-+ i915_verify_inactive(dev, __FILE__, __LINE__);
-+}
-+
-+int
-+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_pin *args = data;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL) {
-+ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
-+ args->handle);
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EBADF;
-+ }
-+ obj_priv = obj->driver_private;
-+
-+ ret = i915_gem_object_pin(obj, args->alignment);
-+ if (ret != 0) {
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+ }
-+
-+ /* XXX - flush the CPU caches for pinned objects
-+ * as the X server doesn't manage domains yet
-+ */
-+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
-+ i915_gem_clflush_object(obj);
-+ drm_agp_chipset_flush(dev);
-+ obj->write_domain = 0;
-+ }
-+ args->offset = obj_priv->gtt_offset;
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+}
-+
-+int
-+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_pin *args = data;
-+ struct drm_gem_object *obj;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL) {
-+ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
-+ args->handle);
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EBADF;
-+ }
-+
-+ i915_gem_object_unpin(obj);
-+
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ return 0;
-+}
-+
-+int
-+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_busy *args = data;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL) {
-+ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
-+ args->handle);
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EBADF;
-+ }
-+
-+ obj_priv = obj->driver_private;
-+ args->busy = obj_priv->active;
-+
-+ drm_gem_object_unreference(obj);
-+ mutex_unlock(&dev->struct_mutex);
-+ return 0;
-+}
-+
-+int
-+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ return i915_gem_ring_throttle(dev, file_priv);
-+}
-+
-+int i915_gem_init_object(struct drm_gem_object *obj)
-+{
-+ struct drm_i915_gem_object *obj_priv;
-+
-+ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
-+ if (obj_priv == NULL)
-+ return -ENOMEM;
-+
-+ /*
-+ * We've just allocated pages from the kernel,
-+ * so they've just been written by the CPU with
-+ * zeros. They'll need to be clflushed before we
-+ * use them with the GPU.
-+ */
-+ obj->write_domain = I915_GEM_DOMAIN_CPU;
-+ obj->read_domains = I915_GEM_DOMAIN_CPU;
-+
-+ obj->driver_private = obj_priv;
-+ obj_priv->obj = obj;
-+ INIT_LIST_HEAD(&obj_priv->list);
-+ return 0;
-+}
-+
-+void i915_gem_free_object(struct drm_gem_object *obj)
-+{
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+
-+ while (obj_priv->pin_count > 0)
-+ i915_gem_object_unpin(obj);
-+
-+ i915_gem_object_unbind(obj);
-+
-+ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
-+ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
-+}
-+
-+static int
-+i915_gem_set_domain(struct drm_gem_object *obj,
-+ struct drm_file *file_priv,
-+ uint32_t read_domains,
-+ uint32_t write_domain)
-+{
-+ struct drm_device *dev = obj->dev;
-+ int ret;
-+ uint32_t flush_domains;
-+
-+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-+
-+ ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
-+ if (ret)
-+ return ret;
-+ flush_domains = i915_gem_dev_set_domain(obj->dev);
-+
-+ if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
-+ (void) i915_add_request(dev, flush_domains);
-+
-+ return 0;
-+}
-+
-+/** Unbinds all objects that are on the given buffer list. */
-+static int
-+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
-+{
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ int ret;
-+
-+ while (!list_empty(head)) {
-+ obj_priv = list_first_entry(head,
-+ struct drm_i915_gem_object,
-+ list);
-+ obj = obj_priv->obj;
-+
-+ if (obj_priv->pin_count != 0) {
-+ DRM_ERROR("Pinned object in unbind list\n");
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EINVAL;
-+ }
-+
-+ ret = i915_gem_object_unbind(obj);
-+ if (ret != 0) {
-+ DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
-+ ret);
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+ }
-+ }
-+
-+
-+ return 0;
-+}
-+
-+static int
-+i915_gem_idle(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ uint32_t seqno, cur_seqno, last_seqno;
-+ int stuck, ret;
-+
-+ if (dev_priv->mm.suspended)
-+ return 0;
-+
-+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
-+ * We need to replace this with a semaphore, or something.
-+ */
-+ dev_priv->mm.suspended = 1;
-+
-+ i915_kernel_lost_context(dev);
-+
-+ /* Flush the GPU along with all non-CPU write domains
-+ */
-+ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
-+ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-+ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
-+ I915_GEM_DOMAIN_GTT));
-+
-+ if (seqno == 0) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -ENOMEM;
-+ }
-+
-+ dev_priv->mm.waiting_gem_seqno = seqno;
-+ last_seqno = 0;
-+ stuck = 0;
-+ for (;;) {
-+ cur_seqno = i915_get_gem_seqno(dev);
-+ if (i915_seqno_passed(cur_seqno, seqno))
-+ break;
-+ if (last_seqno == cur_seqno) {
-+ if (stuck++ > 100) {
-+ DRM_ERROR("hardware wedged\n");
-+ dev_priv->mm.wedged = 1;
-+ DRM_WAKEUP(&dev_priv->irq_queue);
-+ break;
-+ }
-+ }
-+ msleep(10);
-+ last_seqno = cur_seqno;
-+ }
-+ dev_priv->mm.waiting_gem_seqno = 0;
-+
-+ i915_gem_retire_requests(dev);
-+
-+ /* Active and flushing should now be empty as we've
-+ * waited for a sequence higher than any pending execbuffer
-+ */
-+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
-+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-+
-+ /* Request should now be empty as we've also waited
-+ * for the last request in the list
-+ */
-+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
-+
-+ /* Move all buffers out of the GTT. */
-+ ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
-+ if (ret)
-+ return ret;
-+
-+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
-+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
-+ return 0;
-+}
-+
-+static int
-+i915_gem_init_hws(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ int ret;
-+
-+ /* If we need a physical address for the status page, it's already
-+ * initialized at driver load time.
-+ */
-+ if (!I915_NEED_GFX_HWS(dev))
-+ return 0;
-+
-+ obj = drm_gem_object_alloc(dev, 4096);
-+ if (obj == NULL) {
-+ DRM_ERROR("Failed to allocate status page\n");
-+ return -ENOMEM;
-+ }
-+ obj_priv = obj->driver_private;
-+
-+ ret = i915_gem_object_pin(obj, 4096);
-+ if (ret != 0) {
-+ drm_gem_object_unreference(obj);
-+ return ret;
-+ }
-+
-+ dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-+ dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
-+ dev_priv->hws_map.size = 4096;
-+ dev_priv->hws_map.type = 0;
-+ dev_priv->hws_map.flags = 0;
-+ dev_priv->hws_map.mtrr = 0;
-+
-+ drm_core_ioremap(&dev_priv->hws_map, dev);
-+ if (dev_priv->hws_map.handle == NULL) {
-+ DRM_ERROR("Failed to map status page.\n");
-+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-+ drm_gem_object_unreference(obj);
-+ return -EINVAL;
-+ }
-+ dev_priv->hws_obj = obj;
-+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
-+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-+ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-+
-+ return 0;
-+}
-+
-+static int
-+i915_gem_init_ringbuffer(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+ int ret;
-+
-+ ret = i915_gem_init_hws(dev);
-+ if (ret != 0)
-+ return ret;
-+
-+ obj = drm_gem_object_alloc(dev, 128 * 1024);
-+ if (obj == NULL) {
-+ DRM_ERROR("Failed to allocate ringbuffer\n");
-+ return -ENOMEM;
-+ }
-+ obj_priv = obj->driver_private;
-+
-+ ret = i915_gem_object_pin(obj, 4096);
-+ if (ret != 0) {
-+ drm_gem_object_unreference(obj);
-+ return ret;
-+ }
-+
-+ /* Set up the kernel mapping for the ring. */
-+ dev_priv->ring.Size = obj->size;
-+ dev_priv->ring.tail_mask = obj->size - 1;
-+
-+ dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
-+ dev_priv->ring.map.size = obj->size;
-+ dev_priv->ring.map.type = 0;
-+ dev_priv->ring.map.flags = 0;
-+ dev_priv->ring.map.mtrr = 0;
-+
-+ drm_core_ioremap(&dev_priv->ring.map, dev);
-+ if (dev_priv->ring.map.handle == NULL) {
-+ DRM_ERROR("Failed to map ringbuffer.\n");
-+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-+ drm_gem_object_unreference(obj);
-+ return -EINVAL;
-+ }
-+ dev_priv->ring.ring_obj = obj;
-+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-+
-+ /* Stop the ring if it's running. */
-+ I915_WRITE(PRB0_CTL, 0);
-+ I915_WRITE(PRB0_HEAD, 0);
-+ I915_WRITE(PRB0_TAIL, 0);
-+ I915_WRITE(PRB0_START, 0);
-+
-+ /* Initialize the ring. */
-+ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-+ I915_WRITE(PRB0_CTL,
-+ ((obj->size - 4096) & RING_NR_PAGES) |
-+ RING_NO_REPORT |
-+ RING_VALID);
-+
-+ /* Update our cache of the ring state */
-+ i915_kernel_lost_context(dev);
-+
-+ return 0;
-+}
-+
-+static void
-+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ if (dev_priv->ring.ring_obj == NULL)
-+ return;
-+
-+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
-+
-+ i915_gem_object_unpin(dev_priv->ring.ring_obj);
-+ drm_gem_object_unreference(dev_priv->ring.ring_obj);
-+ dev_priv->ring.ring_obj = NULL;
-+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-+
-+ if (dev_priv->hws_obj != NULL) {
-+ i915_gem_object_unpin(dev_priv->hws_obj);
-+ drm_gem_object_unreference(dev_priv->hws_obj);
-+ dev_priv->hws_obj = NULL;
-+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-+
-+ /* Write high address into HWS_PGA when disabling. */
-+ I915_WRITE(HWS_PGA, 0x1ffff000);
-+ }
-+}
-+
-+int
-+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ int ret;
-+
-+ if (dev_priv->mm.wedged) {
-+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
-+ dev_priv->mm.wedged = 0;
-+ }
-+
-+ ret = i915_gem_init_ringbuffer(dev);
-+ if (ret != 0)
-+ return ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
-+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
-+ dev_priv->mm.suspended = 0;
-+ mutex_unlock(&dev->struct_mutex);
-+ return 0;
-+}
-+
-+int
-+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = i915_gem_idle(dev);
-+ if (ret == 0)
-+ i915_gem_cleanup_ringbuffer(dev);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+}
-+
-+void
-+i915_gem_lastclose(struct drm_device *dev)
-+{
-+ int ret;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (dev_priv->ring.ring_obj != NULL) {
-+ ret = i915_gem_idle(dev);
-+ if (ret)
-+ DRM_ERROR("failed to idle hardware: %d\n", ret);
-+
-+ i915_gem_cleanup_ringbuffer(dev);
-+ }
-+
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
-+void
-+i915_gem_load(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+
-+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
-+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
-+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
-+ i915_gem_retire_work_handler);
-+ dev_priv->mm.next_gem_seqno = 1;
-+
-+ i915_gem_detect_bit_6_swizzle(dev);
-+}
-diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
-new file mode 100644
-index 0000000..131c088
---- /dev/null
-+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
-@@ -0,0 +1,201 @@
-+/*
-+ * Copyright © 2008 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Keith Packard <keithp@keithp.com>
-+ *
-+ */
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "i915_drm.h"
-+#include "i915_drv.h"
-+
-+#if WATCH_INACTIVE
-+void
-+i915_verify_inactive(struct drm_device *dev, char *file, int line)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+
-+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-+ obj = obj_priv->obj;
-+ if (obj_priv->pin_count || obj_priv->active ||
-+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-+ I915_GEM_DOMAIN_GTT)))
-+ DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
-+ obj,
-+ obj_priv->pin_count, obj_priv->active,
-+ obj->write_domain, file, line);
-+ }
-+}
-+#endif /* WATCH_INACTIVE */
-+
-+
-+#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
-+static void
-+i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
-+ uint32_t bias, uint32_t mark)
-+{
-+ uint32_t *mem = kmap_atomic(page, KM_USER0);
-+ int i;
-+ for (i = start; i < end; i += 4)
-+ DRM_INFO("%08x: %08x%s\n",
-+ (int) (bias + i), mem[i / 4],
-+ (bias + i == mark) ? " ********" : "");
-+ kunmap_atomic(mem, KM_USER0);
-+ /* give syslog time to catch up */
-+ msleep(1);
-+}
-+
-+void
-+i915_gem_dump_object(struct drm_gem_object *obj, int len,
-+ const char *where, uint32_t mark)
-+{
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int page;
-+
-+ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
-+ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
-+ int page_len, chunk, chunk_len;
-+
-+ page_len = len - page * PAGE_SIZE;
-+ if (page_len > PAGE_SIZE)
-+ page_len = PAGE_SIZE;
-+
-+ for (chunk = 0; chunk < page_len; chunk += 128) {
-+ chunk_len = page_len - chunk;
-+ if (chunk_len > 128)
-+ chunk_len = 128;
-+ i915_gem_dump_page(obj_priv->page_list[page],
-+ chunk, chunk + chunk_len,
-+ obj_priv->gtt_offset +
-+ page * PAGE_SIZE,
-+ mark);
-+ }
-+ }
-+}
-+#endif
-+
-+#if WATCH_LRU
-+void
-+i915_dump_lru(struct drm_device *dev, const char *where)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv;
-+
-+ DRM_INFO("active list %s {\n", where);
-+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-+ list)
-+ {
-+ DRM_INFO(" %p: %08x\n", obj_priv,
-+ obj_priv->last_rendering_seqno);
-+ }
-+ DRM_INFO("}\n");
-+ DRM_INFO("flushing list %s {\n", where);
-+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-+ list)
-+ {
-+ DRM_INFO(" %p: %08x\n", obj_priv,
-+ obj_priv->last_rendering_seqno);
-+ }
-+ DRM_INFO("}\n");
-+ DRM_INFO("inactive %s {\n", where);
-+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-+ DRM_INFO(" %p: %08x\n", obj_priv,
-+ obj_priv->last_rendering_seqno);
-+ }
-+ DRM_INFO("}\n");
-+}
-+#endif
-+
-+
-+#if WATCH_COHERENCY
-+void
-+i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
-+{
-+ struct drm_device *dev = obj->dev;
-+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ int page;
-+ uint32_t *gtt_mapping;
-+ uint32_t *backing_map = NULL;
-+ int bad_count = 0;
-+
-+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
-+ __func__, obj, obj_priv->gtt_offset, handle,
-+ obj->size / 1024);
-+
-+ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
-+ obj->size);
-+ if (gtt_mapping == NULL) {
-+ DRM_ERROR("failed to map GTT space\n");
-+ return;
-+ }
-+
-+ for (page = 0; page < obj->size / PAGE_SIZE; page++) {
-+ int i;
-+
-+ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
-+
-+ if (backing_map == NULL) {
-+ DRM_ERROR("failed to map backing page\n");
-+ goto out;
-+ }
-+
-+ for (i = 0; i < PAGE_SIZE / 4; i++) {
-+ uint32_t cpuval = backing_map[i];
-+ uint32_t gttval = readl(gtt_mapping +
-+ page * 1024 + i);
-+
-+ if (cpuval != gttval) {
-+ DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
-+ "0x%08x vs 0x%08x\n",
-+ (int)(obj_priv->gtt_offset +
-+ page * PAGE_SIZE + i * 4),
-+ cpuval, gttval);
-+ if (bad_count++ >= 8) {
-+ DRM_INFO("...\n");
-+ goto out;
-+ }
-+ }
-+ }
-+ kunmap_atomic(backing_map, KM_USER0);
-+ backing_map = NULL;
-+ }
-+
-+ out:
-+ if (backing_map != NULL)
-+ kunmap_atomic(backing_map, KM_USER0);
-+ iounmap(gtt_mapping);
-+
-+ /* give syslog time to catch up */
-+ msleep(1);
-+
-+ /* Directly flush the object, since we just loaded values with the CPU
-+ * from the backing pages and we don't want to disturb the cache
-+ * management that we're trying to observe.
-+ */
-+
-+ i915_gem_clflush_object(obj);
-+}
-+#endif
-diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
-new file mode 100644
-index 0000000..15d4160
---- /dev/null
-+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
-@@ -0,0 +1,292 @@
-+/*
-+ * Copyright © 2008 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ * Keith Packard <keithp@keithp.com>
-+ *
-+ */
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "i915_drm.h"
-+#include "i915_drv.h"
-+
-+static int i915_gem_active_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data)
-+{
-+ struct drm_minor *minor = (struct drm_minor *) data;
-+ struct drm_device *dev = minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv;
-+ int len = 0;
-+
-+ if (offset > DRM_PROC_LIMIT) {
-+ *eof = 1;
-+ return 0;
-+ }
-+
-+ *start = &buf[offset];
-+ *eof = 0;
-+ DRM_PROC_PRINT("Active:\n");
-+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-+ list)
-+ {
-+ struct drm_gem_object *obj = obj_priv->obj;
-+ if (obj->name) {
-+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
-+ obj, obj->name,
-+ obj->read_domains, obj->write_domain,
-+ obj_priv->last_rendering_seqno);
-+ } else {
-+ DRM_PROC_PRINT(" %p: %08x %08x %d\n",
-+ obj,
-+ obj->read_domains, obj->write_domain,
-+ obj_priv->last_rendering_seqno);
-+ }
-+ }
-+ if (len > request + offset)
-+ return request;
-+ *eof = 1;
-+ return len - offset;
-+}
-+
-+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data)
-+{
-+ struct drm_minor *minor = (struct drm_minor *) data;
-+ struct drm_device *dev = minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv;
-+ int len = 0;
-+
-+ if (offset > DRM_PROC_LIMIT) {
-+ *eof = 1;
-+ return 0;
-+ }
-+
-+ *start = &buf[offset];
-+ *eof = 0;
-+ DRM_PROC_PRINT("Flushing:\n");
-+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-+ list)
-+ {
-+ struct drm_gem_object *obj = obj_priv->obj;
-+ if (obj->name) {
-+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
-+ obj, obj->name,
-+ obj->read_domains, obj->write_domain,
-+ obj_priv->last_rendering_seqno);
-+ } else {
-+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
-+ obj->read_domains, obj->write_domain,
-+ obj_priv->last_rendering_seqno);
-+ }
-+ }
-+ if (len > request + offset)
-+ return request;
-+ *eof = 1;
-+ return len - offset;
-+}
-+
-+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data)
-+{
-+ struct drm_minor *minor = (struct drm_minor *) data;
-+ struct drm_device *dev = minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_object *obj_priv;
-+ int len = 0;
-+
-+ if (offset > DRM_PROC_LIMIT) {
-+ *eof = 1;
-+ return 0;
-+ }
-+
-+ *start = &buf[offset];
-+ *eof = 0;
-+ DRM_PROC_PRINT("Inactive:\n");
-+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
-+ list)
-+ {
-+ struct drm_gem_object *obj = obj_priv->obj;
-+ if (obj->name) {
-+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
-+ obj, obj->name,
-+ obj->read_domains, obj->write_domain,
-+ obj_priv->last_rendering_seqno);
-+ } else {
-+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
-+ obj->read_domains, obj->write_domain,
-+ obj_priv->last_rendering_seqno);
-+ }
-+ }
-+ if (len > request + offset)
-+ return request;
-+ *eof = 1;
-+ return len - offset;
-+}
-+
-+static int i915_gem_request_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data)
-+{
-+ struct drm_minor *minor = (struct drm_minor *) data;
-+ struct drm_device *dev = minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_gem_request *gem_request;
-+ int len = 0;
-+
-+ if (offset > DRM_PROC_LIMIT) {
-+ *eof = 1;
-+ return 0;
-+ }
-+
-+ *start = &buf[offset];
-+ *eof = 0;
-+ DRM_PROC_PRINT("Request:\n");
-+ list_for_each_entry(gem_request, &dev_priv->mm.request_list,
-+ list)
-+ {
-+ DRM_PROC_PRINT(" %d @ %d %08x\n",
-+ gem_request->seqno,
-+ (int) (jiffies - gem_request->emitted_jiffies),
-+ gem_request->flush_domains);
-+ }
-+ if (len > request + offset)
-+ return request;
-+ *eof = 1;
-+ return len - offset;
-+}
-+
-+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data)
-+{
-+ struct drm_minor *minor = (struct drm_minor *) data;
-+ struct drm_device *dev = minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ int len = 0;
-+
-+ if (offset > DRM_PROC_LIMIT) {
-+ *eof = 1;
-+ return 0;
-+ }
-+
-+ *start = &buf[offset];
-+ *eof = 0;
-+ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
-+ DRM_PROC_PRINT("Waiter sequence: %d\n",
-+ dev_priv->mm.waiting_gem_seqno);
-+ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
-+ if (len > request + offset)
-+ return request;
-+ *eof = 1;
-+ return len - offset;
-+}
-+
-+
-+static int i915_interrupt_info(char *buf, char **start, off_t offset,
-+ int request, int *eof, void *data)
-+{
-+ struct drm_minor *minor = (struct drm_minor *) data;
-+ struct drm_device *dev = minor->dev;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ int len = 0;
-+
-+ if (offset > DRM_PROC_LIMIT) {
-+ *eof = 1;
-+ return 0;
-+ }
-+
-+ *start = &buf[offset];
-+ *eof = 0;
-+ DRM_PROC_PRINT("Interrupt enable: %08x\n",
-+ I915_READ(IER));
-+ DRM_PROC_PRINT("Interrupt identity: %08x\n",
-+ I915_READ(IIR));
-+ DRM_PROC_PRINT("Interrupt mask: %08x\n",
-+ I915_READ(IMR));
-+ DRM_PROC_PRINT("Pipe A stat: %08x\n",
-+ I915_READ(PIPEASTAT));
-+ DRM_PROC_PRINT("Pipe B stat: %08x\n",
-+ I915_READ(PIPEBSTAT));
-+ DRM_PROC_PRINT("Interrupts received: %d\n",
-+ atomic_read(&dev_priv->irq_received));
-+ DRM_PROC_PRINT("Current sequence: %d\n",
-+ i915_get_gem_seqno(dev));
-+ DRM_PROC_PRINT("Waiter sequence: %d\n",
-+ dev_priv->mm.waiting_gem_seqno);
-+ DRM_PROC_PRINT("IRQ sequence: %d\n",
-+ dev_priv->mm.irq_gem_seqno);
-+ if (len > request + offset)
-+ return request;
-+ *eof = 1;
-+ return len - offset;
-+}
-+
-+static struct drm_proc_list {
-+ /** file name */
-+ const char *name;
-+ /** proc callback*/
-+ int (*f) (char *, char **, off_t, int, int *, void *);
-+} i915_gem_proc_list[] = {
-+ {"i915_gem_active", i915_gem_active_info},
-+ {"i915_gem_flushing", i915_gem_flushing_info},
-+ {"i915_gem_inactive", i915_gem_inactive_info},
-+ {"i915_gem_request", i915_gem_request_info},
-+ {"i915_gem_seqno", i915_gem_seqno_info},
-+ {"i915_gem_interrupt", i915_interrupt_info},
-+};
-+
-+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
-+
-+int i915_gem_proc_init(struct drm_minor *minor)
-+{
-+ struct proc_dir_entry *ent;
-+ int i, j;
-+
-+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
-+ ent = create_proc_entry(i915_gem_proc_list[i].name,
-+ S_IFREG | S_IRUGO, minor->dev_root);
-+ if (!ent) {
-+ DRM_ERROR("Cannot create /proc/dri/.../%s\n",
-+ i915_gem_proc_list[i].name);
-+ for (j = 0; j < i; j++)
-+ remove_proc_entry(i915_gem_proc_list[i].name,
-+ minor->dev_root);
-+ return -1;
-+ }
-+ ent->read_proc = i915_gem_proc_list[i].f;
-+ ent->data = minor;
-+ }
-+ return 0;
-+}
-+
-+void i915_gem_proc_cleanup(struct drm_minor *minor)
-+{
-+ int i;
-+
-+ if (!minor->dev_root)
-+ return;
-+
-+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
-+ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
-+}
-diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
-new file mode 100644
-index 0000000..0c1b3a0
---- /dev/null
-+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
-@@ -0,0 +1,256 @@
-+/*
-+ * Copyright © 2008 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ *
-+ */
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "i915_drm.h"
-+#include "i915_drv.h"
-+
-+/** @file i915_gem_tiling.c
-+ *
-+ * Support for managing tiling state of buffer objects.
-+ *
-+ * The idea behind tiling is to increase cache hit rates by rearranging
-+ * pixel data so that a group of pixel accesses are in the same cacheline.
-+ * Performance improvement from doing this on the back/depth buffer are on
-+ * the order of 30%.
-+ *
-+ * Intel architectures make this somewhat more complicated, though, by
-+ * adjustments made to addressing of data when the memory is in interleaved
-+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
-+ * For interleaved memory, the CPU sends every sequential 64 bytes
-+ * to an alternate memory channel so it can get the bandwidth from both.
-+ *
-+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
-+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
-+ * it does it a little differently, since one walks addresses not just in the
-+ * X direction but also Y. So, along with alternating channels when bit
-+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
-+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
-+ * are common to both the 915 and 965-class hardware.
-+ *
-+ * The CPU also sometimes XORs in higher bits as well, to improve
-+ * bandwidth doing strided access like we do so frequently in graphics. This
-+ * is called "Channel XOR Randomization" in the MCH documentation. The result
-+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
-+ * decode.
-+ *
-+ * All of this bit 6 XORing has an effect on our memory management,
-+ * as we need to make sure that the 3d driver can correctly address object
-+ * contents.
-+ *
-+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
-+ * required.
-+ *
-+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
-+ * 17 is not just a page offset, so as we page an objet out and back in,
-+ * individual pages in it will have different bit 17 addresses, resulting in
-+ * each 64 bytes being swapped with its neighbor!
-+ *
-+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
-+ * swizzling it needs to do is, since it's writing with the CPU to the pages
-+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
-+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
-+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
-+ * to match what the GPU expects.
-+ */
-+
-+/**
-+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
-+ * access through main memory.
-+ */
-+void
-+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-+{
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-+
-+ if (!IS_I9XX(dev)) {
-+ /* As far as we know, the 865 doesn't have these bit 6
-+ * swizzling issues.
-+ */
-+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-+ } else if (!IS_I965G(dev) || IS_I965GM(dev)) {
-+ uint32_t dcc;
-+
-+ /* On 915-945 and GM965, channel interleave by the CPU is
-+ * determined by DCC. The CPU will alternate based on bit 6
-+ * in interleaved mode, and the GPU will then also alternate
-+ * on bit 6, 9, and 10 for X, but the CPU may also optionally
-+ * alternate based on bit 17 (XOR not disabled and XOR
-+ * bit == 17).
-+ */
-+ dcc = I915_READ(DCC);
-+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
-+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
-+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
-+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-+ break;
-+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
-+ if (IS_I915G(dev) || IS_I915GM(dev) ||
-+ dcc & DCC_CHANNEL_XOR_DISABLE) {
-+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-+ swizzle_y = I915_BIT_6_SWIZZLE_9;
-+ } else if (IS_I965GM(dev)) {
-+ /* GM965 only does bit 11-based channel
-+ * randomization
-+ */
-+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
-+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
-+ } else {
-+ /* Bit 17 or perhaps other swizzling */
-+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-+ }
-+ break;
-+ }
-+ if (dcc == 0xffffffff) {
-+ DRM_ERROR("Couldn't read from MCHBAR. "
-+ "Disabling tiling.\n");
-+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-+ }
-+ } else {
-+ /* The 965, G33, and newer, have a very flexible memory
-+ * configuration. It will enable dual-channel mode
-+ * (interleaving) on as much memory as it can, and the GPU
-+ * will additionally sometimes enable different bit 6
-+ * swizzling for tiled objects from the CPU.
-+ *
-+ * Here's what I found on the G965:
-+ * slot fill memory size swizzling
-+ * 0A 0B 1A 1B 1-ch 2-ch
-+ * 512 0 0 0 512 0 O
-+ * 512 0 512 0 16 1008 X
-+ * 512 0 0 512 16 1008 X
-+ * 0 512 0 512 16 1008 X
-+ * 1024 1024 1024 0 2048 1024 O
-+ *
-+ * We could probably detect this based on either the DRB
-+ * matching, which was the case for the swizzling required in
-+ * the table above, or from the 1-ch value being less than
-+ * the minimum size of a rank.
-+ */
-+ if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-+ } else {
-+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-+ swizzle_y = I915_BIT_6_SWIZZLE_9;
-+ }
-+ }
-+
-+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
-+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-+}
-+
-+/**
-+ * Sets the tiling mode of an object, returning the required swizzling of
-+ * bit 6 of addresses in the object.
-+ */
-+int
-+i915_gem_set_tiling(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_set_tiling *args = data;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL)
-+ return -EINVAL;
-+ obj_priv = obj->driver_private;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (args->tiling_mode == I915_TILING_NONE) {
-+ obj_priv->tiling_mode = I915_TILING_NONE;
-+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
-+ } else {
-+ if (args->tiling_mode == I915_TILING_X)
-+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
-+ else
-+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
-+ /* If we can't handle the swizzling, make it untiled. */
-+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
-+ args->tiling_mode = I915_TILING_NONE;
-+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
-+ }
-+ }
-+ obj_priv->tiling_mode = args->tiling_mode;
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ drm_gem_object_unreference(obj);
-+
-+ return 0;
-+}
-+
-+/**
-+ * Returns the current tiling mode and required bit 6 swizzling for the object.
-+ */
-+int
-+i915_gem_get_tiling(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_i915_gem_get_tiling *args = data;
-+ drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_gem_object *obj;
-+ struct drm_i915_gem_object *obj_priv;
-+
-+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-+ if (obj == NULL)
-+ return -EINVAL;
-+ obj_priv = obj->driver_private;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ args->tiling_mode = obj_priv->tiling_mode;
-+ switch (obj_priv->tiling_mode) {
-+ case I915_TILING_X:
-+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
-+ break;
-+ case I915_TILING_Y:
-+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
-+ break;
-+ case I915_TILING_NONE:
-+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
-+ break;
-+ default:
-+ DRM_ERROR("unknown tiling mode\n");
-+ }
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ drm_gem_object_unreference(obj);
-+
-+ return 0;
-+}
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index f875959..f295bdf 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -407,15 +407,20 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- I915_WRITE(PIPEBSTAT, pipeb_stats);
- }
-
-- if (iir & I915_ASLE_INTERRUPT)
-- opregion_asle_intr(dev);
-+ I915_WRITE(IIR, iir);
-+ if (dev->pdev->msi_enabled)
-+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
-+ (void) I915_READ(IIR); /* Flush posted writes */
-
- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-
-- if (dev->pdev->msi_enabled)
-- I915_WRITE(IMR, dev_priv->irq_mask_reg);
-- I915_WRITE(IIR, iir);
-- (void) I915_READ(IIR);
-+ if (iir & I915_USER_INTERRUPT) {
-+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
-+ DRM_WAKEUP(&dev_priv->irq_queue);
-+ }
-+
-+ if (iir & I915_ASLE_INTERRUPT)
-+ opregion_asle_intr(dev);
-
- if (vblank && dev_priv->swaps_pending > 0)
- drm_locked_tasklet(dev, i915_vblank_tasklet);
-@@ -449,7 +454,7 @@ static int i915_emit_irq(struct drm_device * dev)
- return dev_priv->counter;
- }
-
--static void i915_user_irq_get(struct drm_device *dev)
-+void i915_user_irq_get(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
-index 43ad2cb..5c2d9f2 100644
---- a/drivers/gpu/drm/i915/i915_reg.h
-+++ b/drivers/gpu/drm/i915/i915_reg.h
-@@ -25,19 +25,6 @@
- #ifndef _I915_REG_H_
- #define _I915_REG_H_
-
--/* MCH MMIO space */
--/** 915-945 and GM965 MCH register controlling DRAM channel access */
--#define DCC 0x200
--#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
--#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
--#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
--#define DCC_ADDRESSING_MODE_MASK (3 << 0)
--#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
--
--/** 965 MCH register controlling DRAM channel configuration */
--#define CHDECMISC 0x111
--#define CHDECMISC_FLEXMEMORY (1 << 1)
--
- /*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
-@@ -516,6 +503,30 @@
- #define PALETTE_A 0x0a000
- #define PALETTE_B 0x0a800
-
-+/* MCH MMIO space */
-+
-+/*
-+ * MCHBAR mirror.
-+ *
-+ * This mirrors the MCHBAR MMIO space whose location is determined by
-+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
-+ * every way. It is not accessible from the CP register read instructions.
-+ *
-+ */
-+#define MCHBAR_MIRROR_BASE 0x10000
-+
-+/** 915-945 and GM965 MCH register controlling DRAM channel access */
-+#define DCC 0x10200
-+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
-+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
-+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
-+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
-+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
-+
-+/** 965 MCH register controlling DRAM channel configuration */
-+#define C0DRB3 0x10206
-+#define C1DRB3 0x10606
-+
- /*
- * Overlay regs
- */
-diff --git a/include/drm/drm.h b/include/drm/drm.h
-index 15e5503..f46ba4b 100644
---- a/include/drm/drm.h
-+++ b/include/drm/drm.h
-@@ -570,6 +570,34 @@ struct drm_set_version {
- int drm_dd_minor;
- };
-
-+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
-+struct drm_gem_close {
-+ /** Handle of the object to be closed. */
-+ uint32_t handle;
-+ uint32_t pad;
-+};
-+
-+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
-+struct drm_gem_flink {
-+ /** Handle for the object being named */
-+ uint32_t handle;
-+
-+ /** Returned global name */
-+ uint32_t name;
-+};
-+
-+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
-+struct drm_gem_open {
-+ /** Name of object being opened */
-+ uint32_t name;
-+
-+ /** Returned handle for the object */
-+ uint32_t handle;
-+
-+ /** Returned size of the object */
-+ uint64_t size;
-+};
-+
- #define DRM_IOCTL_BASE 'd'
- #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
- #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
-@@ -585,6 +613,9 @@ struct drm_set_version {
- #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
- #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
- #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
-+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
-+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
-+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
-
- #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
- #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
-diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index e79ce07..1469a1b 100644
---- a/include/drm/drmP.h
-+++ b/include/drm/drmP.h
-@@ -104,6 +104,7 @@ struct drm_device;
- #define DRIVER_DMA_QUEUE 0x200
- #define DRIVER_FB_DMA 0x400
- #define DRIVER_IRQ_VBL2 0x800
-+#define DRIVER_GEM 0x1000
-
- /***********************************************************************/
- /** \name Begin the DRM... */
-@@ -387,6 +388,10 @@ struct drm_file {
- struct drm_minor *minor;
- int remove_auth_on_close;
- unsigned long lock_count;
-+ /** Mapping of mm object handles to object pointers. */
-+ struct idr object_idr;
-+ /** Lock for synchronization of access to object_idr. */
-+ spinlock_t table_lock;
- struct file *filp;
- void *driver_priv;
- };
-@@ -558,6 +563,56 @@ struct drm_ati_pcigart_info {
- };
-
- /**
-+ * This structure defines the drm_mm memory object, which will be used by the
-+ * DRM for its buffer objects.
-+ */
-+struct drm_gem_object {
-+ /** Reference count of this object */
-+ struct kref refcount;
-+
-+ /** Handle count of this object. Each handle also holds a reference */
-+ struct kref handlecount;
-+
-+ /** Related drm device */
-+ struct drm_device *dev;
-+
-+ /** File representing the shmem storage */
-+ struct file *filp;
-+
-+ /**
-+ * Size of the object, in bytes. Immutable over the object's
-+ * lifetime.
-+ */
-+ size_t size;
-+
-+ /**
-+ * Global name for this object, starts at 1. 0 means unnamed.
-+ * Access is covered by the object_name_lock in the related drm_device
-+ */
-+ int name;
-+
-+ /**
-+ * Memory domains. These monitor which caches contain read/write data
-+ * related to the object. When transitioning from one set of domains
-+ * to another, the driver is called to ensure that caches are suitably
-+ * flushed and invalidated
-+ */
-+ uint32_t read_domains;
-+ uint32_t write_domain;
-+
-+ /**
-+ * While validating an exec operation, the
-+ * new read/write domain values are computed here.
-+ * They will be transferred to the above values
-+ * at the point that any cache flushing occurs
-+ */
-+ uint32_t pending_read_domains;
-+ uint32_t pending_write_domain;
-+
-+ void *driver_private;
-+};
-+
-+/**
- * DRM driver structure. This structure represent the common code for
- * a family of cards. There will one drm_device for each card present
- * in this family
-@@ -657,6 +712,18 @@ struct drm_driver {
- void (*set_version) (struct drm_device *dev,
- struct drm_set_version *sv);
-
-+ int (*proc_init)(struct drm_minor *minor);
-+ void (*proc_cleanup)(struct drm_minor *minor);
-+
-+ /**
-+ * Driver-specific constructor for drm_gem_objects, to set up
-+ * obj->driver_private.
-+ *
-+ * Returns 0 on success.
-+ */
-+ int (*gem_init_object) (struct drm_gem_object *obj);
-+ void (*gem_free_object) (struct drm_gem_object *obj);
-+
- int major;
- int minor;
- int patchlevel;
-@@ -830,6 +897,22 @@ struct drm_device {
- spinlock_t drw_lock;
- struct idr drw_idr;
- /*@} */
-+
-+ /** \name GEM information */
-+ /*@{ */
-+ spinlock_t object_name_lock;
-+ struct idr object_name_idr;
-+ atomic_t object_count;
-+ atomic_t object_memory;
-+ atomic_t pin_count;
-+ atomic_t pin_memory;
-+ atomic_t gtt_count;
-+ atomic_t gtt_memory;
-+ uint32_t gtt_total;
-+ uint32_t invalidate_domains; /* domains pending invalidation */
-+ uint32_t flush_domains; /* domains pending flush */
-+ /*@} */
-+
- };
-
- static __inline__ int drm_core_check_feature(struct drm_device *dev,
-@@ -926,6 +1009,10 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
- extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
- extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
- extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
-+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
-+ struct page **pages,
-+ unsigned long num_pages,
-+ uint32_t gtt_offset);
- extern int drm_unbind_agp(DRM_AGP_MEM * handle);
-
- /* Misc. IOCTL support (drm_ioctl.h) */
-@@ -988,6 +1075,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
- extern int drm_authmagic(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-+/* Cache management (drm_cache.c) */
-+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
-+
- /* Locking IOCTL support (drm_lock.h) */
- extern int drm_lock(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-@@ -1094,6 +1184,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
- extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
- extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
- extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
-+extern void drm_agp_chipset_flush(struct drm_device *dev);
-
- /* Stub support (drm_stub.h) */
- extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
-@@ -1156,6 +1247,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
- extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
- extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
-
-+/* Graphics Execution Manager library functions (drm_gem.c) */
-+int drm_gem_init(struct drm_device *dev);
-+void drm_gem_object_free(struct kref *kref);
-+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
-+ size_t size);
-+void drm_gem_object_handle_free(struct kref *kref);
-+
-+static inline void
-+drm_gem_object_reference(struct drm_gem_object *obj)
-+{
-+ kref_get(&obj->refcount);
-+}
-+
-+static inline void
-+drm_gem_object_unreference(struct drm_gem_object *obj)
-+{
-+ if (obj == NULL)
-+ return;
-+
-+ kref_put(&obj->refcount, drm_gem_object_free);
-+}
-+
-+int drm_gem_handle_create(struct drm_file *file_priv,
-+ struct drm_gem_object *obj,
-+ int *handlep);
-+
-+static inline void
-+drm_gem_object_handle_reference(struct drm_gem_object *obj)
-+{
-+ drm_gem_object_reference(obj);
-+ kref_get(&obj->handlecount);
-+}
-+
-+static inline void
-+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-+{
-+ if (obj == NULL)
-+ return;
-+
-+ /*
-+ * Must bump handle count first as this may be the last
-+ * ref, in which case the object would disappear before we
-+ * checked for a name
-+ */
-+ kref_put(&obj->handlecount, drm_gem_object_handle_free);
-+ drm_gem_object_unreference(obj);
-+}
-+
-+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
-+ struct drm_file *filp,
-+ int handle);
-+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
-+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-+
- extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
- extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
- extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
-diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
-index 05c66cf..59d08fc 100644
---- a/include/drm/i915_drm.h
-+++ b/include/drm/i915_drm.h
-@@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
- #define DRM_I915_GET_VBLANK_PIPE 0x0e
- #define DRM_I915_VBLANK_SWAP 0x0f
- #define DRM_I915_HWS_ADDR 0x11
-+#define DRM_I915_GEM_INIT 0x13
-+#define DRM_I915_GEM_EXECBUFFER 0x14
-+#define DRM_I915_GEM_PIN 0x15
-+#define DRM_I915_GEM_UNPIN 0x16
-+#define DRM_I915_GEM_BUSY 0x17
-+#define DRM_I915_GEM_THROTTLE 0x18
-+#define DRM_I915_GEM_ENTERVT 0x19
-+#define DRM_I915_GEM_LEAVEVT 0x1a
-+#define DRM_I915_GEM_CREATE 0x1b
-+#define DRM_I915_GEM_PREAD 0x1c
-+#define DRM_I915_GEM_PWRITE 0x1d
-+#define DRM_I915_GEM_MMAP 0x1e
-+#define DRM_I915_GEM_SET_DOMAIN 0x1f
-+#define DRM_I915_GEM_SW_FINISH 0x20
-+#define DRM_I915_GEM_SET_TILING 0x21
-+#define DRM_I915_GEM_GET_TILING 0x22
-
- #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
- #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
-@@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
- #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
- #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
- #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
-+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
-+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
-+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
-+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
-+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
-+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
-+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
-+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
-+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
-+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
-+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
-+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
-+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
-+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
-
- /* Allow drivers to submit batchbuffers directly to hardware, relying
- * on the security mechanisms provided by hardware.
-@@ -200,6 +230,7 @@ typedef struct drm_i915_irq_wait {
- #define I915_PARAM_IRQ_ACTIVE 1
- #define I915_PARAM_ALLOW_BATCHBUFFER 2
- #define I915_PARAM_LAST_DISPATCH 3
-+#define I915_PARAM_HAS_GEM 5
-
- typedef struct drm_i915_getparam {
- int param;
-@@ -267,4 +298,305 @@ typedef struct drm_i915_hws_addr {
- uint64_t addr;
- } drm_i915_hws_addr_t;
-
-+struct drm_i915_gem_init {
-+ /**
-+ * Beginning offset in the GTT to be managed by the DRM memory
-+ * manager.
-+ */
-+ uint64_t gtt_start;
-+ /**
-+ * Ending offset in the GTT to be managed by the DRM memory
-+ * manager.
-+ */
-+ uint64_t gtt_end;
-+};
-+
-+struct drm_i915_gem_create {
-+ /**
-+ * Requested size for the object.
-+ *
-+ * The (page-aligned) allocated size for the object will be returned.
-+ */
-+ uint64_t size;
-+ /**
-+ * Returned handle for the object.
-+ *
-+ * Object handles are nonzero.
-+ */
-+ uint32_t handle;
-+ uint32_t pad;
-+};
-+
-+struct drm_i915_gem_pread {
-+ /** Handle for the object being read. */
-+ uint32_t handle;
-+ uint32_t pad;
-+ /** Offset into the object to read from */
-+ uint64_t offset;
-+ /** Length of data to read */
-+ uint64_t size;
-+ /**
-+ * Pointer to write the data into.
-+ *
-+ * This is a fixed-size type for 32/64 compatibility.
-+ */
-+ uint64_t data_ptr;
-+};
-+
-+struct drm_i915_gem_pwrite {
-+ /** Handle for the object being written to. */
-+ uint32_t handle;
-+ uint32_t pad;
-+ /** Offset into the object to write to */
-+ uint64_t offset;
-+ /** Length of data to write */
-+ uint64_t size;
-+ /**
-+ * Pointer to read the data from.
-+ *
-+ * This is a fixed-size type for 32/64 compatibility.
-+ */
-+ uint64_t data_ptr;
-+};
-+
-+struct drm_i915_gem_mmap {
-+ /** Handle for the object being mapped. */
-+ uint32_t handle;
-+ uint32_t pad;
-+ /** Offset in the object to map. */
-+ uint64_t offset;
-+ /**
-+ * Length of data to map.
-+ *
-+ * The value will be page-aligned.
-+ */
-+ uint64_t size;
-+ /**
-+ * Returned pointer the data was mapped at.
-+ *
-+ * This is a fixed-size type for 32/64 compatibility.
-+ */
-+ uint64_t addr_ptr;
-+};
-+
-+struct drm_i915_gem_set_domain {
-+ /** Handle for the object */
-+ uint32_t handle;
-+
-+ /** New read domains */
-+ uint32_t read_domains;
-+
-+ /** New write domain */
-+ uint32_t write_domain;
-+};
-+
-+struct drm_i915_gem_sw_finish {
-+ /** Handle for the object */
-+ uint32_t handle;
-+};
-+
-+struct drm_i915_gem_relocation_entry {
-+ /**
-+ * Handle of the buffer being pointed to by this relocation entry.
-+ *
-+ * It's appealing to make this be an index into the mm_validate_entry
-+ * list to refer to the buffer, but this allows the driver to create
-+ * a relocation list for state buffers and not re-write it per
-+ * exec using the buffer.
-+ */
-+ uint32_t target_handle;
-+
-+ /**
-+ * Value to be added to the offset of the target buffer to make up
-+ * the relocation entry.
-+ */
-+ uint32_t delta;
-+
-+ /** Offset in the buffer the relocation entry will be written into */
-+ uint64_t offset;
-+
-+ /**
-+ * Offset value of the target buffer that the relocation entry was last
-+ * written as.
-+ *
-+ * If the buffer has the same offset as last time, we can skip syncing
-+ * and writing the relocation. This value is written back out by
-+ * the execbuffer ioctl when the relocation is written.
-+ */
-+ uint64_t presumed_offset;
-+
-+ /**
-+ * Target memory domains read by this operation.
-+ */
-+ uint32_t read_domains;
-+
-+ /**
-+ * Target memory domains written by this operation.
-+ *
-+ * Note that only one domain may be written by the whole
-+ * execbuffer operation, so that where there are conflicts,
-+ * the application will get -EINVAL back.
-+ */
-+ uint32_t write_domain;
-+};
-+
-+/** @{
-+ * Intel memory domains
-+ *
-+ * Most of these just align with the various caches in
-+ * the system and are used to flush and invalidate as
-+ * objects end up cached in different domains.
-+ */
-+/** CPU cache */
-+#define I915_GEM_DOMAIN_CPU 0x00000001
-+/** Render cache, used by 2D and 3D drawing */
-+#define I915_GEM_DOMAIN_RENDER 0x00000002
-+/** Sampler cache, used by texture engine */
-+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
-+/** Command queue, used to load batch buffers */
-+#define I915_GEM_DOMAIN_COMMAND 0x00000008
-+/** Instruction cache, used by shader programs */
-+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
-+/** Vertex address cache */
-+#define I915_GEM_DOMAIN_VERTEX 0x00000020
-+/** GTT domain - aperture and scanout */
-+#define I915_GEM_DOMAIN_GTT 0x00000040
-+/** @} */
-+
-+struct drm_i915_gem_exec_object {
-+ /**
-+ * User's handle for a buffer to be bound into the GTT for this
-+ * operation.
-+ */
-+ uint32_t handle;
-+
-+ /** Number of relocations to be performed on this buffer */
-+ uint32_t relocation_count;
-+ /**
-+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
-+ * the relocations to be performed in this buffer.
-+ */
-+ uint64_t relocs_ptr;
-+
-+ /** Required alignment in graphics aperture */
-+ uint64_t alignment;
-+
-+ /**
-+ * Returned value of the updated offset of the object, for future
-+ * presumed_offset writes.
-+ */
-+ uint64_t offset;
-+};
-+
-+struct drm_i915_gem_execbuffer {
-+ /**
-+ * List of buffers to be validated with their relocations to be
-+ * performend on them.
-+ *
-+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
-+ *
-+ * These buffers must be listed in an order such that all relocations
-+ * a buffer is performing refer to buffers that have already appeared
-+ * in the validate list.
-+ */
-+ uint64_t buffers_ptr;
-+ uint32_t buffer_count;
-+
-+ /** Offset in the batchbuffer to start execution from. */
-+ uint32_t batch_start_offset;
-+ /** Bytes used in batchbuffer from batch_start_offset */
-+ uint32_t batch_len;
-+ uint32_t DR1;
-+ uint32_t DR4;
-+ uint32_t num_cliprects;
-+ /** This is a struct drm_clip_rect *cliprects */
-+ uint64_t cliprects_ptr;
-+};
-+
-+struct drm_i915_gem_pin {
-+ /** Handle of the buffer to be pinned. */
-+ uint32_t handle;
-+ uint32_t pad;
-+
-+ /** alignment required within the aperture */
-+ uint64_t alignment;
-+
-+ /** Returned GTT offset of the buffer. */
-+ uint64_t offset;
-+};
-+
-+struct drm_i915_gem_unpin {
-+ /** Handle of the buffer to be unpinned. */
-+ uint32_t handle;
-+ uint32_t pad;
-+};
-+
-+struct drm_i915_gem_busy {
-+ /** Handle of the buffer to check for busy */
-+ uint32_t handle;
-+
-+ /** Return busy status (1 if busy, 0 if idle) */
-+ uint32_t busy;
-+};
-+
-+#define I915_TILING_NONE 0
-+#define I915_TILING_X 1
-+#define I915_TILING_Y 2
-+
-+#define I915_BIT_6_SWIZZLE_NONE 0
-+#define I915_BIT_6_SWIZZLE_9 1
-+#define I915_BIT_6_SWIZZLE_9_10 2
-+#define I915_BIT_6_SWIZZLE_9_11 3
-+#define I915_BIT_6_SWIZZLE_9_10_11 4
-+/* Not seen by userland */
-+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
-+
-+struct drm_i915_gem_set_tiling {
-+ /** Handle of the buffer to have its tiling state updated */
-+ uint32_t handle;
-+
-+ /**
-+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
-+ * I915_TILING_Y).
-+ *
-+ * This value is to be set on request, and will be updated by the
-+ * kernel on successful return with the actual chosen tiling layout.
-+ *
-+ * The tiling mode may be demoted to I915_TILING_NONE when the system
-+ * has bit 6 swizzling that can't be managed correctly by GEM.
-+ *
-+ * Buffer contents become undefined when changing tiling_mode.
-+ */
-+ uint32_t tiling_mode;
-+
-+ /**
-+ * Stride in bytes for the object when in I915_TILING_X or
-+ * I915_TILING_Y.
-+ */
-+ uint32_t stride;
-+
-+ /**
-+ * Returned address bit 6 swizzling required for CPU access through
-+ * mmap mapping.
-+ */
-+ uint32_t swizzle_mode;
-+};
-+
-+struct drm_i915_gem_get_tiling {
-+ /** Handle of the buffer to get tiling state for. */
-+ uint32_t handle;
-+
-+ /**
-+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
-+ * I915_TILING_Y).
-+ */
-+ uint32_t tiling_mode;
-+
-+ /**
-+ * Returned address bit 6 swizzling required for CPU access through
-+ * mmap mapping.
-+ */
-+ uint32_t swizzle_mode;
-+};
-+
- #endif /* _I915_DRM_H_ */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0015-i915-Add-chip-set-ID-param.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0015-i915-Add-chip-set-ID-param.patch
deleted file mode 100644
index c3bf8ebd1..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0015-i915-Add-chip-set-ID-param.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-commit 26ead293ddf664f33dc0ba12b726887c40ce3957
-Author: Kristian Høgsberg <krh@redhat.com>
-Date: Wed Aug 20 11:08:52 2008 -0400
-
- i915: Add chip set ID param.
-
- Signed-off-by: Kristian Høgsberg <krh@redhat.com>
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 3b5aa74..205d21e 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -689,6 +689,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
- case I915_PARAM_LAST_DISPATCH:
- value = READ_BREADCRUMB(dev_priv);
- break;
-+ case I915_PARAM_CHIPSET_ID:
-+ value = dev->pci_device;
-+ break;
- case I915_PARAM_HAS_GEM:
- value = 1;
- break;
-diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
-index 59d08fc..eb4b350 100644
---- a/include/drm/i915_drm.h
-+++ b/include/drm/i915_drm.h
-@@ -230,6 +230,7 @@ typedef struct drm_i915_irq_wait {
- #define I915_PARAM_IRQ_ACTIVE 1
- #define I915_PARAM_ALLOW_BATCHBUFFER 2
- #define I915_PARAM_LAST_DISPATCH 3
-+#define I915_PARAM_CHIPSET_ID 4
- #define I915_PARAM_HAS_GEM 5
-
- typedef struct drm_i915_getparam {
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch
deleted file mode 100644
index 910f37e9c..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch
+++ /dev/null
@@ -1,205 +0,0 @@
-commit 8a524209fce67d3b6d2e831b5dad4eced796ce98
-Author: Eric Anholt <eric@anholt.net>
-Date: Mon Sep 1 16:45:29 2008 -0700
-
- i915: Use struct_mutex to protect ring in GEM mode.
-
- In the conversion for GEM, we had stopped using the hardware lock to protect
- ring usage, since it was all internal to the DRM now. However, some paths
- weren't converted to using struct_mutex to prevent multiple threads from
- concurrently working on the ring, in particular between the vblank swap handler
- and ioctls.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 205d21e..25f59c1 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -588,9 +588,15 @@ static int i915_quiescent(struct drm_device * dev)
- static int i915_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
-- LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ int ret;
-+
-+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-- return i915_quiescent(dev);
-+ mutex_lock(&dev->struct_mutex);
-+ ret = i915_quiescent(dev);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return ret;
- }
-
- static int i915_batchbuffer(struct drm_device *dev, void *data,
-@@ -611,14 +617,16 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
- DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
- batch->start, batch->used, batch->num_cliprects);
-
-- LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
- batch->num_cliprects *
- sizeof(struct drm_clip_rect)))
- return -EFAULT;
-
-+ mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_batchbuffer(dev, batch);
-+ mutex_unlock(&dev->struct_mutex);
-
- sarea_priv->last_dispatch = (int)hw_status[5];
- return ret;
-@@ -637,7 +645,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
- DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
- cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
-
-- LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (cmdbuf->num_cliprects &&
- DRM_VERIFYAREA_READ(cmdbuf->cliprects,
-@@ -647,7 +655,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
- return -EFAULT;
- }
-
-+ mutex_lock(&dev->struct_mutex);
- ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
-+ mutex_unlock(&dev->struct_mutex);
- if (ret) {
- DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
- return ret;
-@@ -660,11 +670,17 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
- static int i915_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
-+ int ret;
-+
- DRM_DEBUG("%s\n", __FUNCTION__);
-
-- LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-- return i915_dispatch_flip(dev);
-+ mutex_lock(&dev->struct_mutex);
-+ ret = i915_dispatch_flip(dev);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return ret;
- }
-
- static int i915_getparam(struct drm_device *dev, void *data,
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 87b071a..8547f0a 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -285,6 +285,9 @@ typedef struct drm_i915_private {
- */
- struct delayed_work retire_work;
-
-+ /** Work task for vblank-related ring access */
-+ struct work_struct vblank_work;
-+
- uint32_t next_gem_seqno;
-
- /**
-@@ -435,6 +438,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
- void i915_user_irq_get(struct drm_device *dev);
- void i915_user_irq_put(struct drm_device *dev);
-
-+extern void i915_gem_vblank_work_handler(struct work_struct *work);
- extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
- extern void i915_driver_irq_preinstall(struct drm_device * dev);
- extern int i915_driver_irq_postinstall(struct drm_device *dev);
-@@ -538,6 +542,17 @@ extern void intel_opregion_free(struct drm_device *dev);
- extern void opregion_asle_intr(struct drm_device *dev);
- extern void opregion_enable_asle(struct drm_device *dev);
-
-+/**
-+ * Lock test for when it's just for synchronization of ring access.
-+ *
-+ * In that case, we don't need to do it when GEM is initialized as nobody else
-+ * has access to the ring.
-+ */
-+#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
-+ if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
-+ LOCK_TEST_WITH_RETURN(dev, file_priv); \
-+} while (0)
-+
- #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
- #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
- #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 90ae8a0..bb6e5a3 100644
---- a/drivers/gpu/drm/i915/i915_gem.c
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -2491,6 +2491,8 @@ i915_gem_load(struct drm_device *dev)
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
- INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
- i915_gem_retire_work_handler);
-+ INIT_WORK(&dev_priv->mm.vblank_work,
-+ i915_gem_vblank_work_handler);
- dev_priv->mm.next_gem_seqno = 1;
-
- i915_gem_detect_bit_6_swizzle(dev);
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index f295bdf..d04c526 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -349,6 +349,21 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
- return count;
- }
-
-+void
-+i915_gem_vblank_work_handler(struct work_struct *work)
-+{
-+ drm_i915_private_t *dev_priv;
-+ struct drm_device *dev;
-+
-+ dev_priv = container_of(work, drm_i915_private_t,
-+ mm.vblank_work);
-+ dev = dev_priv->dev;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ i915_vblank_tasklet(dev);
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
- irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- {
- struct drm_device *dev = (struct drm_device *) arg;
-@@ -422,8 +437,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- if (iir & I915_ASLE_INTERRUPT)
- opregion_asle_intr(dev);
-
-- if (vblank && dev_priv->swaps_pending > 0)
-- drm_locked_tasklet(dev, i915_vblank_tasklet);
-+ if (vblank && dev_priv->swaps_pending > 0) {
-+ if (dev_priv->ring.ring_obj == NULL)
-+ drm_locked_tasklet(dev, i915_vblank_tasklet);
-+ else
-+ schedule_work(&dev_priv->mm.vblank_work);
-+ }
-
- return IRQ_HANDLED;
- }
-@@ -514,14 +533,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
- drm_i915_irq_emit_t *emit = data;
- int result;
-
-- LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
--
-+ mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
-+ mutex_unlock(&dev->struct_mutex);
-
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0017-i915-Make-use-of-sarea_priv-conditional.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0017-i915-Make-use-of-sarea_priv-conditional.patch
deleted file mode 100644
index 542b69dd5..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0017-i915-Make-use-of-sarea_priv-conditional.patch
+++ /dev/null
@@ -1,147 +0,0 @@
-commit 69749cf99189a8a78de201ac24990c91ee111469
-Author: Kristian Høgsberg <krh@redhat.com>
-Date: Wed Aug 20 11:20:13 2008 -0400
-
- i915: Make use of sarea_priv conditional.
-
- We fail ioctls that depend on the sarea_priv with EINVAL.
-
- Signed-off-by: Kristian Høgsberg <krh@redhat.com>
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 25f59c1..dbd3f49 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -55,7 +55,8 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
- if (ring->space >= n)
- return 0;
-
-- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-+ if (dev_priv->sarea_priv)
-+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- if (ring->head != last_head)
- i = 0;
-@@ -128,7 +129,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
- if (ring->space < 0)
- ring->space += ring->Size;
-
-- if (ring->head == ring->tail)
-+ if (ring->head == ring->tail && dev_priv->sarea_priv)
- dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
- }
-
-@@ -433,10 +434,11 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
- drm_i915_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
-- dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
--
-+ dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
-- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
-+ dev_priv->counter = 0;
-+ if (dev_priv->sarea_priv)
-+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
-@@ -534,6 +536,9 @@ static int i915_dispatch_flip(struct drm_device * dev)
- drm_i915_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
-+ if (!dev_priv->sarea_priv)
-+ return -EINVAL;
-+
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __FUNCTION__,
- dev_priv->current_page,
-@@ -628,7 +633,8 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
- ret = i915_dispatch_batchbuffer(dev, batch);
- mutex_unlock(&dev->struct_mutex);
-
-- sarea_priv->last_dispatch = (int)hw_status[5];
-+ if (sarea_priv)
-+ sarea_priv->last_dispatch = (int)hw_status[5];
- return ret;
- }
-
-@@ -663,7 +669,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
- return ret;
- }
-
-- sarea_priv->last_dispatch = (int)hw_status[5];
-+ if (sarea_priv)
-+ sarea_priv->last_dispatch = (int)hw_status[5];
- return 0;
- }
-
-diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index d04c526..ef03a59 100644
---- a/drivers/gpu/drm/i915/i915_irq.c
-+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -427,7 +427,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IIR); /* Flush posted writes */
-
-- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-+ if (dev_priv->sarea_priv)
-+ dev_priv->sarea_priv->last_dispatch =
-+ READ_BREADCRUMB(dev_priv);
-
- if (iir & I915_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
-@@ -456,10 +458,11 @@ static int i915_emit_irq(struct drm_device * dev)
-
- DRM_DEBUG("\n");
-
-- dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
--
-+ dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
-- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
-+ dev_priv->counter = 1;
-+ if (dev_priv->sarea_priv)
-+ dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
- BEGIN_LP_RING(6);
- OUT_RING(MI_STORE_DWORD_INDEX);
-@@ -503,11 +506,15 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
- READ_BREADCRUMB(dev_priv));
-
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
-- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-+ if (dev_priv->sarea_priv) {
-+ dev_priv->sarea_priv->last_dispatch =
-+ READ_BREADCRUMB(dev_priv);
-+ }
- return 0;
- }
-
-- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-+ if (dev_priv->sarea_priv)
-+ dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- i915_user_irq_get(dev);
- DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
-@@ -519,7 +526,9 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
- }
-
-- dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-+ if (dev_priv->sarea_priv)
-+ dev_priv->sarea_priv->last_dispatch =
-+ READ_BREADCRUMB(dev_priv);
-
- return ret;
- }
-@@ -682,7 +691,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
- struct list_head *list;
- int ret;
-
-- if (!dev_priv) {
-+ if (!dev_priv || !dev_priv->sarea_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch
deleted file mode 100644
index 3593fa582..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-commit 7ad6d5861b04bbb2cdc36d1dcf8989e16f86e659
-Author: Kristian Høgsberg <krh@redhat.com>
-Date: Wed Aug 20 11:04:27 2008 -0400
-
- i915 gem: install and uninstall irq handler in entervt and leavevt ioctls.
-
- Signed-off-by: Kristian Høgsberg <krh@redhat.com>
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index bb6e5a3..5fe5034 100644
---- a/drivers/gpu/drm/i915/i915_gem.c
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -2443,6 +2443,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
- dev_priv->mm.suspended = 0;
- mutex_unlock(&dev->struct_mutex);
-+
-+ drm_irq_install(dev);
-+
- return 0;
- }
-
-@@ -2458,6 +2461,8 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- i915_gem_cleanup_ringbuffer(dev);
- mutex_unlock(&dev->struct_mutex);
-
-+ drm_irq_uninstall(dev);
-+
- return 0;
- }
-
-diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index 1469a1b..51ee72c 100644
---- a/include/drm/drmP.h
-+++ b/include/drm/drmP.h
-@@ -1134,6 +1134,7 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
- extern int drm_control(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
-+extern int drm_irq_install(struct drm_device *dev);
- extern int drm_irq_uninstall(struct drm_device *dev);
- extern void drm_driver_irq_preinstall(struct drm_device *dev);
- extern void drm_driver_irq_postinstall(struct drm_device *dev);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch
deleted file mode 100644
index 6de4514e2..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-commit c3de45b0488762a9161e9b9e8bf419f63c100c47
-Author: Eric Anholt <eric@anholt.net>
-Date: Tue Sep 9 11:40:34 2008 -0700
-
- DRM: Return -EBADF on bad object in flink, and return curent name if it exists.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
-index 434155b..ccd1afd 100644
---- a/drivers/gpu/drm/drm_gem.c
-+++ b/drivers/gpu/drm/drm_gem.c
-@@ -251,7 +251,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
-- return -EINVAL;
-+ return -EBADF;
-
- again:
- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
-@@ -259,8 +259,9 @@ again:
-
- spin_lock(&dev->object_name_lock);
- if (obj->name) {
-+ args->name = obj->name;
- spin_unlock(&dev->object_name_lock);
-- return -EEXIST;
-+ return 0;
- }
- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
- &obj->name);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch
deleted file mode 100644
index 7080907cd..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-commit 880db7a8dbed226d638b3a48aa1a3996f8624911
-Author: Eric Anholt <eric@anholt.net>
-Date: Wed Sep 10 14:22:49 2008 -0700
-
- drm: Avoid oops in GEM execbuffers with bad arguments.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 5fe5034..29d9d21 100644
---- a/drivers/gpu/drm/i915/i915_gem.c
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -1763,6 +1763,10 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
- #endif
-
-+ if (args->buffer_count < 1) {
-+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
-+ return -EINVAL;
-+ }
- /* Copy in the exec list from userland */
- exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
- DRM_MEM_DRIVER);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch
deleted file mode 100644
index f5481d7d8..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-commit 930469634910fa87c21f0a7423c98b270d35d8c6
-Author: Eric Anholt <eric@anholt.net>
-Date: Mon Sep 15 13:13:34 2008 -0700
-
- drm: G33-class hardware has a newer 965-style MCH (no DCC register).
-
- Fixes bad software fallback rendering in Mesa in dual-channel configurations.
-
- d9a2470012588dc5313a5ac8bb2f03575af00e99
-
-diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
-index 0c1b3a0..6b3f1e4 100644
---- a/drivers/gpu/drm/i915/i915_gem_tiling.c
-+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
-@@ -96,7 +96,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
- */
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-- } else if (!IS_I965G(dev) || IS_I965GM(dev)) {
-+ } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev)) {
- uint32_t dcc;
-
- /* On 915-945 and GM965, channel interleave by the CPU is
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch
deleted file mode 100644
index 8e6cbe95a..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-commit d9f2382adde582f8792ad96e9570716bcbea21a0
-Author: Eric Anholt <eric@anholt.net>
-Date: Tue Sep 23 14:50:57 2008 -0700
-
- drm: Use ioremap_wc in i915_driver instead of ioremap, since we always want WC.
-
- Fixes failure to map the ringbuffer when PAT tells us we don't get to do
- uncached on something that's already mapped WC, or something along those lines.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 29d9d21..6ecfd10 100644
---- a/drivers/gpu/drm/i915/i915_gem.c
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -233,7 +233,7 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- if (unwritten)
- #endif /* CONFIG_HIGHMEM */
- {
-- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-+ vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
- #if WATCH_PWRITE
- DRM_INFO("pwrite slow i %d o %d l %d "
- "pfn %ld vaddr %p\n",
-@@ -1612,9 +1612,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- if (reloc_page != NULL)
- iounmap(reloc_page);
-
-- reloc_page = ioremap(dev->agp->base +
-- (reloc_offset & ~(PAGE_SIZE - 1)),
-- PAGE_SIZE);
-+ reloc_page = ioremap_wc(dev->agp->base +
-+ (reloc_offset &
-+ ~(PAGE_SIZE - 1)),
-+ PAGE_SIZE);
- last_reloc_offset = reloc_offset;
- if (reloc_page == NULL) {
- drm_gem_object_unreference(target_obj);
-@@ -2318,7 +2319,9 @@ i915_gem_init_hws(struct drm_device *dev)
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
-
-- drm_core_ioremap(&dev_priv->hws_map, dev);
-+ /* Ioremapping here is the wrong thing to do. We want cached access.
-+ */
-+ drm_core_ioremap_wc(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
- DRM_ERROR("Failed to map status page.\n");
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-@@ -2369,7 +2372,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
-- drm_core_ioremap(&dev_priv->ring.map, dev);
-+ drm_core_ioremap_wc(&dev_priv->ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
- DRM_ERROR("Failed to map ringbuffer.\n");
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0023-drm-clean-up-many-sparse-warnings-in-i915.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0023-drm-clean-up-many-sparse-warnings-in-i915.patch
deleted file mode 100644
index 236b16158..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0023-drm-clean-up-many-sparse-warnings-in-i915.patch
+++ /dev/null
@@ -1,192 +0,0 @@
-commit 034994cfffbb2371b720e3f49378031ebc12645e
-Author: Eric Anholt <eric@anholt.net>
-Date: Thu Oct 2 12:24:47 2008 -0700
-
- drm: Clean up many sparse warnings in i915.
-
- Signed-off-by: Eric Anholt <eric@anholt.net>
-
-diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index dbd3f49..814cc12 100644
---- a/drivers/gpu/drm/i915/i915_dma.c
-+++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -76,7 +76,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
--int i915_init_phys_hws(struct drm_device *dev)
-+static int i915_init_phys_hws(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- /* Program Hardware Status Page */
-@@ -101,7 +101,7 @@ int i915_init_phys_hws(struct drm_device *dev)
- * Frees the hardware status page, whether it's a physical address or a virtual
- * address set up by the X Server.
- */
--void i915_free_hws(struct drm_device *dev)
-+static void i915_free_hws(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- if (dev_priv->status_page_dmah) {
-@@ -145,8 +145,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
-
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
-- dev_priv->ring.virtual_start = 0;
-- dev_priv->ring.map.handle = 0;
-+ dev_priv->ring.virtual_start = NULL;
-+ dev_priv->ring.map.handle = NULL;
- dev_priv->ring.map.size = 0;
- }
-
-@@ -827,9 +827,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
-
-- ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
-- _DRM_KERNEL | _DRM_DRIVER,
-- &dev_priv->mmio_map);
-+ dev_priv->regs = ioremap(base, size);
-
- i915_gem_load(dev);
-
-@@ -867,8 +865,8 @@ int i915_driver_unload(struct drm_device *dev)
-
- i915_free_hws(dev);
-
-- if (dev_priv->mmio_map)
-- drm_rmmap(dev, dev_priv->mmio_map);
-+ if (dev_priv->regs != NULL)
-+ iounmap(dev_priv->regs);
-
- intel_opregion_free(dev);
-
-diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 8547f0a..b184d54 100644
---- a/drivers/gpu/drm/i915/i915_drv.h
-+++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -110,8 +110,8 @@ struct intel_opregion {
- typedef struct drm_i915_private {
- struct drm_device *dev;
-
-+ void __iomem *regs;
- drm_local_map_t *sarea;
-- drm_local_map_t *mmio_map;
-
- drm_i915_sarea_t *sarea_priv;
- drm_i915_ring_buffer_t ring;
-@@ -553,12 +553,12 @@ extern void opregion_enable_asle(struct drm_device *dev);
- LOCK_TEST_WITH_RETURN(dev, file_priv); \
- } while (0)
-
--#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
--#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
--#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
--#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
--#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg))
--#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
-+#define I915_READ(reg) readl(dev_priv->regs + (reg))
-+#define I915_WRITE(reg,val) writel(val, dev_priv->regs + (reg))
-+#define I915_READ16(reg) readw(dev_priv->regs + (reg))
-+#define I915_WRITE16(reg,val) writel(val, dev_priv->regs + (reg))
-+#define I915_READ8(reg) readb(dev_priv->regs + (reg))
-+#define I915_WRITE8(reg,val) writeb(val, dev_priv->regs + (reg))
-
- #define I915_VERBOSE 0
-
-diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
-index 6ecfd10..6a89449 100644
---- a/drivers/gpu/drm/i915/i915_gem.c
-+++ b/drivers/gpu/drm/i915/i915_gem.c
-@@ -176,7 +176,8 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
-- char *vaddr;
-+ char __iomem *vaddr;
-+ char *vaddr_atomic;
- int i, o, l;
- int ret = 0;
- unsigned long pfn;
-@@ -219,16 +220,20 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- pfn = (dev->agp->base >> PAGE_SHIFT) + i;
-
- #ifdef CONFIG_HIGHMEM
-- /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
-+ /* This is a workaround for the low performance of iounmap
-+ * (approximate 10% cpu cost on normal 3D workloads).
-+ * kmap_atomic on HIGHMEM kernels happens to let us map card
-+ * memory without taking IPIs. When the vmap rework lands
-+ * we should be able to dump this hack.
- */
-- vaddr = kmap_atomic_pfn(pfn, KM_USER0);
-+ vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
- #if WATCH_PWRITE
- DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
-- i, o, l, pfn, vaddr);
-+ i, o, l, pfn, vaddr_atomic);
- #endif
-- unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
-+ unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
- user_data, l);
-- kunmap_atomic(vaddr, KM_USER0);
-+ kunmap_atomic(vaddr_atomic, KM_USER0);
-
- if (unwritten)
- #endif /* CONFIG_HIGHMEM */
-@@ -271,7 +276,7 @@ fail:
- return ret;
- }
-
--int
-+static int
- i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
-@@ -587,7 +592,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
--uint32_t
-+static uint32_t
- i915_retire_commands(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-@@ -734,7 +739,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
--int
-+static int
- i915_wait_request(struct drm_device *dev, uint32_t seqno)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-@@ -1483,7 +1488,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int i, ret;
- uint32_t last_reloc_offset = -1;
-- void *reloc_page = NULL;
-+ void __iomem *reloc_page = NULL;
-
- /* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
-@@ -1500,8 +1505,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_gem_object *target_obj;
- struct drm_i915_gem_object *target_obj_priv;
-- uint32_t reloc_val, reloc_offset, *reloc_entry;
-- int ret;
-+ uint32_t reloc_val, reloc_offset;
-+ uint32_t __iomem *reloc_entry;
-
- ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
- if (ret != 0) {
-@@ -1624,7 +1629,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- }
- }
-
-- reloc_entry = (uint32_t *)((char *)reloc_page +
-+ reloc_entry = (uint32_t __iomem *)(reloc_page +
- (reloc_offset & (PAGE_SIZE - 1)));
- reloc_val = target_obj_priv->gtt_offset + reloc.delta;
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0024-fastboot-create-a-asynchronous-initlevel.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0024-fastboot-create-a-asynchronous-initlevel.patch
deleted file mode 100644
index db518b36e..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0024-fastboot-create-a-asynchronous-initlevel.patch
+++ /dev/null
@@ -1,133 +0,0 @@
-From ac9103dd8e4dc65c110d6cba9a3380c6c617ffa7 Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Fri, 18 Jul 2008 15:16:08 -0700
-Subject: [PATCH] fastboot: create a "asynchronous" initlevel
-
-This patch creates an asynchronous initlevel (6a) which is at the same
-level as the normal device initcalls, but with the difference that they
-are run asynchronous from all the other initcalls. The purpose of this
-*selective* level is that we can move long waiting inits that are not
-boot-critical to this level one at a time.
-
-To keep things not totally insane, the asynchronous initcalls are async
-to the other initcalls, but are still ordered to themselves; think of it
-as "bottom-half-not-softirq". This has the benefit that async drivers
-still have stable device ordering between them.
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- include/asm-generic/vmlinux.lds.h | 3 +++
- include/linux/init.h | 6 ++++++
- init/main.c | 35 ++++++++++++++++++++++++++++++++---
- 3 files changed, 41 insertions(+), 3 deletions(-)
-
-Index: linux-2.6.27/include/asm-generic/vmlinux.lds.h
-===================================================================
---- linux-2.6.27.orig/include/asm-generic/vmlinux.lds.h 2008-10-14 16:55:43.000000000 +0200
-+++ linux-2.6.27/include/asm-generic/vmlinux.lds.h 2008-10-14 17:00:59.000000000 +0200
-@@ -376,6 +376,9 @@
- *(.initcall5.init) \
- *(.initcall5s.init) \
- *(.initcallrootfs.init) \
-+ __async_initcall_start = .; \
-+ *(.initcall6a.init) \
-+ __async_initcall_end = .; \
- *(.initcall6.init) \
- *(.initcall6s.init) \
- *(.initcall7.init) \
-Index: linux-2.6.27/include/linux/init.h
-===================================================================
---- linux-2.6.27.orig/include/linux/init.h 2008-10-14 16:55:45.000000000 +0200
-+++ linux-2.6.27/include/linux/init.h 2008-10-14 17:00:59.000000000 +0200
-@@ -197,11 +197,13 @@ extern void (*late_time_init)(void);
- #define fs_initcall_sync(fn) __define_initcall("5s",fn,5s)
- #define rootfs_initcall(fn) __define_initcall("rootfs",fn,rootfs)
- #define device_initcall(fn) __define_initcall("6",fn,6)
-+#define device_initcall_async(fn) __define_initcall("6a", fn, 6a)
- #define device_initcall_sync(fn) __define_initcall("6s",fn,6s)
- #define late_initcall(fn) __define_initcall("7",fn,7)
- #define late_initcall_sync(fn) __define_initcall("7s",fn,7s)
-
- #define __initcall(fn) device_initcall(fn)
-+#define __initcall_async(fn) device_initcall_async(fn)
-
- #define __exitcall(fn) \
- static exitcall_t __exitcall_##fn __exit_call = fn
-@@ -257,6 +259,7 @@ void __init parse_early_param(void);
- * be one per module.
- */
- #define module_init(x) __initcall(x);
-+#define module_init_async(x) __initcall_async(x);
-
- /**
- * module_exit() - driver exit entry point
-@@ -279,10 +282,13 @@ void __init parse_early_param(void);
- #define subsys_initcall(fn) module_init(fn)
- #define fs_initcall(fn) module_init(fn)
- #define device_initcall(fn) module_init(fn)
-+#define device_initcall_async(fn) module_init(fn)
- #define late_initcall(fn) module_init(fn)
-
- #define security_initcall(fn) module_init(fn)
-
-+#define module_init_async(fn) module_init(fn)
-+
- /* Each module must use one module_init(). */
- #define module_init(initfn) \
- static inline initcall_t __inittest(void) \
-Index: linux-2.6.27/init/main.c
-===================================================================
---- linux-2.6.27.orig/init/main.c 2008-10-14 16:55:47.000000000 +0200
-+++ linux-2.6.27/init/main.c 2008-10-14 17:00:59.000000000 +0200
-@@ -745,18 +745,47 @@ int do_one_initcall(initcall_t fn)
-
-
- extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
-+extern initcall_t __async_initcall_start[], __async_initcall_end[];
-
--static void __init do_initcalls(void)
-+static void __init do_async_initcalls(struct work_struct *dummy)
- {
- initcall_t *call;
-
-- for (call = __early_initcall_end; call < __initcall_end; call++)
-+ for (call = __async_initcall_start; call < __async_initcall_end; call++)
- do_one_initcall(*call);
-+}
-+
-+static struct workqueue_struct *async_init_wq;
-+
-+static void __init do_initcalls(void)
-+{
-+ initcall_t *call;
-+ static DECLARE_WORK(async_work, do_async_initcalls);
-+ int phase = 0; /* 0 = levels 0 - 6, 1 = level 6a, 2 = after level 6a */
-+
-+ async_init_wq = create_singlethread_workqueue("kasyncinit");
-+
-+ for (call = __early_initcall_end; call < __initcall_end; call++) {
-+ if (phase == 0 && call >= __async_initcall_start) {
-+ phase = 1;
-+ queue_work(async_init_wq, &async_work);
-+ }
-+ if (phase == 1 && call >= __async_initcall_end)
-+ phase = 2;
-+ if (phase != 1)
-+ do_one_initcall(*call);
-+ }
-
-- /* Make sure there is no pending stuff from the initcall sequence */
-+ /*
-+ * Make sure there is no pending stuff from the initcall sequence,
-+ * including the async initcalls
-+ */
- flush_scheduled_work();
-+ flush_workqueue(async_init_wq);
-+ destroy_workqueue(async_init_wq);
- }
-
-+
- /*
- * Ok, the machine is now initialized. None of the devices
- * have been touched yet, but the CPU subsystem is up and
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch
deleted file mode 100644
index f6db800c7..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From d1a26186ee222329a797bb0b2c8e2b5bc7d94d42 Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Fri, 18 Jul 2008 15:16:53 -0700
-Subject: [PATCH] fastboot: turn the USB hostcontroller initcalls into async initcalls
-
-the USB host controller init calls take a long time, mostly due to a
-"minimally 100 msec" delay *per port* during initialization.
-These are prime candidates for going in parallel to everything else.
-
-The USB device ordering is not affected by this due to the
-serialized-within-eachother property of async initcalls.
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- drivers/usb/host/ehci-hcd.c | 2 +-
- drivers/usb/host/ohci-hcd.c | 2 +-
- drivers/usb/host/uhci-hcd.c | 2 +-
- 3 files changed, 3 insertions(+), 3 deletions(-)
-
-Index: linux-2.6.27/drivers/usb/host/ehci-hcd.c
-===================================================================
---- linux-2.6.27.orig/drivers/usb/host/ehci-hcd.c 2008-10-14 16:55:35.000000000 +0200
-+++ linux-2.6.27/drivers/usb/host/ehci-hcd.c 2008-10-14 17:01:27.000000000 +0200
-@@ -1107,7 +1107,7 @@ clean0:
- #endif
- return retval;
- }
--module_init(ehci_hcd_init);
-+module_init_async(ehci_hcd_init);
-
- static void __exit ehci_hcd_cleanup(void)
- {
-Index: linux-2.6.27/drivers/usb/host/ohci-hcd.c
-===================================================================
---- linux-2.6.27.orig/drivers/usb/host/ohci-hcd.c 2008-10-14 16:55:35.000000000 +0200
-+++ linux-2.6.27/drivers/usb/host/ohci-hcd.c 2008-10-14 17:01:27.000000000 +0200
-@@ -1186,7 +1186,7 @@ static int __init ohci_hcd_mod_init(void
-
- return retval;
- }
--module_init(ohci_hcd_mod_init);
-+module_init_async(ohci_hcd_mod_init);
-
- static void __exit ohci_hcd_mod_exit(void)
- {
-Index: linux-2.6.27/drivers/usb/host/uhci-hcd.c
-===================================================================
---- linux-2.6.27.orig/drivers/usb/host/uhci-hcd.c 2008-10-14 16:55:35.000000000 +0200
-+++ linux-2.6.27/drivers/usb/host/uhci-hcd.c 2008-10-14 17:01:27.000000000 +0200
-@@ -999,7 +999,7 @@ static void __exit uhci_hcd_cleanup(void
- kfree(errbuf);
- }
-
--module_init(uhci_hcd_init);
-+module_init_async(uhci_hcd_init);
- module_exit(uhci_hcd_cleanup);
-
- MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch
deleted file mode 100644
index 4b10a9310..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From 60ddc2e5c44b4b9f5fcb440065469eacbeabf5eb Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Fri, 18 Jul 2008 15:17:35 -0700
-Subject: [PATCH] fastboot: convert a few non-critical ACPI drivers to async initcalls
-
-This patch converts a few non-critical ACPI drivers to async initcalls;
-these initcalls (battery, button and thermal) tend to take quite a bit of
-time (100's of milliseconds) due to the hardware they need to talk to,
-but are otherwise clearly non-essential for the boot process.
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- drivers/acpi/battery.c | 2 +-
- drivers/acpi/button.c | 2 +-
- drivers/acpi/thermal.c | 2 +-
- 3 files changed, 3 insertions(+), 3 deletions(-)
-
-Index: linux-2.6.27/drivers/acpi/battery.c
-===================================================================
---- linux-2.6.27.orig/drivers/acpi/battery.c 2008-10-14 16:55:15.000000000 +0200
-+++ linux-2.6.27/drivers/acpi/battery.c 2008-10-14 17:01:33.000000000 +0200
-@@ -904,5 +904,5 @@ static void __exit acpi_battery_exit(voi
- #endif
- }
-
--module_init(acpi_battery_init);
-+module_init_async(acpi_battery_init);
- module_exit(acpi_battery_exit);
-Index: linux-2.6.27/drivers/acpi/button.c
-===================================================================
---- linux-2.6.27.orig/drivers/acpi/button.c 2008-10-14 16:55:15.000000000 +0200
-+++ linux-2.6.27/drivers/acpi/button.c 2008-10-14 17:01:33.000000000 +0200
-@@ -545,5 +545,5 @@ static void __exit acpi_button_exit(void
- remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
- }
-
--module_init(acpi_button_init);
-+module_init_async(acpi_button_init);
- module_exit(acpi_button_exit);
-Index: linux-2.6.27/drivers/acpi/thermal.c
-===================================================================
---- linux-2.6.27.orig/drivers/acpi/thermal.c 2008-10-14 16:55:15.000000000 +0200
-+++ linux-2.6.27/drivers/acpi/thermal.c 2008-10-14 17:01:33.000000000 +0200
-@@ -1876,5 +1876,5 @@ static void __exit acpi_thermal_exit(voi
- return;
- }
-
--module_init(acpi_thermal_init);
-+module_init_async(acpi_thermal_init);
- module_exit(acpi_thermal_exit);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch
deleted file mode 100644
index 11fb34dd9..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 3e6558b693dd1e69e3177bc248977f067a769f14 Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 20 Jul 2008 08:59:24 -0700
-Subject: [PATCH] fastboot: hold the BKL over the async init call sequence
-
-Regular init calls are called with the BKL held; make sure
-the async init calls are also called with the BKL held.
-While this reduces parallelism a little, it does provide
-lock-for-lock compatibility. The hit to prallelism isn't too
-bad, most of the init calls are done immediately or actually
-block for their delays.
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- init/main.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-Index: linux-2.6.27/init/main.c
-===================================================================
---- linux-2.6.27.orig/init/main.c 2008-10-14 17:00:59.000000000 +0200
-+++ linux-2.6.27/init/main.c 2008-10-14 17:01:38.000000000 +0200
-@@ -751,8 +751,14 @@ static void __init do_async_initcalls(st
- {
- initcall_t *call;
-
-+ /*
-+ * For compatibility with normal init calls... take the BKL
-+ * not pretty, not desirable, but compatibility first
-+ */
-+ lock_kernel();
- for (call = __async_initcall_start; call < __async_initcall_end; call++)
- do_one_initcall(*call);
-+ unlock_kernel();
- }
-
- static struct workqueue_struct *async_init_wq;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0028-fastboot-sync-the-async-execution-before-late_initc.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0028-fastboot-sync-the-async-execution-before-late_initc.patch
deleted file mode 100644
index d1ff95a39..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0028-fastboot-sync-the-async-execution-before-late_initc.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-From 660625fb93f2fc0e633da9cb71d13d895b385f64 Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 20 Jul 2008 09:00:41 -0700
-Subject: [PATCH] fastboot: sync the async execution before late_initcall and move level 6s (sync) first
-
-Rene Herman points out several cases where it's basically needed to have
-all level 6/6a/6s calls done before the level 7 (late_initcall) code
-runs. This patch adds a sync point in the transition from the 6's to the
-7's.
-
-Second, this patch makes sure that level 6s (sync) happens before the
-async code starts, and puts a user in driver/pci in this category that
-needs to happen before device init.
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- drivers/pci/pci.c | 2 +-
- include/asm-generic/vmlinux.lds.h | 3 ++-
- init/main.c | 14 +++++++++++++-
- 3 files changed, 16 insertions(+), 3 deletions(-)
-
-Index: linux-2.6.27/drivers/pci/pci.c
-===================================================================
---- linux-2.6.27.orig/drivers/pci/pci.c 2008-10-14 16:55:30.000000000 +0200
-+++ linux-2.6.27/drivers/pci/pci.c 2008-10-14 17:01:42.000000000 +0200
-@@ -1909,7 +1909,7 @@ static int __devinit pci_setup(char *str
- }
- early_param("pci", pci_setup);
-
--device_initcall(pci_init);
-+device_initcall_sync(pci_init);
-
- EXPORT_SYMBOL(pci_reenable_device);
- EXPORT_SYMBOL(pci_enable_device_io);
-Index: linux-2.6.27/include/asm-generic/vmlinux.lds.h
-===================================================================
---- linux-2.6.27.orig/include/asm-generic/vmlinux.lds.h 2008-10-14 17:00:59.000000000 +0200
-+++ linux-2.6.27/include/asm-generic/vmlinux.lds.h 2008-10-14 17:01:42.000000000 +0200
-@@ -376,11 +376,12 @@
- *(.initcall5.init) \
- *(.initcall5s.init) \
- *(.initcallrootfs.init) \
-+ *(.initcall6s.init) \
- __async_initcall_start = .; \
- *(.initcall6a.init) \
- __async_initcall_end = .; \
- *(.initcall6.init) \
-- *(.initcall6s.init) \
-+ __device_initcall_end = .; \
- *(.initcall7.init) \
- *(.initcall7s.init)
-
-Index: linux-2.6.27/init/main.c
-===================================================================
---- linux-2.6.27.orig/init/main.c 2008-10-14 17:01:38.000000000 +0200
-+++ linux-2.6.27/init/main.c 2008-10-14 17:01:42.000000000 +0200
-@@ -746,6 +746,7 @@ int do_one_initcall(initcall_t fn)
-
- extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
- extern initcall_t __async_initcall_start[], __async_initcall_end[];
-+extern initcall_t __device_initcall_end[];
-
- static void __init do_async_initcalls(struct work_struct *dummy)
- {
-@@ -767,7 +768,13 @@ static void __init do_initcalls(void)
- {
- initcall_t *call;
- static DECLARE_WORK(async_work, do_async_initcalls);
-- int phase = 0; /* 0 = levels 0 - 6, 1 = level 6a, 2 = after level 6a */
-+ /*
-+ * 0 = levels 0 - 6,
-+ * 1 = level 6a,
-+ * 2 = after level 6a,
-+ * 3 = after level 6
-+ */
-+ int phase = 0;
-
- async_init_wq = create_singlethread_workqueue("kasyncinit");
-
-@@ -778,6 +785,11 @@ static void __init do_initcalls(void)
- }
- if (phase == 1 && call >= __async_initcall_end)
- phase = 2;
-+ if (phase == 2 && call >= __device_initcall_end) {
-+ phase = 3;
-+ /* make sure all async work is done before level 7 */
-+ flush_workqueue(async_init_wq);
-+ }
- if (phase != 1)
- do_one_initcall(*call);
- }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0029-fastboot-make-fastboot-a-config-option.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0029-fastboot-make-fastboot-a-config-option.patch
deleted file mode 100644
index 73b856372..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0029-fastboot-make-fastboot-a-config-option.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From 50b6962016b824dfac254b8f36fc6cac301c8a8d Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 20 Jul 2008 10:20:49 -0700
-Subject: [PATCH] fastboot: make fastboot a config option
-
-to mitigate the risks of async bootup, make fastboot a configuration
-option...
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- init/Kconfig | 11 +++++++++++
- init/main.c | 4 ++++
- 2 files changed, 15 insertions(+)
-
-Index: linux-2.6.27/init/Kconfig
-===================================================================
---- linux-2.6.27.orig/init/Kconfig 2008-10-14 16:55:47.000000000 +0200
-+++ linux-2.6.27/init/Kconfig 2008-10-14 17:01:48.000000000 +0200
-@@ -524,6 +524,17 @@ config CC_OPTIMIZE_FOR_SIZE
-
- If unsure, say Y.
-
-+config FASTBOOT
-+ bool "Fast boot support"
-+ help
-+ The fastboot option will cause the kernel to try to optimize
-+ for faster boot.
-+
-+ This includes doing some of the device initialization asynchronous
-+ as well as opportunistically trying to mount the root fs early.
-+
-+ If unsure, say N.
-+
- config SYSCTL
- bool
-
-Index: linux-2.6.27/init/main.c
-===================================================================
---- linux-2.6.27.orig/init/main.c 2008-10-14 17:01:42.000000000 +0200
-+++ linux-2.6.27/init/main.c 2008-10-14 17:01:48.000000000 +0200
-@@ -781,7 +781,11 @@ static void __init do_initcalls(void)
- for (call = __early_initcall_end; call < __initcall_end; call++) {
- if (phase == 0 && call >= __async_initcall_start) {
- phase = 1;
-+#ifdef CONFIG_FASTBOOT
- queue_work(async_init_wq, &async_work);
-+#else
-+ do_async_initcalls(NULL);
-+#endif
- }
- if (phase == 1 && call >= __async_initcall_end)
- phase = 2;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch
deleted file mode 100644
index 0e0c7fa84..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-From db62cd29f9b9142c19c574ca00916f66ff22ed4a Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 20 Jul 2008 13:01:28 -0700
-Subject: [PATCH] fastboot: retry mounting the root fs if we can't find init
-
-currently we wait until all device init is done before trying to mount
-the root fs, and to consequently execute init.
-
-In preparation for relaxing the first delay, this patch adds a retry
-attempt in case /sbin/init is not found. Before retrying, the code
-will wait for all device init to complete.
-
-While this patch by itself doesn't gain boot time yet (it needs follow on
-patches), the alternative already is to panic()...
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
----
- init/main.c | 19 +++++++++++++++++++
- 1 file changed, 19 insertions(+)
-
-Index: linux-2.6.27/init/main.c
-===================================================================
---- linux-2.6.27.orig/init/main.c 2008-10-14 17:01:48.000000000 +0200
-+++ linux-2.6.27/init/main.c 2008-10-14 17:02:42.000000000 +0200
-@@ -845,6 +845,7 @@ static void run_init_process(char *init_
- */
- static int noinline init_post(void)
- {
-+ int retry_count = 1;
- free_initmem();
- unlock_kernel();
- mark_rodata_ro();
-@@ -865,6 +866,7 @@ static int noinline init_post(void)
- ramdisk_execute_command);
- }
-
-+retry:
- /*
- * We try each of these until one succeeds.
- *
-@@ -877,6 +879,23 @@ static int noinline init_post(void)
- "defaults...\n", execute_command);
- }
- run_init_process("/sbin/init");
-+
-+ if (retry_count > 0) {
-+ retry_count--;
-+ /*
-+ * We haven't found init yet... potentially because the device
-+ * is still being probed. We need to
-+ * - flush keventd and friends
-+ * - wait for the known devices to complete their probing
-+ * - try to mount the root fs again
-+ */
-+ flush_scheduled_work();
-+ while (driver_probe_done() != 0)
-+ msleep(100);
-+ prepare_namespace();
-+ goto retry;
-+ }
-+
- run_init_process("/etc/init");
- run_init_process("/bin/init");
- run_init_process("/bin/sh");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch
deleted file mode 100644
index 03b3b8220..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From b52c36a95ed8026b6925fe8595ebcab6921ae62d Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 20 Jul 2008 13:07:09 -0700
-Subject: [PATCH] fastboot: make the raid autodetect code wait for all devices to init
-
-The raid autodetect code really needs to have all devices probed before
-it can detect raid arrays; not doing so would give rather messy situations
-where arrays would get detected as degraded while they shouldn't be etc.
-
-This is in preparation of removing the "wait for everything to init"
-code that makes everyone pay, not just raid users.
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
----
- init/do_mounts_md.c | 7 +++++++
- 1 files changed, 7 insertions(+), 0 deletions(-)
-
-diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
-index 693d246..c0412a9 100644
---- a/init/do_mounts_md.c
-+++ b/init/do_mounts_md.c
-@@ -267,9 +267,16 @@ __setup("md=", md_setup);
- void __init md_run_setup(void)
- {
- create_dev("/dev/md0", MKDEV(MD_MAJOR, 0));
-+
- if (raid_noautodetect)
- printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
- else {
-+ /*
-+ * Since we don't want to detect and use half a raid array, we need to
-+ * wait for the known devices to complete their probing
-+ */
-+ while (driver_probe_done() != 0)
-+ msleep(100);
- int fd = sys_open("/dev/md0", 0, 0);
- if (fd >= 0) {
- sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
---
-1.5.4.3
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch
deleted file mode 100644
index c963d4eaf..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0032-fastboot-remove-wait-for-all-devices-before-mounti.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 1b5a2bd0602010398cb473d1b821a9f1c1399caf Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 20 Jul 2008 13:12:16 -0700
-Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay
-
-In the non-initrd case, we wait for all devices to finish their
-probing before we try to mount the rootfs.
-In practice, this means that we end up waiting 2 extra seconds for
-the PS/2 mouse probing even though the root holding device has been
-ready since a long time.
-
-The previous two patches in this series made the RAID autodetect code
-do it's own "wait for probing to be done" code, and added
-"wait and retry" functionality in case the root device isn't actually
-available.
-
-These two changes should make it safe to remove the delay itself,
-and this patch does this. On my test laptop, this reduces the boot time
-by 2 seconds (kernel time goes from 3.9 to 1.9 seconds).
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
----
- init/do_mounts.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-Index: linux-2.6.27/init/do_mounts.c
-===================================================================
---- linux-2.6.27.orig/init/do_mounts.c 2008-10-14 16:57:34.000000000 +0200
-+++ linux-2.6.27/init/do_mounts.c 2008-10-14 17:02:51.000000000 +0200
-@@ -365,9 +365,11 @@ void __init prepare_namespace(void)
- ssleep(root_delay);
- }
-
-+#ifndef CONFIG_FASTBOOT
- /* wait for the known devices to complete their probing */
- while (driver_probe_done() != 0)
- msleep(100);
-+#endif
-
- md_run_setup();
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch
deleted file mode 100644
index 55c6c1ada..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 799d0da9e645258b9d1ae11d4aac73c9474906e3 Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 20 Jul 2008 16:30:29 -0700
-Subject: [PATCH] fastboot: make the RAID autostart code print a message just before waiting
-
-As requested/suggested by Neil Brown: make the raid code print that it's
-about to wait for probing to be done as well as give a suggestion on how
-to disable the probing if the user doesn't use raid.
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com
----
- init/do_mounts_md.c | 4 +++-
- 1 files changed, 3 insertions(+), 1 deletions(-)
-
-diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
-index c0412a9..1ec5c41 100644
---- a/init/do_mounts_md.c
-+++ b/init/do_mounts_md.c
-@@ -275,7 +275,9 @@ void __init md_run_setup(void)
- * Since we don't want to detect and use half a raid array, we need to
- * wait for the known devices to complete their probing
- */
-- while (driver_probe_done() != 0)
-+ printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
-+ printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
-+ while (driver_probe_done() < 0)
- msleep(100);
- int fd = sys_open("/dev/md0", 0, 0);
- if (fd >= 0) {
---
-1.5.4.3
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0034-fastboot-fix-typo-in-init-Kconfig-text.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0034-fastboot-fix-typo-in-init-Kconfig-text.patch
deleted file mode 100644
index c6f3b8e9a..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0034-fastboot-fix-typo-in-init-Kconfig-text.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From 1a23ed42e1baf0481cc70c2f71d97b0bf0f1be70 Mon Sep 17 00:00:00 2001
-From: Ingo Molnar <mingo@elte.hu>
-Date: Thu, 31 Jul 2008 12:52:29 +0200
-Subject: [PATCH] fastboot: fix typo in init/Kconfig text
-
-noticed by Randy Dunlap.
-
-Reported-by: Randy Dunlap <randy.dunlap@oracle.com>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- init/Kconfig | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Index: linux-2.6.27/init/Kconfig
-===================================================================
---- linux-2.6.27.orig/init/Kconfig 2008-10-14 17:02:39.000000000 +0200
-+++ linux-2.6.27/init/Kconfig 2008-10-14 17:02:56.000000000 +0200
-@@ -530,7 +530,7 @@ config FASTBOOT
- The fastboot option will cause the kernel to try to optimize
- for faster boot.
-
-- This includes doing some of the device initialization asynchronous
-+ This includes doing some of the device initialization asynchronously
- as well as opportunistically trying to mount the root fs early.
-
- If unsure, say N.
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch
deleted file mode 100644
index b8af74eaf..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0035-fastboot-remove-duplicate-unpack_to_rootfs.patch
+++ /dev/null
@@ -1,161 +0,0 @@
-From 8929dda869d51b953c8f300864da62297db8a74e Mon Sep 17 00:00:00 2001
-From: Li, Shaohua <shaohua.li@intel.com>
-Date: Wed, 13 Aug 2008 17:26:01 +0800
-Subject: [PATCH] fastboot: remove duplicate unpack_to_rootfs()
-
-we check if initrd is initramfs first and then do real unpack. The
-check isn't required, we can directly do unpack. If initrd isn't
-initramfs, we can remove garbage. In my laptop, this saves 0.1s boot
-time. This penalizes non-initramfs case, but now initramfs is mostly
-widely used.
-
-Signed-off-by: Shaohua Li <shaohua.li@intel.com>
-Acked-by: Arjan van de Ven <arjan@infradead.org>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- init/initramfs.c | 71 ++++++++++++++++++++++++++++++++++++++++++-----------
- 1 files changed, 56 insertions(+), 15 deletions(-)
-
-diff --git a/init/initramfs.c b/init/initramfs.c
-index 644fc01..da8d030 100644
---- a/init/initramfs.c
-+++ b/init/initramfs.c
-@@ -5,6 +5,7 @@
- #include <linux/fcntl.h>
- #include <linux/delay.h>
- #include <linux/string.h>
-+#include <linux/dirent.h>
- #include <linux/syscalls.h>
-
- static __initdata char *message;
-@@ -121,8 +122,6 @@ static __initdata char *victim;
- static __initdata unsigned count;
- static __initdata loff_t this_header, next_header;
-
--static __initdata int dry_run;
--
- static inline void __init eat(unsigned n)
- {
- victim += n;
-@@ -183,10 +182,6 @@ static int __init do_header(void)
- parse_header(collected);
- next_header = this_header + N_ALIGN(name_len) + body_len;
- next_header = (next_header + 3) & ~3;
-- if (dry_run) {
-- read_into(name_buf, N_ALIGN(name_len), GotName);
-- return 0;
-- }
- state = SkipIt;
- if (name_len <= 0 || name_len > PATH_MAX)
- return 0;
-@@ -257,8 +252,6 @@ static int __init do_name(void)
- free_hash();
- return 0;
- }
-- if (dry_run)
-- return 0;
- clean_path(collected, mode);
- if (S_ISREG(mode)) {
- int ml = maybe_link();
-@@ -423,10 +416,9 @@ static void __init flush_window(void)
- outcnt = 0;
- }
-
--static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
-+static char * __init unpack_to_rootfs(char *buf, unsigned len)
- {
- int written;
-- dry_run = check_only;
- header_buf = kmalloc(110, GFP_KERNEL);
- symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
- name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
-@@ -520,10 +512,57 @@ skip:
- initrd_end = 0;
- }
-
-+#define BUF_SIZE 1024
-+static void __init clean_rootfs(void)
-+{
-+ int fd;
-+ void *buf;
-+ struct linux_dirent64 *dirp;
-+ int count;
-+
-+ fd = sys_open("/", O_RDONLY, 0);
-+ WARN_ON(fd < 0);
-+ if (fd < 0)
-+ return;
-+ buf = kzalloc(BUF_SIZE, GFP_KERNEL);
-+ WARN_ON(!buf);
-+ if (!buf) {
-+ sys_close(fd);
-+ return;
-+ }
-+
-+ dirp = buf;
-+ count = sys_getdents64(fd, dirp, BUF_SIZE);
-+ while (count > 0) {
-+ while (count > 0) {
-+ struct stat st;
-+ int ret;
-+
-+ ret = sys_newlstat(dirp->d_name, &st);
-+ WARN_ON_ONCE(ret);
-+ if (!ret) {
-+ if (S_ISDIR(st.st_mode))
-+ sys_rmdir(dirp->d_name);
-+ else
-+ sys_unlink(dirp->d_name);
-+ }
-+
-+ count -= dirp->d_reclen;
-+ dirp = (void *)dirp + dirp->d_reclen;
-+ }
-+ dirp = buf;
-+ memset(buf, 0, BUF_SIZE);
-+ count = sys_getdents64(fd, dirp, BUF_SIZE);
-+ }
-+
-+ sys_close(fd);
-+ kfree(buf);
-+}
-+
- static int __init populate_rootfs(void)
- {
- char *err = unpack_to_rootfs(__initramfs_start,
-- __initramfs_end - __initramfs_start, 0);
-+ __initramfs_end - __initramfs_start);
- if (err)
- panic(err);
- if (initrd_start) {
-@@ -531,13 +570,15 @@ static int __init populate_rootfs(void)
- int fd;
- printk(KERN_INFO "checking if image is initramfs...");
- err = unpack_to_rootfs((char *)initrd_start,
-- initrd_end - initrd_start, 1);
-+ initrd_end - initrd_start);
- if (!err) {
- printk(" it is\n");
-- unpack_to_rootfs((char *)initrd_start,
-- initrd_end - initrd_start, 0);
- free_initrd();
- return 0;
-+ } else {
-+ clean_rootfs();
-+ unpack_to_rootfs(__initramfs_start,
-+ __initramfs_end - __initramfs_start);
- }
- printk("it isn't (%s); looks like an initrd\n", err);
- fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
-@@ -550,7 +591,7 @@ static int __init populate_rootfs(void)
- #else
- printk(KERN_INFO "Unpacking initramfs...");
- err = unpack_to_rootfs((char *)initrd_start,
-- initrd_end - initrd_start, 0);
-+ initrd_end - initrd_start);
- if (err)
- panic(err);
- printk(" done\n");
---
-1.5.4.3
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0036-warning-fix-init-do_mounts_md-c.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0036-warning-fix-init-do_mounts_md-c.patch
deleted file mode 100644
index 9ba44a892..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0036-warning-fix-init-do_mounts_md-c.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From fa3038625d7df2a1244c5b753069e7fdf99af3b5 Mon Sep 17 00:00:00 2001
-From: Ingo Molnar <mingo@elte.hu>
-Date: Mon, 18 Aug 2008 12:54:00 +0200
-Subject: [PATCH] warning: fix init do_mounts_md c
-MIME-Version: 1.0
-Content-Type: text/plain; charset=utf-8
-Content-Transfer-Encoding: 8bit
-
-fix warning:
-
- init/do_mounts_md.c: In function ‘md_run_setup’:
- init/do_mounts_md.c:282: warning: ISO C90 forbids mixed declarations and code
-
-also, use the opportunity to put the RAID autodetection code
-into a separate function - this also solves a checkpatch style warning.
-
-No code changed:
-
-md5:
- aa36a35faef371b05f1974ad583bdbbd do_mounts_md.o.before.asm
- aa36a35faef371b05f1974ad583bdbbd do_mounts_md.o.after.asm
-
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- init/do_mounts_md.c | 36 +++++++++++++++++++++---------------
- 1 files changed, 21 insertions(+), 15 deletions(-)
-
-diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
-index 1ec5c41..c0dfd3c 100644
---- a/init/do_mounts_md.c
-+++ b/init/do_mounts_md.c
-@@ -264,26 +264,32 @@ static int __init raid_setup(char *str)
- __setup("raid=", raid_setup);
- __setup("md=", md_setup);
-
-+static void autodetect_raid(void)
-+{
-+ int fd;
-+
-+ /*
-+ * Since we don't want to detect and use half a raid array, we need to
-+ * wait for the known devices to complete their probing
-+ */
-+ printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
-+ printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
-+ while (driver_probe_done() < 0)
-+ msleep(100);
-+ fd = sys_open("/dev/md0", 0, 0);
-+ if (fd >= 0) {
-+ sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
-+ sys_close(fd);
-+ }
-+}
-+
- void __init md_run_setup(void)
- {
- create_dev("/dev/md0", MKDEV(MD_MAJOR, 0));
-
- if (raid_noautodetect)
- printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
-- else {
-- /*
-- * Since we don't want to detect and use half a raid array, we need to
-- * wait for the known devices to complete their probing
-- */
-- printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n");
-- printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n");
-- while (driver_probe_done() < 0)
-- msleep(100);
-- int fd = sys_open("/dev/md0", 0, 0);
-- if (fd >= 0) {
-- sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
-- sys_close(fd);
-- }
-- }
-+ else
-+ autodetect_raid();
- md_setup_drive();
- }
---
-1.5.4.3
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0037-init-initramfs.c-unused-function-when-compiling-wit.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0037-init-initramfs.c-unused-function-when-compiling-wit.patch
deleted file mode 100644
index 159f98867..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0037-init-initramfs.c-unused-function-when-compiling-wit.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From b4931e6c151acad06b4c12dc7cdb634366d7d27a Mon Sep 17 00:00:00 2001
-From: Steven Noonan <steven@uplinklabs.net>
-Date: Mon, 8 Sep 2008 16:19:10 -0700
-Subject: [PATCH] init/initramfs.c: unused function when compiling without CONFIG_BLK_DEV_RAM
-
-Fixing compiler warning when the kernel isn't compiled with support
-for RAM block devices enabled.
-
-Signed-off-by: Steven Noonan <steven@uplinklabs.net>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- init/initramfs.c | 2 ++
- 1 files changed, 2 insertions(+), 0 deletions(-)
-
-diff --git a/init/initramfs.c b/init/initramfs.c
-index da8d030..2f056e2 100644
---- a/init/initramfs.c
-+++ b/init/initramfs.c
-@@ -512,6 +512,7 @@ skip:
- initrd_end = 0;
- }
-
-+#ifdef CONFIG_BLK_DEV_RAM
- #define BUF_SIZE 1024
- static void __init clean_rootfs(void)
- {
-@@ -558,6 +559,7 @@ static void __init clean_rootfs(void)
- sys_close(fd);
- kfree(buf);
- }
-+#endif
-
- static int __init populate_rootfs(void)
- {
---
-1.5.4.3
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch
deleted file mode 100644
index 8d1e3f22f..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From 5e4f25d1f43991324794657655bbbc43983522a2 Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@infradead.org>
-Date: Wed, 10 Sep 2008 08:25:34 -0700
-Subject: [PATCH] fastboot: fix blackfin breakage due to vmlinux.lds change
-
-As reported by Mike Frysinger, the vmlinux.lds changes should
-have used VMLINUX_SYMBOL()...
-
-Reported-by: Mike Frysinger <vapier.adi@gmail.com>
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Bryan Wu <cooloney@kernel.org>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
----
- include/asm-generic/vmlinux.lds.h | 6 +++---
- 1 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
-index b9be858..ccabc4e 100644
---- a/include/asm-generic/vmlinux.lds.h
-+++ b/include/asm-generic/vmlinux.lds.h
-@@ -377,11 +377,11 @@
- *(.initcall5s.init) \
- *(.initcallrootfs.init) \
- *(.initcall6s.init) \
-- __async_initcall_start = .; \
-+ VMLINUX_SYMBOL(__async_initcall_start) = .; \
- *(.initcall6a.init) \
-- __async_initcall_end = .; \
-+ VMLINUX_SYMBOL(__async_initcall_end) = .; \
- *(.initcall6.init) \
-- __device_initcall_end = .; \
-+ VMLINUX_SYMBOL(__device_initcall_end) = .; \
- *(.initcall7.init) \
- *(.initcall7s.init)
-
---
-1.5.4.3
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch
deleted file mode 100644
index 6bcaab108..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0039-Add-a-script-to-visualize-the-kernel-boot-process.patch
+++ /dev/null
@@ -1,177 +0,0 @@
-From 77e9695b9d5c9ce761dedc193045d9cb64b8e245 Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sat, 13 Sep 2008 09:36:06 -0700
-Subject: [PATCH] Add a script to visualize the kernel boot process / time
-
-When optimizing the kernel boot time, it's very valuable to visualize
-what is going on at which time. In addition, with the fastboot asynchronous
-initcall level, it's very valuable to see which initcall gets run where
-and when.
-
-This patch adds a script to turn a dmesg into a SVG graph (that can be
-shown with tools such as InkScape, Gimp or Firefox) and a small change
-to the initcall code to print the PID of the thread calling the initcall
-(so that the script can work out the parallelism).
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
----
- init/main.c | 1
- scripts/bootgraph.pl | 138 +++++++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 139 insertions(+)
- create mode 100644 scripts/bootgraph.pl
-
-Index: linux-2.6.27/init/main.c
-===================================================================
---- linux-2.6.27.orig/init/main.c 2008-10-14 17:02:46.000000000 +0200
-+++ linux-2.6.27/init/main.c 2008-10-14 17:05:23.000000000 +0200
-@@ -709,6 +709,7 @@ int do_one_initcall(initcall_t fn)
-
- if (initcall_debug) {
- printk("calling %pF\n", fn);
-+ printk(" @ %i\n", task_pid_nr(current));
- t0 = ktime_get();
- }
-
-Index: linux-2.6.27/scripts/bootgraph.pl
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/scripts/bootgraph.pl 2008-10-14 17:03:34.000000000 +0200
-@@ -0,0 +1,138 @@
-+#!/usr/bin/perl
-+
-+# Copyright 2008, Intel Corporation
-+#
-+# This file is part of the Linux kernel
-+#
-+# This program file is free software; you can redistribute it and/or modify it
-+# under the terms of the GNU General Public License as published by the
-+# Free Software Foundation; version 2 of the License.
-+#
-+# This program is distributed in the hope that it will be useful, but WITHOUT
-+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-+# for more details.
-+#
-+# You should have received a copy of the GNU General Public License
-+# along with this program in a file named COPYING; if not, write to the
-+# Free Software Foundation, Inc.,
-+# 51 Franklin Street, Fifth Floor,
-+# Boston, MA 02110-1301 USA
-+#
-+# Authors:
-+# Arjan van de Ven <arjan@linux.intel.com>
-+
-+
-+#
-+# This script turns a dmesg output into a SVG graphic that shows which
-+# functions take how much time. You can view SVG graphics with various
-+# programs, including Inkscape, The Gimp and Firefox.
-+#
-+#
-+# For this script to work, the kernel needs to be compiled with the
-+# CONFIG_PRINTK_TIME configuration option enabled, and with
-+# "initcall_debug" passed on the kernel command line.
-+#
-+# usage:
-+# dmesg | perl scripts/bootgraph.pl > output.svg
-+#
-+
-+my @rows;
-+my %start, %end, %row;
-+my $done = 0;
-+my $rowcount = 0;
-+my $maxtime = 0;
-+my $count = 0;
-+while (<>) {
-+ my $line = $_;
-+ if ($line =~ /([0-9\.]+)\] calling ([a-zA-Z\_]+)\+/) {
-+ my $func = $2;
-+ if ($done == 0) {
-+ $start{$func} = $1;
-+ }
-+ $row{$func} = 1;
-+ if ($line =~ /\@ ([0-9]+)/) {
-+ my $pid = $1;
-+ if (!defined($rows[$pid])) {
-+ $rowcount = $rowcount + 1;
-+ $rows[$pid] = $rowcount;
-+ }
-+ $row{$func} = $rows[$pid];
-+ }
-+ $count = $count + 1;
-+ }
-+
-+ if ($line =~ /([0-9\.]+)\] initcall ([a-zA-Z\_]+)\+.*returned/) {
-+ if ($done == 0) {
-+ $end{$2} = $1;
-+ $maxtime = $1;
-+ }
-+ }
-+ if ($line =~ /Write protecting the/) {
-+ $done = 1;
-+ }
-+}
-+
-+if ($count == 0) {
-+ print "No data found in the dmesg. Make sure CONFIG_PRINTK_TIME is enabled and\n";
-+ print "that initcall_debug is passed on the kernel command line.\n\n";
-+ print "Usage: \n";
-+ print " dmesg | perl scripts/bootgraph.pl > output.svg\n\n";
-+ exit;
-+}
-+
-+print "<?xml version=\"1.0\" standalone=\"no\"?> \n";
-+print "<svg width=\"1000\" height=\"100%\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n";
-+
-+my @styles;
-+
-+$styles[0] = "fill:rgb(0,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[1] = "fill:rgb(0,255,0);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[2] = "fill:rgb(255,0,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[3] = "fill:rgb(255,255,20);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[4] = "fill:rgb(255,0,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[5] = "fill:rgb(0,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[6] = "fill:rgb(0,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[7] = "fill:rgb(0,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[8] = "fill:rgb(255,0,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[9] = "fill:rgb(255,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[10] = "fill:rgb(255,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+$styles[11] = "fill:rgb(128,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-+
-+my $mult = 950.0 / $maxtime;
-+my $threshold = 0.0500 / $maxtime;
-+my $stylecounter = 0;
-+while (($key,$value) = each %start) {
-+ my $duration = $end{$key} - $start{$key};
-+
-+ if ($duration >= $threshold) {
-+ my $s, $s2, $e, $y;
-+ $s = $value * $mult;
-+ $s2 = $s + 6;
-+ $e = $end{$key} * $mult;
-+ $w = $e - $s;
-+
-+ $y = $row{$key} * 150;
-+ $y2 = $y + 4;
-+
-+ $style = $styles[$stylecounter];
-+ $stylecounter = $stylecounter + 1;
-+ if ($stylecounter > 11) {
-+ $stylecounter = 0;
-+ };
-+
-+ print "<rect x=\"$s\" width=\"$w\" y=\"$y\" height=\"145\" style=\"$style\"/>\n";
-+ print "<text transform=\"translate($s2,$y2) rotate(90)\">$key</text>\n";
-+ }
-+}
-+
-+
-+# print the time line on top
-+my $time = 0.0;
-+while ($time < $maxtime) {
-+ my $s2 = $time * $mult;
-+ print "<text transform=\"translate($s2,89) rotate(90)\">$time</text>\n";
-+ $time = $time + 0.1;
-+}
-+
-+print "</svg>\n";
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch
deleted file mode 100644
index 0daba9d2c..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From 5470e09b98074974316bbf98c8b8da01d670c2a4 Mon Sep 17 00:00:00 2001
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 14 Sep 2008 15:30:52 -0700
-Subject: [PATCH] fastboot: fix issues and improve output of bootgraph.pl
-
-David Sanders reported some issues with bootgraph.pl's display
-of his sytems bootup; this commit fixes these by scaling the graph
-not from 0 - end time but from the first initcall to the end time;
-the minimum display size etc also now need to scale with this, as does
-the axis display.
-
-Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
----
- scripts/bootgraph.pl | 25 +++++++++++++++++--------
- 1 files changed, 17 insertions(+), 8 deletions(-)
-
-diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
-index d459b8b..4e5f4ab 100644
---- a/scripts/bootgraph.pl
-+++ b/scripts/bootgraph.pl
-@@ -42,6 +42,7 @@ my %start, %end, %row;
- my $done = 0;
- my $rowcount = 0;
- my $maxtime = 0;
-+my $firsttime = 100;
- my $count = 0;
- while (<>) {
- my $line = $_;
-@@ -49,6 +50,9 @@ while (<>) {
- my $func = $2;
- if ($done == 0) {
- $start{$func} = $1;
-+ if ($1 < $firsttime) {
-+ $firsttime = $1;
-+ }
- }
- $row{$func} = 1;
- if ($line =~ /\@ ([0-9]+)/) {
-@@ -71,6 +75,9 @@ while (<>) {
- if ($line =~ /Write protecting the/) {
- $done = 1;
- }
-+ if ($line =~ /Freeing unused kernel memory/) {
-+ $done = 1;
-+ }
- }
-
- if ($count == 0) {
-@@ -99,17 +106,17 @@ $styles[9] = "fill:rgb(255,255,128);fill-opacity:0.5;stroke-width:1;stroke:rgb(0
- $styles[10] = "fill:rgb(255,128,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
- $styles[11] = "fill:rgb(128,255,255);fill-opacity:0.5;stroke-width:1;stroke:rgb(0,0,0)";
-
--my $mult = 950.0 / $maxtime;
--my $threshold = 0.0500 / $maxtime;
-+my $mult = 950.0 / ($maxtime - $firsttime);
-+my $threshold = ($maxtime - $firsttime) / 60.0;
- my $stylecounter = 0;
- while (($key,$value) = each %start) {
- my $duration = $end{$key} - $start{$key};
-
- if ($duration >= $threshold) {
- my $s, $s2, $e, $y;
-- $s = $value * $mult;
-+ $s = ($value - $firsttime) * $mult;
- $s2 = $s + 6;
-- $e = $end{$key} * $mult;
-+ $e = ($end{$key} - $firsttime) * $mult;
- $w = $e - $s;
-
- $y = $row{$key} * 150;
-@@ -128,11 +135,13 @@ while (($key,$value) = each %start) {
-
-
- # print the time line on top
--my $time = 0.0;
-+my $time = $firsttime;
-+my $step = ($maxtime - $firsttime) / 15;
- while ($time < $maxtime) {
-- my $s2 = $time * $mult;
-- print "<text transform=\"translate($s2,89) rotate(90)\">$time</text>\n";
-- $time = $time + 0.1;
-+ my $s2 = ($time - $firsttime) * $mult;
-+ my $tm = int($time * 100) / 100.0;
-+ print "<text transform=\"translate($s2,89) rotate(90)\">$tm</text>\n";
-+ $time = $time + $step;
- }
-
- print "</svg>\n";
---
-1.5.4.3
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0041-r8169-8101e.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0041-r8169-8101e.patch
deleted file mode 100644
index 781c9a127..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0041-r8169-8101e.patch
+++ /dev/null
@@ -1,940 +0,0 @@
-From 771c0d99c0ab3ca7f1a9bc400e8259171b518d5f Mon Sep 17 00:00:00 2001
-From: Francois Romieu <romieu@fr.zoreil.com>
-Date: Thu, 21 Aug 2008 23:20:40 +0200
-Subject: [PATCH] r8169: fix RxMissed register access
-
-- the register location is defined for the 8169 chipset only and
- there is no 8169 beyond RTL_GIGA_MAC_VER_06
-- only the lower 3 bytes of the register are valid
-
-Fixes:
-1. http://bugzilla.kernel.org/show_bug.cgi?id=10180
-2. http://bugzilla.kernel.org/show_bug.cgi?id=11062 (bits of)
-
-Tested by Hermann Gausterer and Adam Huffman.
-
-Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
-Cc: Edward Hsu <edward_hsu@realtek.com.tw>
----
- drivers/net/r8169.c | 25 ++++++++++++++-----------
- 1 files changed, 14 insertions(+), 11 deletions(-)
-
-diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
-index 0f6f974..4190ee7 100644
---- a/drivers/net/r8169.c
-+++ b/drivers/net/r8169.c
-@@ -2099,8 +2099,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
-
- RTL_R8(IntrMask);
-
-- RTL_W32(RxMissed, 0);
--
- rtl_set_rx_mode(dev);
-
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-@@ -2143,8 +2141,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
-
- RTL_R8(IntrMask);
-
-- RTL_W32(RxMissed, 0);
--
- rtl_set_rx_mode(dev);
-
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-@@ -2922,6 +2918,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
- return work_done;
- }
-
-+static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
-+{
-+ struct rtl8169_private *tp = netdev_priv(dev);
-+
-+ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
-+ return;
-+
-+ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
-+ RTL_W32(RxMissed, 0);
-+}
-+
- static void rtl8169_down(struct net_device *dev)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
-@@ -2939,9 +2946,7 @@ core_down:
-
- rtl8169_asic_down(ioaddr);
-
-- /* Update the error counts. */
-- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
-- RTL_W32(RxMissed, 0);
-+ rtl8169_rx_missed(dev, ioaddr);
-
- spin_unlock_irq(&tp->lock);
-
-@@ -3063,8 +3068,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
-
- if (netif_running(dev)) {
- spin_lock_irqsave(&tp->lock, flags);
-- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
-- RTL_W32(RxMissed, 0);
-+ rtl8169_rx_missed(dev, ioaddr);
- spin_unlock_irqrestore(&tp->lock, flags);
- }
-
-@@ -3089,8 +3093,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
-
- rtl8169_asic_down(ioaddr);
-
-- dev->stats.rx_missed_errors += RTL_R32(RxMissed);
-- RTL_W32(RxMissed, 0);
-+ rtl8169_rx_missed(dev, ioaddr);
-
- spin_unlock_irq(&tp->lock);
-
---
-1.5.3.3
-
-From 6ee4bc96d446a9c466a18b715c7ab2d662c03ebd Mon Sep 17 00:00:00 2001
-From: Francois Romieu <romieu@fr.zoreil.com>
-Date: Sat, 26 Jul 2008 14:26:06 +0200
-Subject: [PATCH] r8169: get ethtool settings through the generic mii helper
-
-It avoids to report unsupported link capabilities with
-the fast-ethernet only 8101/8102.
-
-Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
-Tested-by: Martin Capitanio <martin@capitanio.org>
-Fixed-by: Ivan Vecera <ivecera@redhat.com>
-Cc: Edward Hsu <edward_hsu@realtek.com.tw>
----
- drivers/net/r8169.c | 99 +++++++++++++++++++++++---------------------------
- 1 files changed, 46 insertions(+), 53 deletions(-)
-
-diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
-index 4190ee7..7e026a6 100644
---- a/drivers/net/r8169.c
-+++ b/drivers/net/r8169.c
-@@ -370,8 +370,9 @@ struct ring_info {
- };
-
- enum features {
-- RTL_FEATURE_WOL = (1 << 0),
-- RTL_FEATURE_MSI = (1 << 1),
-+ RTL_FEATURE_WOL = (1 << 0),
-+ RTL_FEATURE_MSI = (1 << 1),
-+ RTL_FEATURE_GMII = (1 << 2),
- };
-
- struct rtl8169_private {
-@@ -406,13 +407,15 @@ struct rtl8169_private {
- struct vlan_group *vlgrp;
- #endif
- int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
-- void (*get_settings)(struct net_device *, struct ethtool_cmd *);
-+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
- void (*phy_reset_enable)(void __iomem *);
- void (*hw_start)(struct net_device *);
- unsigned int (*phy_reset_pending)(void __iomem *);
- unsigned int (*link_ok)(void __iomem *);
- struct delayed_work task;
- unsigned features;
-+
-+ struct mii_if_info mii;
- };
-
- MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
-@@ -482,6 +485,23 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
- return value;
- }
-
-+static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
-+ int val)
-+{
-+ struct rtl8169_private *tp = netdev_priv(dev);
-+ void __iomem *ioaddr = tp->mmio_addr;
-+
-+ mdio_write(ioaddr, location, val);
-+}
-+
-+static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
-+{
-+ struct rtl8169_private *tp = netdev_priv(dev);
-+ void __iomem *ioaddr = tp->mmio_addr;
-+
-+ return mdio_read(ioaddr, location);
-+}
-+
- static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
- {
- RTL_W16(IntrMask, 0x0000);
-@@ -850,7 +870,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
-
- #endif
-
--static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
-+static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
-@@ -867,65 +887,29 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
-
- cmd->speed = SPEED_1000;
- cmd->duplex = DUPLEX_FULL; /* Always set */
-+
-+ return 0;
- }
-
--static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
-+static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
-- void __iomem *ioaddr = tp->mmio_addr;
-- u8 status;
--
-- cmd->supported = SUPPORTED_10baseT_Half |
-- SUPPORTED_10baseT_Full |
-- SUPPORTED_100baseT_Half |
-- SUPPORTED_100baseT_Full |
-- SUPPORTED_1000baseT_Full |
-- SUPPORTED_Autoneg |
-- SUPPORTED_TP;
--
-- cmd->autoneg = 1;
-- cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
--
-- if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
-- cmd->advertising |= ADVERTISED_10baseT_Half;
-- if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
-- cmd->advertising |= ADVERTISED_10baseT_Full;
-- if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
-- cmd->advertising |= ADVERTISED_100baseT_Half;
-- if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
-- cmd->advertising |= ADVERTISED_100baseT_Full;
-- if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
-- cmd->advertising |= ADVERTISED_1000baseT_Full;
--
-- status = RTL_R8(PHYstatus);
--
-- if (status & _1000bpsF)
-- cmd->speed = SPEED_1000;
-- else if (status & _100bps)
-- cmd->speed = SPEED_100;
-- else if (status & _10bps)
-- cmd->speed = SPEED_10;
--
-- if (status & TxFlowCtrl)
-- cmd->advertising |= ADVERTISED_Asym_Pause;
-- if (status & RxFlowCtrl)
-- cmd->advertising |= ADVERTISED_Pause;
--
-- cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
-- DUPLEX_FULL : DUPLEX_HALF;
-+
-+ return mii_ethtool_gset(&tp->mii, cmd);
- }
-
- static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
-+ int rc;
-
- spin_lock_irqsave(&tp->lock, flags);
-
-- tp->get_settings(dev, cmd);
-+ rc = tp->get_settings(dev, cmd);
-
- spin_unlock_irqrestore(&tp->lock, flags);
-- return 0;
-+ return rc;
- }
-
- static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
-@@ -1513,7 +1497,7 @@ static const struct rtl_cfg_info {
- unsigned int align;
- u16 intr_event;
- u16 napi_event;
-- unsigned msi;
-+ unsigned features;
- } rtl_cfg_infos [] = {
- [RTL_CFG_0] = {
- .hw_start = rtl_hw_start_8169,
-@@ -1522,7 +1506,7 @@ static const struct rtl_cfg_info {
- .intr_event = SYSErr | LinkChg | RxOverflow |
- RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
- .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
-- .msi = 0
-+ .features = RTL_FEATURE_GMII
- },
- [RTL_CFG_1] = {
- .hw_start = rtl_hw_start_8168,
-@@ -1531,7 +1515,7 @@ static const struct rtl_cfg_info {
- .intr_event = SYSErr | LinkChg | RxOverflow |
- TxErr | TxOK | RxOK | RxErr,
- .napi_event = TxErr | TxOK | RxOK | RxOverflow,
-- .msi = RTL_FEATURE_MSI
-+ .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
- },
- [RTL_CFG_2] = {
- .hw_start = rtl_hw_start_8101,
-@@ -1540,7 +1524,7 @@ static const struct rtl_cfg_info {
- .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
- RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
- .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
-- .msi = RTL_FEATURE_MSI
-+ .features = RTL_FEATURE_MSI
- }
- };
-
-@@ -1552,7 +1536,7 @@ static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
- u8 cfg2;
-
- cfg2 = RTL_R8(Config2) & ~MSIEnable;
-- if (cfg->msi) {
-+ if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(pdev)) {
- dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
- } else {
-@@ -1578,6 +1562,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
- const unsigned int region = cfg->region;
- struct rtl8169_private *tp;
-+ struct mii_if_info *mii;
- struct net_device *dev;
- void __iomem *ioaddr;
- unsigned int i;
-@@ -1602,6 +1587,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- tp->pci_dev = pdev;
- tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
-
-+ mii = &tp->mii;
-+ mii->dev = dev;
-+ mii->mdio_read = rtl_mdio_read;
-+ mii->mdio_write = rtl_mdio_write;
-+ mii->phy_id_mask = 0x1f;
-+ mii->reg_num_mask = 0x1f;
-+ mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
-+
- /* enable device (incl. PCI PM wakeup and hotplug setup) */
- rc = pci_enable_device(pdev);
- if (rc < 0) {
---
-1.5.3.3
-
-From ef60b2a38e223a331e13ef503aee7cd5d4d5c12c Mon Sep 17 00:00:00 2001
-From: Hugh Dickins <hugh@veritas.com>
-Date: Mon, 8 Sep 2008 21:49:01 +0100
-Subject: [PATCH] r8169: select MII in Kconfig
-
-drivers/built-in.o: In function `rtl8169_gset_xmii':
-r8169.c:(.text+0x82259): undefined reference to `mii_ethtool_gset'
-suggests that the r8169 driver now needs to select MII.
-
-Signed-off-by: Hugh Dickins <hugh@veritas.com>
-Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
-Cc: Edward Hsu <edward_hsu@realtek.com.tw>
----
- drivers/net/Kconfig | 1 +
- 1 files changed, 1 insertions(+), 0 deletions(-)
-
-diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
-index 4a11296..60a0453 100644
---- a/drivers/net/Kconfig
-+++ b/drivers/net/Kconfig
-@@ -2046,6 +2046,7 @@ config R8169
- tristate "Realtek 8169 gigabit ethernet support"
- depends on PCI
- select CRC32
-+ select MII
- ---help---
- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
-
---
-1.5.3.3
-
-From bca31864fca6004c4a4a9bd549e95c93b3c3bb10 Mon Sep 17 00:00:00 2001
-From: Francois Romieu <romieu@fr.zoreil.com>
-Date: Sat, 2 Aug 2008 15:50:02 +0200
-Subject: [PATCH] r8169: Tx performance tweak helper
-
-Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
-Cc: Edward Hsu <edward_hsu@realtek.com.tw>
----
- drivers/net/r8169.c | 15 ++++++++++-----
- 1 files changed, 10 insertions(+), 5 deletions(-)
-
-diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
-index 7e026a6..eea96fb 100644
---- a/drivers/net/r8169.c
-+++ b/drivers/net/r8169.c
-@@ -2054,12 +2054,20 @@ static void rtl_hw_start_8169(struct net_device *dev)
- RTL_W16(IntrMask, tp->intr_event);
- }
-
-+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u8 force)
-+{
-+ u8 ctl;
-+
-+ pci_read_config_byte(pdev, 0x69, &ctl);
-+ ctl = (ctl & ~0x70) | force;
-+ pci_write_config_byte(pdev, 0x69, ctl);
-+}
-+
- static void rtl_hw_start_8168(struct net_device *dev)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
-- u8 ctl;
-
- RTL_W8(Cfg9346, Cfg9346_Unlock);
-
-@@ -2073,10 +2081,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
-
- RTL_W16(CPlusCmd, tp->cp_cmd);
-
-- /* Tx performance tweak. */
-- pci_read_config_byte(pdev, 0x69, &ctl);
-- ctl = (ctl & ~0x70) | 0x50;
-- pci_write_config_byte(pdev, 0x69, ctl);
-+ rtl_tx_performance_tweak(pdev, 0x50);
-
- RTL_W16(IntrMitigate, 0x5151);
-
---
-1.5.3.3
-
-From 7a929ae7d5a3618f56bf1ccaf8c62df628e820aa Mon Sep 17 00:00:00 2001
-From: Francois Romieu <romieu@fr.zoreil.com>
-Date: Sat, 5 Jul 2008 00:21:15 +0200
-Subject: [PATCH] r8169: use pci_find_capability for the PCI-E features
-
-Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
-Cc: Edward Hsu <edward_hsu@realtek.com.tw>
----
- drivers/net/r8169.c | 32 ++++++++++++++++++++++++--------
- 1 files changed, 24 insertions(+), 8 deletions(-)
-
-diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
-index eea96fb..5c00522 100644
---- a/drivers/net/r8169.c
-+++ b/drivers/net/r8169.c
-@@ -61,6 +61,7 @@ static const int multicast_filter_limit = 32;
- /* MAC address length */
- #define MAC_ADDR_LEN 6
-
-+#define MAX_READ_REQUEST_SHIFT 12
- #define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
- #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
- #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
-@@ -412,6 +413,7 @@ struct rtl8169_private {
- void (*hw_start)(struct net_device *);
- unsigned int (*phy_reset_pending)(void __iomem *);
- unsigned int (*link_ok)(void __iomem *);
-+ int pcie_cap;
- struct delayed_work task;
- unsigned features;
-
-@@ -1663,6 +1665,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- goto err_out_free_res_4;
- }
-
-+ tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-+ if (!tp->pcie_cap && netif_msg_probe(tp))
-+ dev_info(&pdev->dev, "no PCI Express capability\n");
-+
- /* Unneeded ? Don't mess with Mrs. Murphy. */
- rtl8169_irq_mask_and_ack(ioaddr);
-
-@@ -2054,13 +2060,19 @@ static void rtl_hw_start_8169(struct net_device *dev)
- RTL_W16(IntrMask, tp->intr_event);
- }
-
--static void rtl_tx_performance_tweak(struct pci_dev *pdev, u8 force)
-+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
- {
-- u8 ctl;
-+ struct net_device *dev = pci_get_drvdata(pdev);
-+ struct rtl8169_private *tp = netdev_priv(dev);
-+ int cap = tp->pcie_cap;
-+
-+ if (cap) {
-+ u16 ctl;
-
-- pci_read_config_byte(pdev, 0x69, &ctl);
-- ctl = (ctl & ~0x70) | force;
-- pci_write_config_byte(pdev, 0x69, ctl);
-+ pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
-+ ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
-+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
-+ }
- }
-
- static void rtl_hw_start_8168(struct net_device *dev)
-@@ -2081,7 +2093,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
-
- RTL_W16(CPlusCmd, tp->cp_cmd);
-
-- rtl_tx_performance_tweak(pdev, 0x50);
-+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
-
- RTL_W16(IntrMitigate, 0x5151);
-
-@@ -2114,8 +2126,12 @@ static void rtl_hw_start_8101(struct net_device *dev)
-
- if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
-- pci_write_config_word(pdev, 0x68, 0x00);
-- pci_write_config_word(pdev, 0x69, 0x08);
-+ int cap = tp->pcie_cap;
-+
-+ if (cap) {
-+ pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
-+ PCI_EXP_DEVCTL_NOSNOOP_EN);
-+ }
- }
-
- RTL_W8(Cfg9346, Cfg9346_Unlock);
---
-1.5.3.3
-
-From ba648bdcbca93084360d348eb43dde4b19b2489e Mon Sep 17 00:00:00 2001
-From: Francois Romieu <romieu@fr.zoreil.com>
-Date: Sun, 1 Jun 2008 22:37:49 +0200
-Subject: [PATCH] r8169: add 8168/8101 registers description
-
-Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
-Cc: Edward Hsu <edward_hsu@realtek.com.tw>
----
- drivers/net/r8169.c | 47 +++++++++++++++++++++++++++++++++++++++++++----
- 1 files changed, 43 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
-index 5c00522..0b8db03 100644
---- a/drivers/net/r8169.c
-+++ b/drivers/net/r8169.c
-@@ -197,9 +197,6 @@ enum rtl_registers {
- Config5 = 0x56,
- MultiIntr = 0x5c,
- PHYAR = 0x60,
-- TBICSR = 0x64,
-- TBI_ANAR = 0x68,
-- TBI_LPAR = 0x6a,
- PHYstatus = 0x6c,
- RxMaxSize = 0xda,
- CPlusCmd = 0xe0,
-@@ -213,6 +210,32 @@ enum rtl_registers {
- FuncForceEvent = 0xfc,
- };
-
-+enum rtl8110_registers {
-+ TBICSR = 0x64,
-+ TBI_ANAR = 0x68,
-+ TBI_LPAR = 0x6a,
-+};
-+
-+enum rtl8168_8101_registers {
-+ CSIDR = 0x64,
-+ CSIAR = 0x68,
-+#define CSIAR_FLAG 0x80000000
-+#define CSIAR_WRITE_CMD 0x80000000
-+#define CSIAR_BYTE_ENABLE 0x0f
-+#define CSIAR_BYTE_ENABLE_SHIFT 12
-+#define CSIAR_ADDR_MASK 0x0fff
-+
-+ EPHYAR = 0x80,
-+#define EPHYAR_FLAG 0x80000000
-+#define EPHYAR_WRITE_CMD 0x80000000
-+#define EPHYAR_REG_MASK 0x1f
-+#define EPHYAR_REG_SHIFT 16
-+#define EPHYAR_DATA_MASK 0xffff
-+ DBG_REG = 0xd1,
-+#define FIX_NAK_1 (1 << 4)
-+#define FIX_NAK_2 (1 << 3)
-+};
-+
- enum rtl_register_content {
- /* InterruptStatusBits */
- SYSErr = 0x8000,
-@@ -266,7 +289,13 @@ enum rtl_register_content {
- TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
-
- /* Config1 register p.24 */
-+ LEDS1 = (1 << 7),
-+ LEDS0 = (1 << 6),
- MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
-+ Speed_down = (1 << 4),
-+ MEMMAP = (1 << 3),
-+ IOMAP = (1 << 2),
-+ VPD = (1 << 1),
- PMEnable = (1 << 0), /* Power Management Enable */
-
- /* Config2 register p. 25 */
-@@ -276,6 +305,7 @@ enum rtl_register_content {
- /* Config3 register p.25 */
- MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
- LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
-+ Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
-
- /* Config5 register p.27 */
- BWF = (1 << 6), /* Accept Broadcast wakeup frame */
-@@ -293,7 +323,16 @@ enum rtl_register_content {
- TBINwComplete = 0x01000000,
-
- /* CPlusCmd p.31 */
-- PktCntrDisable = (1 << 7), // 8168
-+ EnableBist = (1 << 15), // 8168 8101
-+ Mac_dbgo_oe = (1 << 14), // 8168 8101
-+ Normal_mode = (1 << 13), // unused
-+ Force_half_dup = (1 << 12), // 8168 8101
-+ Force_rxflow_en = (1 << 11), // 8168 8101
-+ Force_txflow_en = (1 << 10), // 8168 8101
-+ Cxpl_dbg_sel = (1 << 9), // 8168 8101
-+ ASF = (1 << 8), // 8168 8101
-+ PktCntrDisable = (1 << 7), // 8168 8101
-+ Mac_dbgo_sel = 0x001c, // 8168
- RxVlan = (1 << 6),
- RxChkSum = (1 << 5),
- PCIDAC = (1 << 4),
---
-1.5.3.3
-
-From 61650c9e3d637b0990d9f26b1421ac4b55f5c744 Mon Sep 17 00:00:00 2001
-From: Francois Romieu <romieu@fr.zoreil.com>
-Date: Sat, 2 Aug 2008 20:44:13 +0200
-Subject: [PATCH] r8169: add hw start helpers for the 8168 and the 8101
-
-This commit triggers three 'defined but not used' warnings but
-I prefer avoiding to tie these helpers to a specific change in
-the hw start sequences of the 8168 or of the 8101.
-
-Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
-Cc: Edward Hsu <edward_hsu@realtek.com.tw>
----
- drivers/net/r8169.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++
- 1 files changed, 96 insertions(+), 0 deletions(-)
-
-diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
-index 0b8db03..52eba5c 100644
---- a/drivers/net/r8169.c
-+++ b/drivers/net/r8169.c
-@@ -526,6 +526,11 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
- return value;
- }
-
-+static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
-+{
-+ mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
-+}
-+
- static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
- {
-@@ -543,6 +548,72 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
- return mdio_read(ioaddr, location);
- }
-
-+static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
-+{
-+ unsigned int i;
-+
-+ RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
-+ (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
-+
-+ for (i = 0; i < 100; i++) {
-+ if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
-+ break;
-+ udelay(10);
-+ }
-+}
-+
-+static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
-+{
-+ u16 value = 0xffff;
-+ unsigned int i;
-+
-+ RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
-+
-+ for (i = 0; i < 100; i++) {
-+ if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
-+ value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
-+ break;
-+ }
-+ udelay(10);
-+ }
-+
-+ return value;
-+}
-+
-+static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
-+{
-+ unsigned int i;
-+
-+ RTL_W32(CSIDR, value);
-+ RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
-+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-+
-+ for (i = 0; i < 100; i++) {
-+ if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
-+ break;
-+ udelay(10);
-+ }
-+}
-+
-+static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
-+{
-+ u32 value = ~0x00;
-+ unsigned int i;
-+
-+ RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
-+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-+
-+ for (i = 0; i < 100; i++) {
-+ if (RTL_R32(CSIAR) & CSIAR_FLAG) {
-+ value = RTL_R32(CSIDR);
-+ break;
-+ }
-+ udelay(10);
-+ }
-+
-+ return value;
-+}
-+
- static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
- {
- RTL_W16(IntrMask, 0x0000);
-@@ -2114,6 +2185,31 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
- }
- }
-
-+static void rtl_csi_access_enable(void __iomem *ioaddr)
-+{
-+ u32 csi;
-+
-+ csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
-+ rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
-+}
-+
-+struct ephy_info {
-+ unsigned int offset;
-+ u16 mask;
-+ u16 bits;
-+};
-+
-+static void rtl_ephy_init(void __iomem *ioaddr, struct ephy_info *e, int len)
-+{
-+ u16 w;
-+
-+ while (len-- > 0) {
-+ w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
-+ rtl_ephy_write(ioaddr, e->offset, w);
-+ e++;
-+ }
-+}
-+
- static void rtl_hw_start_8168(struct net_device *dev)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
---
-1.5.3.3
-
-From 81fbfc404f2a13646bee46fa98545c0023e3a67a Mon Sep 17 00:00:00 2001
-From: Francois Romieu <romieu@fr.zoreil.com>
-Date: Sat, 2 Aug 2008 21:08:49 +0200
-Subject: [PATCH] r8169: additional 8101 and 8102 support
-
-Signed-off-by: Ivan Vecera <ivecera@redhat.com>
-Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
-Cc: Edward Hsu <edward_hsu@realtek.com.tw>
----
- drivers/net/r8169.c | 124 ++++++++++++++++++++++++++++++++++++++++++++++++++-
- 1 files changed, 122 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
-index 52eba5c..f28c202 100644
---- a/drivers/net/r8169.c
-+++ b/drivers/net/r8169.c
-@@ -96,6 +96,10 @@ enum mac_version {
- RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
- RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
- RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
-+ RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
-+ RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
-+ RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
-+ RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
- RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
- RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
- RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
-@@ -122,6 +126,10 @@ static const struct {
- _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
- _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
- _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
-+ _R("RTL8102e", RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
-+ _R("RTL8102e", RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
-+ _R("RTL8102e", RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
-+ _R("RTL8101e", RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
- _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
- _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
- _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
-@@ -837,8 +845,12 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
- }
- }
-
-- /* The 8100e/8101e do Fast Ethernet only. */
-- if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
-+ /* The 8100e/8101e/8102e do Fast Ethernet only. */
-+ if ((tp->mac_version == RTL_GIGA_MAC_VER_07) ||
-+ (tp->mac_version == RTL_GIGA_MAC_VER_08) ||
-+ (tp->mac_version == RTL_GIGA_MAC_VER_09) ||
-+ (tp->mac_version == RTL_GIGA_MAC_VER_10) ||
-+ (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
-@@ -1212,8 +1224,17 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
- { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
-
- /* 8101 family. */
-+ { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
-+ { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
-+ { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
-+ { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
-+ { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
-+ { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
- { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
-+ { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
- { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
-+ { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
-+ { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
- { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
- /* FIXME: where did these entries come from ? -- FR */
- { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
-@@ -1375,6 +1396,22 @@ static void rtl8168cx_hw_phy_config(void __iomem *ioaddr)
- rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
- }
-
-+static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
-+{
-+ struct phy_reg phy_reg_init[] = {
-+ { 0x1f, 0x0003 },
-+ { 0x08, 0x441d },
-+ { 0x01, 0x9100 },
-+ { 0x1f, 0x0000 }
-+ };
-+
-+ mdio_write(ioaddr, 0x1f, 0x0000);
-+ mdio_patch(ioaddr, 0x11, 1 << 12);
-+ mdio_patch(ioaddr, 0x19, 1 << 13);
-+
-+ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
-+}
-+
- static void rtl_hw_phy_config(struct net_device *dev)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
-@@ -1392,6 +1429,11 @@ static void rtl_hw_phy_config(struct net_device *dev)
- case RTL_GIGA_MAC_VER_04:
- rtl8169sb_hw_phy_config(ioaddr);
- break;
-+ case RTL_GIGA_MAC_VER_07:
-+ case RTL_GIGA_MAC_VER_08:
-+ case RTL_GIGA_MAC_VER_09:
-+ rtl8102e_hw_phy_config(ioaddr);
-+ break;
- case RTL_GIGA_MAC_VER_18:
- rtl8168cp_hw_phy_config(ioaddr);
- break;
-@@ -2253,6 +2295,70 @@ static void rtl_hw_start_8168(struct net_device *dev)
- RTL_W16(IntrMask, tp->intr_event);
- }
-
-+#define R810X_CPCMD_QUIRK_MASK (\
-+ EnableBist | \
-+ Mac_dbgo_oe | \
-+ Force_half_dup | \
-+ Force_half_dup | \
-+ Force_txflow_en | \
-+ Cxpl_dbg_sel | \
-+ ASF | \
-+ PktCntrDisable | \
-+ PCIDAC | \
-+ PCIMulRW)
-+
-+static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
-+{
-+ static struct ephy_info e_info_8102e_1[] = {
-+ { 0x01, 0, 0x6e65 },
-+ { 0x02, 0, 0x091f },
-+ { 0x03, 0, 0xc2f9 },
-+ { 0x06, 0, 0xafb5 },
-+ { 0x07, 0, 0x0e00 },
-+ { 0x19, 0, 0xec80 },
-+ { 0x01, 0, 0x2e65 },
-+ { 0x01, 0, 0x6e65 }
-+ };
-+ u8 cfg1;
-+
-+ rtl_csi_access_enable(ioaddr);
-+
-+ RTL_W8(DBG_REG, FIX_NAK_1);
-+
-+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
-+
-+ RTL_W8(Config1,
-+ LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
-+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-+
-+ cfg1 = RTL_R8(Config1);
-+ if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
-+ RTL_W8(Config1, cfg1 & ~LEDS0);
-+
-+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-+
-+ rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
-+}
-+
-+static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
-+{
-+ rtl_csi_access_enable(ioaddr);
-+
-+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
-+
-+ RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
-+ RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-+
-+ RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-+}
-+
-+static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
-+{
-+ rtl_hw_start_8102e_2(ioaddr, pdev);
-+
-+ rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
-+}
-+
- static void rtl_hw_start_8101(struct net_device *dev)
- {
- struct rtl8169_private *tp = netdev_priv(dev);
-@@ -2269,6 +2375,20 @@ static void rtl_hw_start_8101(struct net_device *dev)
- }
- }
-
-+ switch (tp->mac_version) {
-+ case RTL_GIGA_MAC_VER_07:
-+ rtl_hw_start_8102e_1(ioaddr, pdev);
-+ break;
-+
-+ case RTL_GIGA_MAC_VER_08:
-+ rtl_hw_start_8102e_3(ioaddr, pdev);
-+ break;
-+
-+ case RTL_GIGA_MAC_VER_09:
-+ rtl_hw_start_8102e_2(ioaddr, pdev);
-+ break;
-+ }
-+
- RTL_W8(Cfg9346, Cfg9346_Unlock);
-
- RTL_W8(EarlyTxThres, EarlyTxThld);
---
-1.5.3.3
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0042-intelfb-945gme.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0042-intelfb-945gme.patch
deleted file mode 100644
index 0f74d47bf..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0042-intelfb-945gme.patch
+++ /dev/null
@@ -1,154 +0,0 @@
-The following patch adds support for Intel's 945GME graphics chip to
-the intelfb driver. I have assumed that the 945GME is identical to the
-already-supported 945GM apart from its PCI IDs; this is based on a quick
-look at the X driver for these chips which seems to treat them
-identically.
-
-Signed-off-by: Phil Endecott <spam_from_intelfb@chezphil.org>
-
----
-
-The 945GME is used in the ASUS Eee 901, and I coded this in the hope that
-I'd be able to use it to get a console at the native 1024x600 resolution
-which is not known to the BIOS. I realised too late that the intelfb
-driver does not support mode changing on laptops, so it won't be any
-use for me. But rather than throw it away I will post it here as
-essentially "untested"; maybe someone who knows more about this driver,
-and with more useful hardware to test on, can pick it up.
-
----
- Documentation/fb/intelfb.txt | 1 +
- drivers/video/intelfb/intelfb.h | 7 +++++--
- drivers/video/intelfb/intelfb_i2c.c | 1 +
- drivers/video/intelfb/intelfbdrv.c | 7 ++++++-
- drivers/video/intelfb/intelfbhw.c | 7 +++++++
- 5 files changed, 20 insertions(+), 3 deletions(-)
-
-Index: linux-2.6.27/Documentation/fb/intelfb.txt
-===================================================================
---- linux-2.6.27.orig/Documentation/fb/intelfb.txt 2008-10-14 16:54:54.000000000 +0200
-+++ linux-2.6.27/Documentation/fb/intelfb.txt 2008-10-14 17:05:36.000000000 +0200
-@@ -14,6 +14,7 @@ graphics devices. These would include:
- Intel 915GM
- Intel 945G
- Intel 945GM
-+ Intel 945GME
- Intel 965G
- Intel 965GM
-
-Index: linux-2.6.27/drivers/video/intelfb/intelfb.h
-===================================================================
---- linux-2.6.27.orig/drivers/video/intelfb/intelfb.h 2008-10-14 16:55:37.000000000 +0200
-+++ linux-2.6.27/drivers/video/intelfb/intelfb.h 2008-10-14 17:05:36.000000000 +0200
-@@ -12,9 +12,9 @@
- #endif
-
- /*** Version/name ***/
--#define INTELFB_VERSION "0.9.5"
-+#define INTELFB_VERSION "0.9.6"
- #define INTELFB_MODULE_NAME "intelfb"
--#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM"
-+#define SUPPORTED_CHIPSETS "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/945GME/965G/965GM"
-
-
- /*** Debug/feature defines ***/
-@@ -58,6 +58,7 @@
- #define PCI_DEVICE_ID_INTEL_915GM 0x2592
- #define PCI_DEVICE_ID_INTEL_945G 0x2772
- #define PCI_DEVICE_ID_INTEL_945GM 0x27A2
-+#define PCI_DEVICE_ID_INTEL_945GME 0x27AE
- #define PCI_DEVICE_ID_INTEL_965G 0x29A2
- #define PCI_DEVICE_ID_INTEL_965GM 0x2A02
-
-@@ -160,6 +161,7 @@ enum intel_chips {
- INTEL_915GM,
- INTEL_945G,
- INTEL_945GM,
-+ INTEL_945GME,
- INTEL_965G,
- INTEL_965GM,
- };
-@@ -363,6 +365,7 @@ struct intelfb_info {
- ((dinfo)->chipset == INTEL_915GM) || \
- ((dinfo)->chipset == INTEL_945G) || \
- ((dinfo)->chipset == INTEL_945GM) || \
-+ ((dinfo)->chipset == INTEL_945GME) || \
- ((dinfo)->chipset == INTEL_965G) || \
- ((dinfo)->chipset == INTEL_965GM))
-
-Index: linux-2.6.27/drivers/video/intelfb/intelfb_i2c.c
-===================================================================
---- linux-2.6.27.orig/drivers/video/intelfb/intelfb_i2c.c 2008-10-14 16:55:37.000000000 +0200
-+++ linux-2.6.27/drivers/video/intelfb/intelfb_i2c.c 2008-10-14 17:05:36.000000000 +0200
-@@ -171,6 +171,7 @@ void intelfb_create_i2c_busses(struct in
- /* has some LVDS + tv-out */
- case INTEL_945G:
- case INTEL_945GM:
-+ case INTEL_945GME:
- case INTEL_965G:
- case INTEL_965GM:
- /* SDVO ports have a single control bus - 2 devices */
-Index: linux-2.6.27/drivers/video/intelfb/intelfbdrv.c
-===================================================================
---- linux-2.6.27.orig/drivers/video/intelfb/intelfbdrv.c 2008-10-14 16:55:37.000000000 +0200
-+++ linux-2.6.27/drivers/video/intelfb/intelfbdrv.c 2008-10-14 17:05:36.000000000 +0200
-@@ -2,7 +2,7 @@
- * intelfb
- *
- * Linux framebuffer driver for Intel(R) 830M/845G/852GM/855GM/865G/915G/915GM/
-- * 945G/945GM/965G/965GM integrated graphics chips.
-+ * 945G/945GM/945GME/965G/965GM integrated graphics chips.
- *
- * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
- * 2004 Sylvain Meyer
-@@ -102,6 +102,9 @@
- *
- * 04/2008 - Version 0.9.5
- * Add support for 965G/965GM. (Maik Broemme <mbroemme@plusserver.de>)
-+ *
-+ * 08/2008 - Version 0.9.6
-+ * Add support for 945GME. (Phil Endecott <spam_from_intelfb@chezphil.org>)
- */
-
- #include <linux/module.h>
-@@ -183,6 +186,7 @@ static struct pci_device_id intelfb_pci_
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GM },
-+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GME, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GME },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965G },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965GM },
- { 0, }
-@@ -555,6 +559,7 @@ static int __devinit intelfb_pci_registe
- (ent->device == PCI_DEVICE_ID_INTEL_915GM) ||
- (ent->device == PCI_DEVICE_ID_INTEL_945G) ||
- (ent->device == PCI_DEVICE_ID_INTEL_945GM) ||
-+ (ent->device == PCI_DEVICE_ID_INTEL_945GME) ||
- (ent->device == PCI_DEVICE_ID_INTEL_965G) ||
- (ent->device == PCI_DEVICE_ID_INTEL_965GM)) {
-
-Index: linux-2.6.27/drivers/video/intelfb/intelfbhw.c
-===================================================================
---- linux-2.6.27.orig/drivers/video/intelfb/intelfbhw.c 2008-10-14 16:55:37.000000000 +0200
-+++ linux-2.6.27/drivers/video/intelfb/intelfbhw.c 2008-10-14 17:05:36.000000000 +0200
-@@ -143,6 +143,12 @@ int intelfbhw_get_chipset(struct pci_dev
- dinfo->mobile = 1;
- dinfo->pll_index = PLLS_I9xx;
- return 0;
-+ case PCI_DEVICE_ID_INTEL_945GME:
-+ dinfo->name = "Intel(R) 945GME";
-+ dinfo->chipset = INTEL_945GME;
-+ dinfo->mobile = 1;
-+ dinfo->pll_index = PLLS_I9xx;
-+ return 0;
- case PCI_DEVICE_ID_INTEL_965G:
- dinfo->name = "Intel(R) 965G";
- dinfo->chipset = INTEL_965G;
-@@ -186,6 +192,7 @@ int intelfbhw_get_memory(struct pci_dev
- case PCI_DEVICE_ID_INTEL_915GM:
- case PCI_DEVICE_ID_INTEL_945G:
- case PCI_DEVICE_ID_INTEL_945GM:
-+ case PCI_DEVICE_ID_INTEL_945GME:
- case PCI_DEVICE_ID_INTEL_965G:
- case PCI_DEVICE_ID_INTEL_965GM:
- /* 915, 945 and 965 chipsets support a 256MB aperture.
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0043-superreadahead-patch.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0043-superreadahead-patch.patch
deleted file mode 100644
index 101c100dd..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/0043-superreadahead-patch.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From: Arjan van de Ven <arjan@linux.intel.com>
-Date: Sun, 21 Sep 2008 11:58:27 -0700
-Subject: [PATCH] superreadahead patch
-
----
- fs/ext3/ioctl.c | 3 +++
- fs/ext3/super.c | 1 +
- include/linux/ext3_fs.h | 1 +
- include/linux/fs.h | 2 ++
- 4 files changed, 7 insertions(+), 0 deletions(-)
-
-diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
-index 0d0c701..7e62d7d 100644
---- a/fs/ext3/ioctl.c
-+++ b/fs/ext3/ioctl.c
-@@ -286,6 +286,9 @@ group_add_out:
- mnt_drop_write(filp->f_path.mnt);
- return err;
- }
-+ case EXT3_IOC_INODE_JIFFIES: {
-+ return inode->created_when;
-+ }
-
-
- default:
-diff --git a/fs/ext3/super.c b/fs/ext3/super.c
-index 2845425..6a896a4 100644
---- a/fs/ext3/super.c
-+++ b/fs/ext3/super.c
-@@ -456,6 +456,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
- #endif
- ei->i_block_alloc_info = NULL;
- ei->vfs_inode.i_version = 1;
-+ ei->vfs_inode.created_when = jiffies;
- return &ei->vfs_inode;
- }
-
-diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
-index 36c5403..b409fa7 100644
---- a/include/linux/ext3_fs.h
-+++ b/include/linux/ext3_fs.h
-@@ -225,6 +225,7 @@ struct ext3_new_group_data {
- #endif
- #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
- #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
-+#define EXT3_IOC_INODE_JIFFIES _IOR('f', 19, long)
-
- /*
- * ioctl commands in 32 bit emulation
-diff --git a/include/linux/fs.h b/include/linux/fs.h
-index c6455da..4ac846d 100644
---- a/include/linux/fs.h
-+++ b/include/linux/fs.h
-@@ -655,6 +655,8 @@ struct inode {
- void *i_security;
- #endif
- void *i_private; /* fs or device private pointer */
-+
-+ unsigned long created_when; /* jiffies of creation time */
- };
-
- /*
---
-1.5.5.1
-
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow
deleted file mode 100644
index 30c165622..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-menlow
+++ /dev/null
@@ -1,3137 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27
-# Wed Jan 14 11:45:36 2009
-#
-# CONFIG_64BIT is not set
-CONFIG_X86_32=y
-# CONFIG_X86_64 is not set
-CONFIG_X86=y
-CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
-# CONFIG_GENERIC_LOCKBREAK is not set
-CONFIG_GENERIC_TIME=y
-CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_CLOCKSOURCE_WATCHDOG=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_HAVE_LATENCYTOP_SUPPORT=y
-CONFIG_FAST_CMPXCHG_LOCAL=y
-CONFIG_MMU=y
-CONFIG_ZONE_DMA=y
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_IOMAP=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_HWEIGHT=y
-# CONFIG_GENERIC_GPIO is not set
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-# CONFIG_GENERIC_TIME_VSYSCALL is not set
-CONFIG_ARCH_HAS_CPU_RELAX=y
-CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
-CONFIG_HAVE_SETUP_PER_CPU_AREA=y
-# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-# CONFIG_ZONE_DMA32 is not set
-CONFIG_ARCH_POPULATES_NODE_MAP=y
-# CONFIG_AUDIT_ARCH is not set
-CONFIG_ARCH_SUPPORTS_AOUT=y
-CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_GENERIC_PENDING_IRQ=y
-CONFIG_X86_SMP=y
-CONFIG_X86_32_SMP=y
-CONFIG_X86_HT=y
-CONFIG_X86_BIOS_REBOOT=y
-CONFIG_X86_TRAMPOLINE=y
-CONFIG_KTIME_SCALAR=y
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-
-#
-# General setup
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOCK_KERNEL=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_LOCALVERSION="-default"
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-# CONFIG_TASK_XACCT is not set
-CONFIG_AUDIT=y
-CONFIG_AUDITSYSCALL=y
-CONFIG_AUDIT_TREE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=15
-# CONFIG_CGROUPS is not set
-CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
-# CONFIG_GROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_FASTBOOT is not set
-CONFIG_SYSCTL=y
-# CONFIG_EMBEDDED is not set
-CONFIG_UID16=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-CONFIG_KALLSYMS_ALL=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_HOTPLUG=y
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_PCSPKR_PLATFORM=y
-CONFIG_COMPAT_BRK=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_SHMEM=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLAB=y
-# CONFIG_SLUB is not set
-# CONFIG_SLOB is not set
-CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
-# CONFIG_OPROFILE is not set
-CONFIG_HAVE_OPROFILE=y
-# CONFIG_KPROBES is not set
-CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
-CONFIG_HAVE_IOREMAP_PROT=y
-CONFIG_HAVE_KPROBES=y
-CONFIG_HAVE_KRETPROBES=y
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-CONFIG_USE_GENERIC_SMP_HELPERS=y
-# CONFIG_HAVE_CLK is not set
-CONFIG_PROC_PAGE_MONITOR=y
-CONFIG_HAVE_GENERIC_DMA_COHERENT=y
-CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
-CONFIG_BASE_SMALL=0
-CONFIG_MODULES=y
-# CONFIG_MODULE_FORCE_LOAD is not set
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
-CONFIG_STOP_MACHINE=y
-CONFIG_BLOCK=y
-CONFIG_LBD=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_LSF=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_BLK_DEV_INTEGRITY is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
-# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
-
-#
-# Processor type and features
-#
-CONFIG_TICK_ONESHOT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-CONFIG_SMP=y
-CONFIG_X86_FIND_SMP_CONFIG=y
-CONFIG_X86_MPPARSE=y
-# CONFIG_X86_PC is not set
-# CONFIG_X86_ELAN is not set
-# CONFIG_X86_VOYAGER is not set
-CONFIG_X86_GENERICARCH=y
-# CONFIG_X86_NUMAQ is not set
-# CONFIG_X86_SUMMIT is not set
-# CONFIG_X86_ES7000 is not set
-# CONFIG_X86_BIGSMP is not set
-# CONFIG_X86_VSMP is not set
-# CONFIG_X86_RDC321X is not set
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_PARAVIRT_GUEST is not set
-# CONFIG_MEMTEST is not set
-CONFIG_X86_CYCLONE_TIMER=y
-# CONFIG_M386 is not set
-# CONFIG_M486 is not set
-CONFIG_M586=y
-# CONFIG_M586TSC is not set
-# CONFIG_M586MMX is not set
-# CONFIG_M686 is not set
-# CONFIG_MPENTIUMII is not set
-# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUMM is not set
-# CONFIG_MPENTIUM4 is not set
-# CONFIG_MK6 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MCRUSOE is not set
-# CONFIG_MEFFICEON is not set
-# CONFIG_MWINCHIPC6 is not set
-# CONFIG_MWINCHIP2 is not set
-# CONFIG_MWINCHIP3D is not set
-# CONFIG_MGEODEGX1 is not set
-# CONFIG_MGEODE_LX is not set
-# CONFIG_MCYRIXIII is not set
-# CONFIG_MVIAC3_2 is not set
-# CONFIG_MVIAC7 is not set
-# CONFIG_MPSC is not set
-# CONFIG_MCORE2 is not set
-# CONFIG_GENERIC_CPU is not set
-CONFIG_X86_GENERIC=y
-CONFIG_X86_CPU=y
-CONFIG_X86_CMPXCHG=y
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_X86_XADD=y
-CONFIG_X86_PPRO_FENCE=y
-CONFIG_X86_F00F_BUG=y
-CONFIG_X86_WP_WORKS_OK=y
-CONFIG_X86_INVLPG=y
-CONFIG_X86_BSWAP=y
-CONFIG_X86_POPAD_OK=y
-CONFIG_X86_ALIGNMENT_16=y
-CONFIG_X86_INTEL_USERCOPY=y
-CONFIG_X86_MINIMUM_CPU_FAMILY=4
-CONFIG_HPET_TIMER=y
-CONFIG_DMI=y
-# CONFIG_IOMMU_HELPER is not set
-CONFIG_NR_CPUS=8
-# CONFIG_SCHED_SMT is not set
-CONFIG_SCHED_MC=y
-# CONFIG_PREEMPT_NONE is not set
-CONFIG_PREEMPT_VOLUNTARY=y
-# CONFIG_PREEMPT is not set
-CONFIG_X86_LOCAL_APIC=y
-CONFIG_X86_IO_APIC=y
-CONFIG_X86_MCE=y
-CONFIG_X86_MCE_NONFATAL=y
-# CONFIG_X86_MCE_P4THERMAL is not set
-CONFIG_VM86=y
-# CONFIG_TOSHIBA is not set
-# CONFIG_I8K is not set
-CONFIG_X86_REBOOTFIXUPS=y
-CONFIG_MICROCODE=m
-CONFIG_MICROCODE_OLD_INTERFACE=y
-CONFIG_X86_MSR=m
-CONFIG_X86_CPUID=m
-# CONFIG_NOHIGHMEM is not set
-CONFIG_HIGHMEM4G=y
-# CONFIG_HIGHMEM64G is not set
-CONFIG_PAGE_OFFSET=0xC0000000
-CONFIG_HIGHMEM=y
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
-CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-CONFIG_BOUNCE=y
-CONFIG_VIRT_TO_BUS=y
-CONFIG_HIGHPTE=y
-# CONFIG_MATH_EMULATION is not set
-CONFIG_MTRR=y
-# CONFIG_MTRR_SANITIZER is not set
-# CONFIG_X86_PAT is not set
-CONFIG_EFI=y
-# CONFIG_IRQBALANCE is not set
-CONFIG_SECCOMP=y
-# CONFIG_HZ_100 is not set
-CONFIG_HZ_250=y
-# CONFIG_HZ_300 is not set
-# CONFIG_HZ_1000 is not set
-CONFIG_HZ=250
-CONFIG_SCHED_HRTICK=y
-CONFIG_KEXEC=y
-# CONFIG_CRASH_DUMP is not set
-# CONFIG_KEXEC_JUMP is not set
-CONFIG_PHYSICAL_START=0x100000
-# CONFIG_RELOCATABLE is not set
-CONFIG_PHYSICAL_ALIGN=0x100000
-CONFIG_HOTPLUG_CPU=y
-CONFIG_COMPAT_VDSO=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-
-#
-# Power management options
-#
-CONFIG_PM=y
-# CONFIG_PM_DEBUG is not set
-CONFIG_PM_SLEEP_SMP=y
-CONFIG_PM_SLEEP=y
-CONFIG_SUSPEND=y
-CONFIG_SUSPEND_FREEZER=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_STD_PARTITION=""
-CONFIG_ACPI=y
-CONFIG_ACPI_SLEEP=y
-CONFIG_ACPI_PROCFS=y
-CONFIG_ACPI_PROCFS_POWER=y
-CONFIG_ACPI_SYSFS_POWER=y
-CONFIG_ACPI_PROC_EVENT=y
-CONFIG_ACPI_AC=y
-CONFIG_ACPI_BATTERY=y
-CONFIG_ACPI_BUTTON=y
-CONFIG_ACPI_VIDEO=y
-CONFIG_ACPI_FAN=y
-CONFIG_ACPI_DOCK=y
-# CONFIG_ACPI_BAY is not set
-CONFIG_ACPI_PROCESSOR=y
-CONFIG_ACPI_HOTPLUG_CPU=y
-CONFIG_ACPI_THERMAL=y
-# CONFIG_ACPI_WMI is not set
-# CONFIG_ACPI_ASUS is not set
-# CONFIG_ACPI_TOSHIBA is not set
-CONFIG_ACPI_CUSTOM_DSDT_FILE=""
-# CONFIG_ACPI_CUSTOM_DSDT is not set
-CONFIG_ACPI_BLACKLIST_YEAR=2001
-# CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_EC=y
-# CONFIG_ACPI_PCI_SLOT is not set
-CONFIG_ACPI_POWER=y
-CONFIG_ACPI_SYSTEM=y
-CONFIG_X86_PM_TIMER=y
-CONFIG_ACPI_CONTAINER=y
-CONFIG_ACPI_SBS=y
-CONFIG_X86_APM_BOOT=y
-CONFIG_APM=y
-# CONFIG_APM_IGNORE_USER_SUSPEND is not set
-CONFIG_APM_DO_ENABLE=y
-# CONFIG_APM_CPU_IDLE is not set
-CONFIG_APM_DISPLAY_BLANK=y
-CONFIG_APM_ALLOW_INTS=y
-# CONFIG_APM_REAL_MODE_POWER_OFF is not set
-
-#
-# CPU Frequency scaling
-#
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_TABLE=y
-# CONFIG_CPU_FREQ_DEBUG is not set
-CONFIG_CPU_FREQ_STAT=m
-CONFIG_CPU_FREQ_STAT_DETAILS=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=m
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
-
-#
-# CPUFreq processor drivers
-#
-CONFIG_X86_ACPI_CPUFREQ=y
-# CONFIG_X86_POWERNOW_K6 is not set
-# CONFIG_X86_POWERNOW_K7 is not set
-# CONFIG_X86_POWERNOW_K8 is not set
-# CONFIG_X86_GX_SUSPMOD is not set
-CONFIG_X86_SPEEDSTEP_CENTRINO=m
-CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y
-CONFIG_X86_SPEEDSTEP_ICH=m
-CONFIG_X86_SPEEDSTEP_SMI=m
-CONFIG_X86_P4_CLOCKMOD=m
-# CONFIG_X86_CPUFREQ_NFORCE2 is not set
-# CONFIG_X86_LONGRUN is not set
-# CONFIG_X86_LONGHAUL is not set
-# CONFIG_X86_E_POWERSAVER is not set
-
-#
-# shared options
-#
-# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
-CONFIG_X86_SPEEDSTEP_LIB=m
-CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y
-CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_GOV_LADDER=y
-CONFIG_CPU_IDLE_GOV_MENU=y
-
-#
-# Bus options (PCI etc.)
-#
-CONFIG_PCI=y
-# CONFIG_PCI_GOBIOS is not set
-# CONFIG_PCI_GOMMCONFIG is not set
-# CONFIG_PCI_GODIRECT is not set
-# CONFIG_PCI_GOOLPC is not set
-CONFIG_PCI_GOANY=y
-CONFIG_PCI_BIOS=y
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_HOTPLUG_PCI_PCIE=m
-CONFIG_PCIEAER=y
-# CONFIG_PCIEASPM is not set
-CONFIG_ARCH_SUPPORTS_MSI=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_LEGACY=y
-# CONFIG_PCI_DEBUG is not set
-CONFIG_HT_IRQ=y
-CONFIG_ISA_DMA_API=y
-CONFIG_ISA=y
-# CONFIG_EISA is not set
-# CONFIG_MCA is not set
-# CONFIG_SCx200 is not set
-# CONFIG_OLPC is not set
-# CONFIG_PCCARD is not set
-CONFIG_HOTPLUG_PCI=m
-CONFIG_HOTPLUG_PCI_FAKE=m
-# CONFIG_HOTPLUG_PCI_COMPAQ is not set
-# CONFIG_HOTPLUG_PCI_IBM is not set
-CONFIG_HOTPLUG_PCI_ACPI=m
-CONFIG_HOTPLUG_PCI_ACPI_IBM=m
-CONFIG_HOTPLUG_PCI_CPCI=y
-CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
-CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
-CONFIG_HOTPLUG_PCI_SHPC=m
-
-#
-# Executable file formats / Emulations
-#
-CONFIG_BINFMT_ELF=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=m
-CONFIG_PACKET_MMAP=y
-CONFIG_UNIX=y
-CONFIG_XFRM=y
-CONFIG_XFRM_USER=m
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
-# CONFIG_XFRM_STATISTICS is not set
-CONFIG_XFRM_IPCOMP=m
-CONFIG_NET_KEY=m
-# CONFIG_NET_KEY_MIGRATE is not set
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_ASK_IP_FIB_HASH=y
-# CONFIG_IP_FIB_TRIE is not set
-CONFIG_IP_FIB_HASH=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-# CONFIG_ARPD is not set
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_TUNNEL=m
-CONFIG_INET_TUNNEL=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=y
-# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=m
-CONFIG_INET_TCP_DIAG=m
-CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_BIC=m
-CONFIG_TCP_CONG_CUBIC=m
-CONFIG_TCP_CONG_WESTWOOD=m
-CONFIG_TCP_CONG_HTCP=m
-CONFIG_TCP_CONG_HSTCP=m
-CONFIG_TCP_CONG_HYBLA=m
-CONFIG_TCP_CONG_VEGAS=m
-CONFIG_TCP_CONG_SCALABLE=m
-CONFIG_TCP_CONG_LP=m
-CONFIG_TCP_CONG_VENO=m
-# CONFIG_TCP_CONG_YEAH is not set
-# CONFIG_TCP_CONG_ILLINOIS is not set
-# CONFIG_DEFAULT_BIC is not set
-# CONFIG_DEFAULT_CUBIC is not set
-# CONFIG_DEFAULT_HTCP is not set
-# CONFIG_DEFAULT_VEGAS is not set
-# CONFIG_DEFAULT_WESTWOOD is not set
-CONFIG_DEFAULT_RENO=y
-CONFIG_DEFAULT_TCP_CONG="reno"
-# CONFIG_TCP_MD5SIG is not set
-CONFIG_IP_VS=m
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
-CONFIG_IPV6=m
-CONFIG_IPV6_PRIVACY=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-# CONFIG_IPV6_OPTIMISTIC_DAD is not set
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-# CONFIG_IPV6_MIP6 is not set
-CONFIG_INET6_XFRM_TUNNEL=m
-CONFIG_INET6_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_NDISC_NODETYPE=y
-CONFIG_IPV6_TUNNEL=m
-# CONFIG_IPV6_MULTIPLE_TABLES is not set
-# CONFIG_IPV6_MROUTE is not set
-# CONFIG_NETLABEL is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_NETFILTER_ADVANCED=y
-CONFIG_BRIDGE_NETFILTER=y
-
-#
-# Core Netfilter Configuration
-#
-CONFIG_NETFILTER_NETLINK=m
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-# CONFIG_NF_CONNTRACK is not set
-CONFIG_NETFILTER_XTABLES=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
-# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
-# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
-# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-# CONFIG_NETFILTER_XT_MATCH_TIME is not set
-# CONFIG_NETFILTER_XT_MATCH_U32 is not set
-# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-# CONFIG_IP_NF_SECURITY is not set
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-
-#
-# IPv6: Netfilter Configuration
-#
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AH=m
-# CONFIG_IP6_NF_MATCH_MH is not set
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_LOG=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_RAW=m
-# CONFIG_IP6_NF_SECURITY is not set
-
-#
-# DECnet: Netfilter Configuration
-#
-CONFIG_DECNET_NF_GRABULATOR=m
-
-#
-# Bridge: Netfilter Configuration
-#
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-# CONFIG_BRIDGE_EBT_IP6 is not set
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
-# CONFIG_BRIDGE_EBT_NFLOG is not set
-CONFIG_IP_DCCP=m
-CONFIG_INET_DCCP_DIAG=m
-CONFIG_IP_DCCP_ACKVEC=y
-
-#
-# DCCP CCIDs Configuration (EXPERIMENTAL)
-#
-CONFIG_IP_DCCP_CCID2=m
-# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=m
-# CONFIG_IP_DCCP_CCID3_DEBUG is not set
-CONFIG_IP_DCCP_CCID3_RTO=100
-CONFIG_IP_DCCP_TFRC_LIB=m
-
-#
-# DCCP Kernel Hacking
-#
-# CONFIG_IP_DCCP_DEBUG is not set
-CONFIG_IP_SCTP=m
-# CONFIG_SCTP_DBG_MSG is not set
-# CONFIG_SCTP_DBG_OBJCNT is not set
-# CONFIG_SCTP_HMAC_NONE is not set
-# CONFIG_SCTP_HMAC_SHA1 is not set
-CONFIG_SCTP_HMAC_MD5=y
-# CONFIG_TIPC is not set
-CONFIG_ATM=m
-CONFIG_ATM_CLIP=m
-CONFIG_ATM_CLIP_NO_ICMP=y
-CONFIG_ATM_LANE=m
-CONFIG_ATM_MPOA=m
-CONFIG_ATM_BR2684=m
-# CONFIG_ATM_BR2684_IPFILTER is not set
-CONFIG_STP=m
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-# CONFIG_VLAN_8021Q_GVRP is not set
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
-CONFIG_LLC=m
-CONFIG_LLC2=m
-CONFIG_IPX=m
-# CONFIG_IPX_INTERN is not set
-CONFIG_ATALK=m
-CONFIG_DEV_APPLETALK=m
-CONFIG_LTPC=m
-CONFIG_COPS=m
-CONFIG_COPS_DAYNA=y
-CONFIG_COPS_TANGENT=y
-CONFIG_IPDDP=m
-CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
-CONFIG_X25=m
-CONFIG_LAPB=m
-CONFIG_ECONET=m
-# CONFIG_ECONET_AUNUDP is not set
-# CONFIG_ECONET_NATIVE is not set
-CONFIG_WAN_ROUTER=m
-CONFIG_NET_SCHED=y
-
-#
-# Queueing/Scheduling
-#
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_ATM=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_INGRESS=m
-
-#
-# Classification
-#
-CONFIG_NET_CLS=y
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_ROUTE=y
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-# CONFIG_NET_CLS_FLOW is not set
-# CONFIG_NET_EMATCH is not set
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
-# CONFIG_NET_ACT_NAT is not set
-CONFIG_NET_ACT_PEDIT=m
-CONFIG_NET_ACT_SIMP=m
-# CONFIG_NET_CLS_IND is not set
-CONFIG_NET_SCH_FIFO=y
-
-#
-# Network testing
-#
-CONFIG_NET_PKTGEN=m
-# CONFIG_HAMRADIO is not set
-# CONFIG_CAN is not set
-# CONFIG_IRDA is not set
-CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
-
-#
-# Bluetooth device drivers
-#
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
-# CONFIG_BT_HCIBTUSB is not set
-# CONFIG_BT_HCIBTSDIO is not set
-CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_H4=y
-CONFIG_BT_HCIUART_BCSP=y
-# CONFIG_BT_HCIUART_LL is not set
-CONFIG_BT_HCIBCM203X=m
-CONFIG_BT_HCIBPA10X=m
-CONFIG_BT_HCIBFUSB=m
-CONFIG_BT_HCIVHCI=m
-# CONFIG_AF_RXRPC is not set
-CONFIG_FIB_RULES=y
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_EXT=y
-CONFIG_WIRELESS_EXT_SYSFS=y
-# CONFIG_MAC80211 is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-# CONFIG_RFKILL is not set
-# CONFIG_NET_9P is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_STANDALONE is not set
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=y
-CONFIG_FIRMWARE_IN_KERNEL=y
-CONFIG_EXTRA_FIRMWARE=""
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_SYS_HYPERVISOR is not set
-CONFIG_CONNECTOR=y
-CONFIG_PROC_EVENTS=y
-CONFIG_MTD=m
-# CONFIG_MTD_DEBUG is not set
-CONFIG_MTD_CONCAT=m
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_REDBOOT_PARTS=m
-CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-# CONFIG_MTD_AR7_PARTS is not set
-
-#
-# User Modules And Translation Layers
-#
-CONFIG_MTD_CHAR=m
-CONFIG_MTD_BLKDEVS=m
-CONFIG_MTD_BLOCK=m
-# CONFIG_MTD_BLOCK_RO is not set
-# CONFIG_FTL is not set
-# CONFIG_NFTL is not set
-# CONFIG_INFTL is not set
-CONFIG_RFD_FTL=m
-# CONFIG_SSFDC is not set
-# CONFIG_MTD_OOPS is not set
-
-#
-# RAM/ROM/Flash chip drivers
-#
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_NOSWAP=y
-# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
-# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
-# CONFIG_MTD_CFI_GEOMETRY is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-# CONFIG_MTD_OTP is not set
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
-# CONFIG_MTD_RAM is not set
-# CONFIG_MTD_ROM is not set
-CONFIG_MTD_ABSENT=m
-
-#
-# Mapping drivers for chip access
-#
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x4000000
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
-CONFIG_MTD_SC520CDP=m
-CONFIG_MTD_NETSC520=m
-CONFIG_MTD_TS5500=m
-CONFIG_MTD_SBC_GXX=m
-CONFIG_MTD_AMD76XROM=m
-CONFIG_MTD_ICHXROM=m
-# CONFIG_MTD_ESB2ROM is not set
-# CONFIG_MTD_CK804XROM is not set
-CONFIG_MTD_SCB2_FLASH=m
-CONFIG_MTD_NETtel=m
-CONFIG_MTD_DILNETPC=m
-CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
-CONFIG_MTD_L440GX=m
-CONFIG_MTD_PCI=m
-# CONFIG_MTD_INTEL_VR_NOR is not set
-# CONFIG_MTD_PLATRAM is not set
-
-#
-# Self-contained MTD device drivers
-#
-CONFIG_MTD_PMC551=m
-CONFIG_MTD_PMC551_BUGFIX=y
-# CONFIG_MTD_PMC551_DEBUG is not set
-# CONFIG_MTD_DATAFLASH is not set
-# CONFIG_MTD_M25P80 is not set
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
-CONFIG_MTD_BLOCK2MTD=m
-
-#
-# Disk-On-Chip Device Drivers
-#
-CONFIG_MTD_DOC2000=m
-CONFIG_MTD_DOC2001=m
-CONFIG_MTD_DOC2001PLUS=m
-CONFIG_MTD_DOCPROBE=m
-CONFIG_MTD_DOCECC=m
-CONFIG_MTD_DOCPROBE_ADVANCED=y
-CONFIG_MTD_DOCPROBE_ADDRESS=0x0000
-CONFIG_MTD_DOCPROBE_HIGH=y
-CONFIG_MTD_DOCPROBE_55AA=y
-CONFIG_MTD_NAND=m
-# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-CONFIG_MTD_NAND_ECC_SMC=y
-# CONFIG_MTD_NAND_MUSEUM_IDS is not set
-CONFIG_MTD_NAND_IDS=m
-CONFIG_MTD_NAND_DISKONCHIP=m
-# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
-CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
-# CONFIG_MTD_NAND_CAFE is not set
-CONFIG_MTD_NAND_CS553X=m
-CONFIG_MTD_NAND_NANDSIM=m
-# CONFIG_MTD_NAND_PLATFORM is not set
-# CONFIG_MTD_ALAUDA is not set
-CONFIG_MTD_ONENAND=m
-# CONFIG_MTD_ONENAND_VERIFY_WRITE is not set
-CONFIG_MTD_ONENAND_OTP=y
-# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
-# CONFIG_MTD_ONENAND_SIM is not set
-
-#
-# UBI - Unsorted block images
-#
-# CONFIG_MTD_UBI is not set
-# CONFIG_PARPORT is not set
-CONFIG_PNP=y
-# CONFIG_PNP_DEBUG is not set
-
-#
-# Protocols
-#
-# CONFIG_ISAPNP is not set
-CONFIG_PNPBIOS=y
-CONFIG_PNPBIOS_PROC_FS=y
-CONFIG_PNPACPI=y
-CONFIG_BLK_DEV=y
-# CONFIG_BLK_DEV_FD is not set
-CONFIG_BLK_DEV_XD=m
-CONFIG_BLK_CPQ_DA=m
-CONFIG_BLK_CPQ_CISS_DA=m
-CONFIG_CISS_SCSI_TAPE=y
-CONFIG_BLK_DEV_DAC960=m
-CONFIG_BLK_DEV_UMEM=m
-# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
-# CONFIG_BLK_DEV_UB is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=64000
-# CONFIG_BLK_DEV_XIP is not set
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-CONFIG_CDROM_PKTCDVD_WCACHE=y
-CONFIG_ATA_OVER_ETH=m
-# CONFIG_BLK_DEV_HD is not set
-CONFIG_MISC_DEVICES=y
-# CONFIG_IBM_ASM is not set
-# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
-# CONFIG_SGI_IOC4 is not set
-# CONFIG_TIFM_CORE is not set
-# CONFIG_ACER_WMI is not set
-# CONFIG_ASUS_LAPTOP is not set
-# CONFIG_FUJITSU_LAPTOP is not set
-# CONFIG_TC1100_WMI is not set
-# CONFIG_MSI_LAPTOP is not set
-# CONFIG_COMPAL_LAPTOP is not set
-# CONFIG_SONY_LAPTOP is not set
-# CONFIG_THINKPAD_ACPI is not set
-# CONFIG_INTEL_MENLOW is not set
-# CONFIG_EEEPC_LAPTOP is not set
-# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HP_ILO is not set
-CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=y
-CONFIG_SCSI_DMA=y
-# CONFIG_SCSI_TGT is not set
-CONFIG_SCSI_NETLINK=y
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=y
-# CONFIG_BLK_DEV_SR_VENDOR is not set
-CONFIG_CHR_DEV_SG=y
-CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-# CONFIG_SCSI_SCAN_ASYNC is not set
-CONFIG_SCSI_WAIT_SCAN=m
-
-#
-# SCSI Transports
-#
-CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=m
-CONFIG_SCSI_ISCSI_ATTRS=m
-# CONFIG_SCSI_SAS_LIBSAS is not set
-# CONFIG_SCSI_SRP_ATTRS is not set
-CONFIG_SCSI_LOWLEVEL=y
-# CONFIG_ISCSI_TCP is not set
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
-# CONFIG_SCSI_7000FASST is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AHA152X is not set
-# CONFIG_SCSI_AHA1542 is not set
-# CONFIG_SCSI_AACRAID is not set
-# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_AIC94XX is not set
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ADVANSYS is not set
-# CONFIG_SCSI_IN2000 is not set
-# CONFIG_SCSI_ARCMSR is not set
-# CONFIG_MEGARAID_NEWGEN is not set
-# CONFIG_MEGARAID_LEGACY is not set
-# CONFIG_MEGARAID_SAS is not set
-# CONFIG_SCSI_HPTIOP is not set
-# CONFIG_SCSI_BUSLOGIC is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_DTC3280 is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-CONFIG_SCSI_GDTH=m
-# CONFIG_SCSI_GENERIC_NCR5380 is not set
-# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
-# CONFIG_SCSI_NCR53C406A is not set
-# CONFIG_SCSI_STEX is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_PAS16 is not set
-# CONFIG_SCSI_QLOGIC_FAS is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_QLA_FC is not set
-# CONFIG_SCSI_QLA_ISCSI is not set
-# CONFIG_SCSI_LPFC is not set
-# CONFIG_SCSI_SYM53C416 is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_T128 is not set
-# CONFIG_SCSI_U14_34F is not set
-# CONFIG_SCSI_ULTRASTOR is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_SRP is not set
-# CONFIG_SCSI_DH is not set
-CONFIG_ATA=y
-# CONFIG_ATA_NONSTANDARD is not set
-CONFIG_ATA_ACPI=y
-CONFIG_SATA_PMP=y
-# CONFIG_SATA_AHCI is not set
-# CONFIG_SATA_SIL24 is not set
-CONFIG_ATA_SFF=y
-# CONFIG_SATA_SVW is not set
-CONFIG_ATA_PIIX=y
-# CONFIG_SATA_MV is not set
-# CONFIG_SATA_NV is not set
-# CONFIG_PDC_ADMA is not set
-# CONFIG_SATA_QSTOR is not set
-# CONFIG_SATA_PROMISE is not set
-# CONFIG_SATA_SX4 is not set
-# CONFIG_SATA_SIL is not set
-# CONFIG_SATA_SIS is not set
-# CONFIG_SATA_ULI is not set
-# CONFIG_SATA_VIA is not set
-# CONFIG_SATA_VITESSE is not set
-# CONFIG_SATA_INIC162X is not set
-# CONFIG_PATA_ACPI is not set
-# CONFIG_PATA_ALI is not set
-# CONFIG_PATA_AMD is not set
-# CONFIG_PATA_ARTOP is not set
-# CONFIG_PATA_ATIIXP is not set
-# CONFIG_PATA_CMD640_PCI is not set
-# CONFIG_PATA_CMD64X is not set
-# CONFIG_PATA_CS5520 is not set
-# CONFIG_PATA_CS5530 is not set
-# CONFIG_PATA_CS5535 is not set
-# CONFIG_PATA_CS5536 is not set
-# CONFIG_PATA_CYPRESS is not set
-# CONFIG_PATA_EFAR is not set
-CONFIG_ATA_GENERIC=y
-# CONFIG_PATA_HPT366 is not set
-# CONFIG_PATA_HPT37X is not set
-# CONFIG_PATA_HPT3X2N is not set
-# CONFIG_PATA_HPT3X3 is not set
-# CONFIG_PATA_IT821X is not set
-# CONFIG_PATA_IT8213 is not set
-# CONFIG_PATA_JMICRON is not set
-# CONFIG_PATA_LEGACY is not set
-# CONFIG_PATA_TRIFLEX is not set
-# CONFIG_PATA_MARVELL is not set
-CONFIG_PATA_MPIIX=y
-# CONFIG_PATA_OLDPIIX is not set
-# CONFIG_PATA_NETCELL is not set
-# CONFIG_PATA_NINJA32 is not set
-# CONFIG_PATA_NS87410 is not set
-# CONFIG_PATA_NS87415 is not set
-# CONFIG_PATA_OPTI is not set
-# CONFIG_PATA_OPTIDMA is not set
-# CONFIG_PATA_PDC_OLD is not set
-# CONFIG_PATA_QDI is not set
-# CONFIG_PATA_RADISYS is not set
-# CONFIG_PATA_RZ1000 is not set
-# CONFIG_PATA_SC1200 is not set
-# CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_PDC2027X is not set
-# CONFIG_PATA_SIL680 is not set
-# CONFIG_PATA_SIS is not set
-# CONFIG_PATA_VIA is not set
-# CONFIG_PATA_WINBOND is not set
-# CONFIG_PATA_WINBOND_VLB is not set
-# CONFIG_PATA_SCH is not set
-# CONFIG_MD is not set
-# CONFIG_FUSION is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-
-#
-# Enable only one of the two stacks, unless you know what you are doing
-#
-# CONFIG_FIREWIRE is not set
-CONFIG_IEEE1394=m
-CONFIG_IEEE1394_OHCI1394=m
-# CONFIG_IEEE1394_PCILYNX is not set
-CONFIG_IEEE1394_SBP2=m
-# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-CONFIG_IEEE1394_ETH1394_ROM_ENTRY=y
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_DV1394=m
-# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-CONFIG_I2O=m
-CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y
-CONFIG_I2O_EXT_ADAPTEC=y
-CONFIG_I2O_CONFIG=m
-CONFIG_I2O_CONFIG_OLD_IOCTL=y
-CONFIG_I2O_BUS=m
-CONFIG_I2O_BLOCK=m
-CONFIG_I2O_SCSI=m
-CONFIG_I2O_PROC=m
-# CONFIG_MACINTOSH_DRIVERS is not set
-CONFIG_NETDEVICES=y
-CONFIG_IFB=m
-CONFIG_DUMMY=m
-CONFIG_BONDING=m
-# CONFIG_MACVLAN is not set
-CONFIG_EQUALIZER=m
-CONFIG_TUN=m
-# CONFIG_VETH is not set
-# CONFIG_NET_SB1000 is not set
-# CONFIG_ARCNET is not set
-CONFIG_PHYLIB=m
-
-#
-# MII PHY device drivers
-#
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-# CONFIG_BROADCOM_PHY is not set
-# CONFIG_ICPLUS_PHY is not set
-# CONFIG_REALTEK_PHY is not set
-# CONFIG_MDIO_BITBANG is not set
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNGEM is not set
-# CONFIG_CASSINI is not set
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_EL1=m
-CONFIG_EL2=m
-CONFIG_ELPLUS=m
-CONFIG_EL16=m
-CONFIG_EL3=m
-CONFIG_3C515=m
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-# CONFIG_LANCE is not set
-CONFIG_NET_VENDOR_SMC=y
-CONFIG_WD80x3=m
-CONFIG_ULTRA=m
-CONFIG_SMC9194=m
-# CONFIG_ENC28J60 is not set
-# CONFIG_NET_VENDOR_RACAL is not set
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_TULIP=m
-# CONFIG_TULIP_MWI is not set
-# CONFIG_TULIP_MMIO is not set
-CONFIG_TULIP_NAPI=y
-CONFIG_TULIP_NAPI_HW_MITIGATION=y
-CONFIG_DE4X5=m
-CONFIG_WINBOND_840=m
-CONFIG_DM9102=m
-CONFIG_ULI526X=m
-# CONFIG_AT1700 is not set
-# CONFIG_DEPCA is not set
-# CONFIG_HP100 is not set
-# CONFIG_NET_ISA is not set
-# CONFIG_IBM_NEW_EMAC_ZMII is not set
-# CONFIG_IBM_NEW_EMAC_RGMII is not set
-# CONFIG_IBM_NEW_EMAC_TAH is not set
-# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-CONFIG_NET_PCI=y
-# CONFIG_PCNET32 is not set
-# CONFIG_AMD8111_ETH is not set
-# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_AC3200 is not set
-# CONFIG_APRICOT is not set
-# CONFIG_B44 is not set
-# CONFIG_FORCEDETH is not set
-# CONFIG_CS89x0 is not set
-# CONFIG_EEPRO100 is not set
-CONFIG_E100=m
-# CONFIG_FEALNX is not set
-# CONFIG_NATSEMI is not set
-CONFIG_NE2K_PCI=m
-CONFIG_8139CP=m
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-CONFIG_8139TOO_8129=y
-# CONFIG_8139_OLD_RX_RESET is not set
-# CONFIG_R6040 is not set
-# CONFIG_SIS900 is not set
-CONFIG_EPIC100=m
-# CONFIG_SUNDANCE is not set
-# CONFIG_TLAN is not set
-# CONFIG_VIA_RHINE is not set
-# CONFIG_SC92031 is not set
-CONFIG_NETDEV_1000=y
-# CONFIG_ACENIC is not set
-# CONFIG_DL2K is not set
-CONFIG_E1000=m
-CONFIG_E1000_DISABLE_PACKET_SPLIT=y
-# CONFIG_E1000E is not set
-# CONFIG_IP1000 is not set
-# CONFIG_IGB is not set
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SIS190 is not set
-CONFIG_SKGE=y
-# CONFIG_SKGE_DEBUG is not set
-CONFIG_SKY2=y
-# CONFIG_SKY2_DEBUG is not set
-# CONFIG_VIA_VELOCITY is not set
-# CONFIG_TIGON3 is not set
-# CONFIG_BNX2 is not set
-# CONFIG_QLA3XXX is not set
-# CONFIG_ATL1 is not set
-# CONFIG_ATL1E is not set
-CONFIG_NETDEV_10000=y
-# CONFIG_CHELSIO_T1 is not set
-# CONFIG_CHELSIO_T3 is not set
-# CONFIG_IXGBE is not set
-CONFIG_IXGB=m
-# CONFIG_S2IO is not set
-# CONFIG_MYRI10GE is not set
-# CONFIG_NETXEN_NIC is not set
-# CONFIG_NIU is not set
-# CONFIG_MLX4_CORE is not set
-# CONFIG_TEHUTI is not set
-# CONFIG_BNX2X is not set
-# CONFIG_SFC is not set
-# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
-CONFIG_IPW2100=m
-# CONFIG_IPW2100_MONITOR is not set
-# CONFIG_IPW2100_DEBUG is not set
-CONFIG_IPW2200=m
-# CONFIG_IPW2200_MONITOR is not set
-# CONFIG_IPW2200_QOS is not set
-# CONFIG_IPW2200_DEBUG is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_AIRO is not set
-# CONFIG_HERMES is not set
-# CONFIG_ATMEL is not set
-# CONFIG_PRISM54 is not set
-# CONFIG_USB_ZD1201 is not set
-# CONFIG_USB_NET_RNDIS_WLAN is not set
-# CONFIG_IWLWIFI_LEDS is not set
-# CONFIG_HOSTAP is not set
-
-#
-# USB Network Adapters
-#
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=y
-CONFIG_USB_NET_AX8817X=y
-CONFIG_USB_NET_CDCETHER=m
-# CONFIG_USB_NET_DM9601 is not set
-CONFIG_USB_NET_GL620A=m
-CONFIG_USB_NET_NET1080=m
-CONFIG_USB_NET_PLUSB=m
-# CONFIG_USB_NET_MCS7830 is not set
-CONFIG_USB_NET_RNDIS_HOST=m
-CONFIG_USB_NET_CDC_SUBSET=m
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_BELKIN=y
-CONFIG_USB_ARMLINUX=y
-CONFIG_USB_EPSON2888=y
-# CONFIG_USB_KC2190 is not set
-CONFIG_USB_NET_ZAURUS=m
-# CONFIG_WAN is not set
-CONFIG_ATM_DRIVERS=y
-# CONFIG_ATM_DUMMY is not set
-# CONFIG_ATM_TCP is not set
-# CONFIG_ATM_LANAI is not set
-# CONFIG_ATM_ENI is not set
-# CONFIG_ATM_FIRESTREAM is not set
-# CONFIG_ATM_ZATM is not set
-# CONFIG_ATM_NICSTAR is not set
-# CONFIG_ATM_IDT77252 is not set
-# CONFIG_ATM_AMBASSADOR is not set
-# CONFIG_ATM_HORIZON is not set
-# CONFIG_ATM_IA is not set
-# CONFIG_ATM_FORE200E is not set
-# CONFIG_ATM_HE is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
-# CONFIG_PPPOL2TP is not set
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLHC=m
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NET_FC=y
-CONFIG_NETCONSOLE=m
-# CONFIG_NETCONSOLE_DYNAMIC is not set
-CONFIG_NETPOLL=y
-CONFIG_NETPOLL_TRAP=y
-CONFIG_NET_POLL_CONTROLLER=y
-# CONFIG_ISDN is not set
-CONFIG_PHONE=m
-# CONFIG_PHONE_IXJ is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-CONFIG_INPUT_FF_MEMLESS=y
-CONFIG_INPUT_POLLDEV=m
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-CONFIG_INPUT_JOYDEV=m
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-CONFIG_KEYBOARD_SUNKBD=m
-# CONFIG_KEYBOARD_LKKBD is not set
-CONFIG_KEYBOARD_XTKBD=m
-CONFIG_KEYBOARD_NEWTON=m
-# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-CONFIG_MOUSE_PS2_ALPS=y
-CONFIG_MOUSE_PS2_LOGIPS2PP=y
-CONFIG_MOUSE_PS2_SYNAPTICS=y
-CONFIG_MOUSE_PS2_LIFEBOOK=y
-CONFIG_MOUSE_PS2_TRACKPOINT=y
-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
-CONFIG_MOUSE_SERIAL=m
-# CONFIG_MOUSE_APPLETOUCH is not set
-# CONFIG_MOUSE_BCM5974 is not set
-CONFIG_MOUSE_INPORT=m
-CONFIG_MOUSE_ATIXL=y
-CONFIG_MOUSE_LOGIBM=m
-CONFIG_MOUSE_PC110PAD=m
-# CONFIG_MOUSE_VSXXXAA is not set
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_JOYSTICK_ANALOG=m
-CONFIG_JOYSTICK_A3D=m
-CONFIG_JOYSTICK_ADI=m
-CONFIG_JOYSTICK_COBRA=m
-CONFIG_JOYSTICK_GF2K=m
-CONFIG_JOYSTICK_GRIP=m
-CONFIG_JOYSTICK_GRIP_MP=m
-CONFIG_JOYSTICK_GUILLEMOT=m
-CONFIG_JOYSTICK_INTERACT=m
-CONFIG_JOYSTICK_SIDEWINDER=m
-CONFIG_JOYSTICK_TMDC=m
-CONFIG_JOYSTICK_IFORCE=m
-CONFIG_JOYSTICK_IFORCE_USB=y
-CONFIG_JOYSTICK_IFORCE_232=y
-CONFIG_JOYSTICK_WARRIOR=m
-CONFIG_JOYSTICK_MAGELLAN=m
-CONFIG_JOYSTICK_SPACEORB=m
-CONFIG_JOYSTICK_SPACEBALL=m
-CONFIG_JOYSTICK_STINGER=m
-CONFIG_JOYSTICK_TWIDJOY=m
-# CONFIG_JOYSTICK_ZHENHUA is not set
-CONFIG_JOYSTICK_JOYDUMP=m
-# CONFIG_JOYSTICK_XPAD is not set
-# CONFIG_INPUT_TABLET is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=m
-# CONFIG_TOUCHSCREEN_FUJITSU is not set
-CONFIG_TOUCHSCREEN_GUNZE=m
-CONFIG_TOUCHSCREEN_ELO=m
-CONFIG_TOUCHSCREEN_MTOUCH=m
-# CONFIG_TOUCHSCREEN_INEXIO is not set
-CONFIG_TOUCHSCREEN_MK712=m
-# CONFIG_TOUCHSCREEN_HTCPEN is not set
-# CONFIG_TOUCHSCREEN_PENMOUNT is not set
-# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
-# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
-# CONFIG_TOUCHSCREEN_WM97XX is not set
-# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
-# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_PCSPKR=y
-# CONFIG_INPUT_APANEL is not set
-CONFIG_INPUT_WISTRON_BTNS=m
-# CONFIG_INPUT_ATLAS_BTNS is not set
-# CONFIG_INPUT_ATI_REMOTE is not set
-# CONFIG_INPUT_ATI_REMOTE2 is not set
-# CONFIG_INPUT_KEYSPAN_REMOTE is not set
-# CONFIG_INPUT_POWERMATE is not set
-# CONFIG_INPUT_YEALINK is not set
-CONFIG_INPUT_UINPUT=m
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_CT82C710=m
-CONFIG_SERIO_PCIPS2=m
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIO_RAW=m
-CONFIG_GAMEPORT=m
-CONFIG_GAMEPORT_NS558=m
-CONFIG_GAMEPORT_L4=m
-CONFIG_GAMEPORT_EMU10K1=m
-CONFIG_GAMEPORT_FM801=m
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_DEVKMEM=y
-CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_COMPUTONE is not set
-# CONFIG_ROCKETPORT is not set
-# CONFIG_CYCLADES is not set
-# CONFIG_DIGIEPCA is not set
-# CONFIG_ESPSERIAL is not set
-# CONFIG_MOXA_INTELLIO is not set
-# CONFIG_MOXA_SMARTIO is not set
-# CONFIG_ISI is not set
-# CONFIG_SYNCLINK is not set
-# CONFIG_SYNCLINKMP is not set
-# CONFIG_SYNCLINK_GT is not set
-# CONFIG_N_HDLC is not set
-# CONFIG_RISCOM8 is not set
-# CONFIG_SPECIALIX is not set
-# CONFIG_SX is not set
-# CONFIG_RIO is not set
-# CONFIG_STALDRV is not set
-# CONFIG_NOZOMI is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_PNP=y
-CONFIG_SERIAL_8250_NR_UARTS=8
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
-CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-# CONFIG_SERIAL_8250_RSA is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_JSM=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=64
-CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_PANIC_EVENT=y
-CONFIG_IPMI_PANIC_STRING=y
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_INTEL=m
-# CONFIG_HW_RANDOM_AMD is not set
-# CONFIG_HW_RANDOM_GEODE is not set
-# CONFIG_HW_RANDOM_VIA is not set
-CONFIG_NVRAM=m
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_SONYPI is not set
-# CONFIG_MWAVE is not set
-# CONFIG_PC8736x_GPIO is not set
-# CONFIG_NSC_GPIO is not set
-# CONFIG_CS5535_GPIO is not set
-CONFIG_RAW_DRIVER=m
-CONFIG_MAX_RAW_DEVS=4096
-CONFIG_HPET=y
-CONFIG_HPET_MMAP=y
-CONFIG_HANGCHECK_TIMER=m
-# CONFIG_TCG_TPM is not set
-# CONFIG_TELCLOCK is not set
-CONFIG_DEVPORT=y
-CONFIG_I2C=m
-CONFIG_I2C_BOARDINFO=y
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_HELPER_AUTO=y
-CONFIG_I2C_ALGOBIT=m
-CONFIG_I2C_ALGOPCA=m
-
-#
-# I2C Hardware Bus support
-#
-
-#
-# PC SMBus host controller drivers
-#
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-CONFIG_I2C_AMD756_S4882=m
-CONFIG_I2C_AMD8111=m
-CONFIG_I2C_I801=m
-# CONFIG_I2C_ISCH is not set
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_NFORCE2=m
-# CONFIG_I2C_NFORCE2_S4985 is not set
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-
-#
-# I2C system bus drivers (mostly embedded / system-on-chip)
-#
-CONFIG_I2C_OCORES=m
-# CONFIG_I2C_SIMTEC is not set
-
-#
-# External I2C/SMBus adapter drivers
-#
-CONFIG_I2C_PARPORT_LIGHT=m
-# CONFIG_I2C_TAOS_EVM is not set
-# CONFIG_I2C_TINY_USB is not set
-
-#
-# Graphics adapter I2C/DDC channel drivers
-#
-CONFIG_I2C_VOODOO3=m
-
-#
-# Other I2C/SMBus bus drivers
-#
-CONFIG_I2C_PCA_ISA=m
-# CONFIG_I2C_PCA_PLATFORM is not set
-CONFIG_I2C_STUB=m
-CONFIG_SCx200_ACB=m
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_DS1682 is not set
-# CONFIG_AT24 is not set
-CONFIG_SENSORS_EEPROM=m
-CONFIG_SENSORS_PCF8574=m
-# CONFIG_PCF8575 is not set
-CONFIG_SENSORS_PCA9539=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_MAX6875=m
-# CONFIG_SENSORS_TSL2550 is not set
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
-CONFIG_SPI=y
-# CONFIG_SPI_DEBUG is not set
-CONFIG_SPI_MASTER=y
-
-#
-# SPI Master Controller Drivers
-#
-CONFIG_SPI_BITBANG=m
-
-#
-# SPI Protocol Masters
-#
-# CONFIG_SPI_AT25 is not set
-# CONFIG_SPI_SPIDEV is not set
-# CONFIG_SPI_TLE62X0 is not set
-CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
-# CONFIG_GPIOLIB is not set
-CONFIG_W1=m
-CONFIG_W1_CON=y
-
-#
-# 1-wire Bus Masters
-#
-CONFIG_W1_MASTER_MATROX=m
-CONFIG_W1_MASTER_DS2490=m
-CONFIG_W1_MASTER_DS2482=m
-
-#
-# 1-wire Slaves
-#
-CONFIG_W1_SLAVE_THERM=m
-CONFIG_W1_SLAVE_SMEM=m
-CONFIG_W1_SLAVE_DS2433=m
-CONFIG_W1_SLAVE_DS2433_CRC=y
-# CONFIG_W1_SLAVE_DS2760 is not set
-CONFIG_POWER_SUPPLY=y
-# CONFIG_POWER_SUPPLY_DEBUG is not set
-# CONFIG_PDA_POWER is not set
-# CONFIG_BATTERY_DS2760 is not set
-CONFIG_HWMON=y
-CONFIG_HWMON_VID=m
-# CONFIG_SENSORS_ABITUGURU is not set
-# CONFIG_SENSORS_ABITUGURU3 is not set
-# CONFIG_SENSORS_AD7414 is not set
-# CONFIG_SENSORS_AD7418 is not set
-# CONFIG_SENSORS_ADCXX is not set
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1026 is not set
-# CONFIG_SENSORS_ADM1029 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ADM9240 is not set
-# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
-# CONFIG_SENSORS_K8TEMP is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_ATXP1 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_I5K_AMB is not set
-# CONFIG_SENSORS_F71805F is not set
-# CONFIG_SENSORS_F71882FG is not set
-# CONFIG_SENSORS_F75375S is not set
-# CONFIG_SENSORS_FSCHER is not set
-# CONFIG_SENSORS_FSCPOS is not set
-# CONFIG_SENSORS_FSCHMD is not set
-# CONFIG_SENSORS_GL518SM is not set
-# CONFIG_SENSORS_GL520SM is not set
-# CONFIG_SENSORS_CORETEMP is not set
-# CONFIG_SENSORS_IBMAEM is not set
-# CONFIG_SENSORS_IBMPEX is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_LM63 is not set
-# CONFIG_SENSORS_LM70 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
-# CONFIG_SENSORS_LM78 is not set
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
-CONFIG_SENSORS_LM85=m
-# CONFIG_SENSORS_LM87 is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_LM92 is not set
-# CONFIG_SENSORS_LM93 is not set
-# CONFIG_SENSORS_MAX1619 is not set
-# CONFIG_SENSORS_MAX6650 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_PC87427 is not set
-# CONFIG_SENSORS_SIS5595 is not set
-# CONFIG_SENSORS_DME1737 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_SMSC47M192 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_ADS7828 is not set
-# CONFIG_SENSORS_THMC50 is not set
-# CONFIG_SENSORS_VIA686A is not set
-# CONFIG_SENSORS_VT1211 is not set
-# CONFIG_SENSORS_VT8231 is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83791D is not set
-# CONFIG_SENSORS_W83792D is not set
-# CONFIG_SENSORS_W83793 is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83L786NG is not set
-# CONFIG_SENSORS_W83627HF is not set
-# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_SENSORS_HDAPS is not set
-# CONFIG_SENSORS_APPLESMC is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
-CONFIG_THERMAL=y
-# CONFIG_THERMAL_HWMON is not set
-# CONFIG_WATCHDOG is not set
-
-#
-# Sonics Silicon Backplane
-#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
-
-#
-# Multifunction device drivers
-#
-# CONFIG_MFD_CORE is not set
-# CONFIG_MFD_SM501 is not set
-# CONFIG_HTC_PASIC3 is not set
-# CONFIG_MFD_TMIO is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_COMMON=m
-CONFIG_VIDEO_ALLOW_V4L1=y
-CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_DVB_CORE=m
-CONFIG_VIDEO_MEDIA=m
-
-#
-# Multimedia drivers
-#
-# CONFIG_MEDIA_ATTACH is not set
-CONFIG_MEDIA_TUNER=m
-# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
-CONFIG_MEDIA_TUNER_SIMPLE=m
-CONFIG_MEDIA_TUNER_TDA8290=m
-CONFIG_MEDIA_TUNER_TDA18271=m
-CONFIG_MEDIA_TUNER_TDA9887=m
-CONFIG_MEDIA_TUNER_TEA5761=m
-CONFIG_MEDIA_TUNER_TEA5767=m
-CONFIG_MEDIA_TUNER_MT20XX=m
-CONFIG_MEDIA_TUNER_MT2060=m
-CONFIG_MEDIA_TUNER_XC2028=m
-CONFIG_MEDIA_TUNER_XC5000=m
-CONFIG_VIDEO_V4L2=m
-CONFIG_VIDEO_V4L1=m
-CONFIG_VIDEOBUF_GEN=m
-CONFIG_VIDEOBUF_VMALLOC=m
-CONFIG_VIDEO_IR=m
-CONFIG_VIDEO_TVEEPROM=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
-# CONFIG_VIDEO_ADV_DEBUG is not set
-CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
-CONFIG_VIDEO_IR_I2C=m
-CONFIG_VIDEO_MSP3400=m
-CONFIG_VIDEO_CS53L32A=m
-CONFIG_VIDEO_WM8775=m
-CONFIG_VIDEO_SAA711X=m
-CONFIG_VIDEO_TVP5150=m
-CONFIG_VIDEO_CX25840=m
-CONFIG_VIDEO_CX2341X=m
-# CONFIG_VIDEO_VIVI is not set
-# CONFIG_VIDEO_BT848 is not set
-# CONFIG_VIDEO_PMS is not set
-# CONFIG_VIDEO_CPIA is not set
-# CONFIG_VIDEO_CPIA2 is not set
-# CONFIG_VIDEO_SAA5246A is not set
-# CONFIG_VIDEO_SAA5249 is not set
-# CONFIG_TUNER_3036 is not set
-# CONFIG_VIDEO_STRADIS is not set
-# CONFIG_VIDEO_ZORAN is not set
-# CONFIG_VIDEO_SAA7134 is not set
-# CONFIG_VIDEO_MXB is not set
-# CONFIG_VIDEO_DPC is not set
-# CONFIG_VIDEO_HEXIUM_ORION is not set
-# CONFIG_VIDEO_HEXIUM_GEMINI is not set
-# CONFIG_VIDEO_CX88 is not set
-# CONFIG_VIDEO_CX23885 is not set
-# CONFIG_VIDEO_AU0828 is not set
-# CONFIG_VIDEO_IVTV is not set
-# CONFIG_VIDEO_CX18 is not set
-# CONFIG_VIDEO_CAFE_CCIC is not set
-CONFIG_V4L_USB_DRIVERS=y
-# CONFIG_USB_VIDEO_CLASS is not set
-# CONFIG_USB_GSPCA is not set
-CONFIG_VIDEO_PVRUSB2=m
-CONFIG_VIDEO_PVRUSB2_SYSFS=y
-CONFIG_VIDEO_PVRUSB2_DVB=y
-# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
-CONFIG_VIDEO_EM28XX=m
-# CONFIG_VIDEO_EM28XX_ALSA is not set
-# CONFIG_VIDEO_EM28XX_DVB is not set
-# CONFIG_VIDEO_USBVISION is not set
-CONFIG_VIDEO_USBVIDEO=m
-CONFIG_USB_VICAM=m
-CONFIG_USB_IBMCAM=m
-CONFIG_USB_KONICAWC=m
-CONFIG_USB_QUICKCAM_MESSENGER=m
-CONFIG_USB_ET61X251=m
-CONFIG_VIDEO_OVCAMCHIP=m
-CONFIG_USB_W9968CF=m
-CONFIG_USB_OV511=m
-CONFIG_USB_SE401=m
-CONFIG_USB_SN9C102=m
-CONFIG_USB_STV680=m
-# CONFIG_USB_ZC0301 is not set
-CONFIG_USB_PWC=m
-# CONFIG_USB_PWC_DEBUG is not set
-# CONFIG_USB_ZR364XX is not set
-# CONFIG_USB_STKWEBCAM is not set
-# CONFIG_USB_S2255 is not set
-# CONFIG_SOC_CAMERA is not set
-# CONFIG_VIDEO_SH_MOBILE_CEU is not set
-CONFIG_RADIO_ADAPTERS=y
-# CONFIG_RADIO_CADET is not set
-# CONFIG_RADIO_RTRACK is not set
-# CONFIG_RADIO_RTRACK2 is not set
-# CONFIG_RADIO_AZTECH is not set
-# CONFIG_RADIO_GEMTEK is not set
-# CONFIG_RADIO_GEMTEK_PCI is not set
-# CONFIG_RADIO_MAXIRADIO is not set
-# CONFIG_RADIO_MAESTRO is not set
-# CONFIG_RADIO_SF16FMI is not set
-# CONFIG_RADIO_SF16FMR2 is not set
-# CONFIG_RADIO_TERRATEC is not set
-# CONFIG_RADIO_TRUST is not set
-# CONFIG_RADIO_TYPHOON is not set
-# CONFIG_RADIO_ZOLTRIX is not set
-# CONFIG_USB_DSBR is not set
-# CONFIG_USB_SI470X is not set
-CONFIG_DVB_CAPTURE_DRIVERS=y
-
-#
-# Supported SAA7146 based PCI Adapters
-#
-# CONFIG_TTPCI_EEPROM is not set
-# CONFIG_DVB_AV7110 is not set
-# CONFIG_DVB_BUDGET_CORE is not set
-
-#
-# Supported USB Adapters
-#
-CONFIG_DVB_USB=m
-# CONFIG_DVB_USB_DEBUG is not set
-CONFIG_DVB_USB_A800=m
-CONFIG_DVB_USB_DIBUSB_MB=m
-# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
-CONFIG_DVB_USB_DIBUSB_MC=m
-# CONFIG_DVB_USB_DIB0700 is not set
-CONFIG_DVB_USB_UMT_010=m
-# CONFIG_DVB_USB_CXUSB is not set
-# CONFIG_DVB_USB_M920X is not set
-# CONFIG_DVB_USB_GL861 is not set
-# CONFIG_DVB_USB_AU6610 is not set
-CONFIG_DVB_USB_DIGITV=m
-CONFIG_DVB_USB_VP7045=m
-CONFIG_DVB_USB_VP702X=m
-CONFIG_DVB_USB_GP8PSK=m
-CONFIG_DVB_USB_NOVA_T_USB2=m
-# CONFIG_DVB_USB_TTUSB2 is not set
-CONFIG_DVB_USB_DTT200U=m
-# CONFIG_DVB_USB_OPERA1 is not set
-# CONFIG_DVB_USB_AF9005 is not set
-# CONFIG_DVB_USB_DW2102 is not set
-# CONFIG_DVB_USB_ANYSEE is not set
-# CONFIG_DVB_TTUSB_BUDGET is not set
-# CONFIG_DVB_TTUSB_DEC is not set
-# CONFIG_DVB_CINERGYT2 is not set
-# CONFIG_DVB_SIANO_SMS1XXX is not set
-
-#
-# Supported FlexCopII (B2C2) Adapters
-#
-# CONFIG_DVB_B2C2_FLEXCOP is not set
-
-#
-# Supported BT878 Adapters
-#
-
-#
-# Supported Pluto2 Adapters
-#
-# CONFIG_DVB_PLUTO2 is not set
-
-#
-# Supported DVB Frontends
-#
-
-#
-# Customise DVB Frontends
-#
-# CONFIG_DVB_FE_CUSTOMISE is not set
-
-#
-# DVB-S (satellite) frontends
-#
-CONFIG_DVB_CX24110=m
-CONFIG_DVB_CX24123=m
-CONFIG_DVB_MT312=m
-CONFIG_DVB_S5H1420=m
-CONFIG_DVB_STV0299=m
-CONFIG_DVB_TDA8083=m
-CONFIG_DVB_TDA10086=m
-CONFIG_DVB_VES1X93=m
-# CONFIG_DVB_TUNER_ITD1000 is not set
-CONFIG_DVB_TDA826X=m
-CONFIG_DVB_TUA6100=m
-
-#
-# DVB-T (terrestrial) frontends
-#
-CONFIG_DVB_SP8870=m
-CONFIG_DVB_SP887X=m
-CONFIG_DVB_CX22700=m
-CONFIG_DVB_CX22702=m
-# CONFIG_DVB_DRX397XD is not set
-CONFIG_DVB_L64781=m
-CONFIG_DVB_TDA1004X=m
-CONFIG_DVB_NXT6000=m
-CONFIG_DVB_MT352=m
-CONFIG_DVB_ZL10353=m
-CONFIG_DVB_DIB3000MB=m
-CONFIG_DVB_DIB3000MC=m
-# CONFIG_DVB_DIB7000M is not set
-# CONFIG_DVB_DIB7000P is not set
-CONFIG_DVB_TDA10048=m
-
-#
-# DVB-C (cable) frontends
-#
-CONFIG_DVB_VES1820=m
-CONFIG_DVB_TDA10021=m
-# CONFIG_DVB_TDA10023 is not set
-CONFIG_DVB_STV0297=m
-
-#
-# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
-#
-CONFIG_DVB_NXT200X=m
-CONFIG_DVB_OR51211=m
-CONFIG_DVB_OR51132=m
-CONFIG_DVB_BCM3510=m
-CONFIG_DVB_LGDT330X=m
-CONFIG_DVB_S5H1409=m
-# CONFIG_DVB_AU8522 is not set
-CONFIG_DVB_S5H1411=m
-
-#
-# Digital terrestrial only tuners/PLL
-#
-CONFIG_DVB_PLL=m
-# CONFIG_DVB_TUNER_DIB0070 is not set
-
-#
-# SEC control devices for DVB-S
-#
-CONFIG_DVB_LNBP21=m
-# CONFIG_DVB_ISL6405 is not set
-CONFIG_DVB_ISL6421=m
-CONFIG_DAB=y
-CONFIG_USB_DABUSB=m
-
-#
-# Graphics support
-#
-CONFIG_AGP=m
-# CONFIG_AGP_ALI is not set
-# CONFIG_AGP_ATI is not set
-# CONFIG_AGP_AMD is not set
-# CONFIG_AGP_AMD64 is not set
-CONFIG_AGP_INTEL=m
-CONFIG_AGP_NVIDIA=m
-# CONFIG_AGP_SIS is not set
-# CONFIG_AGP_SWORKS is not set
-# CONFIG_AGP_VIA is not set
-# CONFIG_AGP_EFFICEON is not set
-CONFIG_DRM=m
-# CONFIG_DRM_TDFX is not set
-# CONFIG_DRM_R128 is not set
-# CONFIG_DRM_RADEON is not set
-# CONFIG_DRM_I810 is not set
-# CONFIG_DRM_I830 is not set
-# CONFIG_DRM_I915 is not set
-# CONFIG_DRM_MGA is not set
-# CONFIG_DRM_SIS is not set
-# CONFIG_DRM_VIA is not set
-# CONFIG_DRM_SAVAGE is not set
-CONFIG_DRM_PSB=m
-CONFIG_VGASTATE=m
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_DDC=m
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-# CONFIG_FB_SYS_FILLRECT is not set
-# CONFIG_FB_SYS_COPYAREA is not set
-# CONFIG_FB_SYS_IMAGEBLIT is not set
-# CONFIG_FB_FOREIGN_ENDIAN is not set
-# CONFIG_FB_SYS_FOPS is not set
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-CONFIG_FB_BACKLIGHT=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-
-#
-# Frame buffer hardware drivers
-#
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ARC is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-CONFIG_FB_VGA16=m
-# CONFIG_FB_UVESA is not set
-CONFIG_FB_VESA=y
-# CONFIG_FB_EFI is not set
-# CONFIG_FB_IMAC is not set
-# CONFIG_FB_N411 is not set
-# CONFIG_FB_HGA is not set
-# CONFIG_FB_S1D13XXX is not set
-CONFIG_FB_NVIDIA=m
-CONFIG_FB_NVIDIA_I2C=y
-# CONFIG_FB_NVIDIA_DEBUG is not set
-CONFIG_FB_NVIDIA_BACKLIGHT=y
-CONFIG_FB_RIVA=m
-CONFIG_FB_RIVA_I2C=y
-# CONFIG_FB_RIVA_DEBUG is not set
-CONFIG_FB_RIVA_BACKLIGHT=y
-CONFIG_FB_I810=m
-CONFIG_FB_I810_GTF=y
-CONFIG_FB_I810_I2C=y
-# CONFIG_FB_LE80578 is not set
-CONFIG_FB_INTEL=m
-# CONFIG_FB_INTEL_DEBUG is not set
-CONFIG_FB_INTEL_I2C=y
-# CONFIG_FB_MATROX is not set
-CONFIG_FB_RADEON=m
-CONFIG_FB_RADEON_I2C=y
-CONFIG_FB_RADEON_BACKLIGHT=y
-# CONFIG_FB_RADEON_DEBUG is not set
-# CONFIG_FB_ATY128 is not set
-CONFIG_FB_ATY=m
-CONFIG_FB_ATY_CT=y
-CONFIG_FB_ATY_GENERIC_LCD=y
-CONFIG_FB_ATY_GX=y
-CONFIG_FB_ATY_BACKLIGHT=y
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_VT8623 is not set
-# CONFIG_FB_CYBLA is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_CARMINE is not set
-# CONFIG_FB_GEODE is not set
-# CONFIG_FB_VIRTUAL is not set
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=m
-# CONFIG_LCD_LTV350QV is not set
-# CONFIG_LCD_ILI9320 is not set
-# CONFIG_LCD_VGG2432A4 is not set
-# CONFIG_LCD_PLATFORM is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_CORGI is not set
-# CONFIG_BACKLIGHT_PROGEAR is not set
-# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
-CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
-CONFIG_VIDEO_SELECT=y
-CONFIG_MDA_CONSOLE=m
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
-CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-# CONFIG_LOGO is not set
-CONFIG_SOUND=m
-CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_HWDEP=m
-CONFIG_SND_RAWMIDI=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_OSSEMUL=y
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_PCM_OSS_PLUGINS=y
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_SUPPORT_OLD_API=y
-CONFIG_SND_VERBOSE_PROCFS=y
-CONFIG_SND_VERBOSE_PRINTK=y
-CONFIG_SND_DEBUG=y
-# CONFIG_SND_DEBUG_VERBOSE is not set
-# CONFIG_SND_PCM_XRUN_DEBUG is not set
-CONFIG_SND_VMASTER=y
-CONFIG_SND_MPU401_UART=m
-CONFIG_SND_AC97_CODEC=m
-CONFIG_SND_DRIVERS=y
-CONFIG_SND_DUMMY=m
-CONFIG_SND_VIRMIDI=m
-CONFIG_SND_MTPAV=m
-CONFIG_SND_SERIAL_U16550=m
-CONFIG_SND_MPU401=m
-CONFIG_SND_AC97_POWER_SAVE=y
-CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
-CONFIG_SND_ISA=y
-# CONFIG_SND_ADLIB is not set
-# CONFIG_SND_AD1816A is not set
-# CONFIG_SND_AD1848 is not set
-# CONFIG_SND_ALS100 is not set
-# CONFIG_SND_AZT2320 is not set
-# CONFIG_SND_CMI8330 is not set
-# CONFIG_SND_CS4231 is not set
-# CONFIG_SND_CS4232 is not set
-# CONFIG_SND_CS4236 is not set
-# CONFIG_SND_DT019X is not set
-# CONFIG_SND_ES968 is not set
-# CONFIG_SND_ES1688 is not set
-# CONFIG_SND_ES18XX is not set
-# CONFIG_SND_SC6000 is not set
-# CONFIG_SND_GUSCLASSIC is not set
-# CONFIG_SND_GUSEXTREME is not set
-# CONFIG_SND_GUSMAX is not set
-# CONFIG_SND_INTERWAVE is not set
-# CONFIG_SND_INTERWAVE_STB is not set
-# CONFIG_SND_OPL3SA2 is not set
-# CONFIG_SND_OPTI92X_AD1848 is not set
-# CONFIG_SND_OPTI92X_CS4231 is not set
-# CONFIG_SND_OPTI93X is not set
-# CONFIG_SND_MIRO is not set
-# CONFIG_SND_SB8 is not set
-# CONFIG_SND_SB16 is not set
-# CONFIG_SND_SBAWE is not set
-# CONFIG_SND_SGALAXY is not set
-# CONFIG_SND_SSCAPE is not set
-# CONFIG_SND_WAVEFRONT is not set
-CONFIG_SND_PCI=y
-# CONFIG_SND_AD1889 is not set
-# CONFIG_SND_ALS300 is not set
-# CONFIG_SND_ALS4000 is not set
-# CONFIG_SND_ALI5451 is not set
-# CONFIG_SND_ATIIXP is not set
-# CONFIG_SND_ATIIXP_MODEM is not set
-# CONFIG_SND_AU8810 is not set
-# CONFIG_SND_AU8820 is not set
-# CONFIG_SND_AU8830 is not set
-# CONFIG_SND_AW2 is not set
-# CONFIG_SND_AZT3328 is not set
-# CONFIG_SND_BT87X is not set
-# CONFIG_SND_CA0106 is not set
-# CONFIG_SND_CMIPCI is not set
-# CONFIG_SND_OXYGEN is not set
-# CONFIG_SND_CS4281 is not set
-# CONFIG_SND_CS46XX is not set
-# CONFIG_SND_CS5530 is not set
-# CONFIG_SND_CS5535AUDIO is not set
-# CONFIG_SND_DARLA20 is not set
-# CONFIG_SND_GINA20 is not set
-# CONFIG_SND_LAYLA20 is not set
-# CONFIG_SND_DARLA24 is not set
-# CONFIG_SND_GINA24 is not set
-# CONFIG_SND_LAYLA24 is not set
-# CONFIG_SND_MONA is not set
-# CONFIG_SND_MIA is not set
-# CONFIG_SND_ECHO3G is not set
-# CONFIG_SND_INDIGO is not set
-# CONFIG_SND_INDIGOIO is not set
-# CONFIG_SND_INDIGODJ is not set
-# CONFIG_SND_EMU10K1 is not set
-# CONFIG_SND_EMU10K1X is not set
-# CONFIG_SND_ENS1370 is not set
-# CONFIG_SND_ENS1371 is not set
-# CONFIG_SND_ES1938 is not set
-# CONFIG_SND_ES1968 is not set
-# CONFIG_SND_FM801 is not set
-CONFIG_SND_HDA_INTEL=m
-# CONFIG_SND_HDA_HWDEP is not set
-CONFIG_SND_HDA_CODEC_REALTEK=y
-CONFIG_SND_HDA_CODEC_ANALOG=y
-CONFIG_SND_HDA_CODEC_SIGMATEL=y
-CONFIG_SND_HDA_CODEC_VIA=y
-CONFIG_SND_HDA_CODEC_ATIHDMI=y
-CONFIG_SND_HDA_CODEC_CONEXANT=y
-CONFIG_SND_HDA_CODEC_CMEDIA=y
-CONFIG_SND_HDA_CODEC_SI3054=y
-CONFIG_SND_HDA_GENERIC=y
-# CONFIG_SND_HDA_POWER_SAVE is not set
-# CONFIG_SND_HDSP is not set
-# CONFIG_SND_HDSPM is not set
-# CONFIG_SND_HIFIER is not set
-# CONFIG_SND_ICE1712 is not set
-# CONFIG_SND_ICE1724 is not set
-CONFIG_SND_INTEL8X0=m
-CONFIG_SND_INTEL8X0M=m
-# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_MAESTRO3 is not set
-# CONFIG_SND_MIXART is not set
-# CONFIG_SND_NM256 is not set
-# CONFIG_SND_PCXHR is not set
-# CONFIG_SND_RIPTIDE is not set
-# CONFIG_SND_RME32 is not set
-# CONFIG_SND_RME96 is not set
-# CONFIG_SND_RME9652 is not set
-# CONFIG_SND_SIS7019 is not set
-# CONFIG_SND_SONICVIBES is not set
-# CONFIG_SND_TRIDENT is not set
-# CONFIG_SND_VIA82XX is not set
-# CONFIG_SND_VIA82XX_MODEM is not set
-# CONFIG_SND_VIRTUOSO is not set
-# CONFIG_SND_VX222 is not set
-# CONFIG_SND_YMFPCI is not set
-CONFIG_SND_SPI=y
-CONFIG_SND_USB=y
-CONFIG_SND_USB_AUDIO=m
-# CONFIG_SND_USB_USX2Y is not set
-# CONFIG_SND_USB_CAIAQ is not set
-# CONFIG_SND_SOC is not set
-# CONFIG_SOUND_PRIME is not set
-CONFIG_AC97_BUS=m
-CONFIG_HID_SUPPORT=y
-CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
-# CONFIG_HIDRAW is not set
-
-#
-# USB Input Devices
-#
-CONFIG_USB_HID=y
-CONFIG_USB_HIDINPUT_POWERBOOK=y
-CONFIG_HID_FF=y
-CONFIG_HID_PID=y
-CONFIG_LOGITECH_FF=y
-# CONFIG_LOGIRUMBLEPAD2_FF is not set
-# CONFIG_PANTHERLORD_FF is not set
-CONFIG_THRUSTMASTER_FF=y
-# CONFIG_ZEROPLUS_FF is not set
-CONFIG_USB_HIDDEV=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y
-CONFIG_USB_ARCH_HAS_EHCI=y
-CONFIG_USB=y
-# CONFIG_USB_DEBUG is not set
-# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_DEVICE_CLASS=y
-# CONFIG_USB_DYNAMIC_MINORS is not set
-CONFIG_USB_SUSPEND=y
-# CONFIG_USB_OTG is not set
-CONFIG_USB_MON=y
-
-#
-# USB Host Controller Drivers
-#
-# CONFIG_USB_C67X00_HCD is not set
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
-# CONFIG_USB_ISP116X_HCD is not set
-# CONFIG_USB_ISP1760_HCD is not set
-CONFIG_USB_OHCI_HCD=y
-# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
-# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-CONFIG_USB_UHCI_HCD=y
-# CONFIG_USB_SL811_HCD is not set
-# CONFIG_USB_R8A66597_HCD is not set
-# CONFIG_USB_GADGET_MUSB_HDRC is not set
-
-#
-# USB Device Class drivers
-#
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-# CONFIG_USB_WDM is not set
-
-#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
-#
-
-#
-# may also be needed; see USB_STORAGE Help for more information
-#
-CONFIG_USB_STORAGE=y
-# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-# CONFIG_USB_STORAGE_ISD200 is not set
-CONFIG_USB_STORAGE_DPCM=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_USB_STORAGE_ALAUDA=y
-# CONFIG_USB_STORAGE_ONETOUCH is not set
-# CONFIG_USB_STORAGE_KARMA is not set
-# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
-# CONFIG_USB_LIBUSUAL is not set
-
-#
-# USB Imaging devices
-#
-CONFIG_USB_MDC800=m
-CONFIG_USB_MICROTEK=m
-
-#
-# USB port drivers
-#
-CONFIG_USB_SERIAL=m
-CONFIG_USB_EZUSB=y
-CONFIG_USB_SERIAL_GENERIC=y
-# CONFIG_USB_SERIAL_AIRCABLE is not set
-CONFIG_USB_SERIAL_ARK3116=m
-CONFIG_USB_SERIAL_BELKIN=m
-# CONFIG_USB_SERIAL_CH341 is not set
-CONFIG_USB_SERIAL_WHITEHEAT=m
-CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP2101=m
-CONFIG_USB_SERIAL_CYPRESS_M8=m
-CONFIG_USB_SERIAL_EMPEG=m
-CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_FUNSOFT=m
-CONFIG_USB_SERIAL_VISOR=m
-CONFIG_USB_SERIAL_IPAQ=m
-CONFIG_USB_SERIAL_IR=m
-CONFIG_USB_SERIAL_EDGEPORT=m
-CONFIG_USB_SERIAL_EDGEPORT_TI=m
-CONFIG_USB_SERIAL_GARMIN=m
-CONFIG_USB_SERIAL_IPW=m
-# CONFIG_USB_SERIAL_IUU is not set
-CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-CONFIG_USB_SERIAL_KEYSPAN=m
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
-CONFIG_USB_SERIAL_KLSI=m
-CONFIG_USB_SERIAL_KOBIL_SCT=m
-CONFIG_USB_SERIAL_MCT_U232=m
-# CONFIG_USB_SERIAL_MOS7720 is not set
-# CONFIG_USB_SERIAL_MOS7840 is not set
-# CONFIG_USB_SERIAL_MOTOROLA is not set
-CONFIG_USB_SERIAL_NAVMAN=m
-CONFIG_USB_SERIAL_PL2303=m
-# CONFIG_USB_SERIAL_OTI6858 is not set
-# CONFIG_USB_SERIAL_SPCP8X5 is not set
-CONFIG_USB_SERIAL_HP4X=m
-CONFIG_USB_SERIAL_SAFE=m
-CONFIG_USB_SERIAL_SAFE_PADDED=y
-CONFIG_USB_SERIAL_SIERRAWIRELESS=m
-CONFIG_USB_SERIAL_TI=m
-CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
-CONFIG_USB_SERIAL_OPTION=m
-CONFIG_USB_SERIAL_OMNINET=m
-# CONFIG_USB_SERIAL_DEBUG is not set
-
-#
-# USB Miscellaneous drivers
-#
-CONFIG_USB_EMI62=m
-CONFIG_USB_EMI26=m
-# CONFIG_USB_ADUTUX is not set
-CONFIG_USB_RIO500=m
-CONFIG_USB_LEGOTOWER=m
-CONFIG_USB_LCD=m
-# CONFIG_USB_BERRY_CHARGE is not set
-CONFIG_USB_LED=m
-CONFIG_USB_CYPRESS_CY7C63=m
-CONFIG_USB_CYTHERM=m
-# CONFIG_USB_PHIDGET is not set
-CONFIG_USB_IDMOUSE=m
-# CONFIG_USB_FTDI_ELAN is not set
-CONFIG_USB_APPLEDISPLAY=m
-CONFIG_USB_SISUSBVGA=m
-CONFIG_USB_SISUSBVGA_CON=y
-CONFIG_USB_LD=m
-# CONFIG_USB_TRANCEVIBRATOR is not set
-# CONFIG_USB_IOWARRIOR is not set
-# CONFIG_USB_TEST is not set
-# CONFIG_USB_ISIGHTFW is not set
-CONFIG_USB_ATM=m
-CONFIG_USB_SPEEDTOUCH=m
-CONFIG_USB_CXACRU=m
-CONFIG_USB_UEAGLEATM=m
-CONFIG_USB_XUSBATM=m
-CONFIG_USB_GADGET=y
-# CONFIG_USB_GADGET_DEBUG is not set
-CONFIG_USB_GADGET_DEBUG_FILES=y
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-CONFIG_USB_GADGET_SELECTED=y
-CONFIG_USB_GADGET_AMD5536UDC=y
-CONFIG_USB_AMD5536UDC=y
-# CONFIG_USB_GADGET_ATMEL_USBA is not set
-# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA25X is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_GOKU is not set
-# CONFIG_USB_GADGET_LH7A40X is not set
-# CONFIG_USB_GADGET_OMAP is not set
-# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
-# CONFIG_USB_GADGET_DUMMY_HCD is not set
-CONFIG_USB_GADGET_DUALSPEED=y
-# CONFIG_USB_ZERO is not set
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-# CONFIG_USB_GADGETFS is not set
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_FILE_STORAGE_TEST=y
-# CONFIG_USB_G_SERIAL is not set
-# CONFIG_USB_MIDI_GADGET is not set
-# CONFIG_USB_G_PRINTER is not set
-# CONFIG_USB_CDC_COMPOSITE is not set
-CONFIG_MMC=y
-# CONFIG_MMC_DEBUG is not set
-CONFIG_MMC_UNSAFE_RESUME=y
-
-#
-# MMC/SD Card Drivers
-#
-CONFIG_MMC_BLOCK=y
-CONFIG_MMC_BLOCK_BOUNCE=y
-# CONFIG_SDIO_UART is not set
-# CONFIG_MMC_TEST is not set
-
-#
-# MMC/SD Host Controller Drivers
-#
-CONFIG_MMC_SDHCI=y
-# CONFIG_MMC_SDHCI_PCI is not set
-# CONFIG_MMC_WBSD is not set
-# CONFIG_MMC_TIFM_SD is not set
-# CONFIG_MEMSTICK is not set
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
-
-#
-# LED drivers
-#
-# CONFIG_LEDS_PCA9532 is not set
-# CONFIG_LEDS_CLEVO_MAIL is not set
-# CONFIG_LEDS_PCA955X is not set
-
-#
-# LED Triggers
-#
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
-# CONFIG_ACCESSIBILITY is not set
-# CONFIG_INFINIBAND is not set
-# CONFIG_EDAC is not set
-CONFIG_RTC_LIB=m
-CONFIG_RTC_CLASS=m
-
-#
-# RTC interfaces
-#
-CONFIG_RTC_INTF_SYSFS=y
-CONFIG_RTC_INTF_PROC=y
-CONFIG_RTC_INTF_DEV=y
-CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-CONFIG_RTC_DRV_TEST=m
-
-#
-# I2C RTC drivers
-#
-CONFIG_RTC_DRV_DS1307=m
-# CONFIG_RTC_DRV_DS1374 is not set
-CONFIG_RTC_DRV_DS1672=m
-# CONFIG_RTC_DRV_MAX6900 is not set
-CONFIG_RTC_DRV_RS5C372=m
-CONFIG_RTC_DRV_ISL1208=m
-CONFIG_RTC_DRV_X1205=m
-CONFIG_RTC_DRV_PCF8563=m
-CONFIG_RTC_DRV_PCF8583=m
-# CONFIG_RTC_DRV_M41T80 is not set
-# CONFIG_RTC_DRV_S35390A is not set
-# CONFIG_RTC_DRV_FM3130 is not set
-
-#
-# SPI RTC drivers
-#
-# CONFIG_RTC_DRV_M41T94 is not set
-# CONFIG_RTC_DRV_DS1305 is not set
-CONFIG_RTC_DRV_MAX6902=m
-# CONFIG_RTC_DRV_R9701 is not set
-CONFIG_RTC_DRV_RS5C348=m
-
-#
-# Platform RTC drivers
-#
-# CONFIG_RTC_DRV_CMOS is not set
-# CONFIG_RTC_DRV_DS1511 is not set
-CONFIG_RTC_DRV_DS1553=m
-CONFIG_RTC_DRV_DS1742=m
-# CONFIG_RTC_DRV_STK17TA8 is not set
-CONFIG_RTC_DRV_M48T86=m
-# CONFIG_RTC_DRV_M48T59 is not set
-CONFIG_RTC_DRV_V3020=m
-
-#
-# on-CPU RTC drivers
-#
-# CONFIG_DMADEVICES is not set
-# CONFIG_UIO is not set
-
-#
-# Firmware Drivers
-#
-CONFIG_EDD=m
-# CONFIG_EDD_OFF is not set
-CONFIG_FIRMWARE_MEMMAP=y
-# CONFIG_EFI_VARS is not set
-# CONFIG_DELL_RBU is not set
-# CONFIG_DCDBAS is not set
-CONFIG_DMIID=y
-# CONFIG_ISCSI_IBFT_FIND is not set
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-# CONFIG_EXT2_FS_XIP is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-CONFIG_REISERFS_FS=m
-# CONFIG_REISERFS_CHECK is not set
-# CONFIG_REISERFS_PROC_INFO is not set
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-CONFIG_JFS_STATISTICS=y
-CONFIG_FS_POSIX_ACL=y
-# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_OCFS2_FS is not set
-CONFIG_DNOTIFY=y
-CONFIG_INOTIFY=y
-CONFIG_INOTIFY_USER=y
-CONFIG_QUOTA=y
-# CONFIG_QUOTA_NETLINK_INTERFACE is not set
-CONFIG_PRINT_QUOTA_WARNING=y
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
-CONFIG_QUOTACTL=y
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
-CONFIG_GENERIC_ACL=y
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-CONFIG_NTFS_RW=y
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
-CONFIG_ADFS_FS=m
-# CONFIG_ADFS_FS_RW is not set
-CONFIG_AFFS_FS=m
-# CONFIG_ECRYPT_FS is not set
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_FS_DEBUG=0
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_FS_POSIX_ACL=y
-CONFIG_JFFS2_FS_SECURITY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_ZLIB=y
-# CONFIG_JFFS2_LZO is not set
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
-# CONFIG_JFFS2_CMODE_NONE is not set
-CONFIG_JFFS2_CMODE_PRIORITY=y
-# CONFIG_JFFS2_CMODE_SIZE is not set
-# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-# CONFIG_MINIX_FS is not set
-# CONFIG_OMFS_FS is not set
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_UFS_FS_WRITE=y
-# CONFIG_UFS_DEBUG is not set
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V2_ACL=y
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_NFS_ACL_SUPPORT=m
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=m
-CONFIG_SUNRPC_GSS=m
-CONFIG_RPCSEC_GSS_KRB5=m
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=y
-# CONFIG_SMB_NLS_DEFAULT is not set
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_STATS2=y
-CONFIG_CIFS_WEAK_PW_HASH=y
-# CONFIG_CIFS_UPCALL is not set
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-# CONFIG_CIFS_DEBUG2 is not set
-# CONFIG_CIFS_EXPERIMENTAL is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-CONFIG_OSF_PARTITION=y
-# CONFIG_AMIGA_PARTITION is not set
-CONFIG_ATARI_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_MSDOS_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-# CONFIG_MINIX_SUBPARTITION is not set
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_LDM_PARTITION=y
-# CONFIG_LDM_DEBUG is not set
-CONFIG_SGI_PARTITION=y
-CONFIG_ULTRIX_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
-# CONFIG_SYSV68_PARTITION is not set
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-# CONFIG_DLM is not set
-
-#
-# Kernel hacking
-#
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-# CONFIG_PRINTK_TIME is not set
-CONFIG_ENABLE_WARN_DEPRECATED=y
-CONFIG_ENABLE_MUST_CHECK=y
-CONFIG_FRAME_WARN=1024
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_UNUSED_SYMBOLS is not set
-CONFIG_DEBUG_FS=y
-# CONFIG_HEADERS_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SHIRQ is not set
-CONFIG_DETECT_SOFTLOCKUP=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
-CONFIG_SCHED_DEBUG=y
-# CONFIG_SCHEDSTATS is not set
-CONFIG_TIMER_STATS=y
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_HIGHMEM is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_WRITECOUNT is not set
-CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_RCU_TORTURE_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_HAVE_FTRACE=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-# CONFIG_FTRACE is not set
-# CONFIG_IRQSOFF_TRACER is not set
-# CONFIG_SYSPROF_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
-# CONFIG_SAMPLES is not set
-CONFIG_HAVE_ARCH_KGDB=y
-# CONFIG_KGDB is not set
-# CONFIG_STRICT_DEVMEM is not set
-CONFIG_X86_VERBOSE_BOOTUP=y
-CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_DEBUG_PER_CPU_MAPS is not set
-# CONFIG_X86_PTDUMP is not set
-# CONFIG_DEBUG_RODATA is not set
-# CONFIG_DEBUG_NX_TEST is not set
-# CONFIG_4KSTACKS is not set
-CONFIG_DOUBLEFAULT=y
-# CONFIG_MMIOTRACE is not set
-CONFIG_IO_DELAY_TYPE_0X80=0
-CONFIG_IO_DELAY_TYPE_0XED=1
-CONFIG_IO_DELAY_TYPE_UDELAY=2
-CONFIG_IO_DELAY_TYPE_NONE=3
-CONFIG_IO_DELAY_0X80=y
-# CONFIG_IO_DELAY_0XED is not set
-# CONFIG_IO_DELAY_UDELAY is not set
-# CONFIG_IO_DELAY_NONE is not set
-CONFIG_DEFAULT_IO_DELAY_TYPE=0
-# CONFIG_DEBUG_BOOT_PARAMS is not set
-# CONFIG_CPA_DEBUG is not set
-# CONFIG_OPTIMIZE_INLINING is not set
-
-#
-# Security options
-#
-CONFIG_KEYS=y
-CONFIG_KEYS_DEBUG_PROC_KEYS=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-# CONFIG_SECURITY_NETWORK_XFRM is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-# CONFIG_SECURITY_ROOTPLUG is not set
-CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_SECURITY_SELINUX_DEVELOP=y
-CONFIG_SECURITY_SELINUX_AVC_STATS=y
-CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
-# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
-CONFIG_CRYPTO=y
-
-#
-# Crypto core or helper
-#
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_AEAD=m
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_MANAGER=y
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_NULL=m
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_TEST=m
-
-#
-# Authenticated Encryption with Associated Data
-#
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_SEQIV is not set
-
-#
-# Block modes
-#
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_CTS is not set
-CONFIG_CRYPTO_ECB=m
-# CONFIG_CRYPTO_LRW is not set
-CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_XTS is not set
-
-#
-# Hash modes
-#
-CONFIG_CRYPTO_HMAC=y
-# CONFIG_CRYPTO_XCBC is not set
-
-#
-# Digest
-#
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-# CONFIG_CRYPTO_RMD128 is not set
-# CONFIG_CRYPTO_RMD160 is not set
-# CONFIG_CRYPTO_RMD256 is not set
-# CONFIG_CRYPTO_RMD320 is not set
-CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-
-#
-# Ciphers
-#
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_AES_586=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-# CONFIG_CRYPTO_CAMELLIA is not set
-CONFIG_CRYPTO_CAST5=y
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_FCRYPT is not set
-CONFIG_CRYPTO_KHAZAD=m
-# CONFIG_CRYPTO_SALSA20 is not set
-# CONFIG_CRYPTO_SALSA20_586 is not set
-# CONFIG_CRYPTO_SEED is not set
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-# CONFIG_CRYPTO_TWOFISH_586 is not set
-
-#
-# Compression
-#
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_LZO is not set
-CONFIG_CRYPTO_HW=y
-CONFIG_CRYPTO_DEV_PADLOCK=m
-CONFIG_CRYPTO_DEV_PADLOCK_AES=m
-CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
-CONFIG_CRYPTO_DEV_GEODE=m
-# CONFIG_CRYPTO_DEV_HIFN_795X is not set
-CONFIG_HAVE_KVM=y
-CONFIG_VIRTUALIZATION=y
-# CONFIG_KVM is not set
-# CONFIG_LGUEST is not set
-# CONFIG_VIRTIO_PCI is not set
-# CONFIG_VIRTIO_BALLOON is not set
-
-#
-# Library routines
-#
-CONFIG_BITREVERSE=y
-CONFIG_GENERIC_FIND_FIRST_BIT=y
-CONFIG_GENERIC_FIND_NEXT_BIT=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC16=m
-# CONFIG_CRC_T10DIF is not set
-CONFIG_CRC_ITU_T=m
-CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
-CONFIG_LIBCRC32C=m
-CONFIG_AUDIT_GENERIC=y
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=m
-CONFIG_REED_SOLOMON=m
-CONFIG_REED_SOLOMON_DEC16=y
-CONFIG_TEXTSEARCH=y
-CONFIG_TEXTSEARCH_KMP=m
-CONFIG_TEXTSEARCH_BM=m
-CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
-CONFIG_HAS_DMA=y
-CONFIG_CHECK_SIGNATURE=y
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch
deleted file mode 100644
index 1ef6e378f..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/psb-driver.patch
+++ /dev/null
@@ -1,33991 +0,0 @@
-Index: linux-2.6.27/include/drm/drm.h
-===================================================================
---- linux-2.6.27.orig/include/drm/drm.h 2009-02-05 13:29:29.000000000 +0000
-+++ linux-2.6.27/include/drm/drm.h 2009-02-05 13:29:33.000000000 +0000
-@@ -173,6 +173,7 @@
- _DRM_AGP = 3, /**< AGP/GART */
- _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
- _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
-+ _DRM_TTM = 7
- };
-
- /**
-@@ -598,6 +599,400 @@
- uint64_t size;
- };
-
-+#define DRM_FENCE_FLAG_EMIT 0x00000001
-+#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
-+#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
-+#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
-+#define DRM_FENCE_FLAG_NO_USER 0x00000010
-+
-+/* Reserved for driver use */
-+#define DRM_FENCE_MASK_DRIVER 0xFF000000
-+
-+#define DRM_FENCE_TYPE_EXE 0x00000001
-+
-+struct drm_fence_arg {
-+ unsigned int handle;
-+ unsigned int fence_class;
-+ unsigned int type;
-+ unsigned int flags;
-+ unsigned int signaled;
-+ unsigned int error;
-+ unsigned int sequence;
-+ unsigned int pad64;
-+ uint64_t expand_pad[2]; /*Future expansion */
-+};
-+
-+/* Buffer permissions, referring to how the GPU uses the buffers.
-+ * these translate to fence types used for the buffers.
-+ * Typically a texture buffer is read, A destination buffer is write and
-+ * a command (batch-) buffer is exe. Can be or-ed together.
-+ */
-+
-+#define DRM_BO_FLAG_READ (1ULL << 0)
-+#define DRM_BO_FLAG_WRITE (1ULL << 1)
-+#define DRM_BO_FLAG_EXE (1ULL << 2)
-+
-+/*
-+ * Status flags. Can be read to determine the actual state of a buffer.
-+ * Can also be set in the buffer mask before validation.
-+ */
-+
-+/*
-+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
-+ * available to root and must be manually removed before buffer manager shutdown
-+ * or lock.
-+ * Flags: Acknowledge
-+ */
-+#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
-+
-+/*
-+ * Mask: Require that the buffer is placed in mappable memory when validated.
-+ * If not set the buffer may or may not be in mappable memory when validated.
-+ * Flags: If set, the buffer is in mappable memory.
-+ */
-+#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
-+
-+/* Mask: The buffer should be shareable with other processes.
-+ * Flags: The buffer is shareable with other processes.
-+ */
-+#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
-+
-+/* Mask: If set, place the buffer in cache-coherent memory if available.
-+ * If clear, never place the buffer in cache coherent memory if validated.
-+ * Flags: The buffer is currently in cache-coherent memory.
-+ */
-+#define DRM_BO_FLAG_CACHED (1ULL << 7)
-+
-+/* Mask: Make sure that every time this buffer is validated,
-+ * it ends up on the same location provided that the memory mask is the same.
-+ * The buffer will also not be evicted when claiming space for
-+ * other buffers. Basically a pinned buffer but it may be thrown out as
-+ * part of buffer manager shutdown or locking.
-+ * Flags: Acknowledge.
-+ */
-+#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
-+
-+/* Mask: Make sure the buffer is in cached memory when mapped
-+ * Flags: Acknowledge.
-+ * Buffers allocated with this flag should not be used for suballocators
-+ * This type may have issues on CPUs with over-aggressive caching
-+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
-+ */
-+#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
-+
-+
-+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
-+ * Flags: Acknowledge.
-+ */
-+#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
-+
-+/*
-+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
-+ * Flags: Acknowledge.
-+ */
-+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
-+#define DRM_BO_FLAG_TILE (1ULL << 15)
-+
-+/*
-+ * Memory type flags that can be or'ed together in the mask, but only
-+ * one appears in flags.
-+ */
-+
-+/* System memory */
-+#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
-+/* Translation table memory */
-+#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
-+/* Vram memory */
-+#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
-+/* Up to the driver to define. */
-+#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
-+#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
-+#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
-+#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
-+#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
-+/* We can add more of these now with a 64-bit flag type */
-+
-+/* Memory flag mask */
-+#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
-+#define DRM_BO_MASK_MEMTYPE 0x00000000FF0800A0ULL
-+
-+/* Driver-private flags */
-+#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
-+
-+/* Don't block on validate and map */
-+#define DRM_BO_HINT_DONT_BLOCK 0x00000002
-+/* Don't place this buffer on the unfenced list.*/
-+#define DRM_BO_HINT_DONT_FENCE 0x00000004
-+#define DRM_BO_HINT_WAIT_LAZY 0x00000008
-+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
-+
-+#define DRM_BO_INIT_MAGIC 0xfe769812
-+#define DRM_BO_INIT_MAJOR 1
-+#define DRM_BO_INIT_MINOR 0
-+#define DRM_BO_INIT_PATCH 0
-+
-+
-+struct drm_bo_info_req {
-+ uint64_t mask;
-+ uint64_t flags;
-+ unsigned int handle;
-+ unsigned int hint;
-+ unsigned int fence_class;
-+ unsigned int desired_tile_stride;
-+ unsigned int tile_info;
-+ unsigned int pad64;
-+ uint64_t presumed_offset;
-+};
-+
-+struct drm_bo_create_req {
-+ uint64_t mask;
-+ uint64_t size;
-+ uint64_t buffer_start;
-+ unsigned int hint;
-+ unsigned int page_alignment;
-+};
-+
-+
-+/*
-+ * Reply flags
-+ */
-+
-+#define DRM_BO_REP_BUSY 0x00000001
-+
-+struct drm_bo_info_rep {
-+ uint64_t flags;
-+ uint64_t mask;
-+ uint64_t size;
-+ uint64_t offset;
-+ uint64_t arg_handle;
-+ uint64_t buffer_start;
-+ unsigned int handle;
-+ unsigned int fence_flags;
-+ unsigned int rep_flags;
-+ unsigned int page_alignment;
-+ unsigned int desired_tile_stride;
-+ unsigned int hw_tile_stride;
-+ unsigned int tile_info;
-+ unsigned int pad64;
-+ uint64_t expand_pad[4]; /*Future expansion */
-+};
-+
-+struct drm_bo_arg_rep {
-+ struct drm_bo_info_rep bo_info;
-+ int ret;
-+ unsigned int pad64;
-+};
-+
-+struct drm_bo_create_arg {
-+ union {
-+ struct drm_bo_create_req req;
-+ struct drm_bo_info_rep rep;
-+ } d;
-+};
-+
-+struct drm_bo_handle_arg {
-+ unsigned int handle;
-+};
-+
-+struct drm_bo_reference_info_arg {
-+ union {
-+ struct drm_bo_handle_arg req;
-+ struct drm_bo_info_rep rep;
-+ } d;
-+};
-+
-+struct drm_bo_map_wait_idle_arg {
-+ union {
-+ struct drm_bo_info_req req;
-+ struct drm_bo_info_rep rep;
-+ } d;
-+};
-+
-+struct drm_bo_op_req {
-+ enum {
-+ drm_bo_validate,
-+ drm_bo_fence,
-+ drm_bo_ref_fence,
-+ } op;
-+ unsigned int arg_handle;
-+ struct drm_bo_info_req bo_req;
-+};
-+
-+
-+struct drm_bo_op_arg {
-+ uint64_t next;
-+ union {
-+ struct drm_bo_op_req req;
-+ struct drm_bo_arg_rep rep;
-+ } d;
-+ int handled;
-+ unsigned int pad64;
-+};
-+
-+
-+#define DRM_BO_MEM_LOCAL 0
-+#define DRM_BO_MEM_TT 1
-+#define DRM_BO_MEM_VRAM 2
-+#define DRM_BO_MEM_PRIV0 3
-+#define DRM_BO_MEM_PRIV1 4
-+#define DRM_BO_MEM_PRIV2 5
-+#define DRM_BO_MEM_PRIV3 6
-+#define DRM_BO_MEM_PRIV4 7
-+
-+#define DRM_BO_MEM_TYPES 8 /* For now. */
-+
-+#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
-+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
-+
-+struct drm_bo_version_arg {
-+ uint32_t major;
-+ uint32_t minor;
-+ uint32_t patchlevel;
-+};
-+
-+struct drm_mm_type_arg {
-+ unsigned int mem_type;
-+ unsigned int lock_flags;
-+};
-+
-+struct drm_mm_init_arg {
-+ unsigned int magic;
-+ unsigned int major;
-+ unsigned int minor;
-+ unsigned int mem_type;
-+ uint64_t p_offset;
-+ uint64_t p_size;
-+};
-+
-+/*
-+ * Drm mode setting
-+ */
-+#define DRM_DISPLAY_INFO_LEN 32
-+#define DRM_OUTPUT_NAME_LEN 32
-+#define DRM_DISPLAY_MODE_LEN 32
-+#define DRM_PROP_NAME_LEN 32
-+
-+#define DRM_MODE_TYPE_BUILTIN (1<<0)
-+#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
-+#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
-+#define DRM_MODE_TYPE_PREFERRED (1<<3)
-+#define DRM_MODE_TYPE_DEFAULT (1<<4)
-+#define DRM_MODE_TYPE_USERDEF (1<<5)
-+#define DRM_MODE_TYPE_DRIVER (1<<6)
-+#define DRM_MODE_TYPE_USERPREF (1<<7)
-+
-+struct drm_mode_modeinfo {
-+
-+ unsigned int id;
-+
-+ unsigned int clock;
-+ unsigned short hdisplay, hsync_start, hsync_end, htotal, hskew;
-+ unsigned short vdisplay, vsync_start, vsync_end, vtotal, vscan;
-+
-+ unsigned int vrefresh; /* vertical refresh * 1000 */
-+
-+ unsigned int flags;
-+ unsigned int type;
-+ char name[DRM_DISPLAY_MODE_LEN];
-+};
-+
-+struct drm_mode_card_res {
-+
-+ int count_fbs;
-+ unsigned int __user *fb_id;
-+
-+ int count_crtcs;
-+ unsigned int __user *crtc_id;
-+
-+ int count_outputs;
-+ unsigned int __user *output_id;
-+
-+ int count_modes;
-+ struct drm_mode_modeinfo __user *modes;
-+
-+};
-+
-+struct drm_mode_crtc {
-+ unsigned int crtc_id; /**< Id */
-+ unsigned int fb_id; /**< Id of framebuffer */
-+
-+ int x, y; /**< Position on the frameuffer */
-+
-+ unsigned int mode; /**< Current mode used */
-+
-+ int count_outputs;
-+ unsigned int outputs; /**< Outputs that are connected */
-+
-+ int count_possibles;
-+ unsigned int possibles; /**< Outputs that can be connected */
-+
-+ unsigned int __user *set_outputs; /**< Outputs to be connected */
-+
-+ int gamma_size;
-+
-+};
-+
-+struct drm_mode_get_output {
-+
-+ unsigned int output; /**< Id */
-+ unsigned int crtc; /**< Id of crtc */
-+ unsigned char name[DRM_OUTPUT_NAME_LEN];
-+
-+ unsigned int connection;
-+ unsigned int mm_width, mm_height; /**< HxW in millimeters */
-+ unsigned int subpixel;
-+
-+ int count_crtcs;
-+ unsigned int crtcs; /**< possible crtc to connect to */
-+
-+ int count_clones;
-+ unsigned int clones; /**< list of clones */
-+
-+ int count_modes;
-+ unsigned int __user *modes; /**< list of modes it supports */
-+
-+ int count_props;
-+ unsigned int __user *props;
-+ unsigned int __user *prop_values;
-+};
-+
-+#define DRM_MODE_PROP_PENDING (1<<0)
-+#define DRM_MODE_PROP_RANGE (1<<1)
-+#define DRM_MODE_PROP_IMMUTABLE (1<<2)
-+#define DRM_MODE_PROP_ENUM (1<<3) // enumerated type with text strings
-+
-+struct drm_mode_property_enum {
-+ uint32_t value;
-+ unsigned char name[DRM_PROP_NAME_LEN];
-+};
-+
-+struct drm_mode_get_property {
-+
-+ unsigned int prop_id;
-+ unsigned int flags;
-+ unsigned char name[DRM_PROP_NAME_LEN];
-+
-+ int count_values;
-+ uint32_t __user *values;
-+
-+ int count_enums;
-+ struct drm_mode_property_enum *enums;
-+};
-+
-+struct drm_mode_fb_cmd {
-+ unsigned int buffer_id;
-+ unsigned int width, height;
-+ unsigned int pitch;
-+ unsigned int bpp;
-+ unsigned int handle;
-+ unsigned int depth;
-+};
-+
-+struct drm_mode_mode_cmd {
-+ unsigned int output_id;
-+ unsigned int mode_id;
-+};
-+
- #define DRM_IOCTL_BASE 'd'
- #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
- #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
-@@ -664,6 +1059,47 @@
-
- #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
-
-+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
-+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
-+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
-+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
-+
-+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
-+
-+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
-+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
-+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
-+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
-+#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
-+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
-+
-+
-+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
-+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
-+#define DRM_IOCTL_MODE_GETOUTPUT DRM_IOWR(0xA2, struct drm_mode_get_output)
-+#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA3, struct drm_mode_crtc)
-+#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xA4, struct drm_mode_fb_cmd)
-+#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xA5, unsigned int)
-+#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xA6, struct drm_mode_fb_cmd)
-+
-+#define DRM_IOCTL_MODE_ADDMODE DRM_IOWR(0xA7, struct drm_mode_modeinfo)
-+#define DRM_IOCTL_MODE_RMMODE DRM_IOWR(0xA8, unsigned int)
-+#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
-+#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xAA, struct drm_mode_mode_cmd)
-+
-+#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAB, struct drm_mode_get_property)
-+/*@}*/
-+
- /**
- * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x99.
-@@ -718,6 +1154,11 @@
- typedef struct drm_agp_info drm_agp_info_t;
- typedef struct drm_scatter_gather drm_scatter_gather_t;
- typedef struct drm_set_version drm_set_version_t;
-+
-+typedef struct drm_fence_arg drm_fence_arg_t;
-+typedef struct drm_mm_type_arg drm_mm_type_arg_t;
-+typedef struct drm_mm_init_arg drm_mm_init_arg_t;
-+typedef enum drm_bo_type drm_bo_type_t;
- #endif
-
- #endif
-Index: linux-2.6.27/include/drm/drmP.h
-===================================================================
---- linux-2.6.27.orig/include/drm/drmP.h 2009-02-05 13:29:30.000000000 +0000
-+++ linux-2.6.27/include/drm/drmP.h 2009-02-05 13:29:33.000000000 +0000
-@@ -57,6 +57,7 @@
- #include <linux/dma-mapping.h>
- #include <linux/mm.h>
- #include <linux/cdev.h>
-+#include <linux/i2c.h>
- #include <linux/mutex.h>
- #if defined(__alpha__) || defined(__powerpc__)
- #include <asm/pgtable.h> /* For pte_wrprotect */
-@@ -146,9 +147,24 @@
- #define DRM_MEM_CTXLIST 21
- #define DRM_MEM_MM 22
- #define DRM_MEM_HASHTAB 23
-+#define DRM_MEM_OBJECTS 24
-+#define DRM_MEM_FENCE 25
-+#define DRM_MEM_TTM 26
-+#define DRM_MEM_BUFOBJ 27
-
- #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
- #define DRM_MAP_HASH_OFFSET 0x10000000
-+#define DRM_MAP_HASH_ORDER 12
-+#define DRM_OBJECT_HASH_ORDER 12
-+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-+/*
-+ * This should be small enough to allow the use of kmalloc for hash tables
-+ * instead of vmalloc.
-+ */
-+
-+#define DRM_FILE_HASH_ORDER 8
-+#define DRM_MM_INIT_MAX_PAGES 256
-
- /*@}*/
-
-@@ -376,6 +392,14 @@
- struct drm_freelist freelist;
- };
-
-+
-+enum drm_ref_type {
-+ _DRM_REF_USE = 0,
-+ _DRM_REF_TYPE1,
-+ _DRM_NO_REF_TYPES
-+};
-+
-+
- /** File private data */
- struct drm_file {
- int authenticated;
-@@ -388,12 +412,26 @@
- struct drm_minor *minor;
- int remove_auth_on_close;
- unsigned long lock_count;
-+
- /** Mapping of mm object handles to object pointers. */
- struct idr object_idr;
- /** Lock for synchronization of access to object_idr. */
- spinlock_t table_lock;
-+
-+ /*
-+ * The user object hash table is global and resides in the
-+ * drm_device structure. We protect the lists and hash tables with the
-+ * device struct_mutex. A bit coarse-grained but probably the best
-+ * option.
-+ */
-+
-+ struct list_head refd_objects;
-+
-+ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
- struct file *filp;
- void *driver_priv;
-+
-+ struct list_head fbs;
- };
-
- /** Wait queue */
-@@ -523,6 +561,7 @@
- struct drm_hash_item hash;
- struct drm_map *map; /**< mapping */
- uint64_t user_token;
-+ struct drm_mm_node *file_offset_node;
- };
-
- typedef struct drm_map drm_local_map_t;
-@@ -612,6 +651,11 @@
- void *driver_private;
- };
-
-+
-+#include "drm_objects.h"
-+#include "drm_edid.h"
-+#include "drm_crtc.h"
-+
- /**
- * DRM driver structure. This structure represent the common code for
- * a family of cards. There will one drm_device for each card present
-@@ -637,50 +681,8 @@
- void (*kernel_context_switch_unlock) (struct drm_device *dev);
- int (*dri_library_name) (struct drm_device *dev, char *buf);
-
-- /**
-- * get_vblank_counter - get raw hardware vblank counter
-- * @dev: DRM device
-- * @crtc: counter to fetch
-- *
-- * Driver callback for fetching a raw hardware vblank counter
-- * for @crtc. If a device doesn't have a hardware counter, the
-- * driver can simply return the value of drm_vblank_count and
-- * make the enable_vblank() and disable_vblank() hooks into no-ops,
-- * leaving interrupts enabled at all times.
-- *
-- * Wraparound handling and loss of events due to modesetting is dealt
-- * with in the DRM core code.
-- *
-- * RETURNS
-- * Raw vblank counter value.
-- */
-- u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
--
-- /**
-- * enable_vblank - enable vblank interrupt events
-- * @dev: DRM device
-- * @crtc: which irq to enable
-- *
-- * Enable vblank interrupts for @crtc. If the device doesn't have
-- * a hardware vblank counter, this routine should be a no-op, since
-- * interrupts will have to stay on to keep the count accurate.
-- *
-- * RETURNS
-- * Zero on success, appropriate errno if the given @crtc's vblank
-- * interrupt cannot be enabled.
-- */
-- int (*enable_vblank) (struct drm_device *dev, int crtc);
--
-- /**
-- * disable_vblank - disable vblank interrupt events
-- * @dev: DRM device
-- * @crtc: which irq to enable
-- *
-- * Disable vblank interrupts for @crtc. If the device doesn't have
-- * a hardware vblank counter, this routine should be a no-op, since
-- * interrupts will have to stay on to keep the count accurate.
-- */
-- void (*disable_vblank) (struct drm_device *dev, int crtc);
-+ int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
-+ int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
-
- /**
- * Called by \c drm_device_is_agp. Typically used to determine if a
-@@ -715,6 +717,13 @@
- int (*proc_init)(struct drm_minor *minor);
- void (*proc_cleanup)(struct drm_minor *minor);
-
-+ /* FB routines, if present */
-+ int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
-+ int (*fb_remove)(struct drm_device *dev, struct drm_crtc *crtc);
-+
-+ struct drm_fence_driver *fence_driver;
-+ struct drm_bo_driver *bo_driver;
-+
- /**
- * Driver-specific constructor for drm_gem_objects, to set up
- * obj->driver_private.
-@@ -800,6 +809,10 @@
- struct list_head maplist; /**< Linked list of regions */
- int map_count; /**< Number of mappable regions */
- struct drm_open_hash map_hash; /**< User token hash table for maps */
-+ struct drm_mm offset_manager; /**< User token manager */
-+ struct drm_open_hash object_hash; /**< User token hash table for objects */
-+ struct address_space *dev_mapping; /**< For unmap_mapping_range() */
-+ struct page *ttm_dummy_page;
-
- /** \name Context handle management */
- /*@{ */
-@@ -848,20 +861,13 @@
- */
- int vblank_disable_allowed;
-
-- wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
-- atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
-+ wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
-+ atomic_t vbl_received;
-+ atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
- spinlock_t vbl_lock;
-- struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
-- atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
-- atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */
-- u32 *last_vblank; /* protected by dev->vbl_lock, used */
-- /* for wraparound handling */
-- int *vblank_enabled; /* so we don't call enable more than
-- once per disable */
-- int *vblank_inmodeset; /* Display driver is setting mode */
-- struct timer_list vblank_disable_timer;
--
-- u32 max_vblank_count; /**< size of vblank counter register */
-+ struct list_head vbl_sigs; /**< signal list to send on VBLANK */
-+ struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
-+ unsigned int vbl_pending;
- spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
- void (*locked_tasklet_func)(struct drm_device *dev);
-
-@@ -892,12 +898,18 @@
- unsigned int agp_buffer_token;
- struct drm_minor *primary; /**< render type primary screen head */
-
-+ struct drm_fence_manager fm;
-+ struct drm_buffer_manager bm;
-+
- /** \name Drawable information */
- /*@{ */
- spinlock_t drw_lock;
- struct idr drw_idr;
- /*@} */
-
-+ /* DRM mode setting */
-+ struct drm_mode_config mode_config;
-+
- /** \name GEM information */
- /*@{ */
- spinlock_t object_name_lock;
-@@ -915,6 +927,27 @@
-
- };
-
-+#if __OS_HAS_AGP
-+struct drm_agp_ttm_backend {
-+ struct drm_ttm_backend backend;
-+ DRM_AGP_MEM *mem;
-+ struct agp_bridge_data *bridge;
-+ int populated;
-+};
-+#endif
-+
-+typedef struct ati_pcigart_ttm_backend {
-+ struct drm_ttm_backend backend;
-+ int populated;
-+ void (*gart_flush_fn)(struct drm_device *dev);
-+ struct drm_ati_pcigart_info *gart_info;
-+ unsigned long offset;
-+ struct page **pages;
-+ int num_pages;
-+ int bound;
-+ struct drm_device *dev;
-+} ati_pcigart_ttm_backend_t;
-+
- static __inline__ int drm_core_check_feature(struct drm_device *dev,
- int feature)
- {
-@@ -979,8 +1012,12 @@
- /*@{*/
-
- /* Driver support (drm_drv.h) */
--extern int drm_init(struct drm_driver *driver);
-+extern int drm_init(struct drm_driver *driver,
-+ struct pci_device_id *pciidlist);
- extern void drm_exit(struct drm_driver *driver);
-+extern void drm_cleanup_pci(struct pci_dev *pdev);
-+extern void drm_vbl_send_signals(struct drm_device *dev);
-+extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
- extern int drm_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
- extern long drm_compat_ioctl(struct file *filp,
-Index: linux-2.6.27/include/drm/drm_pciids.h
-===================================================================
---- linux-2.6.27.orig/include/drm/drm_pciids.h 2008-10-09 23:13:53.000000000 +0100
-+++ linux-2.6.27/include/drm/drm_pciids.h 2009-02-05 13:29:33.000000000 +0000
-@@ -413,3 +413,9 @@
- {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-+
-+#define psb_PCI_IDS \
-+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
-+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
-+ {0, 0, 0}
-+
-Index: linux-2.6.27/drivers/gpu/drm/Makefile
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/Makefile 2009-02-05 13:29:29.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/Makefile 2009-02-05 13:29:33.000000000 +0000
-@@ -9,11 +9,14 @@
- drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
-- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
-+ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
-+ drm_fence.o drm_object.o drm_crtc.o drm_ttm.o drm_bo.o \
-+ drm_bo_lock.o drm_bo_move.o drm_edid.o drm_modes.o drm_regman.o
-
- drm-$(CONFIG_COMPAT) += drm_ioc32.o
-
- obj-$(CONFIG_DRM) += drm.o
-+obj-$(CONFIG_DRM_PSB) += psb/
- obj-$(CONFIG_DRM_TDFX) += tdfx/
- obj-$(CONFIG_DRM_R128) += r128/
- obj-$(CONFIG_DRM_RADEON)+= radeon/
-@@ -24,4 +27,3 @@
- obj-$(CONFIG_DRM_SIS) += sis/
- obj-$(CONFIG_DRM_SAVAGE)+= savage/
- obj-$(CONFIG_DRM_VIA) +=via/
--
-Index: linux-2.6.27/drivers/gpu/drm/drm_agpsupport.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_agpsupport.c 2009-02-05 13:29:29.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_agpsupport.c 2009-02-05 13:29:33.000000000 +0000
-@@ -453,47 +453,158 @@
- return agp_unbind_memory(handle);
- }
-
--/**
-- * Binds a collection of pages into AGP memory at the given offset, returning
-- * the AGP memory structure containing them.
-- *
-- * No reference is held on the pages during this time -- it is up to the
-- * caller to handle that.
-+
-+
-+/*
-+ * AGP ttm backend interface.
- */
--DRM_AGP_MEM *
--drm_agp_bind_pages(struct drm_device *dev,
-- struct page **pages,
-- unsigned long num_pages,
-- uint32_t gtt_offset)
-+
-+#ifndef AGP_USER_TYPES
-+#define AGP_USER_TYPES (1 << 16)
-+#define AGP_USER_MEMORY (AGP_USER_TYPES)
-+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
-+#endif
-+#define AGP_REQUIRED_MAJOR 0
-+#define AGP_REQUIRED_MINOR 102
-+
-+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
- {
-+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
-+}
-+
-+
-+static int drm_agp_populate(struct drm_ttm_backend *backend,
-+ unsigned long num_pages, struct page **pages)
-+{
-+ struct drm_agp_ttm_backend *agp_be =
-+ container_of(backend, struct drm_agp_ttm_backend, backend);
-+ struct page **cur_page, **last_page = pages + num_pages;
- DRM_AGP_MEM *mem;
-- int ret, i;
-
-- DRM_DEBUG("\n");
-+ DRM_DEBUG("drm_agp_populate_ttm\n");
-+ mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
-+ if (!mem)
-+ return -ENOMEM;
-+
-+ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
-+ mem->page_count = 0;
-+ for (cur_page = pages; cur_page < last_page; ++cur_page)
-+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
-+ agp_be->mem = mem;
-+ return 0;
-+}
-+
-+static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
-+ struct drm_bo_mem_reg *bo_mem)
-+{
-+ struct drm_agp_ttm_backend *agp_be =
-+ container_of(backend, struct drm_agp_ttm_backend, backend);
-+ DRM_AGP_MEM *mem = agp_be->mem;
-+ int ret;
-+ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
-+
-+ DRM_DEBUG("drm_agp_bind_ttm\n");
-+ mem->is_flushed = 1;
-+ mem->type = AGP_USER_MEMORY;
-+ /* CACHED MAPPED implies not snooped memory */
-+ if (snooped)
-+ mem->type = AGP_USER_CACHED_MEMORY;
-+
-+ ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
-+ if (ret)
-+ DRM_ERROR("AGP Bind memory failed\n");
-+
-+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
-+ DRM_BE_FLAG_BOUND_CACHED : 0,
-+ DRM_BE_FLAG_BOUND_CACHED);
-+ return ret;
-+}
-+
-+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
-+{
-+ struct drm_agp_ttm_backend *agp_be =
-+ container_of(backend, struct drm_agp_ttm_backend, backend);
-+
-+ DRM_DEBUG("drm_agp_unbind_ttm\n");
-+ if (agp_be->mem->is_bound)
-+ return drm_agp_unbind_memory(agp_be->mem);
-+ else
-+ return 0;
-+}
-+
-+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
-+{
-+ struct drm_agp_ttm_backend *agp_be =
-+ container_of(backend, struct drm_agp_ttm_backend, backend);
-+ DRM_AGP_MEM *mem = agp_be->mem;
-+
-+ DRM_DEBUG("drm_agp_clear_ttm\n");
-+ if (mem) {
-+ backend->func->unbind(backend);
-+ agp_free_memory(mem);
-+ }
-+ agp_be->mem = NULL;
-+}
-+
-+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
-+{
-+ struct drm_agp_ttm_backend *agp_be;
-+
-+ if (backend) {
-+ DRM_DEBUG("drm_agp_destroy_ttm\n");
-+ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
-+ if (agp_be && agp_be->mem)
-+ backend->func->clear(backend);
-+ }
-+}
-+
-+static struct drm_ttm_backend_func agp_ttm_backend = {
-+ .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
-+ .populate = drm_agp_populate,
-+ .clear = drm_agp_clear_ttm,
-+ .bind = drm_agp_bind_ttm,
-+ .unbind = drm_agp_unbind_ttm,
-+ .destroy = drm_agp_destroy_ttm,
-+};
-
-- mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
-- AGP_USER_MEMORY);
-- if (mem == NULL) {
-- DRM_ERROR("Failed to allocate memory for %ld pages\n",
-- num_pages);
-+struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
-+{
-+
-+ struct drm_agp_ttm_backend *agp_be;
-+ struct agp_kern_info *info;
-+
-+ if (!dev->agp) {
-+ DRM_ERROR("AGP is not initialized.\n");
- return NULL;
- }
-+ info = &dev->agp->agp_info;
-
-- for (i = 0; i < num_pages; i++)
-- mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
-- mem->page_count = num_pages;
--
-- mem->is_flushed = true;
-- ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
-- if (ret != 0) {
-- DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
-- agp_free_memory(mem);
-+ if (info->version.major != AGP_REQUIRED_MAJOR ||
-+ info->version.minor < AGP_REQUIRED_MINOR) {
-+ DRM_ERROR("Wrong agpgart version %d.%d\n"
-+ "\tYou need at least version %d.%d.\n",
-+ info->version.major,
-+ info->version.minor,
-+ AGP_REQUIRED_MAJOR,
-+ AGP_REQUIRED_MINOR);
- return NULL;
- }
-
-- return mem;
-+
-+ agp_be = drm_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
-+ if (!agp_be)
-+ return NULL;
-+
-+ agp_be->mem = NULL;
-+
-+ agp_be->bridge = dev->agp->bridge;
-+ agp_be->populated = 0;
-+ agp_be->backend.func = &agp_ttm_backend;
-+ agp_be->backend.dev = dev;
-+
-+ return &agp_be->backend;
- }
--EXPORT_SYMBOL(drm_agp_bind_pages);
-+EXPORT_SYMBOL(drm_agp_init_ttm);
-
- void drm_agp_chipset_flush(struct drm_device *dev)
- {
-Index: linux-2.6.27/drivers/gpu/drm/drm_bo.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_bo.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,2660 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+/*
-+ * Locking may look a bit complicated but isn't really:
-+ *
-+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
-+ * when there is a chance that it can be zero before or after the operation.
-+ *
-+ * dev->struct_mutex also protects all lists and list heads,
-+ * Hash tables and hash heads.
-+ *
-+ * bo->mutex protects the buffer object itself excluding the usage field.
-+ * bo->mutex does also protect the buffer list heads, so to manipulate those,
-+ * we need both the bo->mutex and the dev->struct_mutex.
-+ *
-+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
-+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
-+ * the list traversal will, in general, need to be restarted.
-+ *
-+ */
-+
-+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
-+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
-+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
-+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
-+
-+static inline uint64_t drm_bo_type_flags(unsigned type)
-+{
-+ return (1ULL << (24 + type));
-+}
-+
-+/*
-+ * bo locked. dev->struct_mutex locked.
-+ */
-+
-+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
-+{
-+ struct drm_mem_type_manager *man;
-+
-+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-+ DRM_ASSERT_LOCKED(&bo->mutex);
-+
-+ man = &bo->dev->bm.man[bo->pinned_mem_type];
-+ list_add_tail(&bo->pinned_lru, &man->pinned);
-+}
-+
-+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
-+{
-+ struct drm_mem_type_manager *man;
-+
-+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-+
-+ if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
-+ || bo->mem.mem_type != bo->pinned_mem_type) {
-+ man = &bo->dev->bm.man[bo->mem.mem_type];
-+ list_add_tail(&bo->lru, &man->lru);
-+ } else {
-+ INIT_LIST_HEAD(&bo->lru);
-+ }
-+}
-+
-+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
-+{
-+#ifdef DRM_ODD_MM_COMPAT
-+ int ret;
-+
-+ if (!bo->map_list.map)
-+ return 0;
-+
-+ ret = drm_bo_lock_kmm(bo);
-+ if (ret)
-+ return ret;
-+ drm_bo_unmap_virtual(bo);
-+ if (old_is_pci)
-+ drm_bo_finish_unmap(bo);
-+#else
-+ if (!bo->map_list.map)
-+ return 0;
-+
-+ drm_bo_unmap_virtual(bo);
-+#endif
-+ return 0;
-+}
-+
-+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
-+{
-+#ifdef DRM_ODD_MM_COMPAT
-+ int ret;
-+
-+ if (!bo->map_list.map)
-+ return;
-+
-+ ret = drm_bo_remap_bound(bo);
-+ if (ret) {
-+ DRM_ERROR("Failed to remap a bound buffer object.\n"
-+ "\tThis might cause a sigbus later.\n");
-+ }
-+ drm_bo_unlock_kmm(bo);
-+#endif
-+}
-+
-+/*
-+ * Call bo->mutex locked.
-+ */
-+
-+static int drm_bo_add_ttm(struct drm_buffer_object *bo)
-+{
-+ struct drm_device *dev = bo->dev;
-+ int ret = 0;
-+
-+ DRM_ASSERT_LOCKED(&bo->mutex);
-+ bo->ttm = NULL;
-+
-+ switch (bo->type) {
-+ case drm_bo_type_dc:
-+ case drm_bo_type_kernel:
-+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
-+ if (!bo->ttm)
-+ ret = -ENOMEM;
-+ break;
-+ case drm_bo_type_user:
-+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
-+ if (!bo->ttm)
-+ ret = -ENOMEM;
-+
-+ ret = drm_ttm_set_user(bo->ttm, current,
-+ bo->mem.mask & DRM_BO_FLAG_WRITE,
-+ bo->buffer_start,
-+ bo->num_pages,
-+ dev->bm.dummy_read_page);
-+ if (ret)
-+ return ret;
-+
-+ break;
-+ default:
-+ DRM_ERROR("Illegal buffer object type\n");
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
-+ struct drm_bo_mem_reg *mem,
-+ int evict, int no_wait)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
-+ int new_is_pci = drm_mem_reg_is_pci(dev, mem);
-+ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
-+ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
-+ int ret = 0;
-+
-+ if (old_is_pci || new_is_pci ||
-+ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
-+ ret = drm_bo_vm_pre_move(bo, old_is_pci);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Create and bind a ttm if required.
-+ */
-+
-+ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
-+ ret = drm_bo_add_ttm(bo);
-+ if (ret)
-+ goto out_err;
-+
-+ if (mem->mem_type != DRM_BO_MEM_LOCAL) {
-+ ret = drm_bind_ttm(bo->ttm, mem);
-+ if (ret)
-+ goto out_err;
-+ }
-+
-+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
-+
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ uint64_t save_flags = old_mem->flags;
-+ uint64_t save_mask = old_mem->mask;
-+
-+ *old_mem = *mem;
-+ mem->mm_node = NULL;
-+ old_mem->mask = save_mask;
-+ DRM_FLAG_MASKED(save_flags, mem->flags,
-+ DRM_BO_MASK_MEMTYPE);
-+ goto moved;
-+ }
-+
-+ }
-+
-+ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
-+ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
-+
-+ ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
-+
-+ } else if (dev->driver->bo_driver->move) {
-+ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
-+
-+ } else {
-+
-+ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
-+
-+ }
-+
-+ if (ret)
-+ goto out_err;
-+
-+moved:
-+ if (old_is_pci || new_is_pci)
-+ drm_bo_vm_post_move(bo);
-+
-+ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
-+ ret =
-+ dev->driver->bo_driver->invalidate_caches(dev,
-+ bo->mem.flags);
-+ if (ret)
-+ DRM_ERROR("Can not flush read caches\n");
-+ }
-+
-+ DRM_FLAG_MASKED(bo->priv_flags,
-+ (evict) ? _DRM_BO_FLAG_EVICTED : 0,
-+ _DRM_BO_FLAG_EVICTED);
-+
-+ if (bo->mem.mm_node)
-+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
-+ bm->man[bo->mem.mem_type].gpu_offset;
-+
-+
-+ return 0;
-+
-+out_err:
-+ if (old_is_pci || new_is_pci)
-+ drm_bo_vm_post_move(bo);
-+
-+ new_man = &bm->man[bo->mem.mem_type];
-+ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
-+ drm_ttm_unbind(bo->ttm);
-+ drm_destroy_ttm(bo->ttm);
-+ bo->ttm = NULL;
-+ }
-+
-+ return ret;
-+}
-+
-+/*
-+ * Call bo->mutex locked.
-+ * Wait until the buffer is idle.
-+ */
-+
-+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
-+ int no_wait)
-+{
-+ int ret;
-+
-+ DRM_ASSERT_LOCKED(&bo->mutex);
-+
-+ if (bo->fence) {
-+ if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ return 0;
-+ }
-+ if (no_wait)
-+ return -EBUSY;
-+
-+ ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
-+ bo->fence_type);
-+ if (ret)
-+ return ret;
-+
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_wait);
-+
-+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+
-+ if (bo->fence) {
-+ if (bm->nice_mode) {
-+ unsigned long _end = jiffies + 3 * DRM_HZ;
-+ int ret;
-+ do {
-+ ret = drm_bo_wait(bo, 0, 1, 0);
-+ if (ret && allow_errors)
-+ return ret;
-+
-+ } while (ret && !time_after_eq(jiffies, _end));
-+
-+ if (bo->fence) {
-+ bm->nice_mode = 0;
-+ DRM_ERROR("Detected GPU lockup or "
-+ "fence driver was taken down. "
-+ "Evicting buffer.\n");
-+ }
-+ }
-+ if (bo->fence)
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Call dev->struct_mutex locked.
-+ * Attempts to remove all private references to a buffer by expiring its
-+ * fence object and removing from lru lists and memory managers.
-+ */
-+
-+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+
-+ atomic_inc(&bo->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_lock(&bo->mutex);
-+
-+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-+
-+ if (bo->fence && drm_fence_object_signaled(bo->fence,
-+ bo->fence_type))
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+
-+ if (bo->fence && remove_all)
-+ (void)drm_bo_expire_fence(bo, 0);
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (!atomic_dec_and_test(&bo->usage))
-+ goto out;
-+
-+ if (!bo->fence) {
-+ list_del_init(&bo->lru);
-+ if (bo->mem.mm_node) {
-+ drm_mm_put_block(bo->mem.mm_node);
-+ if (bo->pinned_node == bo->mem.mm_node)
-+ bo->pinned_node = NULL;
-+ bo->mem.mm_node = NULL;
-+ }
-+ list_del_init(&bo->pinned_lru);
-+ if (bo->pinned_node) {
-+ drm_mm_put_block(bo->pinned_node);
-+ bo->pinned_node = NULL;
-+ }
-+ list_del_init(&bo->ddestroy);
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_destroy_locked(bo);
-+ return;
-+ }
-+
-+ if (list_empty(&bo->ddestroy)) {
-+ drm_fence_object_flush(bo->fence, bo->fence_type);
-+ list_add_tail(&bo->ddestroy, &bm->ddestroy);
-+ schedule_delayed_work(&bm->wq,
-+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
-+ }
-+
-+out:
-+ mutex_unlock(&bo->mutex);
-+ return;
-+}
-+
-+static void drm_bo_unreserve_size(unsigned long size)
-+{
-+ //drm_free_memctl(size);
-+}
-+
-+/*
-+ * Verify that refcount is 0 and that there are no internal references
-+ * to the buffer object. Then destroy it.
-+ */
-+
-+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ unsigned long reserved_size;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+
-+ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
-+ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
-+ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
-+ if (bo->fence != NULL) {
-+ DRM_ERROR("Fence was non-zero.\n");
-+ drm_bo_cleanup_refs(bo, 0);
-+ return;
-+ }
-+
-+#ifdef DRM_ODD_MM_COMPAT
-+ BUG_ON(!list_empty(&bo->vma_list));
-+ BUG_ON(!list_empty(&bo->p_mm_list));
-+#endif
-+
-+ if (bo->ttm) {
-+ drm_ttm_unbind(bo->ttm);
-+ drm_destroy_ttm(bo->ttm);
-+ bo->ttm = NULL;
-+ }
-+
-+ atomic_dec(&bm->count);
-+
-+ reserved_size = bo->reserved_size;
-+
-+ drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
-+ drm_bo_unreserve_size(reserved_size);
-+
-+ return;
-+ }
-+
-+ /*
-+ * Some stuff is still trying to reference the buffer object.
-+ * Get rid of those references.
-+ */
-+
-+ drm_bo_cleanup_refs(bo, 0);
-+
-+ return;
-+}
-+
-+/*
-+ * Call dev->struct_mutex locked.
-+ */
-+
-+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+
-+ struct drm_buffer_object *entry, *nentry;
-+ struct list_head *list, *next;
-+
-+ list_for_each_safe(list, next, &bm->ddestroy) {
-+ entry = list_entry(list, struct drm_buffer_object, ddestroy);
-+
-+ nentry = NULL;
-+ if (next != &bm->ddestroy) {
-+ nentry = list_entry(next, struct drm_buffer_object,
-+ ddestroy);
-+ atomic_inc(&nentry->usage);
-+ }
-+
-+ drm_bo_cleanup_refs(entry, remove_all);
-+
-+ if (nentry)
-+ atomic_dec(&nentry->usage);
-+ }
-+}
-+
-+static void drm_bo_delayed_workqueue(struct work_struct *work)
-+{
-+ struct drm_buffer_manager *bm =
-+ container_of(work, struct drm_buffer_manager, wq.work);
-+ struct drm_device *dev = container_of(bm, struct drm_device, bm);
-+
-+ DRM_DEBUG("Delayed delete Worker\n");
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (!bm->initialized) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return;
-+ }
-+ drm_bo_delayed_delete(dev, 0);
-+ if (bm->initialized && !list_empty(&bm->ddestroy)) {
-+ schedule_delayed_work(&bm->wq,
-+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
-+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
-+{
-+ struct drm_buffer_object *tmp_bo = *bo;
-+ bo = NULL;
-+
-+ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
-+
-+ if (atomic_dec_and_test(&tmp_bo->usage))
-+ drm_bo_destroy_locked(tmp_bo);
-+}
-+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
-+
-+static void drm_bo_base_deref_locked(struct drm_file *file_priv,
-+ struct drm_user_object *uo)
-+{
-+ struct drm_buffer_object *bo =
-+ drm_user_object_entry(uo, struct drm_buffer_object, base);
-+
-+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-+
-+ drm_bo_takedown_vm_locked(bo);
-+ drm_bo_usage_deref_locked(&bo);
-+}
-+
-+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
-+{
-+ struct drm_buffer_object *tmp_bo = *bo;
-+ struct drm_device *dev = tmp_bo->dev;
-+
-+ *bo = NULL;
-+ if (atomic_dec_and_test(&tmp_bo->usage)) {
-+ mutex_lock(&dev->struct_mutex);
-+ if (atomic_read(&tmp_bo->usage) == 0)
-+ drm_bo_destroy_locked(tmp_bo);
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+}
-+EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
-+
-+void drm_putback_buffer_objects(struct drm_device *dev)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct list_head *list = &bm->unfenced;
-+ struct drm_buffer_object *entry, *next;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ list_for_each_entry_safe(entry, next, list, lru) {
-+ atomic_inc(&entry->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ mutex_lock(&entry->mutex);
-+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-+ mutex_lock(&dev->struct_mutex);
-+
-+ list_del_init(&entry->lru);
-+ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-+ wake_up_all(&entry->event_queue);
-+
-+ /*
-+ * FIXME: Might want to put back on head of list
-+ * instead of tail here.
-+ */
-+
-+ drm_bo_add_to_lru(entry);
-+ mutex_unlock(&entry->mutex);
-+ drm_bo_usage_deref_locked(&entry);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+EXPORT_SYMBOL(drm_putback_buffer_objects);
-+
-+
-+/*
-+ * Note. The caller has to register (if applicable)
-+ * and deregister fence object usage.
-+ */
-+
-+int drm_fence_buffer_objects(struct drm_device *dev,
-+ struct list_head *list,
-+ uint32_t fence_flags,
-+ struct drm_fence_object *fence,
-+ struct drm_fence_object **used_fence)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_buffer_object *entry;
-+ uint32_t fence_type = 0;
-+ uint32_t fence_class = ~0;
-+ int count = 0;
-+ int ret = 0;
-+ struct list_head *l;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (!list)
-+ list = &bm->unfenced;
-+
-+ if (fence)
-+ fence_class = fence->fence_class;
-+
-+ list_for_each_entry(entry, list, lru) {
-+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-+ fence_type |= entry->new_fence_type;
-+ if (fence_class == ~0)
-+ fence_class = entry->new_fence_class;
-+ else if (entry->new_fence_class != fence_class) {
-+ DRM_ERROR("Unmatching fence classes on unfenced list: "
-+ "%d and %d.\n",
-+ fence_class,
-+ entry->new_fence_class);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ count++;
-+ }
-+
-+ if (!count) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (fence) {
-+ if ((fence_type & fence->type) != fence_type ||
-+ (fence->fence_class != fence_class)) {
-+ DRM_ERROR("Given fence doesn't match buffers "
-+ "on unfenced list.\n");
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ } else {
-+ mutex_unlock(&dev->struct_mutex);
-+ ret = drm_fence_object_create(dev, fence_class, fence_type,
-+ fence_flags | DRM_FENCE_FLAG_EMIT,
-+ &fence);
-+ mutex_lock(&dev->struct_mutex);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ count = 0;
-+ l = list->next;
-+ while (l != list) {
-+ prefetch(l->next);
-+ entry = list_entry(l, struct drm_buffer_object, lru);
-+ atomic_inc(&entry->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_lock(&entry->mutex);
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(l);
-+ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-+ count++;
-+ if (entry->fence)
-+ drm_fence_usage_deref_locked(&entry->fence);
-+ entry->fence = drm_fence_reference_locked(fence);
-+ entry->fence_class = entry->new_fence_class;
-+ entry->fence_type = entry->new_fence_type;
-+ DRM_FLAG_MASKED(entry->priv_flags, 0,
-+ _DRM_BO_FLAG_UNFENCED);
-+ wake_up_all(&entry->event_queue);
-+ drm_bo_add_to_lru(entry);
-+ }
-+ mutex_unlock(&entry->mutex);
-+ drm_bo_usage_deref_locked(&entry);
-+ l = list->next;
-+ }
-+ DRM_DEBUG("Fenced %d buffers\n", count);
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ *used_fence = fence;
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_fence_buffer_objects);
-+
-+/*
-+ * bo->mutex locked
-+ */
-+
-+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
-+ int no_wait)
-+{
-+ int ret = 0;
-+ struct drm_device *dev = bo->dev;
-+ struct drm_bo_mem_reg evict_mem;
-+
-+ /*
-+ * Someone might have modified the buffer before we took the
-+ * buffer mutex.
-+ */
-+
-+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
-+ goto out;
-+ if (bo->mem.mem_type != mem_type)
-+ goto out;
-+
-+ ret = drm_bo_wait(bo, 0, 0, no_wait);
-+
-+ if (ret && ret != -EAGAIN) {
-+ DRM_ERROR("Failed to expire fence before "
-+ "buffer eviction.\n");
-+ goto out;
-+ }
-+
-+ evict_mem = bo->mem;
-+ evict_mem.mm_node = NULL;
-+
-+ evict_mem = bo->mem;
-+ evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
-+ ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
-+
-+ if (ret) {
-+ if (ret != -EAGAIN)
-+ DRM_ERROR("Failed to find memory space for "
-+ "buffer 0x%p eviction.\n", bo);
-+ goto out;
-+ }
-+
-+ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
-+
-+ if (ret) {
-+ if (ret != -EAGAIN)
-+ DRM_ERROR("Buffer eviction failed\n");
-+ goto out;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (evict_mem.mm_node) {
-+ if (evict_mem.mm_node != bo->pinned_node)
-+ drm_mm_put_block(evict_mem.mm_node);
-+ evict_mem.mm_node = NULL;
-+ }
-+ list_del(&bo->lru);
-+ drm_bo_add_to_lru(bo);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
-+ _DRM_BO_FLAG_EVICTED);
-+
-+out:
-+ return ret;
-+}
-+
-+/**
-+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
-+ * space, or we've evicted everything and there isn't enough space.
-+ */
-+static int drm_bo_mem_force_space(struct drm_device *dev,
-+ struct drm_bo_mem_reg *mem,
-+ uint32_t mem_type, int no_wait)
-+{
-+ struct drm_mm_node *node;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_buffer_object *entry;
-+ struct drm_mem_type_manager *man = &bm->man[mem_type];
-+ struct list_head *lru;
-+ unsigned long num_pages = mem->num_pages;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ do {
-+ node = drm_mm_search_free(&man->manager, num_pages,
-+ mem->page_alignment, 1);
-+ if (node)
-+ break;
-+
-+ lru = &man->lru;
-+ if (lru->next == lru)
-+ break;
-+
-+ entry = list_entry(lru->next, struct drm_buffer_object, lru);
-+ atomic_inc(&entry->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_lock(&entry->mutex);
-+ BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
-+
-+ ret = drm_bo_evict(entry, mem_type, no_wait);
-+ mutex_unlock(&entry->mutex);
-+ drm_bo_usage_deref_unlocked(&entry);
-+ if (ret)
-+ return ret;
-+ mutex_lock(&dev->struct_mutex);
-+ } while (1);
-+
-+ if (!node) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -ENOMEM;
-+ }
-+
-+ node = drm_mm_get_block(node, num_pages, mem->page_alignment);
-+ if (!node) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -ENOMEM;
-+ }
-+
-+ mutex_unlock(&dev->struct_mutex);
-+ mem->mm_node = node;
-+ mem->mem_type = mem_type;
-+ return 0;
-+}
-+
-+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
-+ int disallow_fixed,
-+ uint32_t mem_type,
-+ uint64_t mask, uint32_t *res_mask)
-+{
-+ uint64_t cur_flags = drm_bo_type_flags(mem_type);
-+ uint64_t flag_diff;
-+
-+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
-+ return 0;
-+ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
-+ cur_flags |= DRM_BO_FLAG_CACHED;
-+ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
-+ cur_flags |= DRM_BO_FLAG_MAPPABLE;
-+ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
-+ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
-+
-+ if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
-+ return 0;
-+
-+ if (mem_type == DRM_BO_MEM_LOCAL) {
-+ *res_mask = cur_flags;
-+ return 1;
-+ }
-+
-+ flag_diff = (mask ^ cur_flags);
-+ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
-+ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
-+
-+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-+ (!(mask & DRM_BO_FLAG_CACHED) ||
-+ (mask & DRM_BO_FLAG_FORCE_CACHING)))
-+ return 0;
-+
-+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
-+ ((mask & DRM_BO_FLAG_MAPPABLE) ||
-+ (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
-+ return 0;
-+
-+ *res_mask = cur_flags;
-+ return 1;
-+}
-+
-+/**
-+ * Creates space for memory region @mem according to its type.
-+ *
-+ * This function first searches for free space in compatible memory types in
-+ * the priority order defined by the driver. If free space isn't found, then
-+ * drm_bo_mem_force_space is attempted in priority order to evict and find
-+ * space.
-+ */
-+int drm_bo_mem_space(struct drm_buffer_object *bo,
-+ struct drm_bo_mem_reg *mem, int no_wait)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man;
-+
-+ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
-+ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
-+ uint32_t i;
-+ uint32_t mem_type = DRM_BO_MEM_LOCAL;
-+ uint32_t cur_flags;
-+ int type_found = 0;
-+ int type_ok = 0;
-+ int has_eagain = 0;
-+ struct drm_mm_node *node = NULL;
-+ int ret;
-+
-+ mem->mm_node = NULL;
-+ for (i = 0; i < num_prios; ++i) {
-+ mem_type = prios[i];
-+ man = &bm->man[mem_type];
-+
-+ type_ok = drm_bo_mt_compatible(man,
-+ bo->type == drm_bo_type_user,
-+ mem_type, mem->mask,
-+ &cur_flags);
-+
-+ if (!type_ok)
-+ continue;
-+
-+ if (mem_type == DRM_BO_MEM_LOCAL)
-+ break;
-+
-+ if ((mem_type == bo->pinned_mem_type) &&
-+ (bo->pinned_node != NULL)) {
-+ node = bo->pinned_node;
-+ break;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (man->has_type && man->use_type) {
-+ type_found = 1;
-+ node = drm_mm_search_free(&man->manager, mem->num_pages,
-+ mem->page_alignment, 1);
-+ if (node)
-+ node = drm_mm_get_block(node, mem->num_pages,
-+ mem->page_alignment);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ if (node)
-+ break;
-+ }
-+
-+ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
-+ mem->mm_node = node;
-+ mem->mem_type = mem_type;
-+ mem->flags = cur_flags;
-+ return 0;
-+ }
-+
-+ if (!type_found)
-+ return -EINVAL;
-+
-+ num_prios = dev->driver->bo_driver->num_mem_busy_prio;
-+ prios = dev->driver->bo_driver->mem_busy_prio;
-+
-+ for (i = 0; i < num_prios; ++i) {
-+ mem_type = prios[i];
-+ man = &bm->man[mem_type];
-+
-+ if (!man->has_type)
-+ continue;
-+
-+ if (!drm_bo_mt_compatible(man,
-+ bo->type == drm_bo_type_user,
-+ mem_type,
-+ mem->mask,
-+ &cur_flags))
-+ continue;
-+
-+ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
-+
-+ if (ret == 0 && mem->mm_node) {
-+ mem->flags = cur_flags;
-+ return 0;
-+ }
-+
-+ if (ret == -EAGAIN)
-+ has_eagain = 1;
-+ }
-+
-+ ret = (has_eagain) ? -EAGAIN : -ENOMEM;
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_mem_space);
-+
-+static int drm_bo_new_mask(struct drm_buffer_object *bo,
-+ uint64_t new_flags, uint64_t used_mask)
-+{
-+ uint32_t new_props;
-+
-+ if (bo->type == drm_bo_type_user &&
-+ ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
-+ (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
-+ DRM_ERROR("User buffers require cache-coherent memory.\n");
-+ return -EINVAL;
-+ }
-+
-+ if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
-+ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
-+ return -EPERM;
-+ }
-+
-+ if (likely(used_mask & DRM_BO_MASK_MEM) &&
-+ (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
-+ !DRM_SUSER(DRM_CURPROC)) {
-+ if (likely(bo->mem.flags & new_flags & used_mask &
-+ DRM_BO_MASK_MEM))
-+ new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
-+ (bo->mem.flags & DRM_BO_MASK_MEM);
-+ else {
-+ DRM_ERROR("Incompatible memory type specification "
-+ "for NO_EVICT buffer.\n");
-+ return -EPERM;
-+ }
-+ }
-+
-+ if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
-+ DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
-+ return -EPERM;
-+ }
-+
-+ new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
-+ DRM_BO_FLAG_READ);
-+
-+ if (!new_props) {
-+ DRM_ERROR("Invalid buffer object rwx properties\n");
-+ return -EINVAL;
-+ }
-+
-+ bo->mem.mask = new_flags;
-+ return 0;
-+}
-+
-+/*
-+ * Call dev->struct_mutex locked.
-+ */
-+
-+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
-+ uint32_t handle, int check_owner)
-+{
-+ struct drm_user_object *uo;
-+ struct drm_buffer_object *bo;
-+
-+ uo = drm_lookup_user_object(file_priv, handle);
-+
-+ if (!uo || (uo->type != drm_buffer_type)) {
-+ DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
-+ return NULL;
-+ }
-+
-+ if (check_owner && file_priv != uo->owner) {
-+ if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
-+ return NULL;
-+ }
-+
-+ bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-+ atomic_inc(&bo->usage);
-+ return bo;
-+}
-+EXPORT_SYMBOL(drm_lookup_buffer_object);
-+
-+/*
-+ * Call bo->mutex locked.
-+ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
-+ * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
-+ */
-+
-+static int drm_bo_quick_busy(struct drm_buffer_object *bo)
-+{
-+ struct drm_fence_object *fence = bo->fence;
-+
-+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ if (fence) {
-+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ return 0;
-+ }
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Call bo->mutex locked.
-+ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
-+ */
-+
-+static int drm_bo_busy(struct drm_buffer_object *bo)
-+{
-+ struct drm_fence_object *fence = bo->fence;
-+
-+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ if (fence) {
-+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ return 0;
-+ }
-+ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
-+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ return 0;
-+ }
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static int drm_bo_evict_cached(struct drm_buffer_object *bo)
-+{
-+ int ret = 0;
-+
-+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ if (bo->mem.mm_node)
-+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
-+ return ret;
-+}
-+
-+/*
-+ * Wait until a buffer is unmapped.
-+ */
-+
-+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
-+{
-+ int ret = 0;
-+
-+ if ((atomic_read(&bo->mapped) >= 0) && no_wait)
-+ return -EBUSY;
-+
-+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-+ atomic_read(&bo->mapped) == -1);
-+
-+ if (ret == -EINTR)
-+ ret = -EAGAIN;
-+
-+ return ret;
-+}
-+
-+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
-+{
-+ int ret;
-+
-+ mutex_lock(&bo->mutex);
-+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ mutex_unlock(&bo->mutex);
-+ return ret;
-+}
-+
-+/*
-+ * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
-+ * Until then, we cannot really do anything with it except delete it.
-+ */
-+
-+static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
-+ int eagain_if_wait)
-+{
-+ int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+
-+ if (ret && no_wait)
-+ return -EBUSY;
-+ else if (!ret)
-+ return 0;
-+
-+ ret = 0;
-+ mutex_unlock(&bo->mutex);
-+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-+ !drm_bo_check_unfenced(bo));
-+ mutex_lock(&bo->mutex);
-+ if (ret == -EINTR)
-+ return -EAGAIN;
-+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ if (ret) {
-+ DRM_ERROR("Timeout waiting for buffer to become fenced\n");
-+ return -EBUSY;
-+ }
-+ if (eagain_if_wait)
-+ return -EAGAIN;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Fill in the ioctl reply argument with buffer info.
-+ * Bo locked.
-+ */
-+
-+void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-+ struct drm_bo_info_rep *rep)
-+{
-+ if (!rep)
-+ return;
-+
-+ rep->handle = bo->base.hash.key;
-+ rep->flags = bo->mem.flags;
-+ rep->size = bo->num_pages * PAGE_SIZE;
-+ rep->offset = bo->offset;
-+
-+ if (bo->type == drm_bo_type_dc)
-+ rep->arg_handle = bo->map_list.user_token;
-+ else
-+ rep->arg_handle = 0;
-+
-+ rep->mask = bo->mem.mask;
-+ rep->buffer_start = bo->buffer_start;
-+ rep->fence_flags = bo->fence_type;
-+ rep->rep_flags = 0;
-+ rep->page_alignment = bo->mem.page_alignment;
-+
-+ if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
-+ DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
-+ DRM_BO_REP_BUSY);
-+ }
-+}
-+EXPORT_SYMBOL(drm_bo_fill_rep_arg);
-+
-+/*
-+ * Wait for buffer idle and register that we've mapped the buffer.
-+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
-+ * so that if the client dies, the mapping is automatically
-+ * unregistered.
-+ */
-+
-+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
-+ uint32_t map_flags, unsigned hint,
-+ struct drm_bo_info_rep *rep)
-+{
-+ struct drm_buffer_object *bo;
-+ struct drm_device *dev = file_priv->minor->dev;
-+ int ret = 0;
-+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!bo)
-+ return -EINVAL;
-+
-+ mutex_lock(&bo->mutex);
-+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-+ if (ret)
-+ goto out;
-+
-+ /*
-+ * If this returns true, we are currently unmapped.
-+ * We need to do this test, because unmapping can
-+ * be done without the bo->mutex held.
-+ */
-+
-+ while (1) {
-+ if (atomic_inc_and_test(&bo->mapped)) {
-+ if (no_wait && drm_bo_busy(bo)) {
-+ atomic_dec(&bo->mapped);
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+ ret = drm_bo_wait(bo, 0, 0, no_wait);
-+ if (ret) {
-+ atomic_dec(&bo->mapped);
-+ goto out;
-+ }
-+
-+ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
-+ drm_bo_evict_cached(bo);
-+
-+ break;
-+ } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
-+
-+ /*
-+ * We are already mapped with different flags.
-+ * need to wait for unmap.
-+ */
-+
-+ ret = drm_bo_wait_unmapped(bo, no_wait);
-+ if (ret)
-+ goto out;
-+
-+ continue;
-+ }
-+ break;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (ret) {
-+ if (atomic_add_negative(-1, &bo->mapped))
-+ wake_up_all(&bo->event_queue);
-+
-+ } else
-+ drm_bo_fill_rep_arg(bo, rep);
-+out:
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return ret;
-+}
-+
-+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_buffer_object *bo;
-+ struct drm_ref_object *ro;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ if (!bo) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-+ if (!ro) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ drm_remove_ref_object(file_priv, ro);
-+ drm_bo_usage_deref_locked(&bo);
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+/*
-+ * Call struct-sem locked.
-+ */
-+
-+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
-+ struct drm_user_object *uo,
-+ enum drm_ref_type action)
-+{
-+ struct drm_buffer_object *bo =
-+ drm_user_object_entry(uo, struct drm_buffer_object, base);
-+
-+ /*
-+ * We DON'T want to take the bo->lock here, because we want to
-+ * hold it when we wait for unmapped buffer.
-+ */
-+
-+ BUG_ON(action != _DRM_REF_TYPE1);
-+
-+ if (atomic_add_negative(-1, &bo->mapped))
-+ wake_up_all(&bo->event_queue);
-+}
-+
-+/*
-+ * bo->mutex locked.
-+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
-+ */
-+
-+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
-+ int no_wait, int move_unfenced)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int ret = 0;
-+ struct drm_bo_mem_reg mem;
-+ /*
-+ * Flush outstanding fences.
-+ */
-+
-+ drm_bo_busy(bo);
-+
-+ /*
-+ * Wait for outstanding fences.
-+ */
-+
-+ ret = drm_bo_wait(bo, 0, 0, no_wait);
-+ if (ret)
-+ return ret;
-+
-+ mem.num_pages = bo->num_pages;
-+ mem.size = mem.num_pages << PAGE_SHIFT;
-+ mem.mask = new_mem_flags;
-+ mem.page_alignment = bo->mem.page_alignment;
-+
-+ mutex_lock(&bm->evict_mutex);
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(&bo->lru);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ /*
-+ * Determine where to move the buffer.
-+ */
-+ ret = drm_bo_mem_space(bo, &mem, no_wait);
-+ if (ret)
-+ goto out_unlock;
-+
-+ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
-+
-+out_unlock:
-+ mutex_lock(&dev->struct_mutex);
-+ if (ret || !move_unfenced) {
-+ if (mem.mm_node) {
-+ if (mem.mm_node != bo->pinned_node)
-+ drm_mm_put_block(mem.mm_node);
-+ mem.mm_node = NULL;
-+ }
-+ drm_bo_add_to_lru(bo);
-+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-+ wake_up_all(&bo->event_queue);
-+ DRM_FLAG_MASKED(bo->priv_flags, 0,
-+ _DRM_BO_FLAG_UNFENCED);
-+ }
-+ } else {
-+ list_add_tail(&bo->lru, &bm->unfenced);
-+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
-+ _DRM_BO_FLAG_UNFENCED);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_unlock(&bm->evict_mutex);
-+ return ret;
-+}
-+
-+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
-+{
-+ uint32_t flag_diff = (mem->mask ^ mem->flags);
-+
-+ if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
-+ return 0;
-+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-+ (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
-+ (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
-+ return 0;
-+
-+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
-+ ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
-+ (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
-+ return 0;
-+ return 1;
-+}
-+
-+/*
-+ * bo locked.
-+ */
-+
-+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
-+ uint32_t fence_class,
-+ int move_unfenced, int no_wait)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ uint32_t ftype;
-+ int ret;
-+
-+ DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
-+ (unsigned long long) bo->mem.mask,
-+ (unsigned long long) bo->mem.flags);
-+
-+ ret = driver->fence_type(bo, &fence_class, &ftype);
-+
-+ if (ret) {
-+ DRM_ERROR("Driver did not support given buffer permissions\n");
-+ return ret;
-+ }
-+
-+ /*
-+ * We're switching command submission mechanism,
-+ * or cannot simply rely on the hardware serializing for us.
-+ *
-+ * Insert a driver-dependant barrier or wait for buffer idle.
-+ */
-+
-+ if ((fence_class != bo->fence_class) ||
-+ ((ftype ^ bo->fence_type) & bo->fence_type)) {
-+
-+ ret = -EINVAL;
-+ if (driver->command_stream_barrier) {
-+ ret = driver->command_stream_barrier(bo,
-+ fence_class,
-+ ftype,
-+ no_wait);
-+ }
-+ if (ret)
-+ ret = drm_bo_wait(bo, 0, 0, no_wait);
-+
-+ if (ret)
-+ return ret;
-+
-+ }
-+
-+ bo->new_fence_class = fence_class;
-+ bo->new_fence_type = ftype;
-+
-+ ret = drm_bo_wait_unmapped(bo, no_wait);
-+ if (ret) {
-+ DRM_ERROR("Timed out waiting for buffer unmap.\n");
-+ return ret;
-+ }
-+
-+ /*
-+ * Check whether we need to move buffer.
-+ */
-+
-+ if (!drm_bo_mem_compat(&bo->mem)) {
-+ ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
-+ move_unfenced);
-+ if (ret) {
-+ if (ret != -EAGAIN)
-+ DRM_ERROR("Failed moving buffer.\n");
-+ if (ret == -ENOMEM)
-+ DRM_ERROR("Out of aperture space.\n");
-+ return ret;
-+ }
-+ }
-+
-+ /*
-+ * Pinned buffers.
-+ */
-+
-+ if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
-+ bo->pinned_mem_type = bo->mem.mem_type;
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(&bo->pinned_lru);
-+ drm_bo_add_to_pinned_lru(bo);
-+
-+ if (bo->pinned_node != bo->mem.mm_node) {
-+ if (bo->pinned_node != NULL)
-+ drm_mm_put_block(bo->pinned_node);
-+ bo->pinned_node = bo->mem.mm_node;
-+ }
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ } else if (bo->pinned_node != NULL) {
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (bo->pinned_node != bo->mem.mm_node)
-+ drm_mm_put_block(bo->pinned_node);
-+
-+ list_del_init(&bo->pinned_lru);
-+ bo->pinned_node = NULL;
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ }
-+
-+ /*
-+ * We might need to add a TTM.
-+ */
-+
-+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
-+ ret = drm_bo_add_ttm(bo);
-+ if (ret)
-+ return ret;
-+ }
-+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
-+
-+ /*
-+ * Finally, adjust lru to be sure.
-+ */
-+
-+ mutex_lock(&dev->struct_mutex);
-+ list_del(&bo->lru);
-+ if (move_unfenced) {
-+ list_add_tail(&bo->lru, &bm->unfenced);
-+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
-+ _DRM_BO_FLAG_UNFENCED);
-+ } else {
-+ drm_bo_add_to_lru(bo);
-+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-+ wake_up_all(&bo->event_queue);
-+ DRM_FLAG_MASKED(bo->priv_flags, 0,
-+ _DRM_BO_FLAG_UNFENCED);
-+ }
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+}
-+
-+int drm_bo_do_validate(struct drm_buffer_object *bo,
-+ uint64_t flags, uint64_t mask, uint32_t hint,
-+ uint32_t fence_class,
-+ int no_wait,
-+ struct drm_bo_info_rep *rep)
-+{
-+ int ret;
-+
-+ mutex_lock(&bo->mutex);
-+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-+
-+ if (ret)
-+ goto out;
-+
-+ DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
-+ ret = drm_bo_new_mask(bo, flags, mask);
-+ if (ret)
-+ goto out;
-+
-+ ret = drm_buffer_object_validate(bo,
-+ fence_class,
-+ !(hint & DRM_BO_HINT_DONT_FENCE),
-+ no_wait);
-+out:
-+ if (rep)
-+ drm_bo_fill_rep_arg(bo, rep);
-+
-+ mutex_unlock(&bo->mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_do_validate);
-+
-+
-+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-+ uint32_t fence_class,
-+ uint64_t flags, uint64_t mask,
-+ uint32_t hint,
-+ int use_old_fence_class,
-+ struct drm_bo_info_rep *rep,
-+ struct drm_buffer_object **bo_rep)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_buffer_object *bo;
-+ int ret;
-+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!bo)
-+ return -EINVAL;
-+
-+ if (use_old_fence_class)
-+ fence_class = bo->fence_class;
-+
-+ /*
-+ * Only allow creator to change shared buffer mask.
-+ */
-+
-+ if (bo->base.owner != file_priv)
-+ mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-+
-+
-+ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
-+ no_wait, rep);
-+
-+ if (!ret && bo_rep)
-+ *bo_rep = bo;
-+ else
-+ drm_bo_usage_deref_unlocked(&bo);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_handle_validate);
-+
-+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
-+ struct drm_bo_info_rep *rep)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_buffer_object *bo;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!bo)
-+ return -EINVAL;
-+
-+ mutex_lock(&bo->mutex);
-+ if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
-+ (void)drm_bo_busy(bo);
-+ drm_bo_fill_rep_arg(bo, rep);
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return 0;
-+}
-+
-+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
-+ uint32_t hint,
-+ struct drm_bo_info_rep *rep)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_buffer_object *bo;
-+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!bo)
-+ return -EINVAL;
-+
-+ mutex_lock(&bo->mutex);
-+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-+ if (ret)
-+ goto out;
-+ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
-+ if (ret)
-+ goto out;
-+
-+ drm_bo_fill_rep_arg(bo, rep);
-+
-+out:
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return ret;
-+}
-+
-+static inline size_t drm_size_align(size_t size)
-+{
-+ size_t tmpSize = 4;
-+ if (size > PAGE_SIZE)
-+ return PAGE_ALIGN(size);
-+ while (tmpSize < size)
-+ tmpSize <<= 1;
-+
-+ return (size_t) tmpSize;
-+}
-+
-+static int drm_bo_reserve_size(struct drm_device *dev,
-+ int user_bo,
-+ unsigned long num_pages,
-+ unsigned long *size)
-+{
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+
-+ *size = drm_size_align(sizeof(struct drm_buffer_object)) +
-+ /* Always account for a TTM, even for fixed memory types */
-+ drm_ttm_size(dev, num_pages, user_bo) +
-+ /* user space mapping structure */
-+ drm_size_align(sizeof(drm_local_map_t)) +
-+ /* file offset space, aperture space, pinned space */
-+ 3*drm_size_align(sizeof(struct drm_mm_node *)) +
-+ /* ttm backend */
-+ driver->backend_size(dev, num_pages);
-+
-+ // FIXME - ENOMEM?
-+ return 0;
-+}
-+
-+int drm_buffer_object_create(struct drm_device *dev,
-+ unsigned long size,
-+ enum drm_bo_type type,
-+ uint64_t mask,
-+ uint32_t hint,
-+ uint32_t page_alignment,
-+ unsigned long buffer_start,
-+ struct drm_buffer_object **buf_obj)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_buffer_object *bo;
-+ int ret = 0;
-+ unsigned long num_pages;
-+ unsigned long reserved_size;
-+
-+ size += buffer_start & ~PAGE_MASK;
-+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+ if (num_pages == 0) {
-+ DRM_ERROR("Illegal buffer object size.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_reserve_size(dev, type == drm_bo_type_user,
-+ num_pages, &reserved_size);
-+
-+ if (ret) {
-+ DRM_DEBUG("Failed reserving space for buffer object.\n");
-+ return ret;
-+ }
-+
-+ bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
-+
-+ if (!bo) {
-+ drm_bo_unreserve_size(num_pages);
-+ return -ENOMEM;
-+ }
-+
-+ mutex_init(&bo->mutex);
-+ mutex_lock(&bo->mutex);
-+
-+ bo->reserved_size = reserved_size;
-+ atomic_set(&bo->usage, 1);
-+ atomic_set(&bo->mapped, -1);
-+ DRM_INIT_WAITQUEUE(&bo->event_queue);
-+ INIT_LIST_HEAD(&bo->lru);
-+ INIT_LIST_HEAD(&bo->pinned_lru);
-+ INIT_LIST_HEAD(&bo->ddestroy);
-+#ifdef DRM_ODD_MM_COMPAT
-+ INIT_LIST_HEAD(&bo->p_mm_list);
-+ INIT_LIST_HEAD(&bo->vma_list);
-+#endif
-+ bo->dev = dev;
-+ bo->type = type;
-+ bo->num_pages = num_pages;
-+ bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-+ bo->mem.num_pages = bo->num_pages;
-+ bo->mem.mm_node = NULL;
-+ bo->mem.page_alignment = page_alignment;
-+ bo->buffer_start = buffer_start & PAGE_MASK;
-+ bo->priv_flags = 0;
-+ bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
-+ DRM_BO_FLAG_MAPPABLE;
-+ bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
-+ DRM_BO_FLAG_MAPPABLE;
-+ atomic_inc(&bm->count);
-+ ret = drm_bo_new_mask(bo, mask, mask);
-+ if (ret)
-+ goto out_err;
-+
-+ if (bo->type == drm_bo_type_dc) {
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_bo_setup_vm_locked(bo);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (ret)
-+ goto out_err;
-+ }
-+
-+ ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
-+ if (ret)
-+ goto out_err;
-+
-+ mutex_unlock(&bo->mutex);
-+ *buf_obj = bo;
-+ return 0;
-+
-+out_err:
-+ mutex_unlock(&bo->mutex);
-+
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_buffer_object_create);
-+
-+
-+static int drm_bo_add_user_object(struct drm_file *file_priv,
-+ struct drm_buffer_object *bo, int shareable)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_user_object(file_priv, &bo->base, shareable);
-+ if (ret)
-+ goto out;
-+
-+ bo->base.remove = drm_bo_base_deref_locked;
-+ bo->base.type = drm_buffer_type;
-+ bo->base.ref_struct_locked = NULL;
-+ bo->base.unref = drm_buffer_user_object_unmap;
-+
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_create_arg *arg = data;
-+ struct drm_bo_create_req *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ struct drm_buffer_object *entry;
-+ enum drm_bo_type bo_type;
-+ int ret = 0;
-+
-+ DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
-+ (int)(req->size / 1024), req->page_alignment * 4);
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
-+
-+ if (bo_type == drm_bo_type_user)
-+ req->mask &= ~DRM_BO_FLAG_SHAREABLE;
-+
-+ ret = drm_buffer_object_create(file_priv->minor->dev,
-+ req->size, bo_type, req->mask,
-+ req->hint, req->page_alignment,
-+ req->buffer_start, &entry);
-+ if (ret)
-+ goto out;
-+
-+ ret = drm_bo_add_user_object(file_priv, entry,
-+ req->mask & DRM_BO_FLAG_SHAREABLE);
-+ if (ret) {
-+ drm_bo_usage_deref_unlocked(&entry);
-+ goto out;
-+ }
-+
-+ mutex_lock(&entry->mutex);
-+ drm_bo_fill_rep_arg(entry, rep);
-+ mutex_unlock(&entry->mutex);
-+
-+out:
-+ return ret;
-+}
-+
-+int drm_bo_setstatus_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_map_wait_idle_arg *arg = data;
-+ struct drm_bo_info_req *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ int ret;
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
-+ if (ret)
-+ return ret;
-+
-+ ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
-+ req->flags,
-+ req->mask,
-+ req->hint | DRM_BO_HINT_DONT_FENCE,
-+ 1,
-+ rep, NULL);
-+
-+ (void) drm_bo_read_unlock(&dev->bm.bm_lock);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_map_wait_idle_arg *arg = data;
-+ struct drm_bo_info_req *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ int ret;
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
-+ req->hint, rep);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_handle_arg *arg = data;
-+ int ret;
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_buffer_object_unmap(file_priv, arg->handle);
-+ return ret;
-+}
-+
-+
-+int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_reference_info_arg *arg = data;
-+ struct drm_bo_handle_arg *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ struct drm_user_object *uo;
-+ int ret;
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_user_object_ref(file_priv, req->handle,
-+ drm_buffer_type, &uo);
-+ if (ret)
-+ return ret;
-+
-+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_handle_arg *arg = data;
-+ int ret = 0;
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
-+ return ret;
-+}
-+
-+int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_reference_info_arg *arg = data;
-+ struct drm_bo_handle_arg *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ int ret;
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_map_wait_idle_arg *arg = data;
-+ struct drm_bo_info_req *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ int ret;
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_handle_wait(file_priv, req->handle,
-+ req->hint, rep);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+static int drm_bo_leave_list(struct drm_buffer_object *bo,
-+ uint32_t mem_type,
-+ int free_pinned,
-+ int allow_errors)
-+{
-+ struct drm_device *dev = bo->dev;
-+ int ret = 0;
-+
-+ mutex_lock(&bo->mutex);
-+
-+ ret = drm_bo_expire_fence(bo, allow_errors);
-+ if (ret)
-+ goto out;
-+
-+ if (free_pinned) {
-+ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(&bo->pinned_lru);
-+ if (bo->pinned_node == bo->mem.mm_node)
-+ bo->pinned_node = NULL;
-+ if (bo->pinned_node != NULL) {
-+ drm_mm_put_block(bo->pinned_node);
-+ bo->pinned_node = NULL;
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+
-+ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
-+ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
-+ "cleanup. Removing flag and evicting.\n");
-+ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
-+ bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
-+ }
-+
-+ if (bo->mem.mem_type == mem_type)
-+ ret = drm_bo_evict(bo, mem_type, 0);
-+
-+ if (ret) {
-+ if (allow_errors) {
-+ goto out;
-+ } else {
-+ ret = 0;
-+ DRM_ERROR("Cleanup eviction failed\n");
-+ }
-+ }
-+
-+out:
-+ mutex_unlock(&bo->mutex);
-+ return ret;
-+}
-+
-+
-+static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
-+ int pinned_list)
-+{
-+ if (pinned_list)
-+ return list_entry(list, struct drm_buffer_object, pinned_lru);
-+ else
-+ return list_entry(list, struct drm_buffer_object, lru);
-+}
-+
-+/*
-+ * dev->struct_mutex locked.
-+ */
-+
-+static int drm_bo_force_list_clean(struct drm_device *dev,
-+ struct list_head *head,
-+ unsigned mem_type,
-+ int free_pinned,
-+ int allow_errors,
-+ int pinned_list)
-+{
-+ struct list_head *list, *next, *prev;
-+ struct drm_buffer_object *entry, *nentry;
-+ int ret;
-+ int do_restart;
-+
-+ /*
-+ * The list traversal is a bit odd here, because an item may
-+ * disappear from the list when we release the struct_mutex or
-+ * when we decrease the usage count. Also we're not guaranteed
-+ * to drain pinned lists, so we can't always restart.
-+ */
-+
-+restart:
-+ nentry = NULL;
-+ list_for_each_safe(list, next, head) {
-+ prev = list->prev;
-+
-+ entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
-+ atomic_inc(&entry->usage);
-+ if (nentry) {
-+ atomic_dec(&nentry->usage);
-+ nentry = NULL;
-+ }
-+
-+ /*
-+ * Protect the next item from destruction, so we can check
-+ * its list pointers later on.
-+ */
-+
-+ if (next != head) {
-+ nentry = drm_bo_entry(next, pinned_list);
-+ atomic_inc(&nentry->usage);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ ret = drm_bo_leave_list(entry, mem_type, free_pinned,
-+ allow_errors);
-+ mutex_lock(&dev->struct_mutex);
-+
-+ drm_bo_usage_deref_locked(&entry);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Has the next item disappeared from the list?
-+ */
-+
-+ do_restart = ((next->prev != list) && (next->prev != prev));
-+
-+ if (nentry != NULL && do_restart)
-+ drm_bo_usage_deref_locked(&nentry);
-+
-+ if (do_restart)
-+ goto restart;
-+ }
-+ return 0;
-+}
-+
-+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem_type];
-+ int ret = -EINVAL;
-+
-+ if (mem_type >= DRM_BO_MEM_TYPES) {
-+ DRM_ERROR("Illegal memory type %d\n", mem_type);
-+ return ret;
-+ }
-+
-+ if (!man->has_type) {
-+ DRM_ERROR("Trying to take down uninitialized "
-+ "memory manager type %u\n", mem_type);
-+ return ret;
-+ }
-+ man->use_type = 0;
-+ man->has_type = 0;
-+
-+ ret = 0;
-+ if (mem_type > 0) {
-+ BUG_ON(!list_empty(&bm->unfenced));
-+ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
-+ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
-+
-+ if (drm_mm_clean(&man->manager)) {
-+ drm_mm_takedown(&man->manager);
-+ } else {
-+ ret = -EBUSY;
-+ }
-+ }
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_clean_mm);
-+
-+/**
-+ *Evict all buffers of a particular mem_type, but leave memory manager
-+ *regions for NO_MOVE buffers intact. New buffers cannot be added at this
-+ *point since we have the hardware lock.
-+ */
-+
-+static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
-+{
-+ int ret;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem_type];
-+
-+ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
-+ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
-+ return -EINVAL;
-+ }
-+
-+ if (!man->has_type) {
-+ DRM_ERROR("Memory type %u has not been initialized.\n",
-+ mem_type);
-+ return 0;
-+ }
-+
-+ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
-+ if (ret)
-+ return ret;
-+ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
-+
-+ return ret;
-+}
-+
-+int drm_bo_init_mm(struct drm_device *dev,
-+ unsigned type,
-+ unsigned long p_offset, unsigned long p_size)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int ret = -EINVAL;
-+ struct drm_mem_type_manager *man;
-+
-+ if (type >= DRM_BO_MEM_TYPES) {
-+ DRM_ERROR("Illegal memory type %d\n", type);
-+ return ret;
-+ }
-+
-+ man = &bm->man[type];
-+ if (man->has_type) {
-+ DRM_ERROR("Memory manager already initialized for type %d\n",
-+ type);
-+ return ret;
-+ }
-+
-+ ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
-+ if (ret)
-+ return ret;
-+
-+ ret = 0;
-+ if (type != DRM_BO_MEM_LOCAL) {
-+ if (!p_size) {
-+ DRM_ERROR("Zero size memory manager type %d\n", type);
-+ return ret;
-+ }
-+ ret = drm_mm_init(&man->manager, p_offset, p_size);
-+ if (ret)
-+ return ret;
-+ }
-+ man->has_type = 1;
-+ man->use_type = 1;
-+
-+ INIT_LIST_HEAD(&man->lru);
-+ INIT_LIST_HEAD(&man->pinned);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_init_mm);
-+
-+/*
-+ * This function is intended to be called on drm driver unload.
-+ * If you decide to call it from lastclose, you must protect the call
-+ * from a potentially racing drm_bo_driver_init in firstopen.
-+ * (This may happen on X server restart).
-+ */
-+
-+int drm_bo_driver_finish(struct drm_device *dev)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int ret = 0;
-+ unsigned i = DRM_BO_MEM_TYPES;
-+ struct drm_mem_type_manager *man;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (!bm->initialized)
-+ goto out;
-+ bm->initialized = 0;
-+
-+ while (i--) {
-+ man = &bm->man[i];
-+ if (man->has_type) {
-+ man->use_type = 0;
-+ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
-+ ret = -EBUSY;
-+ DRM_ERROR("DRM memory manager type %d "
-+ "is not clean.\n", i);
-+ }
-+ man->has_type = 0;
-+ }
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!cancel_delayed_work(&bm->wq))
-+ flush_scheduled_work();
-+
-+ mutex_lock(&dev->struct_mutex);
-+ drm_bo_delayed_delete(dev, 1);
-+ if (list_empty(&bm->ddestroy))
-+ DRM_DEBUG("Delayed destroy list was clean\n");
-+
-+ if (list_empty(&bm->man[0].lru))
-+ DRM_DEBUG("Swap list was clean\n");
-+
-+ if (list_empty(&bm->man[0].pinned))
-+ DRM_DEBUG("NO_MOVE list was clean\n");
-+
-+ if (list_empty(&bm->unfenced))
-+ DRM_DEBUG("Unfenced list was clean\n");
-+
-+ __free_page(bm->dummy_read_page);
-+
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_driver_finish);
-+
-+/*
-+ * This function is intended to be called on drm driver load.
-+ * If you decide to call it from firstopen, you must protect the call
-+ * from a potentially racing drm_bo_driver_finish in lastclose.
-+ * (This may happen on X server restart).
-+ */
-+
-+int drm_bo_driver_init(struct drm_device *dev)
-+{
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int ret = -EINVAL;
-+
-+ bm->dummy_read_page = NULL;
-+ drm_bo_init_lock(&bm->bm_lock);
-+ mutex_lock(&dev->struct_mutex);
-+ if (!driver)
-+ goto out_unlock;
-+
-+ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-+ if (!bm->dummy_read_page) {
-+ ret = -ENOMEM;
-+ goto out_unlock;
-+ }
-+
-+
-+ /*
-+ * Initialize the system memory buffer type.
-+ * Other types need to be driver / IOCTL initialized.
-+ */
-+ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
-+ if (ret)
-+ goto out_unlock;
-+
-+ INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
-+
-+ bm->initialized = 1;
-+ bm->nice_mode = 1;
-+ atomic_set(&bm->count, 0);
-+ bm->cur_pages = 0;
-+ INIT_LIST_HEAD(&bm->unfenced);
-+ INIT_LIST_HEAD(&bm->ddestroy);
-+out_unlock:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_driver_init);
-+
-+int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mm_init_arg *arg = data;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ int ret;
-+
-+ if (!driver) {
-+ DRM_ERROR("Buffer objects are not supported by this driver\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
-+ if (ret)
-+ return ret;
-+
-+ ret = -EINVAL;
-+ if (arg->magic != DRM_BO_INIT_MAGIC) {
-+ DRM_ERROR("You are using an old libdrm that is not compatible with\n"
-+ "\tthe kernel DRM module. Please upgrade your libdrm.\n");
-+ return -EINVAL;
-+ }
-+ if (arg->major != DRM_BO_INIT_MAJOR) {
-+ DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
-+ "\tversion don't match. Got %d, expected %d.\n",
-+ arg->major, DRM_BO_INIT_MAJOR);
-+ return -EINVAL;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (!bm->initialized) {
-+ DRM_ERROR("DRM memory manager was not initialized.\n");
-+ goto out;
-+ }
-+ if (arg->mem_type == 0) {
-+ DRM_ERROR("System memory buffers already initialized.\n");
-+ goto out;
-+ }
-+ ret = drm_bo_init_mm(dev, arg->mem_type,
-+ arg->p_offset, arg->p_size);
-+
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-+
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mm_type_arg *arg = data;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ int ret;
-+
-+ if (!driver) {
-+ DRM_ERROR("Buffer objects are not supported by this driver\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
-+ if (ret)
-+ return ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = -EINVAL;
-+ if (!bm->initialized) {
-+ DRM_ERROR("DRM memory manager was not initialized\n");
-+ goto out;
-+ }
-+ if (arg->mem_type == 0) {
-+ DRM_ERROR("No takedown for System memory buffers.\n");
-+ goto out;
-+ }
-+ ret = 0;
-+ if (drm_bo_clean_mm(dev, arg->mem_type)) {
-+ DRM_ERROR("Memory manager type %d not clean. "
-+ "Delaying takedown\n", arg->mem_type);
-+ }
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-+
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mm_type_arg *arg = data;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ int ret;
-+
-+ if (!driver) {
-+ DRM_ERROR("Buffer objects are not supported by this driver\n");
-+ return -EINVAL;
-+ }
-+
-+ if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
-+ DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-+ ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_bo_lock_mm(dev, arg->mem_type);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (ret) {
-+ (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+int drm_mm_unlock_ioctl(struct drm_device *dev,
-+ void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_mm_type_arg *arg = data;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ int ret;
-+
-+ if (!driver) {
-+ DRM_ERROR("Buffer objects are not supported by this driver\n");
-+ return -EINVAL;
-+ }
-+
-+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-+ ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * buffer object vm functions.
-+ */
-+
-+int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
-+
-+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
-+ if (mem->mem_type == DRM_BO_MEM_LOCAL)
-+ return 0;
-+
-+ if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
-+ return 0;
-+
-+ if (mem->flags & DRM_BO_FLAG_CACHED)
-+ return 0;
-+ }
-+ return 1;
-+}
-+EXPORT_SYMBOL(drm_mem_reg_is_pci);
-+
-+/**
-+ * \c Get the PCI offset for the buffer object memory.
-+ *
-+ * \param bo The buffer object.
-+ * \param bus_base On return the base of the PCI region
-+ * \param bus_offset On return the byte offset into the PCI region
-+ * \param bus_size On return the byte size of the buffer object or zero if
-+ * the buffer object memory is not accessible through a PCI region.
-+ * \return Failure indication.
-+ *
-+ * Returns -EINVAL if the buffer object is currently not mappable.
-+ * Otherwise returns zero.
-+ */
-+
-+int drm_bo_pci_offset(struct drm_device *dev,
-+ struct drm_bo_mem_reg *mem,
-+ unsigned long *bus_base,
-+ unsigned long *bus_offset, unsigned long *bus_size)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
-+
-+ *bus_size = 0;
-+ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
-+ return -EINVAL;
-+
-+ if (drm_mem_reg_is_pci(dev, mem)) {
-+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
-+ *bus_size = mem->num_pages << PAGE_SHIFT;
-+ *bus_base = man->io_offset;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * \c Kill all user-space virtual mappings of this buffer object.
-+ *
-+ * \param bo The buffer object.
-+ *
-+ * Call bo->mutex locked.
-+ */
-+
-+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
-+{
-+ struct drm_device *dev = bo->dev;
-+ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
-+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
-+
-+ if (!dev->dev_mapping)
-+ return;
-+
-+ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
-+}
-+
-+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
-+{
-+ struct drm_map_list *list;
-+ drm_local_map_t *map;
-+ struct drm_device *dev = bo->dev;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ if (bo->type != drm_bo_type_dc)
-+ return;
-+
-+ list = &bo->map_list;
-+ if (list->user_token) {
-+ drm_ht_remove_item(&dev->map_hash, &list->hash);
-+ list->user_token = 0;
-+ }
-+ if (list->file_offset_node) {
-+ drm_mm_put_block(list->file_offset_node);
-+ list->file_offset_node = NULL;
-+ }
-+
-+ map = list->map;
-+ if (!map)
-+ return;
-+
-+ drm_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
-+ list->map = NULL;
-+ list->user_token = 0ULL;
-+ drm_bo_usage_deref_locked(&bo);
-+}
-+
-+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
-+{
-+ struct drm_map_list *list = &bo->map_list;
-+ drm_local_map_t *map;
-+ struct drm_device *dev = bo->dev;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ list->map = drm_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
-+ if (!list->map)
-+ return -ENOMEM;
-+
-+ map = list->map;
-+ map->offset = 0;
-+ map->type = _DRM_TTM;
-+ map->flags = _DRM_REMOVABLE;
-+ map->size = bo->mem.num_pages * PAGE_SIZE;
-+ atomic_inc(&bo->usage);
-+ map->handle = (void *)bo;
-+
-+ list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
-+ bo->mem.num_pages, 0, 0);
-+
-+ if (!list->file_offset_node) {
-+ drm_bo_takedown_vm_locked(bo);
-+ return -ENOMEM;
-+ }
-+
-+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-+ bo->mem.num_pages, 0);
-+ if (!list->file_offset_node) {
-+ drm_bo_takedown_vm_locked(bo);
-+ return -ENOMEM;
-+ }
-+
-+ list->hash.key = list->file_offset_node->start;
-+ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
-+ drm_bo_takedown_vm_locked(bo);
-+ return -ENOMEM;
-+ }
-+
-+ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-+
-+ return 0;
-+}
-+
-+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
-+
-+ arg->major = DRM_BO_INIT_MAJOR;
-+ arg->minor = DRM_BO_INIT_MINOR;
-+ arg->patchlevel = DRM_BO_INIT_PATCH;
-+
-+ return 0;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/drm_bo_lock.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_bo_lock.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,175 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+/*
-+ * This file implements a simple replacement for the buffer manager use
-+ * of the heavyweight hardware lock.
-+ * The lock is a read-write lock. Taking it in read mode is fast, and
-+ * intended for in-kernel use only.
-+ * Taking it in write mode is slow.
-+ *
-+ * The write mode is used only when there is a need to block all
-+ * user-space processes from allocating a
-+ * new memory area.
-+ * Typical use in write mode is X server VT switching, and it's allowed
-+ * to leave kernel space with the write lock held. If a user-space process
-+ * dies while having the write-lock, it will be released during the file
-+ * descriptor release.
-+ *
-+ * The read lock is typically placed at the start of an IOCTL- or
-+ * user-space callable function that may end up allocating a memory area.
-+ * This includes setstatus, super-ioctls and no_pfn; the latter may move
-+ * unmappable regions to mappable. It's a bug to leave kernel space with the
-+ * read lock held.
-+ *
-+ * Both read- and write lock taking is interruptible for low signal-delivery
-+ * latency. The locking functions will return -EAGAIN if interrupted by a
-+ * signal.
-+ *
-+ * Locking order: The lock should be taken BEFORE any kernel mutexes
-+ * or spinlocks.
-+ */
-+
-+#include "drmP.h"
-+
-+void drm_bo_init_lock(struct drm_bo_lock *lock)
-+{
-+ DRM_INIT_WAITQUEUE(&lock->queue);
-+ atomic_set(&lock->write_lock_pending, 0);
-+ atomic_set(&lock->readers, 0);
-+}
-+
-+void drm_bo_read_unlock(struct drm_bo_lock *lock)
-+{
-+ if (unlikely(atomic_add_negative(-1, &lock->readers)))
-+ BUG();
-+ if (atomic_read(&lock->readers) == 0)
-+ wake_up_interruptible(&lock->queue);
-+}
-+EXPORT_SYMBOL(drm_bo_read_unlock);
-+
-+int drm_bo_read_lock(struct drm_bo_lock *lock)
-+{
-+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
-+ int ret;
-+ ret = wait_event_interruptible
-+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
-+ if (ret)
-+ return -EAGAIN;
-+ }
-+
-+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
-+ int ret;
-+ ret = wait_event_interruptible
-+ (lock->queue, atomic_add_unless(&lock->readers, 1, -1));
-+ if (ret)
-+ return -EAGAIN;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_read_lock);
-+
-+static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
-+{
-+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
-+ return -EINVAL;
-+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1))
-+ return -EINVAL;
-+ wake_up_interruptible(&lock->queue);
-+ return 0;
-+}
-+
-+static void drm_bo_write_lock_remove(struct drm_file *file_priv,
-+ struct drm_user_object *item)
-+{
-+ struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
-+ int ret;
-+
-+ ret = __drm_bo_write_unlock(lock);
-+ BUG_ON(ret);
-+}
-+
-+int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
-+{
-+ int ret = 0;
-+ struct drm_device *dev;
-+
-+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
-+ return -EINVAL;
-+
-+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
-+ ret = wait_event_interruptible
-+ (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0);
-+
-+ if (ret) {
-+ atomic_set(&lock->write_lock_pending, 0);
-+ wake_up_interruptible(&lock->queue);
-+ return -EAGAIN;
-+ }
-+ }
-+
-+ /*
-+ * Add a dummy user-object, the destructor of which will
-+ * make sure the lock is released if the client dies
-+ * while holding it.
-+ */
-+
-+ dev = file_priv->minor->dev;
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_user_object(file_priv, &lock->base, 0);
-+ lock->base.remove = &drm_bo_write_lock_remove;
-+ lock->base.type = drm_lock_type;
-+ if (ret)
-+ (void)__drm_bo_write_unlock(lock);
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return ret;
-+}
-+
-+int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_ref_object *ro;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (lock->base.owner != file_priv) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EINVAL;
-+ }
-+ ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
-+ BUG_ON(!ro);
-+ drm_remove_ref_object(file_priv, ro);
-+ lock->base.owner = NULL;
-+
-+ mutex_unlock(&dev->struct_mutex);
-+ return 0;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/drm_bo_move.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_bo_move.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,597 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+/**
-+ * Free the old memory node unless it's a pinned region and we
-+ * have not been requested to free also pinned regions.
-+ */
-+
-+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
-+{
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+
-+ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
-+ mutex_lock(&bo->dev->struct_mutex);
-+ drm_mm_put_block(old_mem->mm_node);
-+ mutex_unlock(&bo->dev->struct_mutex);
-+ }
-+ old_mem->mm_node = NULL;
-+}
-+
-+int drm_bo_move_ttm(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_ttm *ttm = bo->ttm;
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ uint64_t save_flags = old_mem->flags;
-+ uint64_t save_mask = old_mem->mask;
-+ int ret;
-+
-+ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
-+ if (evict)
-+ drm_ttm_evict(ttm);
-+ else
-+ drm_ttm_unbind(ttm);
-+
-+ drm_bo_free_old_node(bo);
-+ DRM_FLAG_MASKED(old_mem->flags,
-+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
-+ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
-+ old_mem->mem_type = DRM_BO_MEM_LOCAL;
-+ save_flags = old_mem->flags;
-+ }
-+ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
-+ ret = drm_bind_ttm(ttm, new_mem);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ *old_mem = *new_mem;
-+ new_mem->mm_node = NULL;
-+ old_mem->mask = save_mask;
-+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_move_ttm);
-+
-+/**
-+ * \c Return a kernel virtual address to the buffer object PCI memory.
-+ *
-+ * \param bo The buffer object.
-+ * \return Failure indication.
-+ *
-+ * Returns -EINVAL if the buffer object is currently not mappable.
-+ * Returns -ENOMEM if the ioremap operation failed.
-+ * Otherwise returns zero.
-+ *
-+ * After a successfull call, bo->iomap contains the virtual address, or NULL
-+ * if the buffer object content is not accessible through PCI space.
-+ * Call bo->mutex locked.
-+ */
-+
-+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
-+ void **virtual)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
-+ unsigned long bus_offset;
-+ unsigned long bus_size;
-+ unsigned long bus_base;
-+ int ret;
-+ void *addr;
-+
-+ *virtual = NULL;
-+ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
-+ if (ret || bus_size == 0)
-+ return ret;
-+
-+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
-+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-+ else {
-+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-+ if (!addr)
-+ return -ENOMEM;
-+ }
-+ *virtual = addr;
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_mem_reg_ioremap);
-+
-+/**
-+ * \c Unmap mapping obtained using drm_bo_ioremap
-+ *
-+ * \param bo The buffer object.
-+ *
-+ * Call bo->mutex locked.
-+ */
-+
-+void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
-+ void *virtual)
-+{
-+ struct drm_buffer_manager *bm;
-+ struct drm_mem_type_manager *man;
-+
-+ bm = &dev->bm;
-+ man = &bm->man[mem->mem_type];
-+
-+ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
-+ iounmap(virtual);
-+}
-+EXPORT_SYMBOL(drm_mem_reg_iounmap);
-+
-+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
-+{
-+ uint32_t *dstP =
-+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
-+ uint32_t *srcP =
-+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
-+
-+ int i;
-+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
-+ iowrite32(ioread32(srcP++), dstP++);
-+ return 0;
-+}
-+
-+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
-+ unsigned long page)
-+{
-+ struct page *d = drm_ttm_get_page(ttm, page);
-+ void *dst;
-+
-+ if (!d)
-+ return -ENOMEM;
-+
-+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-+ dst = kmap(d);
-+ if (!dst)
-+ return -ENOMEM;
-+
-+ memcpy_fromio(dst, src, PAGE_SIZE);
-+ kunmap(d);
-+ return 0;
-+}
-+
-+static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
-+{
-+ struct page *s = drm_ttm_get_page(ttm, page);
-+ void *src;
-+
-+ if (!s)
-+ return -ENOMEM;
-+
-+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-+ src = kmap(s);
-+ if (!src)
-+ return -ENOMEM;
-+
-+ memcpy_toio(dst, src, PAGE_SIZE);
-+ kunmap(s);
-+ return 0;
-+}
-+
-+int drm_bo_move_memcpy(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
-+ struct drm_ttm *ttm = bo->ttm;
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ struct drm_bo_mem_reg old_copy = *old_mem;
-+ void *old_iomap;
-+ void *new_iomap;
-+ int ret;
-+ uint64_t save_flags = old_mem->flags;
-+ uint64_t save_mask = old_mem->mask;
-+ unsigned long i;
-+ unsigned long page;
-+ unsigned long add = 0;
-+ int dir;
-+
-+ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
-+ if (ret)
-+ return ret;
-+ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
-+ if (ret)
-+ goto out;
-+
-+ if (old_iomap == NULL && new_iomap == NULL)
-+ goto out2;
-+ if (old_iomap == NULL && ttm == NULL)
-+ goto out2;
-+
-+ add = 0;
-+ dir = 1;
-+
-+ if ((old_mem->mem_type == new_mem->mem_type) &&
-+ (new_mem->mm_node->start <
-+ old_mem->mm_node->start + old_mem->mm_node->size)) {
-+ dir = -1;
-+ add = new_mem->num_pages - 1;
-+ }
-+
-+ for (i = 0; i < new_mem->num_pages; ++i) {
-+ page = i * dir + add;
-+ if (old_iomap == NULL)
-+ ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
-+ else if (new_iomap == NULL)
-+ ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
-+ else
-+ ret = drm_copy_io_page(new_iomap, old_iomap, page);
-+ if (ret)
-+ goto out1;
-+ }
-+ mb();
-+out2:
-+ drm_bo_free_old_node(bo);
-+
-+ *old_mem = *new_mem;
-+ new_mem->mm_node = NULL;
-+ old_mem->mask = save_mask;
-+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
-+
-+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
-+ drm_ttm_unbind(ttm);
-+ drm_destroy_ttm(ttm);
-+ bo->ttm = NULL;
-+ }
-+
-+out1:
-+ drm_mem_reg_iounmap(dev, new_mem, new_iomap);
-+out:
-+ drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_move_memcpy);
-+
-+/*
-+ * Transfer a buffer object's memory and LRU status to a newly
-+ * created object. User-space references remains with the old
-+ * object. Call bo->mutex locked.
-+ */
-+
-+int drm_buffer_object_transfer(struct drm_buffer_object *bo,
-+ struct drm_buffer_object **new_obj)
-+{
-+ struct drm_buffer_object *fbo;
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+
-+ fbo = drm_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
-+ if (!fbo)
-+ return -ENOMEM;
-+
-+ *fbo = *bo;
-+ mutex_init(&fbo->mutex);
-+ mutex_lock(&fbo->mutex);
-+ mutex_lock(&dev->struct_mutex);
-+
-+ DRM_INIT_WAITQUEUE(&bo->event_queue);
-+ INIT_LIST_HEAD(&fbo->ddestroy);
-+ INIT_LIST_HEAD(&fbo->lru);
-+ INIT_LIST_HEAD(&fbo->pinned_lru);
-+#ifdef DRM_ODD_MM_COMPAT
-+ INIT_LIST_HEAD(&fbo->vma_list);
-+ INIT_LIST_HEAD(&fbo->p_mm_list);
-+#endif
-+
-+ fbo->fence = drm_fence_reference_locked(bo->fence);
-+ fbo->pinned_node = NULL;
-+ fbo->mem.mm_node->private = (void *)fbo;
-+ atomic_set(&fbo->usage, 1);
-+ atomic_inc(&bm->count);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_unlock(&fbo->mutex);
-+ bo->reserved_size = 0;
-+ *new_obj = fbo;
-+ return 0;
-+}
-+
-+/*
-+ * Since move is underway, we need to block signals in this function.
-+ * We cannot restart until it has finished.
-+ */
-+
-+int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
-+ int evict, int no_wait, uint32_t fence_class,
-+ uint32_t fence_type, uint32_t fence_flags,
-+ struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ int ret;
-+ uint64_t save_flags = old_mem->flags;
-+ uint64_t save_mask = old_mem->mask;
-+ struct drm_buffer_object *old_obj;
-+
-+ if (bo->fence)
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ ret = drm_fence_object_create(dev, fence_class, fence_type,
-+ fence_flags | DRM_FENCE_FLAG_EMIT,
-+ &bo->fence);
-+ bo->fence_type = fence_type;
-+ if (ret)
-+ return ret;
-+
-+#ifdef DRM_ODD_MM_COMPAT
-+ /*
-+ * In this mode, we don't allow pipelining a copy blit,
-+ * since the buffer will be accessible from user space
-+ * the moment we return and rebuild the page tables.
-+ *
-+ * With normal vm operation, page tables are rebuilt
-+ * on demand using fault(), which waits for buffer idle.
-+ */
-+ if (1)
-+#else
-+ if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
-+ bo->mem.mm_node != NULL))
-+#endif
-+ {
-+ ret = drm_bo_wait(bo, 0, 1, 0);
-+ if (ret)
-+ return ret;
-+
-+ drm_bo_free_old_node(bo);
-+
-+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
-+ drm_ttm_unbind(bo->ttm);
-+ drm_destroy_ttm(bo->ttm);
-+ bo->ttm = NULL;
-+ }
-+ } else {
-+
-+ /* This should help pipeline ordinary buffer moves.
-+ *
-+ * Hang old buffer memory on a new buffer object,
-+ * and leave it to be released when the GPU
-+ * operation has completed.
-+ */
-+
-+ ret = drm_buffer_object_transfer(bo, &old_obj);
-+
-+ if (ret)
-+ return ret;
-+
-+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
-+ old_obj->ttm = NULL;
-+ else
-+ bo->ttm = NULL;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(&old_obj->lru);
-+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-+ drm_bo_add_to_lru(old_obj);
-+
-+ drm_bo_usage_deref_locked(&old_obj);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ }
-+
-+ *old_mem = *new_mem;
-+ new_mem->mm_node = NULL;
-+ old_mem->mask = save_mask;
-+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
-+
-+int drm_bo_same_page(unsigned long offset,
-+ unsigned long offset2)
-+{
-+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
-+}
-+EXPORT_SYMBOL(drm_bo_same_page);
-+
-+unsigned long drm_bo_offset_end(unsigned long offset,
-+ unsigned long end)
-+{
-+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
-+ return (end < offset) ? end : offset;
-+}
-+EXPORT_SYMBOL(drm_bo_offset_end);
-+
-+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
-+{
-+ pgprot_t tmp = PAGE_KERNEL;
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+#ifdef USE_PAT_WC
-+#warning using pat
-+ if (drm_use_pat() && map_type == _DRM_TTM) {
-+ pgprot_val(tmp) |= _PAGE_PAT;
-+ return tmp;
-+ }
-+#endif
-+ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
-+ pgprot_val(tmp) |= _PAGE_PCD;
-+ pgprot_val(tmp) &= ~_PAGE_PWT;
-+ }
-+#elif defined(__powerpc__)
-+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
-+ if (map_type == _DRM_REGISTERS)
-+ pgprot_val(tmp) |= _PAGE_GUARDED;
-+#endif
-+#if defined(__ia64__)
-+ if (map_type == _DRM_TTM)
-+ tmp = pgprot_writecombine(tmp);
-+ else
-+ tmp = pgprot_noncached(tmp);
-+#endif
-+ return tmp;
-+}
-+
-+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
-+ unsigned long bus_offset, unsigned long bus_size,
-+ struct drm_bo_kmap_obj *map)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_bo_mem_reg *mem = &bo->mem;
-+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
-+
-+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
-+ map->bo_kmap_type = bo_map_premapped;
-+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
-+ } else {
-+ map->bo_kmap_type = bo_map_iomap;
-+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
-+ }
-+ return (!map->virtual) ? -ENOMEM : 0;
-+}
-+
-+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
-+ unsigned long start_page, unsigned long num_pages,
-+ struct drm_bo_kmap_obj *map)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_bo_mem_reg *mem = &bo->mem;
-+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
-+ pgprot_t prot;
-+ struct drm_ttm *ttm = bo->ttm;
-+ struct page *d;
-+ int i;
-+
-+ BUG_ON(!ttm);
-+
-+ if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
-+
-+ /*
-+ * We're mapping a single page, and the desired
-+ * page protection is consistent with the bo.
-+ */
-+
-+ map->bo_kmap_type = bo_map_kmap;
-+ map->page = drm_ttm_get_page(ttm, start_page);
-+ map->virtual = kmap(map->page);
-+ } else {
-+ /*
-+ * Populate the part we're mapping;
-+ */
-+
-+ for (i = start_page; i < start_page + num_pages; ++i) {
-+ d = drm_ttm_get_page(ttm, i);
-+ if (!d)
-+ return -ENOMEM;
-+ }
-+
-+ /*
-+ * We need to use vmap to get the desired page protection
-+ * or to make the buffer object look contigous.
-+ */
-+
-+ prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
-+ PAGE_KERNEL :
-+ drm_kernel_io_prot(man->drm_bus_maptype);
-+ map->bo_kmap_type = bo_map_vmap;
-+ map->virtual = vmap(ttm->pages + start_page,
-+ num_pages, 0, prot);
-+ }
-+ return (!map->virtual) ? -ENOMEM : 0;
-+}
-+
-+/*
-+ * This function is to be used for kernel mapping of buffer objects.
-+ * It chooses the appropriate mapping method depending on the memory type
-+ * and caching policy the buffer currently has.
-+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
-+ * consumes vmalloc space. Be restrictive with such mappings.
-+ * Mapping single pages usually returns the logical kernel address,
-+ * (which is fast)
-+ * BUG may use slower temporary mappings for high memory pages or
-+ * uncached / write-combined pages.
-+ *
-+ * The function fills in a drm_bo_kmap_obj which can be used to return the
-+ * kernel virtual address of the buffer.
-+ *
-+ * Code servicing a non-priviliged user request is only allowed to map one
-+ * page at a time. We might need to implement a better scheme to stop such
-+ * processes from consuming all vmalloc space.
-+ */
-+
-+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
-+ unsigned long num_pages, struct drm_bo_kmap_obj *map)
-+{
-+ int ret;
-+ unsigned long bus_base;
-+ unsigned long bus_offset;
-+ unsigned long bus_size;
-+
-+ map->virtual = NULL;
-+
-+ if (num_pages > bo->num_pages)
-+ return -EINVAL;
-+ if (start_page > bo->num_pages)
-+ return -EINVAL;
-+#if 0
-+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
-+ return -EPERM;
-+#endif
-+ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
-+ &bus_offset, &bus_size);
-+
-+ if (ret)
-+ return ret;
-+
-+ if (bus_size == 0) {
-+ return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
-+ } else {
-+ bus_offset += start_page << PAGE_SHIFT;
-+ bus_size = num_pages << PAGE_SHIFT;
-+ return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
-+ }
-+}
-+EXPORT_SYMBOL(drm_bo_kmap);
-+
-+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
-+{
-+ if (!map->virtual)
-+ return;
-+
-+ switch (map->bo_kmap_type) {
-+ case bo_map_iomap:
-+ iounmap(map->virtual);
-+ break;
-+ case bo_map_vmap:
-+ vunmap(map->virtual);
-+ break;
-+ case bo_map_kmap:
-+ kunmap(map->page);
-+ break;
-+ case bo_map_premapped:
-+ break;
-+ default:
-+ BUG();
-+ }
-+ map->virtual = NULL;
-+ map->page = NULL;
-+}
-+EXPORT_SYMBOL(drm_bo_kunmap);
-Index: linux-2.6.27/drivers/gpu/drm/drm_bufs.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_bufs.c 2008-10-09 23:13:53.000000000 +0100
-+++ linux-2.6.27/drivers/gpu/drm/drm_bufs.c 2009-02-05 13:29:33.000000000 +0000
-@@ -409,6 +409,7 @@
- break;
- case _DRM_SHM:
- vfree(map->handle);
-+ dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
- break;
- case _DRM_AGP:
- case _DRM_SCATTER_GATHER:
-@@ -419,6 +420,8 @@
- dmah.size = map->size;
- __drm_pci_free(dev, &dmah);
- break;
-+ case _DRM_TTM:
-+ BUG_ON(1);
- }
- drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-
-Index: linux-2.6.27/drivers/gpu/drm/drm_crtc.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_crtc.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,2170 @@
-+/*
-+ * Copyright (c) 2006-2007 Intel Corporation
-+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
-+ *
-+ * DRM core CRTC related functions
-+ *
-+ * Permission to use, copy, modify, distribute, and sell this software and its
-+ * documentation for any purpose is hereby granted without fee, provided that
-+ * the above copyright notice appear in all copies and that both that copyright
-+ * notice and this permission notice appear in supporting documentation, and
-+ * that the name of the copyright holders not be used in advertising or
-+ * publicity pertaining to distribution of the software without specific,
-+ * written prior permission. The copyright holders make no representations
-+ * about the suitability of this software for any purpose. It is provided "as
-+ * is" without express or implied warranty.
-+ *
-+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
-+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
-+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-+ * OF THIS SOFTWARE.
-+ *
-+ * Authors:
-+ * Keith Packard
-+ * Eric Anholt <eric@anholt.net>
-+ * Dave Airlie <airlied@linux.ie>
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+#include <linux/list.h>
-+#include "drm.h"
-+#include "drmP.h"
-+#include "drm_crtc.h"
-+
-+/**
-+ * drm_idr_get - allocate a new identifier
-+ * @dev: DRM device
-+ * @ptr: object pointer, used to generate unique ID
-+ *
-+ * LOCKING:
-+ * Caller must hold DRM mode_config lock.
-+ *
-+ * Create a unique identifier based on @ptr in @dev's identifier space. Used
-+ * for tracking modes, CRTCs and outputs.
-+ *
-+ * RETURNS:
-+ * New unique (relative to other objects in @dev) integer identifier for the
-+ * object.
-+ */
-+int drm_idr_get(struct drm_device *dev, void *ptr)
-+{
-+ int new_id = 0;
-+ int ret;
-+again:
-+ if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
-+ DRM_ERROR("Ran out memory getting a mode number\n");
-+ return 0;
-+ }
-+
-+ ret = idr_get_new_above(&dev->mode_config.crtc_idr, ptr, 1, &new_id);
-+ if (ret == -EAGAIN)
-+ goto again;
-+
-+ return new_id;
-+}
-+
-+/**
-+ * drm_idr_put - free an identifer
-+ * @dev: DRM device
-+ * @id: ID to free
-+ *
-+ * LOCKING:
-+ * Caller must hold DRM mode_config lock.
-+ *
-+ * Free @id from @dev's unique identifier pool.
-+ */
-+void drm_idr_put(struct drm_device *dev, int id)
-+{
-+ idr_remove(&dev->mode_config.crtc_idr, id);
-+}
-+
-+/**
-+ * drm_crtc_from_fb - find the CRTC structure associated with an fb
-+ * @dev: DRM device
-+ * @fb: framebuffer in question
-+ *
-+ * LOCKING:
-+ * Caller must hold mode_config lock.
-+ *
-+ * Find CRTC in the mode_config structure that matches @fb.
-+ *
-+ * RETURNS:
-+ * Pointer to the CRTC or NULL if it wasn't found.
-+ */
-+struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
-+ struct drm_framebuffer *fb)
-+{
-+ struct drm_crtc *crtc;
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ if (crtc->fb == fb)
-+ return crtc;
-+ }
-+ return NULL;
-+}
-+
-+/**
-+ * drm_framebuffer_create - create a new framebuffer object
-+ * @dev: DRM device
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Creates a new framebuffer objects and adds it to @dev's DRM mode_config.
-+ *
-+ * RETURNS:
-+ * Pointer to new framebuffer or NULL on error.
-+ */
-+struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev)
-+{
-+ struct drm_framebuffer *fb;
-+
-+ /* Limit to single framebuffer for now */
-+ if (dev->mode_config.num_fb > 1) {
-+ mutex_unlock(&dev->mode_config.mutex);
-+ DRM_ERROR("Attempt to add multiple framebuffers failed\n");
-+ return NULL;
-+ }
-+
-+ fb = kzalloc(sizeof(struct drm_framebuffer), GFP_KERNEL);
-+ if (!fb)
-+ return NULL;
-+
-+ fb->id = drm_idr_get(dev, fb);
-+ fb->dev = dev;
-+ dev->mode_config.num_fb++;
-+ list_add(&fb->head, &dev->mode_config.fb_list);
-+
-+ return fb;
-+}
-+EXPORT_SYMBOL(drm_framebuffer_create);
-+
-+/**
-+ * drm_framebuffer_destroy - remove a framebuffer object
-+ * @fb: framebuffer to remove
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
-+ * it, setting it to NULL.
-+ */
-+void drm_framebuffer_destroy(struct drm_framebuffer *fb)
-+{
-+ struct drm_device *dev = fb->dev;
-+ struct drm_crtc *crtc;
-+
-+ /* remove from any CRTC */
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ if (crtc->fb == fb)
-+ crtc->fb = NULL;
-+ }
-+
-+ drm_idr_put(dev, fb->id);
-+ list_del(&fb->head);
-+ dev->mode_config.num_fb--;
-+
-+ kfree(fb);
-+}
-+EXPORT_SYMBOL(drm_framebuffer_destroy);
-+
-+/**
-+ * drm_crtc_create - create a new CRTC object
-+ * @dev: DRM device
-+ * @funcs: callbacks for the new CRTC
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Creates a new CRTC object and adds it to @dev's mode_config structure.
-+ *
-+ * RETURNS:
-+ * Pointer to new CRTC object or NULL on error.
-+ */
-+struct drm_crtc *drm_crtc_create(struct drm_device *dev,
-+ const struct drm_crtc_funcs *funcs)
-+{
-+ struct drm_crtc *crtc;
-+
-+ crtc = kzalloc(sizeof(struct drm_crtc), GFP_KERNEL);
-+ if (!crtc)
-+ return NULL;
-+
-+ crtc->dev = dev;
-+ crtc->funcs = funcs;
-+
-+ crtc->id = drm_idr_get(dev, crtc);
-+
-+ list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
-+ dev->mode_config.num_crtc++;
-+
-+ return crtc;
-+}
-+EXPORT_SYMBOL(drm_crtc_create);
-+
-+/**
-+ * drm_crtc_destroy - remove a CRTC object
-+ * @crtc: CRTC to remove
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Cleanup @crtc. Calls @crtc's cleanup function, then removes @crtc from
-+ * its associated DRM device's mode_config. Frees it afterwards.
-+ */
-+void drm_crtc_destroy(struct drm_crtc *crtc)
-+{
-+ struct drm_device *dev = crtc->dev;
-+
-+ if (crtc->funcs->cleanup)
-+ (*crtc->funcs->cleanup)(crtc);
-+
-+ drm_idr_put(dev, crtc->id);
-+ list_del(&crtc->head);
-+ dev->mode_config.num_crtc--;
-+ kfree(crtc);
-+}
-+EXPORT_SYMBOL(drm_crtc_destroy);
-+
-+/**
-+ * drm_crtc_in_use - check if a given CRTC is in a mode_config
-+ * @crtc: CRTC to check
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Walk @crtc's DRM device's mode_config and see if it's in use.
-+ *
-+ * RETURNS:
-+ * True if @crtc is part of the mode_config, false otherwise.
-+ */
-+bool drm_crtc_in_use(struct drm_crtc *crtc)
-+{
-+ struct drm_output *output;
-+ struct drm_device *dev = crtc->dev;
-+ /* FIXME: Locking around list access? */
-+ list_for_each_entry(output, &dev->mode_config.output_list, head)
-+ if (output->crtc == crtc)
-+ return true;
-+ return false;
-+}
-+EXPORT_SYMBOL(drm_crtc_in_use);
-+
-+/*
-+ * Detailed mode info for a standard 640x480@60Hz monitor
-+ */
-+static struct drm_display_mode std_mode[] = {
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 25200, 640, 656,
-+ 752, 800, 0, 480, 490, 492, 525, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 640x480@60Hz */
-+};
-+
-+/**
-+ * drm_crtc_probe_output_modes - get complete set of display modes
-+ * @dev: DRM device
-+ * @maxX: max width for modes
-+ * @maxY: max height for modes
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Based on @dev's mode_config layout, scan all the outputs and try to detect
-+ * modes on them. Modes will first be added to the output's probed_modes
-+ * list, then culled (based on validity and the @maxX, @maxY parameters) and
-+ * put into the normal modes list.
-+ *
-+ * Intended to be used either at bootup time or when major configuration
-+ * changes have occurred.
-+ *
-+ * FIXME: take into account monitor limits
-+ */
-+void drm_crtc_probe_output_modes(struct drm_device *dev, int maxX, int maxY)
-+{
-+ struct drm_output *output;
-+ struct drm_display_mode *mode, *t;
-+ int ret;
-+ //if (maxX == 0 || maxY == 0)
-+ // TODO
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+
-+ /* set all modes to the unverified state */
-+ list_for_each_entry_safe(mode, t, &output->modes, head)
-+ mode->status = MODE_UNVERIFIED;
-+
-+ output->status = (*output->funcs->detect)(output);
-+
-+ if (output->status == output_status_disconnected) {
-+ DRM_DEBUG("%s is disconnected\n", output->name);
-+ /* TODO set EDID to NULL */
-+ continue;
-+ }
-+
-+ ret = (*output->funcs->get_modes)(output);
-+
-+ if (ret) {
-+ drm_mode_output_list_update(output);
-+ }
-+
-+ if (maxX && maxY)
-+ drm_mode_validate_size(dev, &output->modes, maxX,
-+ maxY, 0);
-+ list_for_each_entry_safe(mode, t, &output->modes, head) {
-+ if (mode->status == MODE_OK)
-+ mode->status = (*output->funcs->mode_valid)(output,mode);
-+ }
-+
-+
-+ drm_mode_prune_invalid(dev, &output->modes, 1);
-+
-+ if (list_empty(&output->modes)) {
-+ struct drm_display_mode *stdmode;
-+
-+ DRM_DEBUG("No valid modes on %s\n", output->name);
-+
-+ /* Should we do this here ???
-+ * When no valid EDID modes are available we end up
-+ * here and bailed in the past, now we add a standard
-+ * 640x480@60Hz mode and carry on.
-+ */
-+ stdmode = drm_mode_duplicate(dev, &std_mode[0]);
-+ drm_mode_probed_add(output, stdmode);
-+ drm_mode_list_concat(&output->probed_modes,
-+ &output->modes);
-+
-+ DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n",
-+ output->name);
-+ }
-+
-+ drm_mode_sort(&output->modes);
-+
-+ DRM_DEBUG("Probed modes for %s\n", output->name);
-+ list_for_each_entry_safe(mode, t, &output->modes, head) {
-+ mode->vrefresh = drm_mode_vrefresh(mode);
-+
-+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-+ drm_mode_debug_printmodeline(dev, mode);
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(drm_crtc_probe_output_modes);
-+
-+/**
-+ * drm_crtc_set_mode - set a mode
-+ * @crtc: CRTC to program
-+ * @mode: mode to use
-+ * @x: width of mode
-+ * @y: height of mode
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Try to set @mode on @crtc. Give @crtc and its associated outputs a chance
-+ * to fixup or reject the mode prior to trying to set it.
-+ *
-+ * RETURNS:
-+ * True if the mode was set successfully, or false otherwise.
-+ */
-+bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
-+ int x, int y)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ struct drm_display_mode *adjusted_mode, saved_mode;
-+ int saved_x, saved_y;
-+ bool didLock = false;
-+ bool ret = false;
-+ struct drm_output *output;
-+
-+ adjusted_mode = drm_mode_duplicate(dev, mode);
-+
-+ crtc->enabled = drm_crtc_in_use(crtc);
-+
-+ if (!crtc->enabled) {
-+ return true;
-+ }
-+
-+ didLock = crtc->funcs->lock(crtc);
-+
-+ saved_mode = crtc->mode;
-+ saved_x = crtc->x;
-+ saved_y = crtc->y;
-+
-+ /* Update crtc values up front so the driver can rely on them for mode
-+ * setting.
-+ */
-+ crtc->mode = *mode;
-+ crtc->x = x;
-+ crtc->y = y;
-+
-+ /* XXX short-circuit changes to base location only */
-+
-+ /* Pass our mode to the outputs and the CRTC to give them a chance to
-+ * adjust it according to limitations or output properties, and also
-+ * a chance to reject the mode entirely.
-+ */
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+
-+ if (output->crtc != crtc)
-+ continue;
-+
-+ if (!output->funcs->mode_fixup(output, mode, adjusted_mode)) {
-+ goto done;
-+ }
-+ }
-+
-+ if (!crtc->funcs->mode_fixup(crtc, mode, adjusted_mode)) {
-+ goto done;
-+ }
-+
-+ /* Prepare the outputs and CRTCs before setting the mode. */
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+
-+ if (output->crtc != crtc)
-+ continue;
-+
-+ /* Disable the output as the first thing we do. */
-+ output->funcs->prepare(output);
-+ }
-+
-+ crtc->funcs->prepare(crtc);
-+
-+ /* Set up the DPLL and any output state that needs to adjust or depend
-+ * on the DPLL.
-+ */
-+ crtc->funcs->mode_set(crtc, mode, adjusted_mode, x, y);
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+
-+ if (output->crtc != crtc)
-+ continue;
-+
-+ DRM_INFO("%s: set mode %s %x\n", output->name, mode->name, mode->mode_id);
-+
-+ output->funcs->mode_set(output, mode, adjusted_mode);
-+ }
-+
-+ /* Now, enable the clocks, plane, pipe, and outputs that we set up. */
-+ crtc->funcs->commit(crtc);
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+
-+ if (output->crtc != crtc)
-+ continue;
-+
-+ output->funcs->commit(output);
-+
-+#if 0 // TODO def RANDR_12_INTERFACE
-+ if (output->randr_output)
-+ RRPostPendingProperties (output->randr_output);
-+#endif
-+ }
-+
-+ /* XXX free adjustedmode */
-+ drm_mode_destroy(dev, adjusted_mode);
-+ ret = 1;
-+ /* TODO */
-+// if (scrn->pScreen)
-+// drm_crtc_set_screen_sub_pixel_order(dev);
-+
-+done:
-+ if (!ret) {
-+ crtc->x = saved_x;
-+ crtc->y = saved_y;
-+ crtc->mode = saved_mode;
-+ }
-+
-+ if (didLock)
-+ crtc->funcs->unlock (crtc);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_crtc_set_mode);
-+
-+/**
-+ * drm_disable_unused_functions - disable unused objects
-+ * @dev: DRM device
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * If an output or CRTC isn't part of @dev's mode_config, it can be disabled
-+ * by calling its dpms function, which should power it off.
-+ */
-+void drm_disable_unused_functions(struct drm_device *dev)
-+{
-+ struct drm_output *output;
-+ struct drm_crtc *crtc;
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ if (!output->crtc)
-+ (*output->funcs->dpms)(output, DPMSModeOff);
-+ }
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ if (!crtc->enabled)
-+ crtc->funcs->dpms(crtc, DPMSModeOff);
-+ }
-+}
-+EXPORT_SYMBOL(drm_disable_unused_functions);
-+
-+/**
-+ * drm_mode_probed_add - add a mode to the specified output's probed mode list
-+ * @output: output the new mode
-+ * @mode: mode data
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Add @mode to @output's mode list for later use.
-+ */
-+void drm_mode_probed_add(struct drm_output *output,
-+ struct drm_display_mode *mode)
-+{
-+ list_add(&mode->head, &output->probed_modes);
-+}
-+EXPORT_SYMBOL(drm_mode_probed_add);
-+
-+/**
-+ * drm_mode_remove - remove and free a mode
-+ * @output: output list to modify
-+ * @mode: mode to remove
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Remove @mode from @output's mode list, then free it.
-+ */
-+void drm_mode_remove(struct drm_output *output, struct drm_display_mode *mode)
-+{
-+ list_del(&mode->head);
-+ kfree(mode);
-+}
-+EXPORT_SYMBOL(drm_mode_remove);
-+
-+/**
-+ * drm_output_create - create a new output
-+ * @dev: DRM device
-+ * @funcs: callbacks for this output
-+ * @name: user visible name of the output
-+ *
-+ * LOCKING:
-+ * Caller must hold @dev's mode_config lock.
-+ *
-+ * Creates a new drm_output structure and adds it to @dev's mode_config
-+ * structure.
-+ *
-+ * RETURNS:
-+ * Pointer to the new output or NULL on error.
-+ */
-+struct drm_output *drm_output_create(struct drm_device *dev,
-+ const struct drm_output_funcs *funcs,
-+ const char *name)
-+{
-+ struct drm_output *output = NULL;
-+
-+ output = kzalloc(sizeof(struct drm_output), GFP_KERNEL);
-+ if (!output)
-+ return NULL;
-+
-+ output->dev = dev;
-+ output->funcs = funcs;
-+ output->id = drm_idr_get(dev, output);
-+ if (name)
-+ strncpy(output->name, name, DRM_OUTPUT_LEN);
-+ output->name[DRM_OUTPUT_LEN - 1] = 0;
-+ output->subpixel_order = SubPixelUnknown;
-+ INIT_LIST_HEAD(&output->probed_modes);
-+ INIT_LIST_HEAD(&output->modes);
-+ /* randr_output? */
-+ /* output_set_monitor(output)? */
-+ /* check for output_ignored(output)? */
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ list_add_tail(&output->head, &dev->mode_config.output_list);
-+ dev->mode_config.num_output++;
-+
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ return output;
-+
-+}
-+EXPORT_SYMBOL(drm_output_create);
-+
-+/**
-+ * drm_output_destroy - remove an output
-+ * @output: output to remove
-+ *
-+ * LOCKING:
-+ * Caller must hold @dev's mode_config lock.
-+ *
-+ * Call @output's cleanup function, then remove the output from the DRM
-+ * mode_config after freeing @output's modes.
-+ */
-+void drm_output_destroy(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ struct drm_display_mode *mode, *t;
-+
-+ if (*output->funcs->cleanup)
-+ (*output->funcs->cleanup)(output);
-+
-+ list_for_each_entry_safe(mode, t, &output->probed_modes, head)
-+ drm_mode_remove(output, mode);
-+
-+ list_for_each_entry_safe(mode, t, &output->modes, head)
-+ drm_mode_remove(output, mode);
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ drm_idr_put(dev, output->id);
-+ list_del(&output->head);
-+ mutex_unlock(&dev->mode_config.mutex);
-+ kfree(output);
-+}
-+EXPORT_SYMBOL(drm_output_destroy);
-+
-+/**
-+ * drm_output_rename - rename an output
-+ * @output: output to rename
-+ * @name: new user visible name
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Simply stuff a new name into @output's name field, based on @name.
-+ *
-+ * RETURNS:
-+ * True if the name was changed, false otherwise.
-+ */
-+bool drm_output_rename(struct drm_output *output, const char *name)
-+{
-+ if (!name)
-+ return false;
-+
-+ strncpy(output->name, name, DRM_OUTPUT_LEN);
-+ output->name[DRM_OUTPUT_LEN - 1] = 0;
-+
-+ DRM_DEBUG("Changed name to %s\n", output->name);
-+// drm_output_set_monitor(output);
-+// if (drm_output_ignored(output))
-+// return FALSE;
-+
-+ return 1;
-+}
-+EXPORT_SYMBOL(drm_output_rename);
-+
-+/**
-+ * drm_mode_create - create a new display mode
-+ * @dev: DRM device
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Create a new drm_display_mode, give it an ID, and return it.
-+ *
-+ * RETURNS:
-+ * Pointer to new mode on success, NULL on error.
-+ */
-+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
-+{
-+ struct drm_display_mode *nmode;
-+
-+ nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
-+ if (!nmode)
-+ return NULL;
-+
-+ nmode->mode_id = drm_idr_get(dev, nmode);
-+ return nmode;
-+}
-+EXPORT_SYMBOL(drm_mode_create);
-+
-+/**
-+ * drm_mode_destroy - remove a mode
-+ * @dev: DRM device
-+ * @mode: mode to remove
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Free @mode's unique identifier, then free it.
-+ */
-+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
-+{
-+ drm_idr_put(dev, mode->mode_id);
-+
-+ kfree(mode);
-+}
-+EXPORT_SYMBOL(drm_mode_destroy);
-+
-+/**
-+ * drm_mode_config_init - initialize DRM mode_configuration structure
-+ * @dev: DRM device
-+ *
-+ * LOCKING:
-+ * None, should happen single threaded at init time.
-+ *
-+ * Initialize @dev's mode_config structure, used for tracking the graphics
-+ * configuration of @dev.
-+ */
-+void drm_mode_config_init(struct drm_device *dev)
-+{
-+ mutex_init(&dev->mode_config.mutex);
-+ INIT_LIST_HEAD(&dev->mode_config.fb_list);
-+ INIT_LIST_HEAD(&dev->mode_config.crtc_list);
-+ INIT_LIST_HEAD(&dev->mode_config.output_list);
-+ INIT_LIST_HEAD(&dev->mode_config.property_list);
-+ INIT_LIST_HEAD(&dev->mode_config.usermode_list);
-+ idr_init(&dev->mode_config.crtc_idr);
-+}
-+EXPORT_SYMBOL(drm_mode_config_init);
-+
-+/**
-+ * drm_get_buffer_object - find the buffer object for a given handle
-+ * @dev: DRM device
-+ * @bo: pointer to caller's buffer_object pointer
-+ * @handle: handle to lookup
-+ *
-+ * LOCKING:
-+ * Must take @dev's struct_mutex to protect buffer object lookup.
-+ *
-+ * Given @handle, lookup the buffer object in @dev and put it in the caller's
-+ * @bo pointer.
-+ *
-+ * RETURNS:
-+ * Zero on success, -EINVAL if the handle couldn't be found.
-+ */
-+static int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
-+{
-+ struct drm_user_object *uo;
-+ struct drm_hash_item *hash;
-+ int ret;
-+
-+ *bo = NULL;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
-+ if (ret) {
-+ DRM_ERROR("Couldn't find handle.\n");
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
-+ if (uo->type != drm_buffer_type) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ *bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-+ ret = 0;
-+out_err:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+char drm_init_mode[32];
-+int drm_init_xres;
-+int drm_init_yres;
-+EXPORT_SYMBOL(drm_init_mode);
-+EXPORT_SYMBOL(drm_init_xres);
-+EXPORT_SYMBOL(drm_init_yres);
-+
-+/**
-+ * drm_pick_crtcs - pick crtcs for output devices
-+ * @dev: DRM device
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ */
-+static void drm_pick_crtcs (struct drm_device *dev)
-+{
-+ int c, o, assigned;
-+ struct drm_output *output, *output_equal;
-+ struct drm_crtc *crtc;
-+ struct drm_display_mode *des_mode = NULL, *modes, *modes_equal;
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ output->crtc = NULL;
-+
-+ /* Don't hook up outputs that are disconnected ??
-+ *
-+ * This is debateable. Do we want fixed /dev/fbX or
-+ * dynamic on hotplug (need mode code for that though) ?
-+ *
-+ * If we don't hook up outputs now, then we only create
-+ * /dev/fbX for the output that's enabled, that's good as
-+ * the users console will be on that output.
-+ *
-+ * If we do hook up outputs that are disconnected now, then
-+ * the user may end up having to muck about with the fbcon
-+ * map flags to assign his console to the enabled output. Ugh.
-+ */
-+ if (output->status != output_status_connected)
-+ continue;
-+
-+ des_mode = NULL;
-+ list_for_each_entry(des_mode, &output->modes, head) {
-+ if (/* !strcmp(des_mode->name, drm_init_mode) || */
-+ des_mode->hdisplay==drm_init_xres
-+ && des_mode->vdisplay==drm_init_yres) {
-+ des_mode->type |= DRM_MODE_TYPE_USERPREF;
-+ break;
-+ }
-+
-+ }
-+ /* No userdef mode (initial mode set from module parameter) */
-+ if (!des_mode || !(des_mode->type & DRM_MODE_TYPE_USERPREF)) {
-+ list_for_each_entry(des_mode, &output->modes, head) {
-+ if (des_mode->type & DRM_MODE_TYPE_PREFERRED)
-+ break;
-+ }
-+ }
-+
-+ /* No preferred mode, and no default mode, let's just
-+ select the first available */
-+ if (!des_mode || (!(des_mode->type & DRM_MODE_TYPE_PREFERRED)
-+ && !(des_mode->type & DRM_MODE_TYPE_USERPREF))) {
-+ list_for_each_entry(des_mode, &output->modes, head) {
-+ if (des_mode)
-+ break;
-+ }
-+ }
-+
-+ c = -1;
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ assigned = 0;
-+
-+ c++;
-+ if ((output->possible_crtcs & (1 << c)) == 0)
-+ continue;
-+
-+ list_for_each_entry(output_equal, &dev->mode_config.output_list, head) {
-+ if (output->id == output_equal->id)
-+ continue;
-+
-+ /* Find out if crtc has been assigned before */
-+ if (output_equal->crtc == crtc)
-+ assigned = 1;
-+ }
-+
-+#if 1 /* continue for now */
-+ if (assigned)
-+ continue;
-+#endif
-+
-+ o = -1;
-+ list_for_each_entry(output_equal, &dev->mode_config.output_list, head) {
-+ o++;
-+ if (output->id == output_equal->id)
-+ continue;
-+
-+ list_for_each_entry(modes, &output->modes, head) {
-+ list_for_each_entry(modes_equal, &output_equal->modes, head) {
-+ if (drm_mode_equal (modes, modes_equal)) {
-+ if ((output->possible_clones & output_equal->possible_clones) && (output_equal->crtc == crtc)) {
-+ printk("Cloning %s (0x%lx) to %s (0x%lx)\n",output->name,output->possible_clones,output_equal->name,output_equal->possible_clones);
-+ assigned = 0;
-+ goto clone;
-+ }
-+ }
-+ }
-+ }
-+ }
-+
-+clone:
-+ /* crtc has been assigned skip it */
-+ if (assigned)
-+ continue;
-+
-+ /* Found a CRTC to attach to, do it ! */
-+ output->crtc = crtc;
-+ output->crtc->desired_mode = des_mode;
-+ output->initial_x = 0;
-+ output->initial_y = 0;
-+ DRM_DEBUG("Desired mode for CRTC %d is 0x%x:%s\n",c,des_mode->mode_id, des_mode->name);
-+ break;
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(drm_pick_crtcs);
-+
-+/**
-+ * drm_initial_config - setup a sane initial output configuration
-+ * @dev: DRM device
-+ * @can_grow: this configuration is growable
-+ *
-+ * LOCKING:
-+ * Called at init time, must take mode config lock.
-+ *
-+ * Scan the CRTCs and outputs and try to put together an initial setup.
-+ * At the moment, this is a cloned configuration across all heads with
-+ * a new framebuffer object as the backing store.
-+ *
-+ * RETURNS:
-+ * Zero if everything went ok, nonzero otherwise.
-+ */
-+bool drm_initial_config(struct drm_device *dev, bool can_grow)
-+{
-+ struct drm_output *output;
-+ struct drm_crtc *crtc;
-+ int ret = false;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+
-+ drm_crtc_probe_output_modes(dev, 2048, 2048);
-+
-+ drm_pick_crtcs(dev);
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+
-+ /* can't setup the crtc if there's no assigned mode */
-+ if (!crtc->desired_mode)
-+ continue;
-+
-+ /* Now setup the fbdev for attached crtcs */
-+ dev->driver->fb_probe(dev, crtc);
-+ }
-+
-+ /* This is a little screwy, as we've already walked the outputs
-+ * above, but it's a little bit of magic too. There's the potential
-+ * for things not to get setup above if an existing device gets
-+ * re-assigned thus confusing the hardware. By walking the outputs
-+ * this fixes up their crtc's.
-+ */
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+
-+ /* can't setup the output if there's no assigned mode */
-+ if (!output->crtc || !output->crtc->desired_mode)
-+ continue;
-+
-+ /* and needs an attached fb */
-+ if (output->crtc->fb)
-+ drm_crtc_set_mode(output->crtc, output->crtc->desired_mode, 0, 0);
-+ }
-+
-+ drm_disable_unused_functions(dev);
-+
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_initial_config);
-+
-+/**
-+ * drm_mode_config_cleanup - free up DRM mode_config info
-+ * @dev: DRM device
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Free up all the outputs and CRTCs associated with this DRM device, then
-+ * free up the framebuffers and associated buffer objects.
-+ *
-+ * FIXME: cleanup any dangling user buffer objects too
-+ */
-+void drm_mode_config_cleanup(struct drm_device *dev)
-+{
-+ struct drm_output *output, *ot;
-+ struct drm_crtc *crtc, *ct;
-+ struct drm_framebuffer *fb, *fbt;
-+ struct drm_display_mode *mode, *mt;
-+ struct drm_property *property, *pt;
-+
-+ list_for_each_entry_safe(output, ot, &dev->mode_config.output_list, head) {
-+ drm_output_destroy(output);
-+ }
-+
-+ list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) {
-+ drm_property_destroy(dev, property);
-+ }
-+
-+ list_for_each_entry_safe(mode, mt, &dev->mode_config.usermode_list, head) {
-+ drm_mode_destroy(dev, mode);
-+ }
-+
-+ list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-+ if (fb->bo->type != drm_bo_type_kernel)
-+ drm_framebuffer_destroy(fb);
-+ else
-+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
-+ }
-+
-+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-+ drm_crtc_destroy(crtc);
-+ }
-+
-+}
-+EXPORT_SYMBOL(drm_mode_config_cleanup);
-+
-+/**
-+ * drm_crtc_set_config - set a new config from userspace
-+ * @crtc: CRTC to setup
-+ * @crtc_info: user provided configuration
-+ * @new_mode: new mode to set
-+ * @output_set: set of outputs for the new config
-+ * @fb: new framebuffer
-+ *
-+ * LOCKING:
-+ * Caller must hold mode config lock.
-+ *
-+ * Setup a new configuration, provided by the user in @crtc_info, and enable
-+ * it.
-+ *
-+ * RETURNS:
-+ * Zero. (FIXME)
-+ */
-+int drm_crtc_set_config(struct drm_crtc *crtc, struct drm_mode_crtc *crtc_info, struct drm_display_mode *new_mode, struct drm_output **output_set, struct drm_framebuffer *fb)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ struct drm_crtc **save_crtcs, *new_crtc;
-+ bool save_enabled = crtc->enabled;
-+ bool changed;
-+ struct drm_output *output;
-+ int count = 0, ro;
-+
-+ save_crtcs = kzalloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc *), GFP_KERNEL);
-+ if (!save_crtcs)
-+ return -ENOMEM;
-+
-+ if (crtc->fb != fb)
-+ changed = true;
-+
-+ if (crtc_info->x != crtc->x || crtc_info->y != crtc->y)
-+ changed = true;
-+
-+ if (new_mode && (crtc->mode.mode_id != new_mode->mode_id))
-+ changed = true;
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ save_crtcs[count++] = output->crtc;
-+
-+ if (output->crtc == crtc)
-+ new_crtc = NULL;
-+ else
-+ new_crtc = output->crtc;
-+
-+ for (ro = 0; ro < crtc_info->count_outputs; ro++) {
-+ if (output_set[ro] == output)
-+ new_crtc = crtc;
-+ }
-+ if (new_crtc != output->crtc) {
-+ changed = true;
-+ output->crtc = new_crtc;
-+ }
-+ }
-+
-+ if (changed) {
-+ crtc->fb = fb;
-+ crtc->enabled = (new_mode != NULL);
-+ if (new_mode != NULL) {
-+ DRM_DEBUG("attempting to set mode from userspace\n");
-+ drm_mode_debug_printmodeline(dev, new_mode);
-+ if (!drm_crtc_set_mode(crtc, new_mode, crtc_info->x,
-+ crtc_info->y)) {
-+ crtc->enabled = save_enabled;
-+ count = 0;
-+ list_for_each_entry(output, &dev->mode_config.output_list, head)
-+ output->crtc = save_crtcs[count++];
-+ kfree(save_crtcs);
-+ return -EINVAL;
-+ }
-+ crtc->desired_x = crtc_info->x;
-+ crtc->desired_y = crtc_info->y;
-+ crtc->desired_mode = new_mode;
-+ }
-+ drm_disable_unused_functions(dev);
-+ }
-+ kfree(save_crtcs);
-+ return 0;
-+}
-+
-+/**
-+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
-+ * @out: drm_mode_modeinfo struct to return to the user
-+ * @in: drm_display_mode to use
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
-+ * the user.
-+ */
-+void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, struct drm_display_mode *in)
-+{
-+
-+ out->id = in->mode_id;
-+ out->clock = in->clock;
-+ out->hdisplay = in->hdisplay;
-+ out->hsync_start = in->hsync_start;
-+ out->hsync_end = in->hsync_end;
-+ out->htotal = in->htotal;
-+ out->hskew = in->hskew;
-+ out->vdisplay = in->vdisplay;
-+ out->vsync_start = in->vsync_start;
-+ out->vsync_end = in->vsync_end;
-+ out->vtotal = in->vtotal;
-+ out->vscan = in->vscan;
-+ out->vrefresh = in->vrefresh;
-+ out->flags = in->flags;
-+ out->type = in->type;
-+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
-+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
-+}
-+
-+/**
-+ * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
-+ * @out: drm_display_mode to return to the user
-+ * @in: drm_mode_modeinfo to use
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Convert a drmo_mode_modeinfo into a drm_display_mode structure to return to
-+ * the caller.
-+ */
-+void drm_crtc_convert_umode(struct drm_display_mode *out, struct drm_mode_modeinfo *in)
-+{
-+ out->clock = in->clock;
-+ out->hdisplay = in->hdisplay;
-+ out->hsync_start = in->hsync_start;
-+ out->hsync_end = in->hsync_end;
-+ out->htotal = in->htotal;
-+ out->hskew = in->hskew;
-+ out->vdisplay = in->vdisplay;
-+ out->vsync_start = in->vsync_start;
-+ out->vsync_end = in->vsync_end;
-+ out->vtotal = in->vtotal;
-+ out->vscan = in->vscan;
-+ out->vrefresh = in->vrefresh;
-+ out->flags = in->flags;
-+ out->type = in->type;
-+ strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
-+ out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
-+}
-+
-+/**
-+ * drm_mode_getresources - get graphics configuration
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * LOCKING:
-+ * Takes mode config lock.
-+ *
-+ * Construct a set of configuration description structures and return
-+ * them to the user, including CRTC, output and framebuffer configuration.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_getresources(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_card_res *card_res = data;
-+ struct list_head *lh;
-+ struct drm_framebuffer *fb;
-+ struct drm_output *output;
-+ struct drm_crtc *crtc;
-+ struct drm_mode_modeinfo u_mode;
-+ struct drm_display_mode *mode;
-+ int ret = 0;
-+ int mode_count= 0;
-+ int output_count = 0;
-+ int crtc_count = 0;
-+ int fb_count = 0;
-+ int copied = 0;
-+
-+ memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+
-+ list_for_each(lh, &dev->mode_config.fb_list)
-+ fb_count++;
-+
-+ list_for_each(lh, &dev->mode_config.crtc_list)
-+ crtc_count++;
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list,
-+ head) {
-+ output_count++;
-+ list_for_each(lh, &output->modes)
-+ mode_count++;
-+ }
-+ list_for_each(lh, &dev->mode_config.usermode_list)
-+ mode_count++;
-+
-+ if (card_res->count_modes == 0) {
-+ DRM_DEBUG("probing modes %dx%d\n", dev->mode_config.max_width, dev->mode_config.max_height);
-+ drm_crtc_probe_output_modes(dev, dev->mode_config.max_width, dev->mode_config.max_height);
-+ mode_count = 0;
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ list_for_each(lh, &output->modes)
-+ mode_count++;
-+ }
-+ list_for_each(lh, &dev->mode_config.usermode_list)
-+ mode_count++;
-+ }
-+
-+ /* handle this in 4 parts */
-+ /* FBs */
-+ if (card_res->count_fbs >= fb_count) {
-+ copied = 0;
-+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
-+ if (put_user(fb->id, card_res->fb_id + copied))
-+ return -EFAULT;
-+ copied++;
-+ }
-+ }
-+ card_res->count_fbs = fb_count;
-+
-+ /* CRTCs */
-+ if (card_res->count_crtcs >= crtc_count) {
-+ copied = 0;
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head){
-+ DRM_DEBUG("CRTC ID is %d\n", crtc->id);
-+ if (put_user(crtc->id, card_res->crtc_id + copied))
-+ return -EFAULT;
-+ copied++;
-+ }
-+ }
-+ card_res->count_crtcs = crtc_count;
-+
-+
-+ /* Outputs */
-+ if (card_res->count_outputs >= output_count) {
-+ copied = 0;
-+ list_for_each_entry(output, &dev->mode_config.output_list,
-+ head) {
-+ DRM_DEBUG("OUTPUT ID is %d\n", output->id);
-+ if (put_user(output->id, card_res->output_id + copied))
-+ return -EFAULT;
-+ copied++;
-+ }
-+ }
-+ card_res->count_outputs = output_count;
-+
-+ /* Modes */
-+ if (card_res->count_modes >= mode_count) {
-+ copied = 0;
-+ list_for_each_entry(output, &dev->mode_config.output_list,
-+ head) {
-+ list_for_each_entry(mode, &output->modes, head) {
-+ drm_crtc_convert_to_umode(&u_mode, mode);
-+ if (copy_to_user(card_res->modes + copied,
-+ &u_mode, sizeof(u_mode)))
-+ return -EFAULT;
-+ copied++;
-+ }
-+ }
-+ /* add in user modes */
-+ list_for_each_entry(mode, &dev->mode_config.usermode_list, head) {
-+ drm_crtc_convert_to_umode(&u_mode, mode);
-+ if (copy_to_user(card_res->modes + copied, &u_mode,
-+ sizeof(u_mode)))
-+ return -EFAULT;
-+ copied++;
-+ }
-+ }
-+ card_res->count_modes = mode_count;
-+
-+ DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs,
-+ card_res->count_outputs,
-+ card_res->count_modes);
-+
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_mode_getcrtc - get CRTC configuration
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * LOCKING:
-+ * Caller? (FIXME)
-+ *
-+ * Construct a CRTC configuration structure to return to the user.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_getcrtc(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_crtc *crtc_resp = data;
-+ struct drm_crtc *crtc;
-+ struct drm_output *output;
-+ int ocount;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ crtc = idr_find(&dev->mode_config.crtc_idr, crtc_resp->crtc_id);
-+ if (!crtc || (crtc->id != crtc_resp->crtc_id)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ crtc_resp->x = crtc->x;
-+ crtc_resp->y = crtc->y;
-+
-+ if (crtc->fb)
-+ crtc_resp->fb_id = crtc->fb->id;
-+ else
-+ crtc_resp->fb_id = 0;
-+
-+ crtc_resp->outputs = 0;
-+ if (crtc->enabled) {
-+
-+ crtc_resp->mode = crtc->mode.mode_id;
-+ ocount = 0;
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ if (output->crtc == crtc)
-+ crtc_resp->outputs |= 1 << (ocount++);
-+ }
-+ } else {
-+ crtc_resp->mode = 0;
-+ }
-+
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_mode_getoutput - get output configuration
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * LOCKING:
-+ * Caller? (FIXME)
-+ *
-+ * Construct a output configuration structure to return to the user.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_getoutput(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_get_output *out_resp = data;
-+ struct drm_output *output;
-+ struct drm_display_mode *mode;
-+ int mode_count = 0;
-+ int props_count = 0;
-+ int ret = 0;
-+ int copied = 0;
-+ int i;
-+
-+ DRM_DEBUG("output id %d:\n", out_resp->output);
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ output= idr_find(&dev->mode_config.crtc_idr, out_resp->output);
-+ if (!output || (output->id != out_resp->output)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ list_for_each_entry(mode, &output->modes, head)
-+ mode_count++;
-+
-+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++)
-+ if (output->user_mode_ids[i] != 0)
-+ mode_count++;
-+
-+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
-+ if (output->property_ids[i] != 0) {
-+ props_count++;
-+ }
-+ }
-+
-+ strncpy(out_resp->name, output->name, DRM_OUTPUT_NAME_LEN);
-+ out_resp->name[DRM_OUTPUT_NAME_LEN-1] = 0;
-+
-+ out_resp->mm_width = output->mm_width;
-+ out_resp->mm_height = output->mm_height;
-+ out_resp->subpixel = output->subpixel_order;
-+ out_resp->connection = output->status;
-+ if (output->crtc)
-+ out_resp->crtc = output->crtc->id;
-+ else
-+ out_resp->crtc = 0;
-+
-+ out_resp->crtcs = output->possible_crtcs;
-+ out_resp->clones = output->possible_clones;
-+
-+ if ((out_resp->count_modes >= mode_count) && mode_count) {
-+ copied = 0;
-+ list_for_each_entry(mode, &output->modes, head) {
-+ out_resp->modes[copied++] = mode->mode_id;
-+ }
-+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
-+ if (output->user_mode_ids[i] != 0) {
-+ if (put_user(output->user_mode_ids[i], out_resp->modes + copied))
-+ return -EFAULT;
-+ copied++;
-+ }
-+ }
-+ }
-+ out_resp->count_modes = mode_count;
-+
-+ if ((out_resp->count_props >= props_count) && props_count) {
-+ copied = 0;
-+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
-+ if (output->property_ids[i] != 0) {
-+ if (put_user(output->property_ids[i], out_resp->props + copied)) {
-+ ret = -EFAULT;
-+ goto out;
-+ }
-+
-+ if (put_user(output->property_values[i], out_resp->prop_values + copied)) {
-+ ret = -EFAULT;
-+ goto out;
-+ }
-+ copied++;
-+ }
-+ }
-+ }
-+ out_resp->count_props = props_count;
-+
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_mode_setcrtc - set CRTC configuration
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * LOCKING:
-+ * Caller? (FIXME)
-+ *
-+ * Build a new CRTC configuration based on user request.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_setcrtc(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_crtc *crtc_req = data;
-+ struct drm_crtc *crtc;
-+ struct drm_output **output_set = NULL, *output;
-+ struct drm_display_mode *mode;
-+ struct drm_framebuffer *fb = NULL;
-+ int ret = 0;
-+ int i;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ crtc = idr_find(&dev->mode_config.crtc_idr, crtc_req->crtc_id);
-+ if (!crtc || (crtc->id != crtc_req->crtc_id)) {
-+ DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (crtc_req->mode) {
-+ /* if we have a mode we need a framebuffer */
-+ if (crtc_req->fb_id) {
-+ fb = idr_find(&dev->mode_config.crtc_idr, crtc_req->fb_id);
-+ if (!fb || (fb->id != crtc_req->fb_id)) {
-+ DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ }
-+ mode = idr_find(&dev->mode_config.crtc_idr, crtc_req->mode);
-+ if (!mode || (mode->mode_id != crtc_req->mode)) {
-+ struct drm_output *output;
-+
-+ list_for_each_entry(output,
-+ &dev->mode_config.output_list,
-+ head) {
-+ list_for_each_entry(mode, &output->modes,
-+ head) {
-+ drm_mode_debug_printmodeline(dev,
-+ mode);
-+ }
-+ }
-+
-+ DRM_DEBUG("Unknown mode id %d, %p\n", crtc_req->mode, mode);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ } else
-+ mode = NULL;
-+
-+ if (crtc_req->count_outputs == 0 && mode) {
-+ DRM_DEBUG("Count outputs is 0 but mode set\n");
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (crtc_req->count_outputs > 0 && !mode && !fb) {
-+ DRM_DEBUG("Count outputs is %d but no mode or fb set\n", crtc_req->count_outputs);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (crtc_req->count_outputs > 0) {
-+ u32 out_id;
-+ output_set = kmalloc(crtc_req->count_outputs *
-+ sizeof(struct drm_output *), GFP_KERNEL);
-+ if (!output_set) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ for (i = 0; i < crtc_req->count_outputs; i++) {
-+ if (get_user(out_id, &crtc_req->set_outputs[i])) {
-+ ret = -EFAULT;
-+ goto out;
-+ }
-+
-+ output = idr_find(&dev->mode_config.crtc_idr, out_id);
-+ if (!output || (out_id != output->id)) {
-+ DRM_DEBUG("Output id %d unknown\n", out_id);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ output_set[i] = output;
-+ }
-+ }
-+
-+ ret = drm_crtc_set_config(crtc, crtc_req, mode, output_set, fb);
-+
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_mode_addfb - add an FB to the graphics configuration
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * LOCKING:
-+ * Takes mode config lock.
-+ *
-+ * Add a new FB to the specified CRTC, given a user request.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_addfb(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_fb_cmd *r = data;
-+ struct drm_mode_config *config = &dev->mode_config;
-+ struct drm_framebuffer *fb;
-+ struct drm_buffer_object *bo;
-+ struct drm_crtc *crtc;
-+ int ret = 0;
-+
-+ if ((config->min_width > r->width) || (r->width > config->max_width)) {
-+ DRM_ERROR("mode new framebuffer width not within limits\n");
-+ return -EINVAL;
-+ }
-+ if ((config->min_height > r->height) || (r->height > config->max_height)) {
-+ DRM_ERROR("mode new framebuffer height not within limits\n");
-+ return -EINVAL;
-+ }
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ /* TODO check limits are okay */
-+ ret = drm_get_buffer_object(dev, &bo, r->handle);
-+ if (ret || !bo) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ /* TODO check buffer is sufficently large */
-+ /* TODO setup destructor callback */
-+
-+ fb = drm_framebuffer_create(dev);
-+ if (!fb) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ fb->width = r->width;
-+ fb->height = r->height;
-+ fb->pitch = r->pitch;
-+ fb->bits_per_pixel = r->bpp;
-+ fb->depth = r->depth;
-+ fb->offset = bo->offset;
-+ fb->bo = bo;
-+
-+ r->buffer_id = fb->id;
-+
-+ list_add(&fb->filp_head, &file_priv->fbs);
-+
-+ /* FIXME: bind the fb to the right crtc */
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ crtc->fb = fb;
-+ dev->driver->fb_probe(dev, crtc);
-+ }
-+
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_mode_rmfb - remove an FB from the configuration
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * LOCKING:
-+ * Takes mode config lock.
-+ *
-+ * Remove the FB specified by the user.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_rmfb(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_framebuffer *fb = 0;
-+ uint32_t *id = data;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ fb = idr_find(&dev->mode_config.crtc_idr, *id);
-+ /* TODO check that we realy get a framebuffer back. */
-+ if (!fb || (*id != fb->id)) {
-+ DRM_ERROR("mode invalid framebuffer id\n");
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ /* TODO check if we own the buffer */
-+ /* TODO release all crtc connected to the framebuffer */
-+ /* bind the fb to the crtc for now */
-+ /* TODO unhock the destructor from the buffer object */
-+
-+ if (fb->bo->type != drm_bo_type_kernel)
-+ drm_framebuffer_destroy(fb);
-+ else
-+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
-+
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_mode_getfb - get FB info
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * LOCKING:
-+ * Caller? (FIXME)
-+ *
-+ * Lookup the FB given its ID and return info about it.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_getfb(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_fb_cmd *r = data;
-+ struct drm_framebuffer *fb;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ fb = idr_find(&dev->mode_config.crtc_idr, r->buffer_id);
-+ if (!fb || (r->buffer_id != fb->id)) {
-+ DRM_ERROR("invalid framebuffer id\n");
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ r->height = fb->height;
-+ r->width = fb->width;
-+ r->depth = fb->depth;
-+ r->bpp = fb->bits_per_pixel;
-+ r->handle = fb->bo->base.hash.key;
-+ r->pitch = fb->pitch;
-+
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_fb_release - remove and free the FBs on this file
-+ * @filp: file * from the ioctl
-+ *
-+ * LOCKING:
-+ * Takes mode config lock.
-+ *
-+ * Destroy all the FBs associated with @filp.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+void drm_fb_release(struct file *filp)
-+{
-+ struct drm_file *priv = filp->private_data;
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_framebuffer *fb, *tfb;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-+ list_del(&fb->filp_head);
-+ if (fb->bo->type != drm_bo_type_kernel)
-+ drm_framebuffer_destroy(fb);
-+ else
-+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
-+ }
-+ mutex_unlock(&dev->mode_config.mutex);
-+}
-+
-+/*
-+ *
-+ */
-+void drm_mode_addmode(struct drm_device *dev, struct drm_display_mode *user_mode)
-+{
-+ user_mode->type |= DRM_MODE_TYPE_USERDEF;
-+
-+ user_mode->output_count = 0;
-+ list_add(&user_mode->head, &dev->mode_config.usermode_list);
-+}
-+EXPORT_SYMBOL(drm_mode_addmode);
-+
-+int drm_mode_rmmode(struct drm_device *dev, struct drm_display_mode *mode)
-+{
-+ struct drm_display_mode *t;
-+ int ret = -EINVAL;
-+ list_for_each_entry(t, &dev->mode_config.usermode_list, head) {
-+ if (t == mode) {
-+ list_del(&mode->head);
-+ drm_mode_destroy(dev, mode);
-+ ret = 0;
-+ break;
-+ }
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_mode_rmmode);
-+
-+static int drm_mode_attachmode(struct drm_device *dev,
-+ struct drm_output *output,
-+ struct drm_display_mode *mode)
-+{
-+ int ret = 0;
-+ int i;
-+
-+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
-+ if (output->user_mode_ids[i] == 0) {
-+ output->user_mode_ids[i] = mode->mode_id;
-+ mode->output_count++;
-+ break;
-+ }
-+ }
-+
-+ if (i == DRM_OUTPUT_MAX_UMODES)
-+ ret = -ENOSPC;
-+
-+ return ret;
-+}
-+
-+int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
-+ struct drm_display_mode *mode)
-+{
-+ struct drm_output *output;
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ if (output->crtc == crtc)
-+ drm_mode_attachmode(dev, output, mode);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_mode_attachmode_crtc);
-+
-+static int drm_mode_detachmode(struct drm_device *dev,
-+ struct drm_output *output,
-+ struct drm_display_mode *mode)
-+{
-+ int found = 0;
-+ int ret = 0, i;
-+
-+ for (i = 0; i < DRM_OUTPUT_MAX_UMODES; i++) {
-+ if (output->user_mode_ids[i] == mode->mode_id) {
-+ output->user_mode_ids[i] = 0;
-+ mode->output_count--;
-+ found = 1;
-+ }
-+ }
-+
-+ if (!found)
-+ ret = -EINVAL;
-+
-+ return ret;
-+}
-+
-+int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode)
-+{
-+ struct drm_output *output;
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ drm_mode_detachmode(dev, output, mode);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_mode_detachmode_crtc);
-+
-+/**
-+ * drm_fb_addmode - adds a user defined mode
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * Adds a user specified mode to the kernel.
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * writes new mode id into arg.
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_addmode_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_modeinfo *new_mode = data;
-+ struct drm_display_mode *user_mode;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ user_mode = drm_mode_create(dev);
-+ if (!user_mode) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ drm_crtc_convert_umode(user_mode, new_mode);
-+
-+ drm_mode_addmode(dev, user_mode);
-+ new_mode->id = user_mode->mode_id;
-+
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_fb_rmmode - removes a user defined mode
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * Remove the user defined mode specified by the user.
-+ *
-+ * Called by the user via ioctl
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_rmmode_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ uint32_t *id = data;
-+ struct drm_display_mode *mode;
-+ int ret = -EINVAL;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ mode = idr_find(&dev->mode_config.crtc_idr, *id);
-+ if (!mode || (*id != mode->mode_id)) {
-+ goto out;
-+ }
-+
-+ if (!(mode->type & DRM_MODE_TYPE_USERDEF)) {
-+ goto out;
-+ }
-+
-+ if (mode->output_count) {
-+ goto out;
-+ }
-+
-+ ret = drm_mode_rmmode(dev, mode);
-+
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+/**
-+ * drm_fb_attachmode - Attach a user mode to an output
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * This attaches a user specified mode to an output.
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_attachmode_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_mode_cmd *mode_cmd = data;
-+ struct drm_output *output;
-+ struct drm_display_mode *mode;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+
-+ mode = idr_find(&dev->mode_config.crtc_idr, mode_cmd->mode_id);
-+ if (!mode || (mode->mode_id != mode_cmd->mode_id)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ output = idr_find(&dev->mode_config.crtc_idr, mode_cmd->output_id);
-+ if (!output || (output->id != mode_cmd->output_id)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ ret = drm_mode_attachmode(dev, output, mode);
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+
-+/**
-+ * drm_fb_detachmode - Detach a user specified mode from an output
-+ * @inode: inode from the ioctl
-+ * @filp: file * from the ioctl
-+ * @cmd: cmd from ioctl
-+ * @arg: arg from ioctl
-+ *
-+ * Called by the user via ioctl.
-+ *
-+ * RETURNS:
-+ * Zero on success, errno on failure.
-+ */
-+int drm_mode_detachmode_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_mode_cmd *mode_cmd = data;
-+ struct drm_output *output;
-+ struct drm_display_mode *mode;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+
-+ mode = idr_find(&dev->mode_config.crtc_idr, mode_cmd->mode_id);
-+ if (!mode || (mode->mode_id != mode_cmd->mode_id)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ output = idr_find(&dev->mode_config.crtc_idr, mode_cmd->output_id);
-+ if (!output || (output->id != mode_cmd->output_id)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+
-+ ret = drm_mode_detachmode(dev, output, mode);
-+out:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-+
-+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
-+ const char *name, int num_values)
-+{
-+ struct drm_property *property = NULL;
-+
-+ property = kzalloc(sizeof(struct drm_output), GFP_KERNEL);
-+ if (!property)
-+ return NULL;
-+
-+ property->values = kzalloc(sizeof(uint32_t)*num_values, GFP_KERNEL);
-+ if (!property->values)
-+ goto fail;
-+
-+ property->id = drm_idr_get(dev, property);
-+ property->flags = flags;
-+ property->num_values = num_values;
-+ INIT_LIST_HEAD(&property->enum_list);
-+
-+ if (name)
-+ strncpy(property->name, name, DRM_PROP_NAME_LEN);
-+
-+ list_add_tail(&property->head, &dev->mode_config.property_list);
-+ return property;
-+fail:
-+ kfree(property);
-+ return NULL;
-+}
-+EXPORT_SYMBOL(drm_property_create);
-+
-+int drm_property_add_enum(struct drm_property *property, int index,
-+ uint32_t value, const char *name)
-+{
-+ struct drm_property_enum *prop_enum;
-+
-+ if (!(property->flags & DRM_MODE_PROP_ENUM))
-+ return -EINVAL;
-+
-+ if (!list_empty(&property->enum_list)) {
-+ list_for_each_entry(prop_enum, &property->enum_list, head) {
-+ if (prop_enum->value == value) {
-+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
-+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
-+ return 0;
-+ }
-+ }
-+ }
-+
-+ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
-+ if (!prop_enum)
-+ return -ENOMEM;
-+
-+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
-+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
-+ prop_enum->value = value;
-+
-+ property->values[index] = value;
-+ list_add_tail(&prop_enum->head, &property->enum_list);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_property_add_enum);
-+
-+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
-+{
-+ struct drm_property_enum *prop_enum, *pt;
-+
-+ list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
-+ list_del(&prop_enum->head);
-+ kfree(prop_enum);
-+ }
-+
-+ kfree(property->values);
-+ drm_idr_put(dev, property->id);
-+ list_del(&property->head);
-+ kfree(property);
-+}
-+EXPORT_SYMBOL(drm_property_destroy);
-+
-+
-+int drm_output_attach_property(struct drm_output *output,
-+ struct drm_property *property, int init_val)
-+{
-+ int i;
-+
-+ for (i = 0; i < DRM_OUTPUT_MAX_PROPERTY; i++) {
-+ if (output->property_ids[i] == 0) {
-+ output->property_ids[i] = property->id;
-+ output->property_values[i] = init_val;
-+ break;
-+ }
-+ }
-+
-+ if (i == DRM_OUTPUT_MAX_PROPERTY)
-+ return -EINVAL;
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_output_attach_property);
-+
-+int drm_mode_getproperty_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mode_get_property *out_resp = data;
-+ struct drm_property *property;
-+ int enum_count = 0;
-+ int value_count = 0;
-+ int ret = 0, i;
-+ int copied;
-+ struct drm_property_enum *prop_enum;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ property = idr_find(&dev->mode_config.crtc_idr, out_resp->prop_id);
-+ if (!property || (property->id != out_resp->prop_id)) {
-+ ret = -EINVAL;
-+ goto done;
-+ }
-+
-+
-+ list_for_each_entry(prop_enum, &property->enum_list, head)
-+ enum_count++;
-+
-+ value_count = property->num_values;
-+
-+ strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
-+ out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
-+ out_resp->flags = property->flags;
-+
-+ if ((out_resp->count_values >= value_count) && value_count) {
-+ for (i = 0; i < value_count; i++) {
-+ if (put_user(property->values[i], out_resp->values + i)) {
-+ ret = -EFAULT;
-+ goto done;
-+ }
-+ }
-+ }
-+ out_resp->count_values = value_count;
-+
-+ if ((out_resp->count_enums >= enum_count) && enum_count) {
-+ copied = 0;
-+ list_for_each_entry(prop_enum, &property->enum_list, head) {
-+ if (put_user(prop_enum->value, &out_resp->enums[copied].value)) {
-+ ret = -EFAULT;
-+ goto done;
-+ }
-+
-+ if (copy_to_user(&out_resp->enums[copied].name,
-+ prop_enum->name, DRM_PROP_NAME_LEN)) {
-+ ret = -EFAULT;
-+ goto done;
-+ }
-+ copied++;
-+ }
-+ }
-+ out_resp->count_enums = enum_count;
-+
-+done:
-+ mutex_unlock(&dev->mode_config.mutex);
-+ return ret;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/drm_drv.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_drv.c 2009-02-05 13:29:29.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_drv.c 2009-02-05 13:29:33.000000000 +0000
-@@ -49,6 +49,9 @@
- #include "drmP.h"
- #include "drm_core.h"
-
-+static void drm_cleanup(struct drm_device * dev);
-+int drm_fb_loaded = 0;
-+
- static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-@@ -113,16 +116,48 @@
-
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
--
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
--
-- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
--
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
--
-- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
-- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
-- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETOUTPUT, drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDMODE, drm_mode_addmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMMODE, drm_mode_rmmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_ROOT_ONLY),
-+
-+ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
-+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
-+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
-+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
-+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
-+
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
- };
-
- #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-@@ -164,7 +199,12 @@
- drm_drawable_free_all(dev);
- del_timer(&dev->timer);
-
-- /* Clear pid list */
-+ if (dev->unique) {
-+ drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
-+ dev->unique = NULL;
-+ dev->unique_len = 0;
-+ }
-+
- if (dev->magicfree.next) {
- list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
- list_del(&pt->head);
-@@ -236,12 +276,24 @@
- dev->lock.file_priv = NULL;
- wake_up_interruptible(&dev->lock.lock_queue);
- }
-+ dev->dev_mapping = NULL;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("lastclose completed\n");
- return 0;
- }
-
-+void drm_cleanup_pci(struct pci_dev *pdev)
-+{
-+ struct drm_device *dev = pci_get_drvdata(pdev);
-+
-+ pci_set_drvdata(pdev, NULL);
-+ pci_release_regions(pdev);
-+ if (dev)
-+ drm_cleanup(dev);
-+}
-+EXPORT_SYMBOL(drm_cleanup_pci);
-+
- /**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
-@@ -255,26 +307,68 @@
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
--int drm_init(struct drm_driver *driver)
-+int drm_init(struct drm_driver *driver,
-+ struct pci_device_id *pciidlist)
- {
-- struct pci_dev *pdev = NULL;
-+ struct pci_dev *pdev;
- struct pci_device_id *pid;
-- int i;
-+ int rc, i;
-
- DRM_DEBUG("\n");
-
-- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
-- pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
-+ for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
-+ pid = &pciidlist[i];
-
- pdev = NULL;
- /* pass back in pdev to account for multiple identical cards */
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
-- pid->subdevice, pdev)) != NULL) {
-- /* stealth mode requires a manual probe */
-- pci_dev_get(pdev);
-- drm_get_dev(pdev, pid, driver);
-+ pid->subdevice, pdev))) {
-+ /* Are there device class requirements? */
-+ if ((pid->class != 0)
-+ && ((pdev->class & pid->class_mask) != pid->class)) {
-+ continue;
-+ }
-+ /* is there already a driver loaded, or (short circuit saves work) */
-+ /* does something like VesaFB have control of the memory region? */
-+ if (pci_dev_driver(pdev)
-+ || pci_request_regions(pdev, "DRM scan")) {
-+ /* go into stealth mode */
-+ drm_fb_loaded = 1;
-+ pci_dev_put(pdev);
-+ break;
-+ }
-+ /* no fbdev or vesadev, put things back and wait for normal probe */
-+ pci_release_regions(pdev);
-+ }
-+ }
-+
-+ if (!drm_fb_loaded)
-+ return pci_register_driver(&driver->pci_driver);
-+ else {
-+ for (i = 0; pciidlist[i].vendor != 0; i++) {
-+ pid = &pciidlist[i];
-+
-+ pdev = NULL;
-+ /* pass back in pdev to account for multiple identical cards */
-+ while ((pdev =
-+ pci_get_subsys(pid->vendor, pid->device,
-+ pid->subvendor, pid->subdevice,
-+ pdev))) {
-+ /* Are there device class requirements? */
-+ if ((pid->class != 0)
-+ && ((pdev->class & pid->class_mask) != pid->class)) {
-+ continue;
-+ }
-+ /* stealth mode requires a manual probe */
-+ pci_dev_get(pdev);
-+ if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
-+ pci_dev_put(pdev);
-+ return rc;
-+ }
-+ }
- }
-+ DRM_INFO("Used old pci detect: framebuffer loaded\n");
- }
- return 0;
- }
-@@ -298,6 +392,7 @@
- }
-
- drm_lastclose(dev);
-+ drm_ctxbitmap_cleanup(dev);
-
- if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
- dev->agp && dev->agp->agp_mtrr >= 0) {
-@@ -308,6 +403,9 @@
- DRM_DEBUG("mtrr_del=%d\n", retval);
- }
-
-+ drm_bo_driver_finish(dev);
-+ drm_fence_manager_takedown(dev);
-+
- if (drm_core_has_AGP(dev) && dev->agp) {
- drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
- dev->agp = NULL;
-@@ -317,7 +415,12 @@
- dev->driver->unload(dev);
-
- drm_ht_remove(&dev->map_hash);
-- drm_ctxbitmap_cleanup(dev);
-+ drm_mm_takedown(&dev->offset_manager);
-+ drm_ht_remove(&dev->object_hash);
-+
-+
-+ if (!drm_fb_loaded)
-+ pci_disable_device(dev->pdev);
-
- drm_put_minor(&dev->primary);
- if (drm_put_dev(dev))
-Index: linux-2.6.27/drivers/gpu/drm/drm_edid.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_edid.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,519 @@
-+/*
-+ * Copyright (c) 2007 Intel Corporation
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ *
-+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
-+ * FB layer.
-+ * Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
-+ */
-+#include "drmP.h"
-+#include <linux/i2c-algo-bit.h>
-+#include "drm_edid.h"
-+
-+#include <acpi/acpi_drivers.h>
-+
-+/* Valid EDID header has these bytes */
-+static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
-+
-+int drm_get_acpi_edid(char *method, char *edid, ssize_t length)
-+{
-+ int status;
-+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-+ union acpi_object *obj;
-+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-+ struct acpi_object_list args = { 1, &arg0 };
-+
-+ if (length == 128)
-+ arg0.integer.value = 1;
-+ else if (length == 256)
-+ arg0.integer.value = 2;
-+ else
-+ return -EINVAL;
-+
-+ status = acpi_evaluate_object(NULL, method, &args, &buffer);
-+ if (ACPI_FAILURE(status))
-+ return -ENODEV;
-+
-+ obj = buffer.pointer;
-+
-+ if (obj && obj->type == ACPI_TYPE_BUFFER)
-+ memcpy(edid, obj->buffer.pointer, obj->buffer.length);
-+ else {
-+ printk(KERN_ERR PREFIX "Invalid _DDC data\n");
-+ status = -EFAULT;
-+ kfree(obj);
-+ }
-+
-+ return status;
-+}
-+EXPORT_SYMBOL(drm_get_acpi_edid);
-+
-+/**
-+ * edid_valid - sanity check EDID data
-+ * @edid: EDID data
-+ *
-+ * Sanity check the EDID block by looking at the header, the version number
-+ * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
-+ * valid.
-+ */
-+static bool edid_valid(struct edid *edid)
-+{
-+ int i;
-+ u8 csum = 0;
-+ u8 *raw_edid = (u8 *)edid;
-+
-+ if (memcmp(edid->header, edid_header, sizeof(edid_header)))
-+ goto bad;
-+ if (edid->version != 1)
-+ goto bad;
-+ if (edid->revision <= 0 || edid->revision > 3)
-+ goto bad;
-+
-+ for (i = 0; i < EDID_LENGTH; i++)
-+ csum += raw_edid[i];
-+ if (csum)
-+ goto bad;
-+
-+ return 1;
-+
-+bad:
-+ return 0;
-+}
-+
-+/**
-+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
-+ * @t: standard timing params
-+ *
-+ * Take the standard timing params (in this case width, aspect, and refresh)
-+ * and convert them into a real mode using CVT.
-+ *
-+ * Punts for now, but should eventually use the FB layer's CVT based mode
-+ * generation code.
-+ */
-+struct drm_display_mode *drm_mode_std(struct drm_device *dev,
-+ struct std_timing *t)
-+{
-+// struct fb_videomode mode;
-+
-+// fb_find_mode_cvt(&mode, 0, 0);
-+ /* JJJ: convert to drm_display_mode */
-+ struct drm_display_mode *mode;
-+ int hsize = t->hsize * 8 + 248, vsize;
-+
-+ mode = drm_mode_create(dev);
-+ if (!mode)
-+ return NULL;
-+
-+ if (t->aspect_ratio == 0)
-+ vsize = (hsize * 10) / 16;
-+ else if (t->aspect_ratio == 1)
-+ vsize = (hsize * 3) / 4;
-+ else if (t->aspect_ratio == 2)
-+ vsize = (hsize * 4) / 5;
-+ else
-+ vsize = (hsize * 9) / 16;
-+
-+ drm_mode_set_name(mode);
-+
-+ return mode;
-+}
-+
-+/**
-+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
-+ * @timing: EDID detailed timing info
-+ * @preferred: is this a preferred mode?
-+ *
-+ * An EDID detailed timing block contains enough info for us to create and
-+ * return a new struct drm_display_mode. The @preferred flag will be set
-+ * if this is the display's preferred timing, and we'll use it to indicate
-+ * to the other layers that this mode is desired.
-+ */
-+struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
-+ struct detailed_timing *timing)
-+{
-+ struct drm_display_mode *mode;
-+ struct detailed_pixel_timing *pt = &timing->data.pixel_data;
-+
-+ if (pt->stereo) {
-+ printk(KERN_WARNING "stereo mode not supported\n");
-+ return NULL;
-+ }
-+ if (!pt->separate_sync) {
-+ printk(KERN_WARNING "integrated sync not supported\n");
-+ return NULL;
-+ }
-+
-+ mode = drm_mode_create(dev);
-+ if (!mode)
-+ return NULL;
-+
-+ mode->type = DRM_MODE_TYPE_DRIVER;
-+ mode->clock = timing->pixel_clock * 10;
-+
-+ mode->hdisplay = (pt->hactive_hi << 8) | pt->hactive_lo;
-+ mode->hsync_start = mode->hdisplay + ((pt->hsync_offset_hi << 8) |
-+ pt->hsync_offset_lo);
-+ mode->hsync_end = mode->hsync_start +
-+ ((pt->hsync_pulse_width_hi << 8) |
-+ pt->hsync_pulse_width_lo);
-+ mode->htotal = mode->hdisplay + ((pt->hblank_hi << 8) | pt->hblank_lo);
-+
-+ mode->vdisplay = (pt->vactive_hi << 8) | pt->vactive_lo;
-+ mode->vsync_start = mode->vdisplay + ((pt->vsync_offset_hi << 8) |
-+ pt->vsync_offset_lo);
-+ mode->vsync_end = mode->vsync_start +
-+ ((pt->vsync_pulse_width_hi << 8) |
-+ pt->vsync_pulse_width_lo);
-+ mode->vtotal = mode->vdisplay + ((pt->vblank_hi << 8) | pt->vblank_lo);
-+
-+ drm_mode_set_name(mode);
-+
-+ if (pt->interlaced)
-+ mode->flags |= V_INTERLACE;
-+
-+ mode->flags |= pt->hsync_positive ? V_PHSYNC : V_NHSYNC;
-+ mode->flags |= pt->vsync_positive ? V_PVSYNC : V_NVSYNC;
-+
-+ return mode;
-+}
-+
-+/*
-+ * Detailed mode info for the EDID "established modes" data to use.
-+ */
-+static struct drm_display_mode edid_est_modes[] = {
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
-+ 968, 1056, 0, 600, 601, 605, 628, 0,
-+ V_PHSYNC | V_PVSYNC) }, /* 800x600@60Hz */
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
-+ 896, 1024, 0, 600, 601, 603, 625, 0,
-+ V_PHSYNC | V_PVSYNC) }, /* 800x600@56Hz */
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
-+ 720, 840, 0, 480, 481, 484, 500, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 640x480@75Hz */
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
-+ 704, 832, 0, 480, 489, 491, 520, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 640x480@72Hz */
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
-+ 768, 864, 0, 480, 483, 486, 525, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 640x480@67Hz */
-+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
-+ 752, 800, 0, 480, 490, 492, 525, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 640x480@60Hz */
-+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
-+ 846, 900, 0, 400, 421, 423, 449, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 720x400@88Hz */
-+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
-+ 846, 900, 0, 400, 412, 414, 449, 0,
-+ V_NHSYNC | V_PVSYNC) }, /* 720x400@70Hz */
-+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
-+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
-+ V_PHSYNC | V_PVSYNC) }, /* 1280x1024@75Hz */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
-+ 1136, 1312, 0, 768, 769, 772, 800, 0,
-+ V_PHSYNC | V_PVSYNC) }, /* 1024x768@75Hz */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
-+ 1184, 1328, 0, 768, 771, 777, 806, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 1024x768@70Hz */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
-+ 1184, 1344, 0, 768, 771, 777, 806, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 1024x768@60Hz */
-+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
-+ 1208, 1264, 0, 768, 768, 776, 817, 0,
-+ V_PHSYNC | V_PVSYNC | V_INTERLACE) }, /* 1024x768@43Hz */
-+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
-+ 928, 1152, 0, 624, 625, 628, 667, 0,
-+ V_NHSYNC | V_NVSYNC) }, /* 832x624@75Hz */
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
-+ 896, 1056, 0, 600, 601, 604, 625, 0,
-+ V_PHSYNC | V_PVSYNC) }, /* 800x600@75Hz */
-+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
-+ 976, 1040, 0, 600, 637, 643, 666, 0,
-+ V_PHSYNC | V_PVSYNC) }, /* 800x600@72Hz */
-+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
-+ 1344, 1600, 0, 864, 865, 868, 900, 0,
-+ V_PHSYNC | V_PVSYNC) }, /* 1152x864@75Hz */
-+};
-+
-+#define EDID_EST_TIMINGS 16
-+#define EDID_STD_TIMINGS 8
-+#define EDID_DETAILED_TIMINGS 4
-+
-+/**
-+ * add_established_modes - get est. modes from EDID and add them
-+ * @edid: EDID block to scan
-+ *
-+ * Each EDID block contains a bitmap of the supported "established modes" list
-+ * (defined above). Tease them out and add them to the global modes list.
-+ */
-+static int add_established_modes(struct drm_output *output, struct edid *edid)
-+{
-+ struct drm_device *dev = output->dev;
-+ unsigned long est_bits = edid->established_timings.t1 |
-+ (edid->established_timings.t2 << 8) |
-+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
-+ int i, modes = 0;
-+
-+ for (i = 0; i <= EDID_EST_TIMINGS; i++)
-+ if (est_bits & (1<<i)) {
-+ struct drm_display_mode *newmode;
-+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
-+ drm_mode_probed_add(output, newmode);
-+ modes++;
-+ }
-+
-+ return modes;
-+}
-+
-+/**
-+ * add_standard_modes - get std. modes from EDID and add them
-+ * @edid: EDID block to scan
-+ *
-+ * Standard modes can be calculated using the CVT standard. Grab them from
-+ * @edid, calculate them, and add them to the list.
-+ */
-+static int add_standard_modes(struct drm_output *output, struct edid *edid)
-+{
-+ struct drm_device *dev = output->dev;
-+ int i, modes = 0;
-+
-+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
-+ struct std_timing *t = &edid->standard_timings[i];
-+ struct drm_display_mode *newmode;
-+
-+ /* If std timings bytes are 1, 1 it's empty */
-+ if (t->hsize == 1 && (t->aspect_ratio | t->vfreq) == 1)
-+ continue;
-+
-+ newmode = drm_mode_std(dev, &edid->standard_timings[i]);
-+ drm_mode_probed_add(output, newmode);
-+ modes++;
-+ }
-+
-+ return modes;
-+}
-+
-+/**
-+ * add_detailed_modes - get detailed mode info from EDID data
-+ * @edid: EDID block to scan
-+ *
-+ * Some of the detailed timing sections may contain mode information. Grab
-+ * it and add it to the list.
-+ */
-+static int add_detailed_info(struct drm_output *output, struct edid *edid)
-+{
-+ struct drm_device *dev = output->dev;
-+ int i, j, modes = 0;
-+
-+ for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
-+ struct detailed_timing *timing = &edid->detailed_timings[i];
-+ struct detailed_non_pixel *data = &timing->data.other_data;
-+ struct drm_display_mode *newmode;
-+
-+ /* EDID up to and including 1.2 may put monitor info here */
-+ if (edid->version == 1 && edid->revision < 3)
-+ continue;
-+
-+ /* Detailed mode timing */
-+ if (timing->pixel_clock) {
-+ newmode = drm_mode_detailed(dev, timing);
-+ /* First detailed mode is preferred */
-+ if (i == 0 && edid->preferred_timing)
-+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
-+ drm_mode_probed_add(output, newmode);
-+
-+ modes++;
-+ continue;
-+ }
-+
-+ /* Other timing or info */
-+ switch (data->type) {
-+ case EDID_DETAIL_MONITOR_SERIAL:
-+ break;
-+ case EDID_DETAIL_MONITOR_STRING:
-+ break;
-+ case EDID_DETAIL_MONITOR_RANGE:
-+ /* Get monitor range data */
-+ break;
-+ case EDID_DETAIL_MONITOR_NAME:
-+ break;
-+ case EDID_DETAIL_MONITOR_CPDATA:
-+ break;
-+ case EDID_DETAIL_STD_MODES:
-+ /* Five modes per detailed section */
-+ for (j = 0; j < 5; i++) {
-+ struct std_timing *std;
-+ struct drm_display_mode *newmode;
-+
-+ std = &data->data.timings[j];
-+ newmode = drm_mode_std(dev, std);
-+ drm_mode_probed_add(output, newmode);
-+ modes++;
-+ }
-+ break;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ return modes;
-+}
-+
-+#define DDC_ADDR 0x50
-+
-+static unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter)
-+{
-+ unsigned char start = 0x0;
-+ unsigned char *buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
-+ struct i2c_msg msgs[] = {
-+ {
-+ .addr = DDC_ADDR,
-+ .flags = 0,
-+ .len = 1,
-+ .buf = &start,
-+ }, {
-+ .addr = DDC_ADDR,
-+ .flags = I2C_M_RD,
-+ .len = EDID_LENGTH,
-+ .buf = buf,
-+ }
-+ };
-+
-+ if (!buf) {
-+ DRM_ERROR("unable to allocate memory for EDID block.\n");
-+ return NULL;
-+ }
-+
-+ if (i2c_transfer(adapter, msgs, 2) == 2)
-+ return buf;
-+
-+ DRM_INFO("unable to read EDID block.\n");
-+ kfree(buf);
-+ return NULL;
-+}
-+
-+unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
-+{
-+ struct i2c_algo_bit_data *algo_data = adapter->algo_data;
-+ unsigned char *edid = NULL;
-+ int i, j;
-+
-+ /*
-+ * Startup the bus:
-+ * Set clock line high (but give it time to come up)
-+ * Then set clock & data low
-+ */
-+ algo_data->setscl(algo_data->data, 1);
-+ udelay(550); /* startup delay */
-+ algo_data->setscl(algo_data->data, 0);
-+ algo_data->setsda(algo_data->data, 0);
-+
-+ for (i = 0; i < 3; i++) {
-+ /* For some old monitors we need the
-+ * following process to initialize/stop DDC
-+ */
-+ algo_data->setsda(algo_data->data, 0);
-+ msleep(13);
-+
-+ algo_data->setscl(algo_data->data, 1);
-+ for (j = 0; j < 5; j++) {
-+ msleep(10);
-+ if (algo_data->getscl(algo_data->data))
-+ break;
-+ }
-+ if (j == 5)
-+ continue;
-+
-+ algo_data->setsda(algo_data->data, 0);
-+ msleep(15);
-+ algo_data->setscl(algo_data->data, 0);
-+ msleep(15);
-+ algo_data->setsda(algo_data->data, 1);
-+ msleep(15);
-+
-+ /* Do the real work */
-+ edid = drm_do_probe_ddc_edid(adapter);
-+ algo_data->setsda(algo_data->data, 0);
-+ algo_data->setscl(algo_data->data, 0);
-+ msleep(15);
-+
-+ algo_data->setscl(algo_data->data, 1);
-+ for (j = 0; j < 10; j++) {
-+ msleep(10);
-+ if (algo_data->getscl(algo_data->data))
-+ break;
-+ }
-+
-+ algo_data->setsda(algo_data->data, 1);
-+ msleep(15);
-+ algo_data->setscl(algo_data->data, 0);
-+ if (edid)
-+ break;
-+ }
-+ /* Release the DDC lines when done or the Apple Cinema HD display
-+ * will switch off
-+ */
-+ algo_data->setsda(algo_data->data, 0);
-+ algo_data->setscl(algo_data->data, 0);
-+ algo_data->setscl(algo_data->data, 1);
-+
-+ return edid;
-+}
-+EXPORT_SYMBOL(drm_ddc_read);
-+
-+/**
-+ * drm_get_edid - get EDID data, if available
-+ * @output: output we're probing
-+ * @adapter: i2c adapter to use for DDC
-+ *
-+ * Poke the given output's i2c channel to grab EDID data if possible.
-+ *
-+ * Return edid data or NULL if we couldn't find any.
-+ */
-+struct edid *drm_get_edid(struct drm_output *output,
-+ struct i2c_adapter *adapter)
-+{
-+ struct edid *edid;
-+
-+ edid = (struct edid *)drm_ddc_read(adapter);
-+ if (!edid) {
-+ dev_warn(&output->dev->pdev->dev, "%s: no EDID data\n",
-+ output->name);
-+ return NULL;
-+ }
-+ if (!edid_valid(edid)) {
-+ dev_warn(&output->dev->pdev->dev, "%s: EDID invalid.\n",
-+ output->name);
-+ kfree(edid);
-+ return NULL;
-+ }
-+ return edid;
-+}
-+EXPORT_SYMBOL(drm_get_edid);
-+
-+/**
-+ * drm_add_edid_modes - add modes from EDID data, if available
-+ * @output: output we're probing
-+ * @edid: edid data
-+ *
-+ * Add the specified modes to the output's mode list.
-+ *
-+ * Return number of modes added or 0 if we couldn't find any.
-+ */
-+int drm_add_edid_modes(struct drm_output *output, struct edid *edid)
-+{
-+ int num_modes = 0;
-+
-+ if (edid == NULL) {
-+ return 0;
-+ }
-+ if (!edid_valid(edid)) {
-+ dev_warn(&output->dev->pdev->dev, "%s: EDID invalid.\n",
-+ output->name);
-+ return 0;
-+ }
-+ num_modes += add_established_modes(output, edid);
-+ num_modes += add_standard_modes(output, edid);
-+ num_modes += add_detailed_info(output, edid);
-+ return num_modes;
-+}
-+EXPORT_SYMBOL(drm_add_edid_modes);
-Index: linux-2.6.27/drivers/gpu/drm/drm_fence.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_fence.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,829 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+
-+/*
-+ * Convenience function to be called by fence::wait methods that
-+ * need polling.
-+ */
-+
-+int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
-+ int interruptible, uint32_t mask,
-+ unsigned long end_jiffies)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-+ uint32_t count = 0;
-+ int ret;
-+
-+ DECLARE_WAITQUEUE(entry, current);
-+ add_wait_queue(&fc->fence_queue, &entry);
-+
-+ ret = 0;
-+
-+ for (;;) {
-+ __set_current_state((interruptible) ?
-+ TASK_INTERRUPTIBLE :
-+ TASK_UNINTERRUPTIBLE);
-+ if (drm_fence_object_signaled(fence, mask))
-+ break;
-+ if (time_after_eq(jiffies, end_jiffies)) {
-+ ret = -EBUSY;
-+ break;
-+ }
-+ if (lazy)
-+ schedule_timeout(1);
-+ else if ((++count & 0x0F) == 0){
-+ __set_current_state(TASK_RUNNING);
-+ schedule();
-+ __set_current_state((interruptible) ?
-+ TASK_INTERRUPTIBLE :
-+ TASK_UNINTERRUPTIBLE);
-+ }
-+ if (interruptible && signal_pending(current)) {
-+ ret = -EAGAIN;
-+ break;
-+ }
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&fc->fence_queue, &entry);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_fence_wait_polling);
-+
-+/*
-+ * Typically called by the IRQ handler.
-+ */
-+
-+void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t sequence, uint32_t type, uint32_t error)
-+{
-+ int wake = 0;
-+ uint32_t diff;
-+ uint32_t relevant_type;
-+ uint32_t new_type;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ struct list_head *head;
-+ struct drm_fence_object *fence, *next;
-+ int found = 0;
-+
-+ if (list_empty(&fc->ring))
-+ return;
-+
-+ list_for_each_entry(fence, &fc->ring, ring) {
-+ diff = (sequence - fence->sequence) & driver->sequence_mask;
-+ if (diff > driver->wrap_diff) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+
-+ fc->waiting_types &= ~type;
-+ head = (found) ? &fence->ring : &fc->ring;
-+
-+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
-+ if (&fence->ring == &fc->ring)
-+ break;
-+
-+ if (error) {
-+ fence->error = error;
-+ fence->signaled_types = fence->type;
-+ list_del_init(&fence->ring);
-+ wake = 1;
-+ break;
-+ }
-+
-+ if (type & DRM_FENCE_TYPE_EXE)
-+ type |= fence->native_types;
-+
-+ relevant_type = type & fence->type;
-+ new_type = (fence->signaled_types | relevant_type) ^
-+ fence->signaled_types;
-+
-+ if (new_type) {
-+ fence->signaled_types |= new_type;
-+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
-+ fence->base.hash.key, fence->signaled_types);
-+
-+ if (driver->needed_flush)
-+ fc->pending_flush |= driver->needed_flush(fence);
-+
-+ if (new_type & fence->waiting_types)
-+ wake = 1;
-+ }
-+
-+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
-+
-+ if (!(fence->type & ~fence->signaled_types)) {
-+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
-+ fence->base.hash.key);
-+ list_del_init(&fence->ring);
-+ }
-+ }
-+
-+ /*
-+ * Reinstate lost waiting types.
-+ */
-+
-+ if ((fc->waiting_types & type) != type) {
-+ head = head->prev;
-+ list_for_each_entry(fence, head, ring) {
-+ if (&fence->ring == &fc->ring)
-+ break;
-+ diff = (fc->highest_waiting_sequence - fence->sequence) &
-+ driver->sequence_mask;
-+ if (diff > driver->wrap_diff)
-+ break;
-+
-+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
-+ }
-+ }
-+
-+ if (wake)
-+ wake_up_all(&fc->fence_queue);
-+}
-+EXPORT_SYMBOL(drm_fence_handler);
-+
-+static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&fm->lock, flags);
-+ list_del_init(ring);
-+ write_unlock_irqrestore(&fm->lock, flags);
-+}
-+
-+void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
-+{
-+ struct drm_fence_object *tmp_fence = *fence;
-+ struct drm_device *dev = tmp_fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ *fence = NULL;
-+ if (atomic_dec_and_test(&tmp_fence->usage)) {
-+ drm_fence_unring(dev, &tmp_fence->ring);
-+ DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
-+ tmp_fence->base.hash.key);
-+ atomic_dec(&fm->count);
-+ BUG_ON(!list_empty(&tmp_fence->base.list));
-+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
-+ }
-+}
-+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
-+
-+void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
-+{
-+ struct drm_fence_object *tmp_fence = *fence;
-+ struct drm_device *dev = tmp_fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+
-+ *fence = NULL;
-+ if (atomic_dec_and_test(&tmp_fence->usage)) {
-+ mutex_lock(&dev->struct_mutex);
-+ if (atomic_read(&tmp_fence->usage) == 0) {
-+ drm_fence_unring(dev, &tmp_fence->ring);
-+ atomic_dec(&fm->count);
-+ BUG_ON(!list_empty(&tmp_fence->base.list));
-+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+}
-+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
-+
-+struct drm_fence_object
-+*drm_fence_reference_locked(struct drm_fence_object *src)
-+{
-+ DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
-+
-+ atomic_inc(&src->usage);
-+ return src;
-+}
-+
-+void drm_fence_reference_unlocked(struct drm_fence_object **dst,
-+ struct drm_fence_object *src)
-+{
-+ mutex_lock(&src->dev->struct_mutex);
-+ *dst = src;
-+ atomic_inc(&src->usage);
-+ mutex_unlock(&src->dev->struct_mutex);
-+}
-+EXPORT_SYMBOL(drm_fence_reference_unlocked);
-+
-+static void drm_fence_object_destroy(struct drm_file *priv,
-+ struct drm_user_object *base)
-+{
-+ struct drm_fence_object *fence =
-+ drm_user_object_entry(base, struct drm_fence_object, base);
-+
-+ drm_fence_usage_deref_locked(&fence);
-+}
-+
-+int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
-+{
-+ unsigned long flags;
-+ int signaled;
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+
-+ mask &= fence->type;
-+ read_lock_irqsave(&fm->lock, flags);
-+ signaled = (mask & fence->signaled_types) == mask;
-+ read_unlock_irqrestore(&fm->lock, flags);
-+ if (!signaled && driver->poll) {
-+ write_lock_irqsave(&fm->lock, flags);
-+ driver->poll(dev, fence->fence_class, mask);
-+ signaled = (mask & fence->signaled_types) == mask;
-+ write_unlock_irqrestore(&fm->lock, flags);
-+ }
-+ return signaled;
-+}
-+EXPORT_SYMBOL(drm_fence_object_signaled);
-+
-+
-+int drm_fence_object_flush(struct drm_fence_object *fence,
-+ uint32_t type)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ unsigned long irq_flags;
-+ uint32_t saved_pending_flush;
-+ uint32_t diff;
-+ int call_flush;
-+
-+ if (type & ~fence->type) {
-+ DRM_ERROR("Flush trying to extend fence type, "
-+ "0x%x, 0x%x\n", type, fence->type);
-+ return -EINVAL;
-+ }
-+
-+ write_lock_irqsave(&fm->lock, irq_flags);
-+ fence->waiting_types |= type;
-+ fc->waiting_types |= fence->waiting_types;
-+ diff = (fence->sequence - fc->highest_waiting_sequence) &
-+ driver->sequence_mask;
-+
-+ if (diff < driver->wrap_diff)
-+ fc->highest_waiting_sequence = fence->sequence;
-+
-+ /*
-+ * fence->waiting_types has changed. Determine whether
-+ * we need to initiate some kind of flush as a result of this.
-+ */
-+
-+ saved_pending_flush = fc->pending_flush;
-+ if (driver->needed_flush)
-+ fc->pending_flush |= driver->needed_flush(fence);
-+
-+ if (driver->poll)
-+ driver->poll(dev, fence->fence_class, fence->waiting_types);
-+
-+ call_flush = fc->pending_flush;
-+ write_unlock_irqrestore(&fm->lock, irq_flags);
-+
-+ if (call_flush && driver->flush)
-+ driver->flush(dev, fence->fence_class);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fence_object_flush);
-+
-+/*
-+ * Make sure old fence objects are signaled before their fence sequences are
-+ * wrapped around and reused.
-+ */
-+
-+void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t sequence)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-+ struct drm_fence_object *fence;
-+ unsigned long irq_flags;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ int call_flush;
-+
-+ uint32_t diff;
-+
-+ write_lock_irqsave(&fm->lock, irq_flags);
-+
-+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
-+ diff = (sequence - fence->sequence) & driver->sequence_mask;
-+ if (diff <= driver->flush_diff)
-+ break;
-+
-+ fence->waiting_types = fence->type;
-+ fc->waiting_types |= fence->type;
-+
-+ if (driver->needed_flush)
-+ fc->pending_flush |= driver->needed_flush(fence);
-+ }
-+
-+ if (driver->poll)
-+ driver->poll(dev, fence_class, fc->waiting_types);
-+
-+ call_flush = fc->pending_flush;
-+ write_unlock_irqrestore(&fm->lock, irq_flags);
-+
-+ if (call_flush && driver->flush)
-+ driver->flush(dev, fence->fence_class);
-+
-+ /*
-+ * FIXME: Shold we implement a wait here for really old fences?
-+ */
-+
-+}
-+EXPORT_SYMBOL(drm_fence_flush_old);
-+
-+int drm_fence_object_wait(struct drm_fence_object *fence,
-+ int lazy, int ignore_signals, uint32_t mask)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-+ int ret = 0;
-+ unsigned long _end = 3 * DRM_HZ;
-+
-+ if (mask & ~fence->type) {
-+ DRM_ERROR("Wait trying to extend fence type"
-+ " 0x%08x 0x%08x\n", mask, fence->type);
-+ BUG();
-+ return -EINVAL;
-+ }
-+
-+ if (driver->wait)
-+ return driver->wait(fence, lazy, !ignore_signals, mask);
-+
-+
-+ drm_fence_object_flush(fence, mask);
-+ if (driver->has_irq(dev, fence->fence_class, mask)) {
-+ if (!ignore_signals)
-+ ret = wait_event_interruptible_timeout
-+ (fc->fence_queue,
-+ drm_fence_object_signaled(fence, mask),
-+ 3 * DRM_HZ);
-+ else
-+ ret = wait_event_timeout
-+ (fc->fence_queue,
-+ drm_fence_object_signaled(fence, mask),
-+ 3 * DRM_HZ);
-+
-+ if (unlikely(ret == -ERESTARTSYS))
-+ return -EAGAIN;
-+
-+ if (unlikely(ret == 0))
-+ return -EBUSY;
-+
-+ return 0;
-+ }
-+
-+ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
-+ _end);
-+}
-+EXPORT_SYMBOL(drm_fence_object_wait);
-+
-+
-+
-+int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
-+ uint32_t fence_class, uint32_t type)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-+ unsigned long flags;
-+ uint32_t sequence;
-+ uint32_t native_types;
-+ int ret;
-+
-+ drm_fence_unring(dev, &fence->ring);
-+ ret = driver->emit(dev, fence_class, fence_flags, &sequence,
-+ &native_types);
-+ if (ret)
-+ return ret;
-+
-+ write_lock_irqsave(&fm->lock, flags);
-+ fence->fence_class = fence_class;
-+ fence->type = type;
-+ fence->waiting_types = 0;
-+ fence->signaled_types = 0;
-+ fence->error = 0;
-+ fence->sequence = sequence;
-+ fence->native_types = native_types;
-+ if (list_empty(&fc->ring))
-+ fc->highest_waiting_sequence = sequence - 1;
-+ list_add_tail(&fence->ring, &fc->ring);
-+ fc->latest_queued_sequence = sequence;
-+ write_unlock_irqrestore(&fm->lock, flags);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fence_object_emit);
-+
-+static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t type,
-+ uint32_t fence_flags,
-+ struct drm_fence_object *fence)
-+{
-+ int ret = 0;
-+ unsigned long flags;
-+ struct drm_fence_manager *fm = &dev->fm;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ atomic_set(&fence->usage, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ write_lock_irqsave(&fm->lock, flags);
-+ INIT_LIST_HEAD(&fence->ring);
-+
-+ /*
-+ * Avoid hitting BUG() for kernel-only fence objects.
-+ */
-+
-+ INIT_LIST_HEAD(&fence->base.list);
-+ fence->fence_class = fence_class;
-+ fence->type = type;
-+ fence->signaled_types = 0;
-+ fence->waiting_types = 0;
-+ fence->sequence = 0;
-+ fence->error = 0;
-+ fence->dev = dev;
-+ write_unlock_irqrestore(&fm->lock, flags);
-+ if (fence_flags & DRM_FENCE_FLAG_EMIT) {
-+ ret = drm_fence_object_emit(fence, fence_flags,
-+ fence->fence_class, type);
-+ }
-+ return ret;
-+}
-+
-+int drm_fence_add_user_object(struct drm_file *priv,
-+ struct drm_fence_object *fence, int shareable)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_user_object(priv, &fence->base, shareable);
-+ if (ret)
-+ goto out;
-+ atomic_inc(&fence->usage);
-+ fence->base.type = drm_fence_type;
-+ fence->base.remove = &drm_fence_object_destroy;
-+ DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_fence_add_user_object);
-+
-+int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t type, unsigned flags,
-+ struct drm_fence_object **c_fence)
-+{
-+ struct drm_fence_object *fence;
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+
-+ fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
-+ if (!fence) {
-+ DRM_INFO("Out of memory creating fence object.\n");
-+ return -ENOMEM;
-+ }
-+ ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
-+ if (ret) {
-+ drm_fence_usage_deref_unlocked(&fence);
-+ return ret;
-+ }
-+ *c_fence = fence;
-+ atomic_inc(&fm->count);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fence_object_create);
-+
-+void drm_fence_manager_init(struct drm_device *dev)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fence_class;
-+ struct drm_fence_driver *fed = dev->driver->fence_driver;
-+ int i;
-+ unsigned long flags;
-+
-+ rwlock_init(&fm->lock);
-+ write_lock_irqsave(&fm->lock, flags);
-+ fm->initialized = 0;
-+ if (!fed)
-+ goto out_unlock;
-+
-+ fm->initialized = 1;
-+ fm->num_classes = fed->num_classes;
-+ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
-+
-+ for (i = 0; i < fm->num_classes; ++i) {
-+ fence_class = &fm->fence_class[i];
-+
-+ memset(fence_class, 0, sizeof(*fence_class));
-+ INIT_LIST_HEAD(&fence_class->ring);
-+ DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
-+ }
-+
-+ atomic_set(&fm->count, 0);
-+ out_unlock:
-+ write_unlock_irqrestore(&fm->lock, flags);
-+}
-+
-+void drm_fence_fill_arg(struct drm_fence_object *fence,
-+ struct drm_fence_arg *arg)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ unsigned long irq_flags;
-+
-+ read_lock_irqsave(&fm->lock, irq_flags);
-+ arg->handle = fence->base.hash.key;
-+ arg->fence_class = fence->fence_class;
-+ arg->type = fence->type;
-+ arg->signaled = fence->signaled_types;
-+ arg->error = fence->error;
-+ arg->sequence = fence->sequence;
-+ read_unlock_irqrestore(&fm->lock, irq_flags);
-+}
-+EXPORT_SYMBOL(drm_fence_fill_arg);
-+
-+void drm_fence_manager_takedown(struct drm_device *dev)
-+{
-+}
-+
-+struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
-+ uint32_t handle)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_user_object *uo;
-+ struct drm_fence_object *fence;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ uo = drm_lookup_user_object(priv, handle);
-+ if (!uo || (uo->type != drm_fence_type)) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return NULL;
-+ }
-+ fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
-+ mutex_unlock(&dev->struct_mutex);
-+ return fence;
-+}
-+
-+int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (arg->flags & DRM_FENCE_FLAG_EMIT)
-+ LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ ret = drm_fence_object_create(dev, arg->fence_class,
-+ arg->type, arg->flags, &fence);
-+ if (ret)
-+ return ret;
-+ ret = drm_fence_add_user_object(file_priv, fence,
-+ arg->flags &
-+ DRM_FENCE_FLAG_SHAREABLE);
-+ if (ret) {
-+ drm_fence_usage_deref_unlocked(&fence);
-+ return ret;
-+ }
-+
-+ /*
-+ * usage > 0. No need to lock dev->struct_mutex;
-+ */
-+
-+ arg->handle = fence->base.hash.key;
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ struct drm_user_object *uo;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
-+ if (ret)
-+ return ret;
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+
-+int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
-+}
-+
-+int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ if (!fence)
-+ return -EINVAL;
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ if (!fence)
-+ return -EINVAL;
-+ ret = drm_fence_object_flush(fence, arg->type);
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+
-+int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ if (!fence)
-+ return -EINVAL;
-+ ret = drm_fence_object_wait(fence,
-+ arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
-+ 0, arg->type);
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+
-+int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ if (!fence)
-+ return -EINVAL;
-+ ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
-+ arg->type);
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized\n");
-+ return -EINVAL;
-+ }
-+ LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
-+ NULL, &fence);
-+ if (ret)
-+ return ret;
-+
-+ if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
-+ ret = drm_fence_add_user_object(file_priv, fence,
-+ arg->flags &
-+ DRM_FENCE_FLAG_SHAREABLE);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ arg->handle = fence->base.hash.key;
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/drm_fops.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_fops.c 2009-02-05 13:29:29.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_fops.c 2009-02-05 13:29:33.000000000 +0000
-@@ -231,6 +231,7 @@
- int minor_id = iminor(inode);
- struct drm_file *priv;
- int ret;
-+ int i, j;
-
- if (filp->f_flags & O_EXCL)
- return -EBUSY; /* No exclusive opens */
-@@ -255,9 +256,21 @@
- priv->lock_count = 0;
-
- INIT_LIST_HEAD(&priv->lhead);
-+ INIT_LIST_HEAD(&priv->refd_objects);
-+ INIT_LIST_HEAD(&priv->fbs);
-
-- if (dev->driver->driver_features & DRIVER_GEM)
-- drm_gem_open(dev, priv);
-+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
-+ ret = drm_ht_create(&priv->refd_object_hash[i],
-+ DRM_FILE_HASH_ORDER);
-+ if (ret)
-+ break;
-+ }
-+
-+ if (ret) {
-+ for (j = 0; j < i; ++j)
-+ drm_ht_remove(&priv->refd_object_hash[j]);
-+ goto out_free;
-+ }
-
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
-@@ -314,6 +327,33 @@
- }
- EXPORT_SYMBOL(drm_fasync);
-
-+static void drm_object_release(struct file *filp)
-+{
-+ struct drm_file *priv = filp->private_data;
-+ struct list_head *head;
-+ struct drm_ref_object *ref_object;
-+ int i;
-+
-+ /*
-+ * Free leftover ref objects created by me. Note that we cannot use
-+ * list_for_each() here, as the struct_mutex may be temporarily
-+ * released by the remove_() functions, and thus the lists may be
-+ * altered.
-+ * Also, a drm_remove_ref_object() will not remove it
-+ * from the list unless its refcount is 1.
-+ */
-+
-+ head = &priv->refd_objects;
-+ while (head->next != head) {
-+ ref_object = list_entry(head->next, struct drm_ref_object, list);
-+ drm_remove_ref_object(priv, ref_object);
-+ head = &priv->refd_objects;
-+ }
-+
-+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
-+ drm_ht_remove(&priv->refd_object_hash[i]);
-+}
-+
- /**
- * Release file.
- *
-@@ -403,9 +443,6 @@
- dev->driver->reclaim_buffers(dev, file_priv);
- }
-
-- if (dev->driver->driver_features & DRIVER_GEM)
-- drm_gem_release(dev, file_priv);
--
- drm_fasync(-1, filp, 0);
-
- mutex_lock(&dev->ctxlist_mutex);
-@@ -430,6 +467,8 @@
- mutex_unlock(&dev->ctxlist_mutex);
-
- mutex_lock(&dev->struct_mutex);
-+ drm_fb_release(filp);
-+ drm_object_release(filp);
- if (file_priv->remove_auth_on_close == 1) {
- struct drm_file *temp;
-
-Index: linux-2.6.27/drivers/gpu/drm/drm_hashtab.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_hashtab.c 2008-10-09 23:13:53.000000000 +0100
-+++ linux-2.6.27/drivers/gpu/drm/drm_hashtab.c 2009-02-05 13:29:33.000000000 +0000
-@@ -29,7 +29,7 @@
- * Simple open hash tab implementation.
- *
- * Authors:
-- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
- #include "drmP.h"
-Index: linux-2.6.27/drivers/gpu/drm/drm_irq.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_irq.c 2009-02-05 13:29:29.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_irq.c 2009-02-05 13:29:33.000000000 +0000
-@@ -70,6 +70,7 @@
-
- return 0;
- }
-+#if 0
-
- static void vblank_disable_fn(unsigned long arg)
- {
-@@ -184,6 +185,8 @@
- }
- EXPORT_SYMBOL(drm_vblank_init);
-
-+#endif
-+
- /**
- * Install IRQ handler.
- *
-@@ -221,6 +224,17 @@
-
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
-
-+ if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
-+ init_waitqueue_head(&dev->vbl_queue);
-+
-+ spin_lock_init(&dev->vbl_lock);
-+
-+ INIT_LIST_HEAD(&dev->vbl_sigs);
-+ INIT_LIST_HEAD(&dev->vbl_sigs2);
-+
-+ dev->vbl_pending = 0;
-+ }
-+
- /* Before installing handler */
- dev->driver->irq_preinstall(dev);
-
-@@ -281,8 +295,6 @@
-
- free_irq(dev->pdev->irq, dev);
-
-- drm_vblank_cleanup(dev);
--
- dev->locked_tasklet_func = NULL;
-
- return 0;
-@@ -326,174 +338,6 @@
- }
-
- /**
-- * drm_vblank_count - retrieve "cooked" vblank counter value
-- * @dev: DRM device
-- * @crtc: which counter to retrieve
-- *
-- * Fetches the "cooked" vblank count value that represents the number of
-- * vblank events since the system was booted, including lost events due to
-- * modesetting activity.
-- */
--u32 drm_vblank_count(struct drm_device *dev, int crtc)
--{
-- return atomic_read(&dev->_vblank_count[crtc]);
--}
--EXPORT_SYMBOL(drm_vblank_count);
--
--/**
-- * drm_update_vblank_count - update the master vblank counter
-- * @dev: DRM device
-- * @crtc: counter to update
-- *
-- * Call back into the driver to update the appropriate vblank counter
-- * (specified by @crtc). Deal with wraparound, if it occurred, and
-- * update the last read value so we can deal with wraparound on the next
-- * call if necessary.
-- *
-- * Only necessary when going from off->on, to account for frames we
-- * didn't get an interrupt for.
-- *
-- * Note: caller must hold dev->vbl_lock since this reads & writes
-- * device vblank fields.
-- */
--static void drm_update_vblank_count(struct drm_device *dev, int crtc)
--{
-- u32 cur_vblank, diff;
--
-- /*
-- * Interrupts were disabled prior to this call, so deal with counter
-- * wrap if needed.
-- * NOTE! It's possible we lost a full dev->max_vblank_count events
-- * here if the register is small or we had vblank interrupts off for
-- * a long time.
-- */
-- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
-- diff = cur_vblank - dev->last_vblank[crtc];
-- if (cur_vblank < dev->last_vblank[crtc]) {
-- diff += dev->max_vblank_count;
--
-- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-- crtc, dev->last_vblank[crtc], cur_vblank, diff);
-- }
--
-- DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
-- crtc, diff);
--
-- atomic_add(diff, &dev->_vblank_count[crtc]);
--}
--
--/**
-- * drm_vblank_get - get a reference count on vblank events
-- * @dev: DRM device
-- * @crtc: which CRTC to own
-- *
-- * Acquire a reference count on vblank events to avoid having them disabled
-- * while in use.
-- *
-- * RETURNS
-- * Zero on success, nonzero on failure.
-- */
--int drm_vblank_get(struct drm_device *dev, int crtc)
--{
-- unsigned long irqflags;
-- int ret = 0;
--
-- spin_lock_irqsave(&dev->vbl_lock, irqflags);
-- /* Going from 0->1 means we have to enable interrupts again */
-- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
-- !dev->vblank_enabled[crtc]) {
-- ret = dev->driver->enable_vblank(dev, crtc);
-- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
-- if (ret)
-- atomic_dec(&dev->vblank_refcount[crtc]);
-- else {
-- dev->vblank_enabled[crtc] = 1;
-- drm_update_vblank_count(dev, crtc);
-- }
-- }
-- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
--
-- return ret;
--}
--EXPORT_SYMBOL(drm_vblank_get);
--
--/**
-- * drm_vblank_put - give up ownership of vblank events
-- * @dev: DRM device
-- * @crtc: which counter to give up
-- *
-- * Release ownership of a given vblank counter, turning off interrupts
-- * if possible.
-- */
--void drm_vblank_put(struct drm_device *dev, int crtc)
--{
-- /* Last user schedules interrupt disable */
-- if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
-- mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
--}
--EXPORT_SYMBOL(drm_vblank_put);
--
--/**
-- * drm_modeset_ctl - handle vblank event counter changes across mode switch
-- * @DRM_IOCTL_ARGS: standard ioctl arguments
-- *
-- * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
-- * ioctls around modesetting so that any lost vblank events are accounted for.
-- *
-- * Generally the counter will reset across mode sets. If interrupts are
-- * enabled around this call, we don't have to do anything since the counter
-- * will have already been incremented.
-- */
--int drm_modeset_ctl(struct drm_device *dev, void *data,
-- struct drm_file *file_priv)
--{
-- struct drm_modeset_ctl *modeset = data;
-- unsigned long irqflags;
-- int crtc, ret = 0;
--
-- /* If drm_vblank_init() hasn't been called yet, just no-op */
-- if (!dev->num_crtcs)
-- goto out;
--
-- crtc = modeset->crtc;
-- if (crtc >= dev->num_crtcs) {
-- ret = -EINVAL;
-- goto out;
-- }
--
-- /*
-- * To avoid all the problems that might happen if interrupts
-- * were enabled/disabled around or between these calls, we just
-- * have the kernel take a reference on the CRTC (just once though
-- * to avoid corrupting the count if multiple, mismatch calls occur),
-- * so that interrupts remain enabled in the interim.
-- */
-- switch (modeset->cmd) {
-- case _DRM_PRE_MODESET:
-- if (!dev->vblank_inmodeset[crtc]) {
-- dev->vblank_inmodeset[crtc] = 1;
-- drm_vblank_get(dev, crtc);
-- }
-- break;
-- case _DRM_POST_MODESET:
-- if (dev->vblank_inmodeset[crtc]) {
-- spin_lock_irqsave(&dev->vbl_lock, irqflags);
-- dev->vblank_disable_allowed = 1;
-- dev->vblank_inmodeset[crtc] = 0;
-- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-- drm_vblank_put(dev, crtc);
-- }
-- break;
-- default:
-- ret = -EINVAL;
-- break;
-- }
--
--out:
-- return ret;
--}
--
--/**
- * Wait for VBLANK.
- *
- * \param inode device inode.
-@@ -512,14 +356,14 @@
- *
- * If a signal is not requested, then calls vblank_wait().
- */
--int drm_wait_vblank(struct drm_device *dev, void *data,
-- struct drm_file *file_priv)
-+int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
- {
- union drm_wait_vblank *vblwait = data;
-+ struct timeval now;
- int ret = 0;
-- unsigned int flags, seq, crtc;
-+ unsigned int flags, seq;
-
-- if ((!dev->pdev->irq) || (!dev->irq_enabled))
-+ if ((!dev->irq) || (!dev->irq_enabled))
- return -EINVAL;
-
- if (vblwait->request.type &
-@@ -531,17 +375,13 @@
- }
-
- flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
-- crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-
-- if (crtc >= dev->num_crtcs)
-+ if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
-+ DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
- return -EINVAL;
-
-- ret = drm_vblank_get(dev, crtc);
-- if (ret) {
-- DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
-- return ret;
-- }
-- seq = drm_vblank_count(dev, crtc);
-+ seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
-+ : &dev->vbl_received);
-
- switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
- case _DRM_VBLANK_RELATIVE:
-@@ -550,8 +390,7 @@
- case _DRM_VBLANK_ABSOLUTE:
- break;
- default:
-- ret = -EINVAL;
-- goto done;
-+ return -EINVAL;
- }
-
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
-@@ -561,7 +400,8 @@
-
- if (flags & _DRM_VBLANK_SIGNAL) {
- unsigned long irqflags;
-- struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
-+ struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
-+ ? &dev->vbl_sigs2 : &dev->vbl_sigs;
- struct drm_vbl_sig *vbl_sig;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
-@@ -582,29 +422,22 @@
- }
- }
-
-- if (atomic_read(&dev->vbl_signal_pending) >= 100) {
-+ if (dev->vbl_pending >= 100) {
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-- ret = -EBUSY;
-- goto done;
-+ return -EBUSY;
- }
-
-- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-+ dev->vbl_pending++;
-
-- vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
-- DRM_MEM_DRIVER);
-- if (!vbl_sig) {
-- ret = -ENOMEM;
-- goto done;
-- }
-+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
-- ret = drm_vblank_get(dev, crtc);
-- if (ret) {
-- drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
-- DRM_MEM_DRIVER);
-- return ret;
-+ if (!
-+ (vbl_sig =
-+ drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
-+ return -ENOMEM;
- }
-
-- atomic_inc(&dev->vbl_signal_pending);
-+ memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
-
- vbl_sig->sequence = vblwait->request.sequence;
- vbl_sig->info.si_signo = vblwait->request.signal;
-@@ -618,29 +451,20 @@
-
- vblwait->reply.sequence = seq;
- } else {
-- DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
-- vblwait->request.sequence, crtc);
-- DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
-- ((drm_vblank_count(dev, crtc)
-- - vblwait->request.sequence) <= (1 << 23)));
--
-- if (ret != -EINTR) {
-- struct timeval now;
--
-- do_gettimeofday(&now);
--
-- vblwait->reply.tval_sec = now.tv_sec;
-- vblwait->reply.tval_usec = now.tv_usec;
-- vblwait->reply.sequence = drm_vblank_count(dev, crtc);
-- DRM_DEBUG("returning %d to client\n",
-- vblwait->reply.sequence);
-- } else {
-- DRM_DEBUG("vblank wait interrupted by signal\n");
-- }
-+ if (flags & _DRM_VBLANK_SECONDARY) {
-+ if (dev->driver->vblank_wait2)
-+ ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
-+ } else if (dev->driver->vblank_wait)
-+ ret =
-+ dev->driver->vblank_wait(dev,
-+ &vblwait->request.sequence);
-+
-+ do_gettimeofday(&now);
-+ vblwait->reply.tval_sec = now.tv_sec;
-+ vblwait->reply.tval_usec = now.tv_usec;
- }
-
--done:
-- drm_vblank_put(dev, crtc);
-+ done:
- return ret;
- }
-
-@@ -648,57 +472,43 @@
- * Send the VBLANK signals.
- *
- * \param dev DRM device.
-- * \param crtc CRTC where the vblank event occurred
- *
- * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
- *
- * If a signal is not requested, then calls vblank_wait().
- */
--static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
-+void drm_vbl_send_signals(struct drm_device * dev)
- {
-- struct drm_vbl_sig *vbl_sig, *tmp;
-- struct list_head *vbl_sigs;
-- unsigned int vbl_seq;
- unsigned long flags;
-+ int i;
-
- spin_lock_irqsave(&dev->vbl_lock, flags);
-
-- vbl_sigs = &dev->vbl_sigs[crtc];
-- vbl_seq = drm_vblank_count(dev, crtc);
-+ for (i = 0; i < 2; i++) {
-+ struct drm_vbl_sig *vbl_sig, *tmp;
-+ struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
-+ unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
-+ &dev->vbl_received);
-+
-+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
-+ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-+ vbl_sig->info.si_code = vbl_seq;
-+ send_sig_info(vbl_sig->info.si_signo,
-+ &vbl_sig->info, vbl_sig->task);
-+
-+ list_del(&vbl_sig->head);
-
-- list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
-- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-- vbl_sig->info.si_code = vbl_seq;
-- send_sig_info(vbl_sig->info.si_signo,
-- &vbl_sig->info, vbl_sig->task);
--
-- list_del(&vbl_sig->head);
--
-- drm_free(vbl_sig, sizeof(*vbl_sig),
-- DRM_MEM_DRIVER);
-- atomic_dec(&dev->vbl_signal_pending);
-- drm_vblank_put(dev, crtc);
-- }
-+ drm_free(vbl_sig, sizeof(*vbl_sig),
-+ DRM_MEM_DRIVER);
-+
-+ dev->vbl_pending--;
-+ }
-+ }
- }
-
- spin_unlock_irqrestore(&dev->vbl_lock, flags);
- }
--
--/**
-- * drm_handle_vblank - handle a vblank event
-- * @dev: DRM device
-- * @crtc: where this event occurred
-- *
-- * Drivers should call this routine in their vblank interrupt handlers to
-- * update the vblank counter and send any signals that may be pending.
-- */
--void drm_handle_vblank(struct drm_device *dev, int crtc)
--{
-- atomic_inc(&dev->_vblank_count[crtc]);
-- DRM_WAKEUP(&dev->vbl_queue[crtc]);
-- drm_vbl_send_signals(dev, crtc);
--}
--EXPORT_SYMBOL(drm_handle_vblank);
-+EXPORT_SYMBOL(drm_vbl_send_signals);
-
- /**
- * Tasklet wrapper function.
-Index: linux-2.6.27/drivers/gpu/drm/drm_mm.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_mm.c 2009-02-05 13:29:29.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_mm.c 2009-02-05 13:29:33.000000000 +0000
-@@ -38,7 +38,7 @@
- * Aligned allocations can also see improvement.
- *
- * Authors:
-- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
- #include "drmP.h"
-Index: linux-2.6.27/drivers/gpu/drm/drm_modes.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_modes.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,560 @@
-+/*
-+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Except as contained in this notice, the name of the copyright holder(s)
-+ * and author(s) shall not be used in advertising or otherwise to promote
-+ * the sale, use or other dealings in this Software without prior written
-+ * authorization from the copyright holder(s) and author(s).
-+ */
-+/*
-+ * Copyright © 2007 Dave Airlie
-+ */
-+
-+#include <linux/list.h>
-+#include "drmP.h"
-+#include "drm.h"
-+#include "drm_crtc.h"
-+
-+/**
-+ * drm_mode_debug_printmodeline - debug print a mode
-+ * @dev: DRM device
-+ * @mode: mode to print
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Describe @mode using DRM_DEBUG.
-+ */
-+void drm_mode_debug_printmodeline(struct drm_device *dev,
-+ struct drm_display_mode *mode)
-+{
-+ DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x\n",
-+ mode->mode_id, mode->name, mode->vrefresh, mode->clock,
-+ mode->hdisplay, mode->hsync_start,
-+ mode->hsync_end, mode->htotal,
-+ mode->vdisplay, mode->vsync_start,
-+ mode->vsync_end, mode->vtotal, mode->type);
-+}
-+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
-+
-+/**
-+ * drm_mode_set_name - set the name on a mode
-+ * @mode: name will be set in this mode
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Set the name of @mode to a standard format.
-+ */
-+void drm_mode_set_name(struct drm_display_mode *mode)
-+{
-+ snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d", mode->hdisplay,
-+ mode->vdisplay);
-+}
-+EXPORT_SYMBOL(drm_mode_set_name);
-+
-+/**
-+ * drm_mode_list_concat - move modes from one list to another
-+ * @head: source list
-+ * @new: dst list
-+ *
-+ * LOCKING:
-+ * Caller must ensure both lists are locked.
-+ *
-+ * Move all the modes from @head to @new.
-+ */
-+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
-+{
-+
-+ struct list_head *entry, *tmp;
-+
-+ list_for_each_safe(entry, tmp, head) {
-+ list_move_tail(entry, new);
-+ }
-+}
-+
-+/**
-+ * drm_mode_width - get the width of a mode
-+ * @mode: mode
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Return @mode's width (hdisplay) value.
-+ *
-+ * FIXME: is this needed?
-+ *
-+ * RETURNS:
-+ * @mode->hdisplay
-+ */
-+int drm_mode_width(struct drm_display_mode *mode)
-+{
-+ return mode->hdisplay;
-+
-+}
-+EXPORT_SYMBOL(drm_mode_width);
-+
-+/**
-+ * drm_mode_height - get the height of a mode
-+ * @mode: mode
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Return @mode's height (vdisplay) value.
-+ *
-+ * FIXME: is this needed?
-+ *
-+ * RETURNS:
-+ * @mode->vdisplay
-+ */
-+int drm_mode_height(struct drm_display_mode *mode)
-+{
-+ return mode->vdisplay;
-+}
-+EXPORT_SYMBOL(drm_mode_height);
-+
-+/**
-+ * drm_mode_vrefresh - get the vrefresh of a mode
-+ * @mode: mode
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Return @mode's vrefresh rate or calculate it if necessary.
-+ *
-+ * FIXME: why is this needed? shouldn't vrefresh be set already?
-+ *
-+ * RETURNS:
-+ * Vertical refresh rate of @mode x 1000. For precision reasons.
-+ */
-+int drm_mode_vrefresh(struct drm_display_mode *mode)
-+{
-+ int refresh = 0;
-+ unsigned int calc_val;
-+
-+ if (mode->vrefresh > 0)
-+ refresh = mode->vrefresh;
-+ else if (mode->htotal > 0 && mode->vtotal > 0) {
-+ /* work out vrefresh the value will be x1000 */
-+ calc_val = (mode->clock * 1000);
-+
-+ calc_val /= mode->htotal;
-+ calc_val *= 1000;
-+ calc_val /= mode->vtotal;
-+
-+ refresh = calc_val;
-+ if (mode->flags & V_INTERLACE)
-+ refresh *= 2;
-+ if (mode->flags & V_DBLSCAN)
-+ refresh /= 2;
-+ if (mode->vscan > 1)
-+ refresh /= mode->vscan;
-+ }
-+ return refresh;
-+}
-+EXPORT_SYMBOL(drm_mode_vrefresh);
-+
-+/**
-+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
-+ * @p: mode
-+ * @adjust_flags: unused? (FIXME)
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
-+ */
-+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
-+{
-+ if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
-+ return;
-+
-+ p->crtc_hdisplay = p->hdisplay;
-+ p->crtc_hsync_start = p->hsync_start;
-+ p->crtc_hsync_end = p->hsync_end;
-+ p->crtc_htotal = p->htotal;
-+ p->crtc_hskew = p->hskew;
-+ p->crtc_vdisplay = p->vdisplay;
-+ p->crtc_vsync_start = p->vsync_start;
-+ p->crtc_vsync_end = p->vsync_end;
-+ p->crtc_vtotal = p->vtotal;
-+
-+ if (p->flags & V_INTERLACE) {
-+ if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
-+ p->crtc_vdisplay /= 2;
-+ p->crtc_vsync_start /= 2;
-+ p->crtc_vsync_end /= 2;
-+ p->crtc_vtotal /= 2;
-+ }
-+
-+ p->crtc_vtotal |= 1;
-+ }
-+
-+ if (p->flags & V_DBLSCAN) {
-+ p->crtc_vdisplay *= 2;
-+ p->crtc_vsync_start *= 2;
-+ p->crtc_vsync_end *= 2;
-+ p->crtc_vtotal *= 2;
-+ }
-+
-+ if (p->vscan > 1) {
-+ p->crtc_vdisplay *= p->vscan;
-+ p->crtc_vsync_start *= p->vscan;
-+ p->crtc_vsync_end *= p->vscan;
-+ p->crtc_vtotal *= p->vscan;
-+ }
-+
-+ p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
-+ p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
-+ p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
-+ p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-+
-+ p->crtc_hadjusted = false;
-+ p->crtc_vadjusted = false;
-+}
-+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
-+
-+
-+/**
-+ * drm_mode_duplicate - allocate and duplicate an existing mode
-+ * @m: mode to duplicate
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Just allocate a new mode, copy the existing mode into it, and return
-+ * a pointer to it. Used to create new instances of established modes.
-+ */
-+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
-+ struct drm_display_mode *mode)
-+{
-+ struct drm_display_mode *nmode;
-+ int new_id;
-+
-+ nmode = drm_mode_create(dev);
-+ if (!nmode)
-+ return NULL;
-+
-+ new_id = nmode->mode_id;
-+ *nmode = *mode;
-+ nmode->mode_id = new_id;
-+ INIT_LIST_HEAD(&nmode->head);
-+ return nmode;
-+}
-+EXPORT_SYMBOL(drm_mode_duplicate);
-+
-+/**
-+ * drm_mode_equal - test modes for equality
-+ * @mode1: first mode
-+ * @mode2: second mode
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Check to see if @mode1 and @mode2 are equivalent.
-+ *
-+ * RETURNS:
-+ * True if the modes are equal, false otherwise.
-+ */
-+bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
-+{
-+ if (mode1->clock == mode2->clock &&
-+ mode1->hdisplay == mode2->hdisplay &&
-+ mode1->hsync_start == mode2->hsync_start &&
-+ mode1->hsync_end == mode2->hsync_end &&
-+ mode1->htotal == mode2->htotal &&
-+ mode1->hskew == mode2->hskew &&
-+ mode1->vdisplay == mode2->vdisplay &&
-+ mode1->vsync_start == mode2->vsync_start &&
-+ mode1->vsync_end == mode2->vsync_end &&
-+ mode1->vtotal == mode2->vtotal &&
-+ mode1->vscan == mode2->vscan &&
-+ mode1->flags == mode2->flags)
-+ return true;
-+
-+ return false;
-+}
-+EXPORT_SYMBOL(drm_mode_equal);
-+
-+/**
-+ * drm_mode_validate_size - make sure modes adhere to size constraints
-+ * @dev: DRM device
-+ * @mode_list: list of modes to check
-+ * @maxX: maximum width
-+ * @maxY: maximum height
-+ * @maxPitch: max pitch
-+ *
-+ * LOCKING:
-+ * Caller must hold a lock protecting @mode_list.
-+ *
-+ * The DRM device (@dev) has size and pitch limits. Here we validate the
-+ * modes we probed for @dev against those limits and set their status as
-+ * necessary.
-+ */
-+void drm_mode_validate_size(struct drm_device *dev,
-+ struct list_head *mode_list,
-+ int maxX, int maxY, int maxPitch)
-+{
-+ struct drm_display_mode *mode;
-+
-+ list_for_each_entry(mode, mode_list, head) {
-+ if (maxPitch > 0 && mode->hdisplay > maxPitch)
-+ mode->status = MODE_BAD_WIDTH;
-+
-+ if (maxX > 0 && mode->hdisplay > maxX)
-+ mode->status = MODE_VIRTUAL_X;
-+
-+ if (maxY > 0 && mode->vdisplay > maxY)
-+ mode->status = MODE_VIRTUAL_Y;
-+ }
-+}
-+EXPORT_SYMBOL(drm_mode_validate_size);
-+
-+/**
-+ * drm_mode_validate_clocks - validate modes against clock limits
-+ * @dev: DRM device
-+ * @mode_list: list of modes to check
-+ * @min: minimum clock rate array
-+ * @max: maximum clock rate array
-+ * @n_ranges: number of clock ranges (size of arrays)
-+ *
-+ * LOCKING:
-+ * Caller must hold a lock protecting @mode_list.
-+ *
-+ * Some code may need to check a mode list against the clock limits of the
-+ * device in question. This function walks the mode list, testing to make
-+ * sure each mode falls within a given range (defined by @min and @max
-+ * arrays) and sets @mode->status as needed.
-+ */
-+void drm_mode_validate_clocks(struct drm_device *dev,
-+ struct list_head *mode_list,
-+ int *min, int *max, int n_ranges)
-+{
-+ struct drm_display_mode *mode;
-+ int i;
-+
-+ list_for_each_entry(mode, mode_list, head) {
-+ bool good = false;
-+ for (i = 0; i < n_ranges; i++) {
-+ if (mode->clock >= min[i] && mode->clock <= max[i]) {
-+ good = true;
-+ break;
-+ }
-+ }
-+ if (!good)
-+ mode->status = MODE_CLOCK_RANGE;
-+ }
-+}
-+EXPORT_SYMBOL(drm_mode_validate_clocks);
-+
-+/**
-+ * drm_mode_prune_invalid - remove invalid modes from mode list
-+ * @dev: DRM device
-+ * @mode_list: list of modes to check
-+ * @verbose: be verbose about it
-+ *
-+ * LOCKING:
-+ * Caller must hold a lock protecting @mode_list.
-+ *
-+ * Once mode list generation is complete, a caller can use this routine to
-+ * remove invalid modes from a mode list. If any of the modes have a
-+ * status other than %MODE_OK, they are removed from @mode_list and freed.
-+ */
-+void drm_mode_prune_invalid(struct drm_device *dev,
-+ struct list_head *mode_list, bool verbose)
-+{
-+ struct drm_display_mode *mode, *t;
-+
-+ list_for_each_entry_safe(mode, t, mode_list, head) {
-+ if (mode->status != MODE_OK) {
-+ list_del(&mode->head);
-+ if (verbose) {
-+ drm_mode_debug_printmodeline(dev, mode);
-+ DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
-+ }
-+ kfree(mode);
-+ }
-+ }
-+}
-+
-+/**
-+ * drm_mode_compare - compare modes for favorability
-+ * @lh_a: list_head for first mode
-+ * @lh_b: list_head for second mode
-+ *
-+ * LOCKING:
-+ * None.
-+ *
-+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
-+ * which is better.
-+ *
-+ * RETURNS:
-+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
-+ * positive if @lh_b is better than @lh_a.
-+ */
-+static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
-+{
-+ struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
-+ struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
-+ int diff;
-+
-+ diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
-+ ((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
-+ if (diff)
-+ return diff;
-+ diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
-+ if (diff)
-+ return diff;
-+ diff = b->clock - a->clock;
-+ return diff;
-+}
-+
-+/* FIXME: what we don't have a list sort function? */
-+/* list sort from Mark J Roberts (mjr@znex.org) */
-+void list_sort(struct list_head *head, int (*cmp)(struct list_head *a, struct list_head *b))
-+{
-+ struct list_head *p, *q, *e, *list, *tail, *oldhead;
-+ int insize, nmerges, psize, qsize, i;
-+
-+ list = head->next;
-+ list_del(head);
-+ insize = 1;
-+ for (;;) {
-+ p = oldhead = list;
-+ list = tail = NULL;
-+ nmerges = 0;
-+
-+ while (p) {
-+ nmerges++;
-+ q = p;
-+ psize = 0;
-+ for (i = 0; i < insize; i++) {
-+ psize++;
-+ q = q->next == oldhead ? NULL : q->next;
-+ if (!q)
-+ break;
-+ }
-+
-+ qsize = insize;
-+ while (psize > 0 || (qsize > 0 && q)) {
-+ if (!psize) {
-+ e = q;
-+ q = q->next;
-+ qsize--;
-+ if (q == oldhead)
-+ q = NULL;
-+ } else if (!qsize || !q) {
-+ e = p;
-+ p = p->next;
-+ psize--;
-+ if (p == oldhead)
-+ p = NULL;
-+ } else if (cmp(p, q) <= 0) {
-+ e = p;
-+ p = p->next;
-+ psize--;
-+ if (p == oldhead)
-+ p = NULL;
-+ } else {
-+ e = q;
-+ q = q->next;
-+ qsize--;
-+ if (q == oldhead)
-+ q = NULL;
-+ }
-+ if (tail)
-+ tail->next = e;
-+ else
-+ list = e;
-+ e->prev = tail;
-+ tail = e;
-+ }
-+ p = q;
-+ }
-+
-+ tail->next = list;
-+ list->prev = tail;
-+
-+ if (nmerges <= 1)
-+ break;
-+
-+ insize *= 2;
-+ }
-+
-+ head->next = list;
-+ head->prev = list->prev;
-+ list->prev->next = head;
-+ list->prev = head;
-+}
-+
-+/**
-+ * drm_mode_sort - sort mode list
-+ * @mode_list: list to sort
-+ *
-+ * LOCKING:
-+ * Caller must hold a lock protecting @mode_list.
-+ *
-+ * Sort @mode_list by favorability, putting good modes first.
-+ */
-+void drm_mode_sort(struct list_head *mode_list)
-+{
-+ list_sort(mode_list, drm_mode_compare);
-+}
-+
-+
-+/**
-+ * drm_mode_output_list_update - update the mode list for the output
-+ * @output: the output to update
-+ *
-+ * LOCKING:
-+ * Caller must hold a lock protecting @mode_list.
-+ *
-+ * This moves the modes from the @output probed_modes list
-+ * to the actual mode list. It compares the probed mode against the current
-+ * list and only adds different modes. All modes unverified after this point
-+ * will be removed by the prune invalid modes.
-+ */
-+void drm_mode_output_list_update(struct drm_output *output)
-+{
-+ struct drm_display_mode *mode;
-+ struct drm_display_mode *pmode, *pt;
-+ int found_it;
-+ list_for_each_entry_safe(pmode, pt, &output->probed_modes,
-+ head) {
-+ found_it = 0;
-+ /* go through current modes checking for the new probed mode */
-+ list_for_each_entry(mode, &output->modes, head) {
-+ if (drm_mode_equal(pmode, mode)) {
-+ found_it = 1;
-+ /* if equal delete the probed mode */
-+ mode->status = pmode->status;
-+ list_del(&pmode->head);
-+ kfree(pmode);
-+ break;
-+ }
-+ }
-+
-+ if (!found_it) {
-+ list_move_tail(&pmode->head, &output->modes);
-+ }
-+ }
-+}
-Index: linux-2.6.27/drivers/gpu/drm/drm_object.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_object.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,294 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
-+ int shareable)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ int ret;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+
-+ /* The refcount will be bumped to 1 when we add the ref object below. */
-+ atomic_set(&item->refcount, 0);
-+ item->shareable = shareable;
-+ item->owner = priv;
-+
-+ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
-+ (unsigned long)item, 32, 0, 0);
-+ if (ret)
-+ return ret;
-+
-+ ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
-+ if (ret)
-+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_add_user_object);
-+
-+struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_hash_item *hash;
-+ int ret;
-+ struct drm_user_object *item;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+
-+ ret = drm_ht_find_item(&dev->object_hash, key, &hash);
-+ if (ret)
-+ return NULL;
-+
-+ item = drm_hash_entry(hash, struct drm_user_object, hash);
-+
-+ if (priv != item->owner) {
-+ struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
-+ ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
-+ if (ret) {
-+ DRM_ERROR("Object not registered for usage\n");
-+ return NULL;
-+ }
-+ }
-+ return item;
-+}
-+EXPORT_SYMBOL(drm_lookup_user_object);
-+
-+static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ int ret;
-+
-+ if (atomic_dec_and_test(&item->refcount)) {
-+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
-+ BUG_ON(ret);
-+ item->remove(priv, item);
-+ }
-+}
-+
-+static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
-+ enum drm_ref_type action)
-+{
-+ int ret = 0;
-+
-+ switch (action) {
-+ case _DRM_REF_USE:
-+ atomic_inc(&ro->refcount);
-+ break;
-+ default:
-+ if (!ro->ref_struct_locked) {
-+ break;
-+ } else {
-+ ro->ref_struct_locked(priv, ro, action);
-+ }
-+ }
-+ return ret;
-+}
-+
-+int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
-+ enum drm_ref_type ref_action)
-+{
-+ int ret = 0;
-+ struct drm_ref_object *item;
-+ struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
-+
-+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-+ if (!referenced_object->shareable && priv != referenced_object->owner) {
-+ DRM_ERROR("Not allowed to reference this object\n");
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * If this is not a usage reference, Check that usage has been registered
-+ * first. Otherwise strange things may happen on destruction.
-+ */
-+
-+ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
-+ item =
-+ drm_lookup_ref_object(priv, referenced_object,
-+ _DRM_REF_USE);
-+ if (!item) {
-+ DRM_ERROR
-+ ("Object not registered for usage by this client\n");
-+ return -EINVAL;
-+ }
-+ }
-+
-+ if (NULL !=
-+ (item =
-+ drm_lookup_ref_object(priv, referenced_object, ref_action))) {
-+ atomic_inc(&item->refcount);
-+ return drm_object_ref_action(priv, referenced_object,
-+ ref_action);
-+ }
-+
-+ item = drm_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
-+ if (item == NULL) {
-+ DRM_ERROR("Could not allocate reference object\n");
-+ return -ENOMEM;
-+ }
-+
-+ atomic_set(&item->refcount, 1);
-+ item->hash.key = (unsigned long)referenced_object;
-+ ret = drm_ht_insert_item(ht, &item->hash);
-+ item->unref_action = ref_action;
-+
-+ if (ret)
-+ goto out;
-+
-+ list_add(&item->list, &priv->refd_objects);
-+ ret = drm_object_ref_action(priv, referenced_object, ref_action);
-+out:
-+ return ret;
-+}
-+
-+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
-+ struct drm_user_object *referenced_object,
-+ enum drm_ref_type ref_action)
-+{
-+ struct drm_hash_item *hash;
-+ int ret;
-+
-+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-+ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
-+ (unsigned long)referenced_object, &hash);
-+ if (ret)
-+ return NULL;
-+
-+ return drm_hash_entry(hash, struct drm_ref_object, hash);
-+}
-+EXPORT_SYMBOL(drm_lookup_ref_object);
-+
-+static void drm_remove_other_references(struct drm_file *priv,
-+ struct drm_user_object *ro)
-+{
-+ int i;
-+ struct drm_open_hash *ht;
-+ struct drm_hash_item *hash;
-+ struct drm_ref_object *item;
-+
-+ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
-+ ht = &priv->refd_object_hash[i];
-+ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
-+ item = drm_hash_entry(hash, struct drm_ref_object, hash);
-+ drm_remove_ref_object(priv, item);
-+ }
-+ }
-+}
-+
-+void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
-+{
-+ int ret;
-+ struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
-+ struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
-+ enum drm_ref_type unref_action;
-+
-+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-+ unref_action = item->unref_action;
-+ if (atomic_dec_and_test(&item->refcount)) {
-+ ret = drm_ht_remove_item(ht, &item->hash);
-+ BUG_ON(ret);
-+ list_del_init(&item->list);
-+ if (unref_action == _DRM_REF_USE)
-+ drm_remove_other_references(priv, user_object);
-+ drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
-+ }
-+
-+ switch (unref_action) {
-+ case _DRM_REF_USE:
-+ drm_deref_user_object(priv, user_object);
-+ break;
-+ default:
-+ BUG_ON(!user_object->unref);
-+ user_object->unref(priv, user_object, unref_action);
-+ break;
-+ }
-+
-+}
-+EXPORT_SYMBOL(drm_remove_ref_object);
-+
-+int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
-+ enum drm_object_type type, struct drm_user_object **object)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_user_object *uo;
-+ struct drm_hash_item *hash;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
-+ if (ret) {
-+ DRM_ERROR("Could not find user object to reference.\n");
-+ goto out_err;
-+ }
-+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
-+ if (uo->type != type) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
-+ if (ret)
-+ goto out_err;
-+ mutex_unlock(&dev->struct_mutex);
-+ *object = uo;
-+ return 0;
-+out_err:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
-+ enum drm_object_type type)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_user_object *uo;
-+ struct drm_ref_object *ro;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ uo = drm_lookup_user_object(priv, user_token);
-+ if (!uo || (uo->type != type)) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
-+ if (!ro) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ drm_remove_ref_object(priv, ro);
-+ mutex_unlock(&dev->struct_mutex);
-+ return 0;
-+out_err:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/drm_regman.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_regman.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,200 @@
-+/**************************************************************************
-+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * An allocate-fence manager implementation intended for sets of base-registers
-+ * or tiling-registers.
-+ */
-+
-+#include "drmP.h"
-+
-+/*
-+ * Allocate a compatible register and put it on the unfenced list.
-+ */
-+
-+int drm_regs_alloc(struct drm_reg_manager *manager,
-+ const void *data,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int interruptible, int no_wait, struct drm_reg **reg)
-+{
-+ struct drm_reg *entry, *next_entry;
-+ int ret;
-+
-+ *reg = NULL;
-+
-+ /*
-+ * Search the unfenced list.
-+ */
-+
-+ list_for_each_entry(entry, &manager->unfenced, head) {
-+ if (manager->reg_reusable(entry, data)) {
-+ entry->new_fence_type |= fence_type;
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * Search the lru list.
-+ */
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
-+ struct drm_fence_object *fence = entry->fence;
-+ if (fence->fence_class == fence_class &&
-+ (entry->fence_type & fence_type) == entry->fence_type &&
-+ manager->reg_reusable(entry, data)) {
-+ list_del(&entry->head);
-+ entry->new_fence_type = fence_type;
-+ list_add_tail(&entry->head, &manager->unfenced);
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * Search the free list.
-+ */
-+
-+ list_for_each_entry(entry, &manager->free, head) {
-+ list_del(&entry->head);
-+ entry->new_fence_type = fence_type;
-+ list_add_tail(&entry->head, &manager->unfenced);
-+ goto out;
-+ }
-+
-+ if (no_wait)
-+ return -EBUSY;
-+
-+ /*
-+ * Go back to the lru list and try to expire fences.
-+ */
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
-+ BUG_ON(!entry->fence);
-+ ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
-+ entry->fence_type);
-+ if (ret)
-+ return ret;
-+
-+ drm_fence_usage_deref_unlocked(&entry->fence);
-+ list_del(&entry->head);
-+ entry->new_fence_type = fence_type;
-+ list_add_tail(&entry->head, &manager->unfenced);
-+ goto out;
-+ }
-+
-+ /*
-+ * Oops. All registers are used up :(.
-+ */
-+
-+ return -EBUSY;
-+out:
-+ *reg = entry;
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_regs_alloc);
-+
-+void drm_regs_fence(struct drm_reg_manager *manager,
-+ struct drm_fence_object *fence)
-+{
-+ struct drm_reg *entry;
-+ struct drm_reg *next_entry;
-+
-+ if (!fence) {
-+
-+ /*
-+ * Old fence (if any) is still valid.
-+ * Put back on free and lru lists.
-+ */
-+
-+ list_for_each_entry_safe_reverse(entry, next_entry,
-+ &manager->unfenced, head) {
-+ list_del(&entry->head);
-+ list_add(&entry->head, (entry->fence) ?
-+ &manager->lru : &manager->free);
-+ }
-+ } else {
-+
-+ /*
-+ * Fence with a new fence and put on lru list.
-+ */
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
-+ head) {
-+ list_del(&entry->head);
-+ if (entry->fence)
-+ drm_fence_usage_deref_unlocked(&entry->fence);
-+ drm_fence_reference_unlocked(&entry->fence, fence);
-+
-+ entry->fence_type = entry->new_fence_type;
-+ BUG_ON((entry->fence_type & fence->type) !=
-+ entry->fence_type);
-+
-+ list_add_tail(&entry->head, &manager->lru);
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(drm_regs_fence);
-+
-+void drm_regs_free(struct drm_reg_manager *manager)
-+{
-+ struct drm_reg *entry;
-+ struct drm_reg *next_entry;
-+
-+ drm_regs_fence(manager, NULL);
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
-+ list_del(&entry->head);
-+ manager->reg_destroy(entry);
-+ }
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
-+
-+ (void)drm_fence_object_wait(entry->fence, 1, 1,
-+ entry->fence_type);
-+ list_del(&entry->head);
-+ drm_fence_usage_deref_unlocked(&entry->fence);
-+ manager->reg_destroy(entry);
-+ }
-+}
-+EXPORT_SYMBOL(drm_regs_free);
-+
-+void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
-+{
-+ reg->fence = NULL;
-+ list_add_tail(&reg->head, &manager->free);
-+}
-+EXPORT_SYMBOL(drm_regs_add);
-+
-+void drm_regs_init(struct drm_reg_manager *manager,
-+ int (*reg_reusable) (const struct drm_reg *, const void *),
-+ void (*reg_destroy) (struct drm_reg *))
-+{
-+ INIT_LIST_HEAD(&manager->free);
-+ INIT_LIST_HEAD(&manager->lru);
-+ INIT_LIST_HEAD(&manager->unfenced);
-+ manager->reg_reusable = reg_reusable;
-+ manager->reg_destroy = reg_destroy;
-+}
-+EXPORT_SYMBOL(drm_regs_init);
-Index: linux-2.6.27/drivers/gpu/drm/drm_sman.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_sman.c 2008-10-09 23:13:53.000000000 +0100
-+++ linux-2.6.27/drivers/gpu/drm/drm_sman.c 2009-02-05 13:29:33.000000000 +0000
-@@ -33,7 +33,7 @@
- * struct or a context identifier.
- *
- * Authors:
-- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
- #include "drm_sman.h"
-Index: linux-2.6.27/drivers/gpu/drm/drm_stub.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_stub.c 2009-02-05 13:29:29.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_stub.c 2009-02-05 13:29:33.000000000 +0000
-@@ -97,6 +97,7 @@
- init_timer(&dev->timer);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
-+ mutex_init(&dev->bm.evict_mutex);
-
- idr_init(&dev->drw_idr);
-
-@@ -113,6 +114,18 @@
- return -ENOMEM;
- }
-
-+ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-+ DRM_FILE_PAGE_OFFSET_SIZE)) {
-+ drm_ht_remove(&dev->map_hash);
-+ return -ENOMEM;
-+ }
-+
-+ if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
-+ drm_ht_remove(&dev->map_hash);
-+ drm_mm_takedown(&dev->offset_manager);
-+ return -ENOMEM;
-+ }
-+
- /* the DRM has 6 basic counters */
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
-@@ -152,15 +165,7 @@
- goto error_out_unreg;
- }
-
-- if (driver->driver_features & DRIVER_GEM) {
-- retcode = drm_gem_init(dev);
-- if (retcode) {
-- DRM_ERROR("Cannot initialize graphics execution "
-- "manager (GEM)\n");
-- goto error_out_unreg;
-- }
-- }
--
-+ drm_fence_manager_init(dev);
- return 0;
-
- error_out_unreg:
-@@ -284,6 +289,8 @@
- drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
- return ret;
- }
-+EXPORT_SYMBOL(drm_get_dev);
-+
-
- /**
- * Put a device minor number.
-Index: linux-2.6.27/drivers/gpu/drm/drm_ttm.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/drm_ttm.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,430 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+#include <asm/agp.h>
-+
-+static void drm_ttm_ipi_handler(void *null)
-+{
-+ flush_agp_cache();
-+}
-+
-+void drm_ttm_cache_flush(void)
-+{
-+ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
-+ DRM_ERROR("Timed out waiting for drm cache flush.\n");
-+}
-+EXPORT_SYMBOL(drm_ttm_cache_flush);
-+
-+/*
-+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
-+ */
-+
-+static void ttm_alloc_pages(struct drm_ttm *ttm)
-+{
-+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
-+ ttm->pages = NULL;
-+
-+ if (size <= PAGE_SIZE)
-+ ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
-+
-+ if (!ttm->pages) {
-+ ttm->pages = vmalloc_user(size);
-+ if (ttm->pages)
-+ ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
-+ }
-+}
-+
-+static void ttm_free_pages(struct drm_ttm *ttm)
-+{
-+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
-+
-+ if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
-+ vfree(ttm->pages);
-+ ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
-+ } else {
-+ drm_free(ttm->pages, size, DRM_MEM_TTM);
-+ }
-+ ttm->pages = NULL;
-+}
-+
-+static struct page *drm_ttm_alloc_page(void)
-+{
-+ struct page *page;
-+
-+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-+ if (!page)
-+ return NULL;
-+ return page;
-+}
-+
-+/*
-+ * Change caching policy for the linear kernel map
-+ * for range of pages in a ttm.
-+ */
-+
-+static int drm_set_caching(struct drm_ttm *ttm, int noncached)
-+{
-+ int i;
-+ struct page **cur_page;
-+ int do_tlbflush = 0;
-+
-+ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
-+ return 0;
-+
-+ if (noncached)
-+ drm_ttm_cache_flush();
-+
-+ for (i = 0; i < ttm->num_pages; ++i) {
-+ cur_page = ttm->pages + i;
-+ if (*cur_page) {
-+ if (!PageHighMem(*cur_page)) {
-+ if (noncached) {
-+ map_page_into_agp(*cur_page);
-+ } else {
-+ unmap_page_from_agp(*cur_page);
-+ }
-+ do_tlbflush = 1;
-+ }
-+ }
-+ }
-+ //if (do_tlbflush)
-+ // flush_agp_mappings();
-+
-+ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
-+
-+ return 0;
-+}
-+
-+
-+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
-+{
-+ int write;
-+ int dirty;
-+ struct page *page;
-+ int i;
-+
-+ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
-+ write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
-+ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
-+
-+ for (i = 0; i < ttm->num_pages; ++i) {
-+ page = ttm->pages[i];
-+ if (page == NULL)
-+ continue;
-+
-+ if (page == ttm->dummy_read_page) {
-+ BUG_ON(write);
-+ continue;
-+ }
-+
-+ if (write && dirty && !PageReserved(page))
-+ set_page_dirty_lock(page);
-+
-+ ttm->pages[i] = NULL;
-+ put_page(page);
-+ }
-+}
-+
-+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
-+{
-+ int i;
-+ struct drm_buffer_manager *bm = &ttm->dev->bm;
-+ struct page **cur_page;
-+
-+ for (i = 0; i < ttm->num_pages; ++i) {
-+ cur_page = ttm->pages + i;
-+ if (*cur_page) {
-+ if (page_count(*cur_page) != 1)
-+ DRM_ERROR("Erroneous page count. Leaking pages.\n");
-+ if (page_mapped(*cur_page))
-+ DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
-+ __free_page(*cur_page);
-+ --bm->cur_pages;
-+ }
-+ }
-+}
-+
-+/*
-+ * Free all resources associated with a ttm.
-+ */
-+
-+int drm_destroy_ttm(struct drm_ttm *ttm)
-+{
-+ struct drm_ttm_backend *be;
-+
-+ if (!ttm)
-+ return 0;
-+
-+ be = ttm->be;
-+ if (be) {
-+ be->func->destroy(be);
-+ ttm->be = NULL;
-+ }
-+
-+ if (ttm->pages) {
-+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
-+ drm_set_caching(ttm, 0);
-+
-+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
-+ drm_ttm_free_user_pages(ttm);
-+ else
-+ drm_ttm_free_alloced_pages(ttm);
-+
-+ ttm_free_pages(ttm);
-+ }
-+
-+ return 0;
-+}
-+
-+struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
-+{
-+ struct page *p;
-+ struct drm_buffer_manager *bm = &ttm->dev->bm;
-+
-+ p = ttm->pages[index];
-+ if (!p) {
-+ p = drm_ttm_alloc_page();
-+ if (!p)
-+ return NULL;
-+ ttm->pages[index] = p;
-+ ++bm->cur_pages;
-+ }
-+ return p;
-+}
-+EXPORT_SYMBOL(drm_ttm_get_page);
-+
-+int drm_ttm_set_user(struct drm_ttm *ttm,
-+ struct task_struct *tsk,
-+ int write,
-+ unsigned long start,
-+ unsigned long num_pages,
-+ struct page *dummy_read_page)
-+{
-+ struct mm_struct *mm = tsk->mm;
-+ int ret;
-+ int i;
-+
-+ BUG_ON(num_pages != ttm->num_pages);
-+
-+ ttm->dummy_read_page = dummy_read_page;
-+ ttm->page_flags |= DRM_TTM_PAGE_USER |
-+ ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
-+
-+
-+ down_read(&mm->mmap_sem);
-+ ret = get_user_pages(tsk, mm, start, num_pages,
-+ write, 0, ttm->pages, NULL);
-+ up_read(&mm->mmap_sem);
-+
-+ if (ret != num_pages && write) {
-+ drm_ttm_free_user_pages(ttm);
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < num_pages; ++i) {
-+ if (ttm->pages[i] == NULL)
-+ ttm->pages[i] = ttm->dummy_read_page;
-+ }
-+
-+ return 0;
-+}
-+
-+int drm_ttm_populate(struct drm_ttm *ttm)
-+{
-+ struct page *page;
-+ unsigned long i;
-+ struct drm_ttm_backend *be;
-+
-+ if (ttm->state != ttm_unpopulated)
-+ return 0;
-+
-+ be = ttm->be;
-+ for (i = 0; i < ttm->num_pages; ++i) {
-+ page = drm_ttm_get_page(ttm, i);
-+ if (!page)
-+ return -ENOMEM;
-+ }
-+ be->func->populate(be, ttm->num_pages, ttm->pages);
-+ ttm->state = ttm_unbound;
-+ return 0;
-+}
-+
-+static inline size_t drm_size_align(size_t size)
-+{
-+ size_t tmpSize = 4;
-+ if (size > PAGE_SIZE)
-+ return PAGE_ALIGN(size);
-+ while (tmpSize < size)
-+ tmpSize <<= 1;
-+
-+ return (size_t) tmpSize;
-+}
-+
-+/*
-+ * Calculate the estimated pinned memory usage of a ttm.
-+ */
-+
-+unsigned long drm_ttm_size(struct drm_device *dev,
-+ unsigned long num_pages,
-+ int user_bo)
-+{
-+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
-+ unsigned long tmp;
-+
-+ tmp = drm_size_align(sizeof(struct drm_ttm)) +
-+ drm_size_align(num_pages * sizeof(struct page *)) +
-+ ((user_bo) ? 0 : drm_size_align(num_pages * PAGE_SIZE));
-+
-+ if (bo_driver->backend_size)
-+ tmp += bo_driver->backend_size(dev, num_pages);
-+ else
-+ tmp += drm_size_align(num_pages * sizeof(struct page *)) +
-+ 3*drm_size_align(sizeof(struct drm_ttm_backend));
-+ return tmp;
-+}
-+
-+
-+/*
-+ * Initialize a ttm.
-+ */
-+
-+struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
-+{
-+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
-+ struct drm_ttm *ttm;
-+
-+ if (!bo_driver)
-+ return NULL;
-+
-+ ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
-+ if (!ttm)
-+ return NULL;
-+
-+ ttm->dev = dev;
-+ atomic_set(&ttm->vma_count, 0);
-+
-+ ttm->destroy = 0;
-+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+
-+ ttm->page_flags = 0;
-+
-+ /*
-+ * Account also for AGP module memory usage.
-+ */
-+
-+ ttm_alloc_pages(ttm);
-+ if (!ttm->pages) {
-+ drm_destroy_ttm(ttm);
-+ DRM_ERROR("Failed allocating page table\n");
-+ return NULL;
-+ }
-+ ttm->be = bo_driver->create_ttm_backend_entry(dev);
-+ if (!ttm->be) {
-+ drm_destroy_ttm(ttm);
-+ DRM_ERROR("Failed creating ttm backend entry\n");
-+ return NULL;
-+ }
-+ ttm->state = ttm_unpopulated;
-+ return ttm;
-+}
-+
-+/*
-+ * Unbind a ttm region from the aperture.
-+ */
-+
-+void drm_ttm_evict(struct drm_ttm *ttm)
-+{
-+ struct drm_ttm_backend *be = ttm->be;
-+ int ret;
-+
-+ if (ttm->state == ttm_bound) {
-+ ret = be->func->unbind(be);
-+ BUG_ON(ret);
-+ }
-+
-+ ttm->state = ttm_evicted;
-+}
-+
-+void drm_ttm_fixup_caching(struct drm_ttm *ttm)
-+{
-+
-+ if (ttm->state == ttm_evicted) {
-+ struct drm_ttm_backend *be = ttm->be;
-+ if (be->func->needs_ub_cache_adjust(be))
-+ drm_set_caching(ttm, 0);
-+ ttm->state = ttm_unbound;
-+ }
-+}
-+
-+void drm_ttm_unbind(struct drm_ttm *ttm)
-+{
-+ if (ttm->state == ttm_bound)
-+ drm_ttm_evict(ttm);
-+
-+ drm_ttm_fixup_caching(ttm);
-+}
-+
-+int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
-+{
-+ struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
-+ int ret = 0;
-+ struct drm_ttm_backend *be;
-+
-+ if (!ttm)
-+ return -EINVAL;
-+ if (ttm->state == ttm_bound)
-+ return 0;
-+
-+ be = ttm->be;
-+
-+ ret = drm_ttm_populate(ttm);
-+ if (ret)
-+ return ret;
-+
-+ if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
-+ drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
-+ else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
-+ bo_driver->ttm_cache_flush)
-+ bo_driver->ttm_cache_flush(ttm);
-+
-+ ret = be->func->bind(be, bo_mem);
-+ if (ret) {
-+ ttm->state = ttm_evicted;
-+ DRM_ERROR("Couldn't bind backend.\n");
-+ return ret;
-+ }
-+
-+ ttm->state = ttm_bound;
-+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
-+ ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bind_ttm);
-Index: linux-2.6.27/drivers/gpu/drm/drm_vm.c
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/drm_vm.c 2008-10-09 23:13:53.000000000 +0100
-+++ linux-2.6.27/drivers/gpu/drm/drm_vm.c 2009-02-05 13:29:33.000000000 +0000
-@@ -40,6 +40,10 @@
-
- static void drm_vm_open(struct vm_area_struct *vma);
- static void drm_vm_close(struct vm_area_struct *vma);
-+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
-+ struct file *filp,
-+ drm_local_map_t *map);
-+
-
- static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
- {
-@@ -267,6 +271,9 @@
- dmah.size = map->size;
- __drm_pci_free(dev, &dmah);
- break;
-+ case _DRM_TTM:
-+ BUG_ON(1);
-+ break;
- }
- drm_free(map, sizeof(*map), DRM_MEM_MAPS);
- }
-@@ -647,6 +654,8 @@
- vma->vm_flags |= VM_RESERVED;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- break;
-+ case _DRM_TTM:
-+ return drm_bo_mmap_locked(vma, filp, map);
- default:
- return -EINVAL; /* This should never happen. */
- }
-@@ -671,3 +680,213 @@
- return ret;
- }
- EXPORT_SYMBOL(drm_mmap);
-+
-+/**
-+ * buffer object vm functions.
-+ */
-+
-+/**
-+ * \c Pagefault method for buffer objects.
-+ *
-+ * \param vma Virtual memory area.
-+ * \param address File offset.
-+ * \return Error or refault. The pfn is manually inserted.
-+ *
-+ * It's important that pfns are inserted while holding the bo->mutex lock.
-+ * otherwise we might race with unmap_mapping_range() which is always
-+ * called with the bo->mutex lock held.
-+ *
-+ * We're modifying the page attribute bits of the vma->vm_page_prot field,
-+ * without holding the mmap_sem in write mode. Only in read mode.
-+ * These bits are not used by the mm subsystem code, and we consider them
-+ * protected by the bo->mutex lock.
-+ */
-+
-+#define DRM_NOPFN_EXTRA 15 /* Fault 16 pages at a time in */
-+
-+int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-+{
-+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-+ unsigned long page_offset;
-+ struct page *page = NULL;
-+ struct drm_ttm *ttm = NULL;
-+ struct drm_device *dev;
-+ unsigned long pfn;
-+ int err;
-+ unsigned long bus_base;
-+ unsigned long bus_offset;
-+ unsigned long bus_size;
-+ int i;
-+ unsigned long ret = VM_FAULT_NOPAGE;
-+ unsigned long address = (unsigned long)vmf->virtual_address;
-+
-+ if (address > vma->vm_end)
-+ return VM_FAULT_SIGBUS;
-+
-+ dev = bo->dev;
-+ err = drm_bo_read_lock(&dev->bm.bm_lock);
-+ if (err)
-+ return VM_FAULT_NOPAGE;
-+
-+ err = mutex_lock_interruptible(&bo->mutex);
-+ if (err) {
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return VM_FAULT_NOPAGE;
-+ }
-+
-+ err = drm_bo_wait(bo, 0, 0, 0);
-+ if (err) {
-+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-+ goto out_unlock;
-+ }
-+
-+ /*
-+ * If buffer happens to be in a non-mappable location,
-+ * move it to a mappable.
-+ */
-+
-+ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
-+ uint32_t new_mask = bo->mem.mask |
-+ DRM_BO_FLAG_MAPPABLE |
-+ DRM_BO_FLAG_FORCE_MAPPABLE;
-+ err = drm_bo_move_buffer(bo, new_mask, 0, 0);
-+ if (err) {
-+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
-+ &bus_size);
-+
-+ if (err) {
-+ ret = VM_FAULT_SIGBUS;
-+ goto out_unlock;
-+ }
-+
-+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
-+
-+ if (bus_size) {
-+ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
-+
-+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
-+ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
-+ } else {
-+ ttm = bo->ttm;
-+
-+ drm_ttm_fixup_caching(ttm);
-+ page = drm_ttm_get_page(ttm, page_offset);
-+ if (!page) {
-+ ret = VM_FAULT_OOM;
-+ goto out_unlock;
-+ }
-+ pfn = page_to_pfn(page);
-+ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
-+ vm_get_page_prot(vma->vm_flags) :
-+ drm_io_prot(_DRM_TTM, vma);
-+ }
-+
-+ err = vm_insert_pfn(vma, address, pfn);
-+ if (err) {
-+ ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
-+ goto out_unlock;
-+ }
-+
-+ for (i=0; i<DRM_NOPFN_EXTRA; ++i) {
-+
-+ if (++page_offset == bo->mem.num_pages)
-+ break;
-+ address = vma->vm_start + (page_offset << PAGE_SHIFT);
-+ if (address >= vma->vm_end)
-+ break;
-+ if (bus_size) {
-+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT)
-+ + page_offset;
-+ } else {
-+ page = drm_ttm_get_page(ttm, page_offset);
-+ if (!page)
-+ break;
-+ pfn = page_to_pfn(page);
-+ }
-+ if (vm_insert_pfn(vma, address, pfn))
-+ break;
-+ }
-+out_unlock:
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_vm_fault);
-+
-+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
-+{
-+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-+
-+ drm_vm_open_locked(vma);
-+ atomic_inc(&bo->usage);
-+}
-+
-+/**
-+ * \c vma open method for buffer objects.
-+ *
-+ * \param vma virtual memory area.
-+ */
-+
-+static void drm_bo_vm_open(struct vm_area_struct *vma)
-+{
-+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-+ struct drm_device *dev = bo->dev;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ drm_bo_vm_open_locked(vma);
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
-+/**
-+ * \c vma close method for buffer objects.
-+ *
-+ * \param vma virtual memory area.
-+ */
-+
-+static void drm_bo_vm_close(struct vm_area_struct *vma)
-+{
-+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-+ struct drm_device *dev = bo->dev;
-+
-+ drm_vm_close(vma);
-+ if (bo) {
-+ mutex_lock(&dev->struct_mutex);
-+ drm_bo_usage_deref_locked((struct drm_buffer_object **)
-+ &vma->vm_private_data);
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+ return;
-+}
-+
-+static struct vm_operations_struct drm_bo_vm_ops = {
-+ .fault = drm_bo_vm_fault,
-+ .open = drm_bo_vm_open,
-+ .close = drm_bo_vm_close,
-+};
-+
-+/**
-+ * mmap buffer object memory.
-+ *
-+ * \param vma virtual memory area.
-+ * \param file_priv DRM file private.
-+ * \param map The buffer object drm map.
-+ * \return zero on success or a negative number on failure.
-+ */
-+
-+int drm_bo_mmap_locked(struct vm_area_struct *vma,
-+ struct file *filp,
-+ drm_local_map_t *map)
-+{
-+ vma->vm_ops = &drm_bo_vm_ops;
-+ vma->vm_private_data = map->handle;
-+ vma->vm_file = filp;
-+ vma->vm_flags |= VM_RESERVED | VM_IO;
-+ vma->vm_flags |= VM_PFNMAP;
-+ drm_bo_vm_open_locked(vma);
-+ return 0;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/Makefile
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/Makefile 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,13 @@
-+#
-+# Makefile for the drm device driver. This driver provides support for the
-+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-+
-+ccflags-y := -Iinclude/drm
-+
-+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o psb_buffer.o \
-+ psb_gtt.o psb_setup.o psb_i2c.o psb_fb.o psb_msvdx.o \
-+ psb_msvdxinit.o psb_regman.o psb_reset.o psb_scene.o \
-+ psb_schedule.o psb_xhw.o
-+
-+
-+obj-$(CONFIG_DRM_PSB) += psb.o
-Index: linux-2.6.27/drivers/gpu/drm/psb/i915_drv.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/i915_drv.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,795 @@
-+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
-+ */
-+/*
-+ *
-+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
-+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ */
-+
-+#ifndef _I915_DRV_H_
-+#define _I915_DRV_H_
-+
-+#include "i915_reg.h"
-+
-+/* General customization:
-+ */
-+
-+#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
-+
-+#define DRIVER_NAME "i915"
-+#define DRIVER_DESC "Intel Graphics"
-+#define DRIVER_DATE "20070209"
-+
-+#if defined(__linux__)
-+#define I915_HAVE_FENCE
-+#define I915_HAVE_BUFFER
-+#endif
-+
-+/* Interface history:
-+ *
-+ * 1.1: Original.
-+ * 1.2: Add Power Management
-+ * 1.3: Add vblank support
-+ * 1.4: Fix cmdbuffer path, add heap destroy
-+ * 1.5: Add vblank pipe configuration
-+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
-+ * - Support vertical blank on secondary display pipe
-+ * 1.8: New ioctl for ARB_Occlusion_Query
-+ * 1.9: Usable page flipping and triple buffering
-+ * 1.10: Plane/pipe disentangling
-+ * 1.11: TTM superioctl
-+ */
-+#define DRIVER_MAJOR 1
-+#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-+#define DRIVER_MINOR 11
-+#else
-+#define DRIVER_MINOR 6
-+#endif
-+#define DRIVER_PATCHLEVEL 0
-+
-+#define DRM_DRIVER_PRIVATE_T struct drm_i915_private
-+
-+#ifdef I915_HAVE_BUFFER
-+#define I915_MAX_VALIDATE_BUFFERS 4096
-+#endif
-+
-+struct drm_i915_ring_buffer {
-+ int tail_mask;
-+ unsigned long Start;
-+ unsigned long End;
-+ unsigned long Size;
-+ u8 *virtual_start;
-+ int head;
-+ int tail;
-+ int space;
-+ drm_local_map_t map;
-+};
-+
-+struct mem_block {
-+ struct mem_block *next;
-+ struct mem_block *prev;
-+ int start;
-+ int size;
-+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-+};
-+
-+struct drm_i915_vbl_swap {
-+ struct list_head head;
-+ drm_drawable_t drw_id;
-+ unsigned int plane;
-+ unsigned int sequence;
-+ int flip;
-+};
-+
-+struct drm_i915_private {
-+ struct drm_buffer_object *ring_buffer;
-+ drm_local_map_t *sarea;
-+ drm_local_map_t *mmio_map;
-+
-+ unsigned long mmiobase;
-+ unsigned long mmiolen;
-+
-+ struct drm_i915_sarea *sarea_priv;
-+ struct drm_i915_ring_buffer ring;
-+
-+ struct drm_dma_handle *status_page_dmah;
-+ void *hw_status_page;
-+ dma_addr_t dma_status_page;
-+ uint32_t counter;
-+ unsigned int status_gfx_addr;
-+ drm_local_map_t hws_map;
-+
-+ unsigned int cpp;
-+ int use_mi_batchbuffer_start;
-+
-+ wait_queue_head_t irq_queue;
-+ atomic_t irq_received;
-+ atomic_t irq_emitted;
-+
-+ int tex_lru_log_granularity;
-+ int allow_batchbuffer;
-+ struct mem_block *agp_heap;
-+ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
-+ int vblank_pipe;
-+ DRM_SPINTYPE user_irq_lock;
-+ int user_irq_refcount;
-+ int fence_irq_on;
-+ uint32_t irq_enable_reg;
-+ int irq_enabled;
-+
-+#ifdef I915_HAVE_FENCE
-+ uint32_t flush_sequence;
-+ uint32_t flush_flags;
-+ uint32_t flush_pending;
-+ uint32_t saved_flush_status;
-+ uint32_t reported_sequence;
-+ int reported_sequence_valid;
-+#endif
-+#ifdef I915_HAVE_BUFFER
-+ void *agp_iomap;
-+ unsigned int max_validate_buffers;
-+ struct mutex cmdbuf_mutex;
-+#endif
-+
-+ DRM_SPINTYPE swaps_lock;
-+ struct drm_i915_vbl_swap vbl_swaps;
-+ unsigned int swaps_pending;
-+
-+ /* LVDS info */
-+ int backlight_duty_cycle; /* restore backlight to this value */
-+ bool panel_wants_dither;
-+ struct drm_display_mode *panel_fixed_mode;
-+
-+ /* Register state */
-+ u8 saveLBB;
-+ u32 saveDSPACNTR;
-+ u32 saveDSPBCNTR;
-+ u32 savePIPEACONF;
-+ u32 savePIPEBCONF;
-+ u32 savePIPEASRC;
-+ u32 savePIPEBSRC;
-+ u32 saveFPA0;
-+ u32 saveFPA1;
-+ u32 saveDPLL_A;
-+ u32 saveDPLL_A_MD;
-+ u32 saveHTOTAL_A;
-+ u32 saveHBLANK_A;
-+ u32 saveHSYNC_A;
-+ u32 saveVTOTAL_A;
-+ u32 saveVBLANK_A;
-+ u32 saveVSYNC_A;
-+ u32 saveBCLRPAT_A;
-+ u32 saveDSPASTRIDE;
-+ u32 saveDSPASIZE;
-+ u32 saveDSPAPOS;
-+ u32 saveDSPABASE;
-+ u32 saveDSPASURF;
-+ u32 saveDSPATILEOFF;
-+ u32 savePFIT_PGM_RATIOS;
-+ u32 saveBLC_PWM_CTL;
-+ u32 saveBLC_PWM_CTL2;
-+ u32 saveFPB0;
-+ u32 saveFPB1;
-+ u32 saveDPLL_B;
-+ u32 saveDPLL_B_MD;
-+ u32 saveHTOTAL_B;
-+ u32 saveHBLANK_B;
-+ u32 saveHSYNC_B;
-+ u32 saveVTOTAL_B;
-+ u32 saveVBLANK_B;
-+ u32 saveVSYNC_B;
-+ u32 saveBCLRPAT_B;
-+ u32 saveDSPBSTRIDE;
-+ u32 saveDSPBSIZE;
-+ u32 saveDSPBPOS;
-+ u32 saveDSPBBASE;
-+ u32 saveDSPBSURF;
-+ u32 saveDSPBTILEOFF;
-+ u32 saveVCLK_DIVISOR_VGA0;
-+ u32 saveVCLK_DIVISOR_VGA1;
-+ u32 saveVCLK_POST_DIV;
-+ u32 saveVGACNTRL;
-+ u32 saveADPA;
-+ u32 saveLVDS;
-+ u32 saveLVDSPP_ON;
-+ u32 saveLVDSPP_OFF;
-+ u32 saveDVOA;
-+ u32 saveDVOB;
-+ u32 saveDVOC;
-+ u32 savePP_ON;
-+ u32 savePP_OFF;
-+ u32 savePP_CONTROL;
-+ u32 savePP_CYCLE;
-+ u32 savePFIT_CONTROL;
-+ u32 save_palette_a[256];
-+ u32 save_palette_b[256];
-+ u32 saveFBC_CFB_BASE;
-+ u32 saveFBC_LL_BASE;
-+ u32 saveFBC_CONTROL;
-+ u32 saveFBC_CONTROL2;
-+ u32 saveSWF0[16];
-+ u32 saveSWF1[16];
-+ u32 saveSWF2[3];
-+ u8 saveMSR;
-+ u8 saveSR[8];
-+ u8 saveGR[24];
-+ u8 saveAR_INDEX;
-+ u8 saveAR[20];
-+ u8 saveDACMASK;
-+ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
-+ u8 saveCR[36];
-+};
-+
-+enum intel_chip_family {
-+ CHIP_I8XX = 0x01,
-+ CHIP_I9XX = 0x02,
-+ CHIP_I915 = 0x04,
-+ CHIP_I965 = 0x08,
-+ CHIP_POULSBO = 0x10,
-+};
-+
-+extern struct drm_ioctl_desc i915_ioctls[];
-+extern int i915_max_ioctl;
-+
-+ /* i915_dma.c */
-+extern void i915_kernel_lost_context(struct drm_device * dev);
-+extern int i915_driver_load(struct drm_device *, unsigned long flags);
-+extern int i915_driver_unload(struct drm_device *dev);
-+extern void i915_driver_lastclose(struct drm_device * dev);
-+extern void i915_driver_preclose(struct drm_device *dev,
-+ struct drm_file *file_priv);
-+extern int i915_driver_device_is_agp(struct drm_device * dev);
-+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
-+ unsigned long arg);
-+extern void i915_emit_breadcrumb(struct drm_device *dev);
-+extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
-+extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
-+extern int i915_driver_firstopen(struct drm_device *dev);
-+extern int i915_do_cleanup_pageflip(struct drm_device *dev);
-+extern int i915_dma_cleanup(struct drm_device *dev);
-+
-+/* i915_irq.c */
-+extern int i915_irq_emit(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int i915_irq_wait(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+
-+extern void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe);
-+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
-+extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-+extern void i915_driver_irq_preinstall(struct drm_device * dev);
-+extern void i915_driver_irq_postinstall(struct drm_device * dev);
-+extern void i915_driver_irq_uninstall(struct drm_device * dev);
-+extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int i915_emit_irq(struct drm_device * dev);
-+extern void i915_user_irq_on(struct drm_i915_private *dev_priv);
-+extern void i915_user_irq_off(struct drm_i915_private *dev_priv);
-+extern void i915_enable_interrupt (struct drm_device *dev);
-+extern int i915_vblank_swap(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+
-+/* i915_mem.c */
-+extern int i915_mem_alloc(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int i915_mem_free(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int i915_mem_init_heap(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern void i915_mem_takedown(struct mem_block **heap);
-+extern void i915_mem_release(struct drm_device * dev,
-+ struct drm_file *file_priv,
-+ struct mem_block *heap);
-+#ifdef I915_HAVE_FENCE
-+/* i915_fence.c */
-+extern void i915_fence_handler(struct drm_device *dev);
-+extern void i915_invalidate_reported_sequence(struct drm_device *dev);
-+
-+#endif
-+
-+#ifdef I915_HAVE_BUFFER
-+/* i915_buffer.c */
-+extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
-+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
-+ uint32_t *type);
-+extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
-+extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
-+ struct drm_mem_type_manager *man);
-+extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
-+extern int i915_move(struct drm_buffer_object *bo, int evict,
-+ int no_wait, struct drm_bo_mem_reg *new_mem);
-+void i915_flush_ttm(struct drm_ttm *ttm);
-+#endif
-+
-+#ifdef __linux__
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-+extern void intel_init_chipset_flush_compat(struct drm_device *dev);
-+extern void intel_fini_chipset_flush_compat(struct drm_device *dev);
-+#endif
-+#endif
-+
-+
-+/* modesetting */
-+extern void intel_modeset_init(struct drm_device *dev);
-+extern void intel_modeset_cleanup(struct drm_device *dev);
-+
-+
-+#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
-+#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
-+#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
-+#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
-+
-+#define I915_VERBOSE 0
-+
-+#define RING_LOCALS unsigned int outring, ringmask, outcount; \
-+ volatile char *virt;
-+
-+#define BEGIN_LP_RING(n) do { \
-+ if (I915_VERBOSE) \
-+ DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
-+ (n), __FUNCTION__); \
-+ if (dev_priv->ring.space < (n)*4) \
-+ i915_wait_ring(dev, (n)*4, __FUNCTION__); \
-+ outcount = 0; \
-+ outring = dev_priv->ring.tail; \
-+ ringmask = dev_priv->ring.tail_mask; \
-+ virt = dev_priv->ring.virtual_start; \
-+} while (0)
-+
-+#define OUT_RING(n) do { \
-+ if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
-+ *(volatile unsigned int *)(virt + outring) = (n); \
-+ outcount++; \
-+ outring += 4; \
-+ outring &= ringmask; \
-+} while (0)
-+
-+#define ADVANCE_LP_RING() do { \
-+ if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
-+ dev_priv->ring.tail = outring; \
-+ dev_priv->ring.space -= outcount * 4; \
-+ I915_WRITE(LP_RING + RING_TAIL, outring); \
-+} while(0)
-+
-+#define MI_NOOP (0x00 << 23)
-+
-+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-+
-+/*
-+ * The Bridge device's PCI config space has information about the
-+ * fb aperture size and the amount of pre-reserved memory.
-+ */
-+#define INTEL_GMCH_CTRL 0x52
-+#define INTEL_GMCH_ENABLED 0x4
-+#define INTEL_GMCH_MEM_MASK 0x1
-+#define INTEL_GMCH_MEM_64M 0x1
-+#define INTEL_GMCH_MEM_128M 0
-+
-+#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
-+#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-+#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-+
-+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
-+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
-+
-+/* Extended config space */
-+#define LBB 0xf4
-+
-+/* VGA stuff */
-+
-+#define VGA_ST01_MDA 0x3ba
-+#define VGA_ST01_CGA 0x3da
-+
-+#define VGA_MSR_WRITE 0x3c2
-+#define VGA_MSR_READ 0x3cc
-+#define VGA_MSR_MEM_EN (1<<1)
-+#define VGA_MSR_CGA_MODE (1<<0)
-+
-+#define VGA_SR_INDEX 0x3c4
-+#define VGA_SR_DATA 0x3c5
-+
-+#define VGA_AR_INDEX 0x3c0
-+#define VGA_AR_VID_EN (1<<5)
-+#define VGA_AR_DATA_WRITE 0x3c0
-+#define VGA_AR_DATA_READ 0x3c1
-+
-+#define VGA_GR_INDEX 0x3ce
-+#define VGA_GR_DATA 0x3cf
-+/* GR05 */
-+#define VGA_GR_MEM_READ_MODE_SHIFT 3
-+#define VGA_GR_MEM_READ_MODE_PLANE 1
-+/* GR06 */
-+#define VGA_GR_MEM_MODE_MASK 0xc
-+#define VGA_GR_MEM_MODE_SHIFT 2
-+#define VGA_GR_MEM_A0000_AFFFF 0
-+#define VGA_GR_MEM_A0000_BFFFF 1
-+#define VGA_GR_MEM_B0000_B7FFF 2
-+#define VGA_GR_MEM_B0000_BFFFF 3
-+
-+#define VGA_DACMASK 0x3c6
-+#define VGA_DACRX 0x3c7
-+#define VGA_DACWX 0x3c8
-+#define VGA_DACDATA 0x3c9
-+
-+#define VGA_CR_INDEX_MDA 0x3b4
-+#define VGA_CR_DATA_MDA 0x3b5
-+#define VGA_CR_INDEX_CGA 0x3d4
-+#define VGA_CR_DATA_CGA 0x3d5
-+
-+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-+#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-+#define CMD_REPORT_HEAD (7<<23)
-+#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-+#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-+
-+#define CMD_MI_FLUSH (0x04 << 23)
-+#define MI_NO_WRITE_FLUSH (1 << 2)
-+#define MI_READ_FLUSH (1 << 0)
-+#define MI_EXE_FLUSH (1 << 1)
-+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
-+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
-+
-+/* Packet to load a register value from the ring/batch command stream:
-+ */
-+#define CMD_MI_LOAD_REGISTER_IMM ((0x22 << 23)|0x1)
-+
-+#define BB1_START_ADDR_MASK (~0x7)
-+#define BB1_PROTECTED (1<<0)
-+#define BB1_UNPROTECTED (0<<0)
-+#define BB2_END_ADDR_MASK (~0x7)
-+
-+#define I915REG_HWS_PGA 0x02080
-+
-+/* Framebuffer compression */
-+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
-+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
-+#define FBC_CONTROL 0x03208
-+#define FBC_CTL_EN (1<<31)
-+#define FBC_CTL_PERIODIC (1<<30)
-+#define FBC_CTL_INTERVAL_SHIFT (16)
-+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
-+#define FBC_CTL_STRIDE_SHIFT (5)
-+#define FBC_CTL_FENCENO (1<<0)
-+#define FBC_COMMAND 0x0320c
-+#define FBC_CMD_COMPRESS (1<<0)
-+#define FBC_STATUS 0x03210
-+#define FBC_STAT_COMPRESSING (1<<31)
-+#define FBC_STAT_COMPRESSED (1<<30)
-+#define FBC_STAT_MODIFIED (1<<29)
-+#define FBC_STAT_CURRENT_LINE (1<<0)
-+#define FBC_CONTROL2 0x03214
-+#define FBC_CTL_FENCE_DBL (0<<4)
-+#define FBC_CTL_IDLE_IMM (0<<2)
-+#define FBC_CTL_IDLE_FULL (1<<2)
-+#define FBC_CTL_IDLE_LINE (2<<2)
-+#define FBC_CTL_IDLE_DEBUG (3<<2)
-+#define FBC_CTL_CPU_FENCE (1<<1)
-+#define FBC_CTL_PLANEA (0<<0)
-+#define FBC_CTL_PLANEB (1<<0)
-+#define FBC_FENCE_OFF 0x0321b
-+
-+#define FBC_LL_SIZE (1536)
-+#define FBC_LL_PAD (32)
-+
-+/* Interrupt bits:
-+ */
-+#define USER_INT_FLAG (1<<1)
-+#define VSYNC_PIPEB_FLAG (1<<5)
-+#define VSYNC_PIPEA_FLAG (1<<7)
-+#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
-+
-+#define I915REG_HWSTAM 0x02098
-+#define I915REG_INT_IDENTITY_R 0x020a4
-+#define I915REG_INT_MASK_R 0x020a8
-+#define I915REG_INT_ENABLE_R 0x020a0
-+#define I915REG_INSTPM 0x020c0
-+
-+#define I915REG_PIPEASTAT 0x70024
-+#define I915REG_PIPEBSTAT 0x71024
-+
-+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-+#define I915_VBLANK_CLEAR (1UL<<1)
-+
-+#define GPIOA 0x5010
-+#define GPIOB 0x5014
-+#define GPIOC 0x5018
-+#define GPIOD 0x501c
-+#define GPIOE 0x5020
-+#define GPIOF 0x5024
-+#define GPIOG 0x5028
-+#define GPIOH 0x502c
-+# define GPIO_CLOCK_DIR_MASK (1 << 0)
-+# define GPIO_CLOCK_DIR_IN (0 << 1)
-+# define GPIO_CLOCK_DIR_OUT (1 << 1)
-+# define GPIO_CLOCK_VAL_MASK (1 << 2)
-+# define GPIO_CLOCK_VAL_OUT (1 << 3)
-+# define GPIO_CLOCK_VAL_IN (1 << 4)
-+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
-+# define GPIO_DATA_DIR_MASK (1 << 8)
-+# define GPIO_DATA_DIR_IN (0 << 9)
-+# define GPIO_DATA_DIR_OUT (1 << 9)
-+# define GPIO_DATA_VAL_MASK (1 << 10)
-+# define GPIO_DATA_VAL_OUT (1 << 11)
-+# define GPIO_DATA_VAL_IN (1 << 12)
-+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-+
-+/* p317, 319
-+ */
-+#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
-+#define VCLK2_VCO_N 0x600a
-+#define VCLK2_VCO_DIV_SEL 0x6012
-+
-+#define VCLK_DIVISOR_VGA0 0x6000
-+#define VCLK_DIVISOR_VGA1 0x6004
-+#define VCLK_POST_DIV 0x6010
-+/** Selects a post divisor of 4 instead of 2. */
-+# define VGA1_PD_P2_DIV_4 (1 << 15)
-+/** Overrides the p2 post divisor field */
-+# define VGA1_PD_P1_DIV_2 (1 << 13)
-+# define VGA1_PD_P1_SHIFT 8
-+/** P1 value is 2 greater than this field */
-+# define VGA1_PD_P1_MASK (0x1f << 8)
-+/** Selects a post divisor of 4 instead of 2. */
-+# define VGA0_PD_P2_DIV_4 (1 << 7)
-+/** Overrides the p2 post divisor field */
-+# define VGA0_PD_P1_DIV_2 (1 << 5)
-+# define VGA0_PD_P1_SHIFT 0
-+/** P1 value is 2 greater than this field */
-+# define VGA0_PD_P1_MASK (0x1f << 0)
-+
-+#define POST_DIV_SELECT 0x70
-+#define POST_DIV_1 0x00
-+#define POST_DIV_2 0x10
-+#define POST_DIV_4 0x20
-+#define POST_DIV_8 0x30
-+#define POST_DIV_16 0x40
-+#define POST_DIV_32 0x50
-+#define VCO_LOOP_DIV_BY_4M 0x00
-+#define VCO_LOOP_DIV_BY_16M 0x04
-+
-+#define SRX_INDEX 0x3c4
-+#define SRX_DATA 0x3c5
-+#define SR01 1
-+#define SR01_SCREEN_OFF (1<<5)
-+
-+#define PPCR 0x61204
-+#define PPCR_ON (1<<0)
-+
-+#define DVOA 0x61120
-+#define DVOA_ON (1<<31)
-+#define DVOB 0x61140
-+#define DVOB_ON (1<<31)
-+#define DVOC 0x61160
-+#define DVOC_ON (1<<31)
-+#define LVDS 0x61180
-+#define LVDS_ON (1<<31)
-+
-+#define ADPA 0x61100
-+#define ADPA_DPMS_MASK (~(3<<10))
-+#define ADPA_DPMS_ON (0<<10)
-+#define ADPA_DPMS_SUSPEND (1<<10)
-+#define ADPA_DPMS_STANDBY (2<<10)
-+#define ADPA_DPMS_OFF (3<<10)
-+
-+#define NOPID 0x2094
-+#define LP_RING 0x2030
-+#define HP_RING 0x2040
-+/* The binner has its own ring buffer:
-+ */
-+#define HWB_RING 0x2400
-+
-+#define RING_TAIL 0x00
-+#define TAIL_ADDR 0x001FFFF8
-+#define RING_HEAD 0x04
-+#define HEAD_WRAP_COUNT 0xFFE00000
-+#define HEAD_WRAP_ONE 0x00200000
-+#define HEAD_ADDR 0x001FFFFC
-+#define RING_START 0x08
-+#define START_ADDR 0x0xFFFFF000
-+#define RING_LEN 0x0C
-+#define RING_NR_PAGES 0x001FF000
-+#define RING_REPORT_MASK 0x00000006
-+#define RING_REPORT_64K 0x00000002
-+#define RING_REPORT_128K 0x00000004
-+#define RING_NO_REPORT 0x00000000
-+#define RING_VALID_MASK 0x00000001
-+#define RING_VALID 0x00000001
-+#define RING_INVALID 0x00000000
-+
-+/* Instruction parser error reg:
-+ */
-+#define IPEIR 0x2088
-+
-+/* Scratch pad debug 0 reg:
-+ */
-+#define SCPD0 0x209c
-+
-+/* Error status reg:
-+ */
-+#define ESR 0x20b8
-+
-+/* Secondary DMA fetch address debug reg:
-+ */
-+#define DMA_FADD_S 0x20d4
-+
-+/* Cache mode 0 reg.
-+ * - Manipulating render cache behaviour is central
-+ * to the concept of zone rendering, tuning this reg can help avoid
-+ * unnecessary render cache reads and even writes (for z/stencil)
-+ * at beginning and end of scene.
-+ *
-+ * - To change a bit, write to this reg with a mask bit set and the
-+ * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
-+ */
-+#define Cache_Mode_0 0x2120
-+#define CM0_MASK_SHIFT 16
-+#define CM0_IZ_OPT_DISABLE (1<<6)
-+#define CM0_ZR_OPT_DISABLE (1<<5)
-+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
-+#define CM0_COLOR_EVICT_DISABLE (1<<3)
-+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
-+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
-+
-+
-+/* Graphics flush control. A CPU write flushes the GWB of all writes.
-+ * The data is discarded.
-+ */
-+#define GFX_FLSH_CNTL 0x2170
-+
-+/* Binner control. Defines the location of the bin pointer list:
-+ */
-+#define BINCTL 0x2420
-+#define BC_MASK (1 << 9)
-+
-+/* Binned scene info.
-+ */
-+#define BINSCENE 0x2428
-+#define BS_OP_LOAD (1 << 8)
-+#define BS_MASK (1 << 22)
-+
-+/* Bin command parser debug reg:
-+ */
-+#define BCPD 0x2480
-+
-+/* Bin memory control debug reg:
-+ */
-+#define BMCD 0x2484
-+
-+/* Bin data cache debug reg:
-+ */
-+#define BDCD 0x2488
-+
-+/* Binner pointer cache debug reg:
-+ */
-+#define BPCD 0x248c
-+
-+/* Binner scratch pad debug reg:
-+ */
-+#define BINSKPD 0x24f0
-+
-+/* HWB scratch pad debug reg:
-+ */
-+#define HWBSKPD 0x24f4
-+
-+/* Binner memory pool reg:
-+ */
-+#define BMP_BUFFER 0x2430
-+#define BMP_PAGE_SIZE_4K (0 << 10)
-+#define BMP_BUFFER_SIZE_SHIFT 1
-+#define BMP_ENABLE (1 << 0)
-+
-+/* Get/put memory from the binner memory pool:
-+ */
-+#define BMP_GET 0x2438
-+#define BMP_PUT 0x2440
-+#define BMP_OFFSET_SHIFT 5
-+
-+/* 3D state packets:
-+ */
-+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
-+
-+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-+#define SC_UPDATE_SCISSOR (0x1<<1)
-+#define SC_ENABLE_MASK (0x1<<0)
-+#define SC_ENABLE (0x1<<0)
-+
-+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
-+
-+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-+#define SCI_YMIN_MASK (0xffff<<16)
-+#define SCI_XMIN_MASK (0xffff<<0)
-+#define SCI_YMAX_MASK (0xffff<<16)
-+#define SCI_XMAX_MASK (0xffff<<0)
-+
-+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-+
-+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-+
-+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
-+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
-+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-+#define BLT_DEPTH_8 (0<<24)
-+#define BLT_DEPTH_16_565 (1<<24)
-+#define BLT_DEPTH_16_1555 (2<<24)
-+#define BLT_DEPTH_32 (3<<24)
-+#define BLT_ROP_GXCOPY (0xcc<<16)
-+
-+#define MI_BATCH_BUFFER ((0x30<<23)|1)
-+#define MI_BATCH_BUFFER_START (0x31<<23)
-+#define MI_BATCH_BUFFER_END (0xA<<23)
-+#define MI_BATCH_NON_SECURE (1)
-+
-+#define MI_BATCH_NON_SECURE_I965 (1<<8)
-+
-+#define MI_WAIT_FOR_EVENT ((0x3<<23))
-+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
-+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-+
-+#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
-+
-+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-+#define ASYNC_FLIP (1<<22)
-+#define DISPLAY_PLANE_A (0<<20)
-+#define DISPLAY_PLANE_B (1<<20)
-+
-+/* Display regs */
-+#define DSPACNTR 0x70180
-+#define DSPBCNTR 0x71180
-+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
-+
-+/* Define the region of interest for the binner:
-+ */
-+#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
-+
-+#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-+
-+#define BREADCRUMB_BITS 31
-+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-+
-+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
-+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-+
-+#define PRIMARY_RINGBUFFER_SIZE (128*1024)
-+
-+#define BLC_PWM_CTL2 0x61250
-+
-+#endif
-Index: linux-2.6.27/drivers/gpu/drm/psb/i915_reg.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/i915_reg.h 2009-02-05 18:29:58.000000000 +0000
-@@ -0,0 +1,98 @@
-+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
-+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#include "../i915/i915_reg.h"
-+
-+#define I915_GCFGC 0xf0
-+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
-+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
-+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
-+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
-+
-+#define I855_HPLLCC 0xc0
-+#define I855_CLOCK_CONTROL_MASK (3 << 0)
-+#define I855_CLOCK_133_200 (0 << 0)
-+#define I855_CLOCK_100_200 (1 << 0)
-+#define I855_CLOCK_100_133 (2 << 0)
-+#define I855_CLOCK_166_250 (3 << 0)
-+
-+#define LVDSPP_ON 0x61208
-+#define LVDSPP_OFF 0x6120c
-+#define PP_CYCLE 0x61210
-+
-+
-+
-+#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
-+#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
-+#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
-+#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
-+#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
-+
-+#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/
-+#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
-+#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
-+#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
-+
-+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
-+ (dev)->pci_device == 0x2982 || \
-+ (dev)->pci_device == 0x2992 || \
-+ (dev)->pci_device == 0x29A2 || \
-+ (dev)->pci_device == 0x2A02 || \
-+ (dev)->pci_device == 0x2A12)
-+
-+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
-+
-+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
-+ (dev)->pci_device == 0x29B2 || \
-+ (dev)->pci_device == 0x29D2)
-+
-+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
-+ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev))
-+
-+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-+ IS_I945GM(dev) || IS_I965GM(dev) || IS_POULSBO(dev))
-+
-+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
-+ ((dev)->pci_device == 0x8109))
-+
-+#define FPA0 0x06040
-+#define FPA1 0x06044
-+#define FPB0 0x06048
-+#define FPB1 0x0604c
-+#define FP_N_DIV_MASK 0x003f0000
-+#define FP_N_DIV_SHIFT 16
-+#define FP_M1_DIV_MASK 0x00003f00
-+#define FP_M1_DIV_SHIFT 8
-+#define FP_M2_DIV_MASK 0x0000003f
-+#define FP_M2_DIV_SHIFT 0
-+
-+#define DSPABASE 0x70184
-+#define DSPBBASE 0x71184
-+#define DSPAKEYVAL 0x70194
-+#define DSPAKEYMASK 0x70198
-+
-+#define VSYNCSHIFT_A 0x60028
-+#define VSYNCSHIFT_B 0x61028
-+#define DPLL_B_MD 0x06020
-+
-Index: linux-2.6.27/drivers/gpu/drm/psb/intel_crt.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/intel_crt.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,242 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ */
-+
-+#include <linux/i2c.h>
-+
-+static void intel_crt_dpms(struct drm_output *output, int mode)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ u32 temp;
-+
-+ temp = I915_READ(ADPA);
-+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
-+ temp &= ~ADPA_DAC_ENABLE;
-+
-+ switch(mode) {
-+ case DPMSModeOn:
-+ temp |= ADPA_DAC_ENABLE;
-+ break;
-+ case DPMSModeStandby:
-+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
-+ break;
-+ case DPMSModeSuspend:
-+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
-+ break;
-+ case DPMSModeOff:
-+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
-+ break;
-+ }
-+
-+ I915_WRITE(ADPA, temp);
-+}
-+
-+static void intel_crt_save(struct drm_output *output)
-+{
-+
-+}
-+
-+static void intel_crt_restore(struct drm_output *output)
-+{
-+
-+}
-+
-+static int intel_crt_mode_valid(struct drm_output *output,
-+ struct drm_display_mode *mode)
-+{
-+ if (mode->flags & V_DBLSCAN)
-+ return MODE_NO_DBLESCAN;
-+
-+ if (mode->clock > 400000 || mode->clock < 25000)
-+ return MODE_CLOCK_RANGE;
-+
-+ return MODE_OK;
-+}
-+
-+static bool intel_crt_mode_fixup(struct drm_output *output,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ return true;
-+}
-+
-+static void intel_crt_mode_set(struct drm_output *output,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ struct drm_device *dev = output->dev;
-+ struct drm_crtc *crtc = output->crtc;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ int dpll_md_reg;
-+ u32 adpa, dpll_md;
-+
-+ if (intel_crtc->pipe == 0)
-+ dpll_md_reg = DPLL_A_MD;
-+ else
-+ dpll_md_reg = DPLL_B_MD;
-+
-+ /*
-+ * Disable separate mode multiplier used when cloning SDVO to CRT
-+ * XXX this needs to be adjusted when we really are cloning
-+ */
-+ if (IS_I965G(dev)) {
-+ dpll_md = I915_READ(dpll_md_reg);
-+ I915_WRITE(dpll_md_reg,
-+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
-+ }
-+
-+ adpa = 0;
-+ if (adjusted_mode->flags & V_PHSYNC)
-+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
-+ if (adjusted_mode->flags & V_PVSYNC)
-+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
-+
-+ if (intel_crtc->pipe == 0)
-+ adpa |= ADPA_PIPE_A_SELECT;
-+ else
-+ adpa |= ADPA_PIPE_B_SELECT;
-+
-+ I915_WRITE(ADPA, adpa);
-+}
-+
-+/**
-+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
-+ *
-+ * Only for I945G/GM.
-+ *
-+ * \return TRUE if CRT is connected.
-+ * \return FALSE if CRT is disconnected.
-+ */
-+static bool intel_crt_detect_hotplug(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ u32 temp;
-+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-+
-+ temp = I915_READ(PORT_HOTPLUG_EN);
-+
-+ I915_WRITE(PORT_HOTPLUG_EN,
-+ temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
-+
-+ do {
-+ if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
-+ break;
-+ msleep(1);
-+ } while (time_after(timeout, jiffies));
-+
-+ if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
-+ CRT_HOTPLUG_MONITOR_COLOR)
-+ return true;
-+
-+ return false;
-+}
-+
-+static bool intel_crt_detect_ddc(struct drm_output *output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+
-+ /* CRT should always be at 0, but check anyway */
-+ if (intel_output->type != INTEL_OUTPUT_ANALOG)
-+ return false;
-+
-+ return intel_ddc_probe(output);
-+}
-+
-+static enum drm_output_status intel_crt_detect(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+
-+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) {
-+ if (intel_crt_detect_hotplug(output))
-+ return output_status_connected;
-+ else
-+ return output_status_disconnected;
-+ }
-+
-+ if (intel_crt_detect_ddc(output))
-+ return output_status_connected;
-+
-+ /* TODO use load detect */
-+ return output_status_unknown;
-+}
-+
-+static void intel_crt_destroy(struct drm_output *output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+
-+ intel_i2c_destroy(intel_output->ddc_bus);
-+ kfree(output->driver_private);
-+}
-+
-+static int intel_crt_get_modes(struct drm_output *output)
-+{
-+ return intel_ddc_get_modes(output);
-+}
-+
-+/*
-+ * Routines for controlling stuff on the analog port
-+ */
-+static const struct drm_output_funcs intel_crt_output_funcs = {
-+ .dpms = intel_crt_dpms,
-+ .save = intel_crt_save,
-+ .restore = intel_crt_restore,
-+ .mode_valid = intel_crt_mode_valid,
-+ .mode_fixup = intel_crt_mode_fixup,
-+ .prepare = intel_output_prepare,
-+ .mode_set = intel_crt_mode_set,
-+ .commit = intel_output_commit,
-+ .detect = intel_crt_detect,
-+ .get_modes = intel_crt_get_modes,
-+ .cleanup = intel_crt_destroy,
-+};
-+
-+void intel_crt_init(struct drm_device *dev)
-+{
-+ struct drm_output *output;
-+ struct intel_output *intel_output;
-+
-+ output = drm_output_create(dev, &intel_crt_output_funcs, "VGA");
-+
-+ intel_output = kmalloc(sizeof(struct intel_output), GFP_KERNEL);
-+ if (!intel_output) {
-+ drm_output_destroy(output);
-+ return;
-+ }
-+ /* Set up the DDC bus. */
-+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
-+ if (!intel_output->ddc_bus) {
-+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-+ "failed.\n");
-+ return;
-+ }
-+
-+ intel_output->type = INTEL_OUTPUT_ANALOG;
-+ output->driver_private = intel_output;
-+ output->interlace_allowed = 0;
-+ output->doublescan_allowed = 0;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/intel_display.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/intel_display.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,1472 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ */
-+
-+#include <linux/i2c.h>
-+
-+bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
-+
-+typedef struct {
-+ /* given values */
-+ int n;
-+ int m1, m2;
-+ int p1, p2;
-+ /* derived values */
-+ int dot;
-+ int vco;
-+ int m;
-+ int p;
-+} intel_clock_t;
-+
-+typedef struct {
-+ int min, max;
-+} intel_range_t;
-+
-+typedef struct {
-+ int dot_limit;
-+ int p2_slow, p2_fast;
-+} intel_p2_t;
-+
-+#define INTEL_P2_NUM 2
-+
-+typedef struct {
-+ intel_range_t dot, vco, n, m, m1, m2, p, p1;
-+ intel_p2_t p2;
-+} intel_limit_t;
-+
-+#define I8XX_DOT_MIN 25000
-+#define I8XX_DOT_MAX 350000
-+#define I8XX_VCO_MIN 930000
-+#define I8XX_VCO_MAX 1400000
-+#define I8XX_N_MIN 3
-+#define I8XX_N_MAX 16
-+#define I8XX_M_MIN 96
-+#define I8XX_M_MAX 140
-+#define I8XX_M1_MIN 18
-+#define I8XX_M1_MAX 26
-+#define I8XX_M2_MIN 6
-+#define I8XX_M2_MAX 16
-+#define I8XX_P_MIN 4
-+#define I8XX_P_MAX 128
-+#define I8XX_P1_MIN 2
-+#define I8XX_P1_MAX 33
-+#define I8XX_P1_LVDS_MIN 1
-+#define I8XX_P1_LVDS_MAX 6
-+#define I8XX_P2_SLOW 4
-+#define I8XX_P2_FAST 2
-+#define I8XX_P2_LVDS_SLOW 14
-+#define I8XX_P2_LVDS_FAST 14 /* No fast option */
-+#define I8XX_P2_SLOW_LIMIT 165000
-+
-+#define I9XX_DOT_MIN 20000
-+#define I9XX_DOT_MAX 400000
-+#define I9XX_VCO_MIN 1400000
-+#define I9XX_VCO_MAX 2800000
-+#define I9XX_N_MIN 3
-+#define I9XX_N_MAX 8
-+#define I9XX_M_MIN 70
-+#define I9XX_M_MAX 120
-+#define I9XX_M1_MIN 10
-+#define I9XX_M1_MAX 20
-+#define I9XX_M2_MIN 5
-+#define I9XX_M2_MAX 9
-+#define I9XX_P_SDVO_DAC_MIN 5
-+#define I9XX_P_SDVO_DAC_MAX 80
-+#define I9XX_P_LVDS_MIN 7
-+#define I9XX_P_LVDS_MAX 98
-+#define I9XX_P1_MIN 1
-+#define I9XX_P1_MAX 8
-+#define I9XX_P2_SDVO_DAC_SLOW 10
-+#define I9XX_P2_SDVO_DAC_FAST 5
-+#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
-+#define I9XX_P2_LVDS_SLOW 14
-+#define I9XX_P2_LVDS_FAST 7
-+#define I9XX_P2_LVDS_SLOW_LIMIT 112000
-+
-+#define INTEL_LIMIT_I8XX_DVO_DAC 0
-+#define INTEL_LIMIT_I8XX_LVDS 1
-+#define INTEL_LIMIT_I9XX_SDVO_DAC 2
-+#define INTEL_LIMIT_I9XX_LVDS 3
-+
-+static const intel_limit_t intel_limits[] = {
-+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
-+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
-+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
-+ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
-+ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
-+ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
-+ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
-+ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
-+ .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
-+ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
-+ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
-+ },
-+ { /* INTEL_LIMIT_I8XX_LVDS */
-+ .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
-+ .vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
-+ .n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
-+ .m = { .min = I8XX_M_MIN, .max = I8XX_M_MAX },
-+ .m1 = { .min = I8XX_M1_MIN, .max = I8XX_M1_MAX },
-+ .m2 = { .min = I8XX_M2_MIN, .max = I8XX_M2_MAX },
-+ .p = { .min = I8XX_P_MIN, .max = I8XX_P_MAX },
-+ .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
-+ .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
-+ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
-+ },
-+ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
-+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
-+ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
-+ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
-+ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
-+ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
-+ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
-+ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
-+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
-+ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
-+ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
-+ },
-+ { /* INTEL_LIMIT_I9XX_LVDS */
-+ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
-+ .vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
-+ .n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
-+ .m = { .min = I9XX_M_MIN, .max = I9XX_M_MAX },
-+ .m1 = { .min = I9XX_M1_MIN, .max = I9XX_M1_MAX },
-+ .m2 = { .min = I9XX_M2_MIN, .max = I9XX_M2_MAX },
-+ .p = { .min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX },
-+ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
-+ /* The single-channel range is 25-112Mhz, and dual-channel
-+ * is 80-224Mhz. Prefer single channel as much as possible.
-+ */
-+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
-+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
-+ },
-+};
-+
-+static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ const intel_limit_t *limit;
-+
-+ if (IS_I9XX(dev)) {
-+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-+ limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
-+ else
-+ limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
-+ } else {
-+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
-+ limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
-+ else
-+ limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
-+ }
-+ return limit;
-+}
-+
-+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-+
-+static void i8xx_clock(int refclk, intel_clock_t *clock)
-+{
-+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
-+ clock->p = clock->p1 * clock->p2;
-+ clock->vco = refclk * clock->m / (clock->n + 2);
-+ clock->dot = clock->vco / clock->p;
-+}
-+
-+/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
-+
-+static void i9xx_clock(int refclk, intel_clock_t *clock)
-+{
-+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
-+ clock->p = clock->p1 * clock->p2;
-+ clock->vco = refclk * clock->m / (clock->n + 2);
-+ clock->dot = clock->vco / clock->p;
-+}
-+
-+static void intel_clock(struct drm_device *dev, int refclk,
-+ intel_clock_t *clock)
-+{
-+ if (IS_I9XX(dev))
-+ return i9xx_clock (refclk, clock);
-+ else
-+ return i8xx_clock (refclk, clock);
-+}
-+
-+/**
-+ * Returns whether any output on the specified pipe is of the specified type
-+ */
-+bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ struct drm_mode_config *mode_config = &dev->mode_config;
-+ struct drm_output *l_entry;
-+
-+ list_for_each_entry(l_entry, &mode_config->output_list, head) {
-+ if (l_entry->crtc == crtc) {
-+ struct intel_output *intel_output = l_entry->driver_private;
-+ if (intel_output->type == type)
-+ return true;
-+ }
-+ }
-+ return false;
-+}
-+
-+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
-+/**
-+ * Returns whether the given set of divisors are valid for a given refclk with
-+ * the given outputs.
-+ */
-+
-+static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
-+{
-+ const intel_limit_t *limit = intel_limit (crtc);
-+
-+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
-+ INTELPllInvalid ("p1 out of range\n");
-+ if (clock->p < limit->p.min || limit->p.max < clock->p)
-+ INTELPllInvalid ("p out of range\n");
-+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
-+ INTELPllInvalid ("m2 out of range\n");
-+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
-+ INTELPllInvalid ("m1 out of range\n");
-+ if (clock->m1 <= clock->m2)
-+ INTELPllInvalid ("m1 <= m2\n");
-+ if (clock->m < limit->m.min || limit->m.max < clock->m)
-+ INTELPllInvalid ("m out of range\n");
-+ if (clock->n < limit->n.min || limit->n.max < clock->n)
-+ INTELPllInvalid ("n out of range\n");
-+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
-+ INTELPllInvalid ("vco out of range\n");
-+ /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
-+ * output, etc., rather than just a single range.
-+ */
-+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
-+ INTELPllInvalid ("dot out of range\n");
-+
-+ return true;
-+}
-+
-+/**
-+ * Returns a set of divisors for the desired target clock with the given
-+ * refclk, or FALSE. The returned values represent the clock equation:
-+ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
-+ */
-+static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
-+ int refclk, intel_clock_t *best_clock)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ intel_clock_t clock;
-+ const intel_limit_t *limit = intel_limit(crtc);
-+ int err = target;
-+
-+ if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
-+ (I915_READ(LVDS) & LVDS_PORT_EN) != 0) {
-+ /*
-+ * For LVDS, if the panel is on, just rely on its current
-+ * settings for dual-channel. We haven't figured out how to
-+ * reliably set up different single/dual channel state, if we
-+ * even can.
-+ */
-+ if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
-+ LVDS_CLKB_POWER_UP)
-+ clock.p2 = limit->p2.p2_fast;
-+ else
-+ clock.p2 = limit->p2.p2_slow;
-+ } else {
-+ if (target < limit->p2.dot_limit)
-+ clock.p2 = limit->p2.p2_slow;
-+ else
-+ clock.p2 = limit->p2.p2_fast;
-+ }
-+
-+ memset (best_clock, 0, sizeof (*best_clock));
-+
-+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
-+ for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
-+ clock.m2 <= limit->m2.max; clock.m2++) {
-+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
-+ clock.n++) {
-+ for (clock.p1 = limit->p1.min;
-+ clock.p1 <= limit->p1.max; clock.p1++) {
-+ int this_err;
-+
-+ intel_clock(dev, refclk, &clock);
-+
-+ if (!intel_PLL_is_valid(crtc, &clock))
-+ continue;
-+
-+ this_err = abs(clock.dot - target);
-+ if (this_err < err) {
-+ *best_clock = clock;
-+ err = this_err;
-+ }
-+ }
-+ }
-+ }
-+ }
-+
-+ return (err != target);
-+}
-+
-+#if 0
-+void
-+intel_set_vblank(struct drm_device *dev)
-+{
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct drm_crtc *crtc;
-+ struct intel_crtc *intel_crtc;
-+ int vbl_pipe = 0;
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ intel_crtc = crtc->driver_private;
-+
-+ if (crtc->enabled)
-+ vbl_pipe |= (1<<intel_crtc->pipe);
-+ }
-+
-+ dev_priv->vblank_pipe = vbl_pipe;
-+ i915_enable_interrupt(dev);
-+}
-+#endif
-+
-+void
-+intel_wait_for_vblank(struct drm_device *dev)
-+{
-+ /* Wait for 20ms, i.e. one cycle at 50hz. */
-+ udelay(20000);
-+}
-+
-+void
-+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ int pipe = intel_crtc->pipe;
-+ unsigned long Start, Offset;
-+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
-+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
-+
-+ Start = crtc->fb->offset;
-+ Offset = y * crtc->fb->pitch + x;
-+
-+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
-+ if (IS_I965G(dev)) {
-+ I915_WRITE(dspbase, Offset);
-+ I915_READ(dspbase);
-+ I915_WRITE(dspsurf, Start);
-+ I915_READ(dspsurf);
-+ } else {
-+ I915_WRITE(dspbase, Start + Offset);
-+ I915_READ(dspbase);
-+ }
-+
-+
-+ if (!dev_priv->sarea_priv)
-+ return;
-+
-+ switch (pipe) {
-+ case 0:
-+ dev_priv->sarea_priv->planeA_x = x;
-+ dev_priv->sarea_priv->planeA_y = y;
-+ break;
-+ case 1:
-+ dev_priv->sarea_priv->planeB_x = x;
-+ dev_priv->sarea_priv->planeB_y = y;
-+ break;
-+ default:
-+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
-+ break;
-+ }
-+}
-+
-+/**
-+ * Sets the power management mode of the pipe and plane.
-+ *
-+ * This code should probably grow support for turning the cursor off and back
-+ * on appropriately at the same time as we're turning the pipe off/on.
-+ */
-+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ int pipe = intel_crtc->pipe;
-+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
-+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-+ u32 temp, temp2;
-+ bool enabled;
-+
-+ /* XXX: When our outputs are all unaware of DPMS modes other than off
-+ * and on, we should map those modes to DPMSModeOff in the CRTC.
-+ */
-+ switch (mode) {
-+ case DPMSModeOn:
-+ case DPMSModeStandby:
-+ case DPMSModeSuspend:
-+ /* Enable the DPLL */
-+ temp = I915_READ(dpll_reg);
-+ if ((temp & DPLL_VCO_ENABLE) == 0) {
-+ I915_WRITE(dpll_reg, temp);
-+ I915_READ(dpll_reg);
-+ /* Wait for the clocks to stabilize. */
-+ udelay(150);
-+ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-+ I915_READ(dpll_reg);
-+ /* Wait for the clocks to stabilize. */
-+ udelay(150);
-+ I915_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
-+ I915_READ(dpll_reg);
-+ /* Wait for the clocks to stabilize. */
-+ udelay(150);
-+ }
-+
-+ /* Enable the pipe */
-+ temp = I915_READ(pipeconf_reg);
-+ if ((temp & PIPEACONF_ENABLE) == 0)
-+ I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
-+
-+ /* Enable the plane */
-+ temp = I915_READ(dspcntr_reg);
-+ if (mode != DPMSModeOn)
-+ temp2 = temp & ~DISPLAY_PLANE_ENABLE;
-+ else
-+ temp2 = temp | DISPLAY_PLANE_ENABLE;
-+
-+ if (temp != temp2) {
-+ I915_WRITE(dspcntr_reg, temp2);
-+ /* Flush the plane changes */
-+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-+ }
-+
-+ intel_crtc_load_lut(crtc);
-+
-+ /* Give the overlay scaler a chance to enable if it's on this pipe */
-+ //intel_crtc_dpms_video(crtc, TRUE); TODO
-+ break;
-+ case DPMSModeOff:
-+ /* Give the overlay scaler a chance to disable if it's on this pipe */
-+ //intel_crtc_dpms_video(crtc, FALSE); TODO
-+
-+ /* Disable display plane */
-+ temp = I915_READ(dspcntr_reg);
-+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
-+ I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
-+ /* Flush the plane changes */
-+ I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
-+ I915_READ(dspbase_reg);
-+ }
-+
-+ if (!IS_I9XX(dev)) {
-+ /* Wait for vblank for the disable to take effect */
-+ intel_wait_for_vblank(dev);
-+ }
-+
-+ /* Next, disable display pipes */
-+ temp = I915_READ(pipeconf_reg);
-+ if ((temp & PIPEACONF_ENABLE) != 0) {
-+ I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-+ I915_READ(pipeconf_reg);
-+ }
-+
-+ /* Wait for vblank for the disable to take effect. */
-+ intel_wait_for_vblank(dev);
-+
-+ temp = I915_READ(dpll_reg);
-+ if ((temp & DPLL_VCO_ENABLE) != 0) {
-+ I915_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
-+ I915_READ(dpll_reg);
-+ }
-+
-+ /* Wait for the clocks to turn off. */
-+ udelay(150);
-+ break;
-+ }
-+
-+
-+ if (!dev_priv->sarea_priv)
-+ return;
-+
-+ enabled = crtc->enabled && mode != DPMSModeOff;
-+
-+ switch (pipe) {
-+ case 0:
-+ dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
-+ dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
-+ break;
-+ case 1:
-+ dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
-+ dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
-+ break;
-+ default:
-+ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
-+ break;
-+ }
-+}
-+
-+static bool intel_crtc_lock(struct drm_crtc *crtc)
-+{
-+ /* Sync the engine before mode switch */
-+// i830WaitSync(crtc->scrn);
-+
-+#if 0 // TODO def XF86DRI
-+ return I830DRILock(crtc->scrn);
-+#else
-+ return FALSE;
-+#endif
-+}
-+
-+static void intel_crtc_unlock (struct drm_crtc *crtc)
-+{
-+#if 0 // TODO def XF86DRI
-+ I830DRIUnlock (crtc->scrn);
-+#endif
-+}
-+
-+static void intel_crtc_prepare (struct drm_crtc *crtc)
-+{
-+ crtc->funcs->dpms(crtc, DPMSModeOff);
-+}
-+
-+static void intel_crtc_commit (struct drm_crtc *crtc)
-+{
-+ crtc->funcs->dpms(crtc, DPMSModeOn);
-+}
-+
-+void intel_output_prepare (struct drm_output *output)
-+{
-+ /* lvds has its own version of prepare see intel_lvds_prepare */
-+ output->funcs->dpms(output, DPMSModeOff);
-+}
-+
-+void intel_output_commit (struct drm_output *output)
-+{
-+ /* lvds has its own version of commit see intel_lvds_commit */
-+ output->funcs->dpms(output, DPMSModeOn);
-+}
-+
-+static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ return true;
-+}
-+
-+
-+/** Returns the core display clock speed for i830 - i945 */
-+int intel_get_core_clock_speed(struct drm_device *dev)
-+{
-+
-+ /* Core clock values taken from the published datasheets.
-+ * The 830 may go up to 166 Mhz, which we should check.
-+ */
-+ if (IS_I945G(dev))
-+ return 400000;
-+ else if (IS_I915G(dev))
-+ return 333000;
-+ else if (IS_I945GM(dev) || IS_POULSBO(dev) || IS_845G(dev))
-+ return 200000;
-+ else if (IS_I915GM(dev)) {
-+ u16 gcfgc = 0;
-+
-+ pci_read_config_word(dev->pdev, I915_GCFGC, &gcfgc);
-+
-+ if (gcfgc & I915_LOW_FREQUENCY_ENABLE)
-+ return 133000;
-+ else {
-+ switch (gcfgc & I915_DISPLAY_CLOCK_MASK) {
-+ case I915_DISPLAY_CLOCK_333_MHZ:
-+ return 333000;
-+ default:
-+ case I915_DISPLAY_CLOCK_190_200_MHZ:
-+ return 190000;
-+ }
-+ }
-+ } else if (IS_I865G(dev))
-+ return 266000;
-+ else if (IS_I855(dev)) {
-+#if 0
-+ PCITAG bridge = pciTag(0, 0, 0); /* This is always the host bridge */
-+ u16 hpllcc = pciReadWord(bridge, I855_HPLLCC);
-+
-+#endif
-+ u16 hpllcc = 0;
-+ /* Assume that the hardware is in the high speed state. This
-+ * should be the default.
-+ */
-+ switch (hpllcc & I855_CLOCK_CONTROL_MASK) {
-+ case I855_CLOCK_133_200:
-+ case I855_CLOCK_100_200:
-+ return 200000;
-+ case I855_CLOCK_166_250:
-+ return 250000;
-+ case I855_CLOCK_100_133:
-+ return 133000;
-+ }
-+ } else /* 852, 830 */
-+ return 133000;
-+
-+ return 0; /* Silence gcc warning */
-+}
-+
-+
-+/**
-+ * Return the pipe currently connected to the panel fitter,
-+ * or -1 if the panel fitter is not present or not in use
-+ */
-+int intel_panel_fitter_pipe (struct drm_device *dev)
-+{
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ u32 pfit_control;
-+
-+ /* i830 doesn't have a panel fitter */
-+ if (IS_I830(dev))
-+ return -1;
-+
-+ pfit_control = I915_READ(PFIT_CONTROL);
-+
-+ /* See if the panel fitter is in use */
-+ if ((pfit_control & PFIT_ENABLE) == 0)
-+ return -1;
-+
-+ /* 965 can place panel fitter on either pipe */
-+ if (IS_I965G(dev))
-+ return (pfit_control >> 29) & 0x3;
-+
-+ /* older chips can only use pipe 1 */
-+ return 1;
-+}
-+
-+#define WA_NO_FB_GARBAGE_DISPLAY
-+#ifdef WA_NO_FB_GARBAGE_DISPLAY
-+static u32 fp_reg_value[2];
-+static u32 dpll_reg_value[2];
-+static u32 dpll_md_reg_value[2];
-+static u32 dspcntr_reg_value[2];
-+static u32 pipeconf_reg_value[2];
-+static u32 htot_reg_value[2];
-+static u32 hblank_reg_value[2];
-+static u32 hsync_reg_value[2];
-+static u32 vtot_reg_value[2];
-+static u32 vblank_reg_value[2];
-+static u32 vsync_reg_value[2];
-+static u32 dspsize_reg_value[2];
-+static u32 dspstride_reg_value[2];
-+static u32 dsppos_reg_value[2];
-+static u32 pipesrc_reg_value[2];
-+
-+static u32 dspbase_value[2];
-+
-+static u32 lvds_reg_value[2];
-+static u32 vgacntrl_reg_value[2];
-+static u32 pfit_control_reg_value[2];
-+
-+void intel_crtc_mode_restore(struct drm_crtc *crtc)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ int pipe = intel_crtc->pipe;
-+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
-+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
-+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
-+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
-+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
-+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
-+
-+ bool ok, is_sdvo = false, is_dvo = false;
-+ bool is_crt = false, is_lvds = false, is_tv = false;
-+ struct drm_mode_config *mode_config = &dev->mode_config;
-+ struct drm_output *output;
-+
-+ list_for_each_entry(output, &mode_config->output_list, head) {
-+ struct intel_output *intel_output = output->driver_private;
-+
-+ if (output->crtc != crtc)
-+ continue;
-+
-+ switch (intel_output->type) {
-+ case INTEL_OUTPUT_LVDS:
-+ is_lvds = TRUE;
-+ break;
-+ case INTEL_OUTPUT_SDVO:
-+ is_sdvo = TRUE;
-+ break;
-+ case INTEL_OUTPUT_DVO:
-+ is_dvo = TRUE;
-+ break;
-+ case INTEL_OUTPUT_TVOUT:
-+ is_tv = TRUE;
-+ break;
-+ case INTEL_OUTPUT_ANALOG:
-+ is_crt = TRUE;
-+ break;
-+ }
-+ if(is_lvds && ((lvds_reg_value[pipe] & LVDS_PORT_EN) == 0))
-+ {
-+ printk("%s: is_lvds but not the boot display, so return\n",
-+ __FUNCTION__);
-+ return;
-+ }
-+ output->funcs->prepare(output);
-+ }
-+
-+ intel_crtc_prepare(crtc);
-+ /* Disable the panel fitter if it was on our pipe */
-+ if (intel_panel_fitter_pipe(dev) == pipe)
-+ I915_WRITE(PFIT_CONTROL, 0);
-+
-+ if (dpll_reg_value[pipe] & DPLL_VCO_ENABLE) {
-+ I915_WRITE(fp_reg, fp_reg_value[pipe]);
-+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]& ~DPLL_VCO_ENABLE);
-+ I915_READ(dpll_reg);
-+ udelay(150);
-+ }
-+
-+ /*
-+ if(is_lvds)
-+ I915_WRITE(LVDS, lvds_reg_value[pipe]);
-+ */
-+ if (is_lvds) {
-+ I915_WRITE(LVDS, lvds_reg_value[pipe]);
-+ I915_READ(LVDS);
-+ }
-+
-+ I915_WRITE(fp_reg, fp_reg_value[pipe]);
-+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]);
-+ I915_READ(dpll_reg);
-+ udelay(150);
-+ //I915_WRITE(dpll_md_reg, dpll_md_reg_value[pipe]);
-+ I915_WRITE(dpll_reg, dpll_reg_value[pipe]);
-+ I915_READ(dpll_reg);
-+ udelay(150);
-+ I915_WRITE(htot_reg, htot_reg_value[pipe]);
-+ I915_WRITE(hblank_reg, hblank_reg_value[pipe]);
-+ I915_WRITE(hsync_reg, hsync_reg_value[pipe]);
-+ I915_WRITE(vtot_reg, vtot_reg_value[pipe]);
-+ I915_WRITE(vblank_reg, vblank_reg_value[pipe]);
-+ I915_WRITE(vsync_reg, vsync_reg_value[pipe]);
-+ I915_WRITE(dspstride_reg, dspstride_reg_value[pipe]);
-+ I915_WRITE(dspsize_reg, dspsize_reg_value[pipe]);
-+ I915_WRITE(dsppos_reg, dsppos_reg_value[pipe]);
-+ I915_WRITE(pipesrc_reg, pipesrc_reg_value[pipe]);
-+ I915_WRITE(pipeconf_reg, pipeconf_reg_value[pipe]);
-+ I915_READ(pipeconf_reg);
-+ intel_wait_for_vblank(dev);
-+ I915_WRITE(dspcntr_reg, dspcntr_reg_value[pipe]);
-+ I915_WRITE(dspbase, dspbase_value[pipe]);
-+ I915_READ(dspbase);
-+ I915_WRITE(VGACNTRL, vgacntrl_reg_value[pipe]);
-+ intel_wait_for_vblank(dev);
-+ I915_WRITE(PFIT_CONTROL, pfit_control_reg_value[pipe]);
-+
-+ intel_crtc_commit(crtc);
-+ list_for_each_entry(output, &mode_config->output_list, head) {
-+ if (output->crtc != crtc)
-+ continue;
-+
-+ output->funcs->commit(output);
-+ //output->funcs->dpms(output, DPMSModeOff);
-+ //printk("turn off the display first\n");
-+ }
-+ return;
-+}
-+
-+void intel_crtc_mode_save(struct drm_crtc *crtc)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ int pipe = intel_crtc->pipe;
-+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
-+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
-+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
-+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
-+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
-+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
-+ bool ok, is_sdvo = false, is_dvo = false;
-+ bool is_crt = false, is_lvds = false, is_tv = false;
-+ struct drm_mode_config *mode_config = &dev->mode_config;
-+ struct drm_output *output;
-+
-+ list_for_each_entry(output, &mode_config->output_list, head) {
-+ struct intel_output *intel_output = output->driver_private;
-+
-+ if (output->crtc != crtc)
-+ continue;
-+
-+ switch (intel_output->type) {
-+ case INTEL_OUTPUT_LVDS:
-+ is_lvds = TRUE;
-+ break;
-+ case INTEL_OUTPUT_SDVO:
-+ is_sdvo = TRUE;
-+ break;
-+ case INTEL_OUTPUT_DVO:
-+ is_dvo = TRUE;
-+ break;
-+ case INTEL_OUTPUT_TVOUT:
-+ is_tv = TRUE;
-+ break;
-+ case INTEL_OUTPUT_ANALOG:
-+ is_crt = TRUE;
-+ break;
-+ }
-+ }
-+
-+ fp_reg_value[pipe] = I915_READ(fp_reg);
-+ dpll_reg_value[pipe] = I915_READ(dpll_reg);
-+ dpll_md_reg_value[pipe] = I915_READ(dpll_md_reg);
-+ dspcntr_reg_value[pipe] = I915_READ(dspcntr_reg);
-+ pipeconf_reg_value[pipe] = I915_READ(pipeconf_reg);
-+ htot_reg_value[pipe] = I915_READ(htot_reg);
-+ hblank_reg_value[pipe] = I915_READ(hblank_reg);
-+ hsync_reg_value[pipe] = I915_READ(hsync_reg);
-+ vtot_reg_value[pipe] = I915_READ(vtot_reg);
-+ vblank_reg_value[pipe] = I915_READ(vblank_reg);
-+ vsync_reg_value[pipe] = I915_READ(vsync_reg);
-+ dspsize_reg_value[pipe] = I915_READ(dspsize_reg);
-+ dspstride_reg_value[pipe] = I915_READ(dspstride_reg);
-+ dsppos_reg_value[pipe] = I915_READ(dsppos_reg);
-+ pipesrc_reg_value[pipe] = I915_READ(pipesrc_reg);
-+ dspbase_value[pipe] = I915_READ(dspbase);
-+ if(is_lvds)
-+ lvds_reg_value[pipe] = I915_READ(LVDS);
-+ vgacntrl_reg_value[pipe] = I915_READ(VGACNTRL);
-+ pfit_control_reg_value[pipe] = I915_READ(PFIT_CONTROL);
-+}
-+#endif
-+
-+static void intel_crtc_mode_set(struct drm_crtc *crtc,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode,
-+ int x, int y)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ int pipe = intel_crtc->pipe;
-+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
-+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-+ int dpll_md_reg = (intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
-+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
-+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
-+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
-+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-+ int refclk;
-+ intel_clock_t clock;
-+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
-+ bool ok, is_sdvo = false, is_dvo = false;
-+ bool is_crt = false, is_lvds = false, is_tv = false;
-+ struct drm_mode_config *mode_config = &dev->mode_config;
-+ struct drm_output *output;
-+
-+ if (!crtc->fb) {
-+ DRM_ERROR("Can't set mode without attached fb\n");
-+ return;
-+ }
-+
-+ list_for_each_entry(output, &mode_config->output_list, head) {
-+ struct intel_output *intel_output = output->driver_private;
-+
-+ if (output->crtc != crtc)
-+ continue;
-+
-+ switch (intel_output->type) {
-+ case INTEL_OUTPUT_LVDS:
-+ is_lvds = TRUE;
-+ break;
-+ case INTEL_OUTPUT_SDVO:
-+ is_sdvo = TRUE;
-+ break;
-+ case INTEL_OUTPUT_DVO:
-+ is_dvo = TRUE;
-+ break;
-+ case INTEL_OUTPUT_TVOUT:
-+ is_tv = TRUE;
-+ break;
-+ case INTEL_OUTPUT_ANALOG:
-+ is_crt = TRUE;
-+ break;
-+ }
-+ }
-+
-+ if (IS_I9XX(dev)) {
-+ refclk = 96000;
-+ } else {
-+ refclk = 48000;
-+ }
-+
-+ ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
-+ if (!ok) {
-+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
-+ return;
-+ }
-+
-+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-+
-+ dpll = DPLL_VGA_MODE_DIS;
-+ if (IS_I9XX(dev)) {
-+ if (is_lvds) {
-+ dpll |= DPLLB_MODE_LVDS;
-+ if (IS_POULSBO(dev))
-+ dpll |= DPLL_DVO_HIGH_SPEED;
-+ } else
-+ dpll |= DPLLB_MODE_DAC_SERIAL;
-+ if (is_sdvo) {
-+ dpll |= DPLL_DVO_HIGH_SPEED;
-+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) {
-+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-+ dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
-+ }
-+ }
-+
-+ /* compute bitmask from p1 value */
-+ dpll |= (1 << (clock.p1 - 1)) << 16;
-+ switch (clock.p2) {
-+ case 5:
-+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
-+ break;
-+ case 7:
-+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
-+ break;
-+ case 10:
-+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
-+ break;
-+ case 14:
-+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
-+ break;
-+ }
-+ if (IS_I965G(dev))
-+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
-+ } else {
-+ if (is_lvds) {
-+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-+ } else {
-+ if (clock.p1 == 2)
-+ dpll |= PLL_P1_DIVIDE_BY_TWO;
-+ else
-+ dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
-+ if (clock.p2 == 4)
-+ dpll |= PLL_P2_DIVIDE_BY_4;
-+ }
-+ }
-+
-+ if (is_tv) {
-+ /* XXX: just matching BIOS for now */
-+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
-+ dpll |= 3;
-+ }
-+#if 0
-+ else if (is_lvds)
-+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
-+#endif
-+ else
-+ dpll |= PLL_REF_INPUT_DREFCLK;
-+
-+ /* setup pipeconf */
-+ pipeconf = I915_READ(pipeconf_reg);
-+
-+ /* Set up the display plane register */
-+ dspcntr = DISPPLANE_GAMMA_ENABLE;
-+
-+ switch (crtc->fb->bits_per_pixel) {
-+ case 8:
-+ dspcntr |= DISPPLANE_8BPP;
-+ break;
-+ case 16:
-+ if (crtc->fb->depth == 15)
-+ dspcntr |= DISPPLANE_15_16BPP;
-+ else
-+ dspcntr |= DISPPLANE_16BPP;
-+ break;
-+ case 32:
-+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-+ break;
-+ default:
-+ DRM_ERROR("Unknown color depth\n");
-+ return;
-+ }
-+
-+
-+ if (pipe == 0)
-+ dspcntr |= DISPPLANE_SEL_PIPE_A;
-+ else
-+ dspcntr |= DISPPLANE_SEL_PIPE_B;
-+
-+ if (pipe == 0 && !IS_I965G(dev)) {
-+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
-+ * core speed.
-+ *
-+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
-+ * pipe == 0 check?
-+ */
-+ if (mode->clock > intel_get_core_clock_speed(dev) * 9 / 10)
-+ pipeconf |= PIPEACONF_DOUBLE_WIDE;
-+ else
-+ pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
-+ }
-+
-+ dspcntr |= DISPLAY_PLANE_ENABLE;
-+ pipeconf |= PIPEACONF_ENABLE;
-+ dpll |= DPLL_VCO_ENABLE;
-+
-+
-+ /* Disable the panel fitter if it was on our pipe */
-+ if (intel_panel_fitter_pipe(dev) == pipe)
-+ I915_WRITE(PFIT_CONTROL, 0);
-+
-+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
-+ drm_mode_debug_printmodeline(dev, mode);
-+
-+ /*psbPrintPll("chosen", &clock);*/
-+ DRM_DEBUG("clock regs: 0x%08x, 0x%08x,dspntr is 0x%8x, pipeconf is 0x%8x\n", (int)dpll,
-+ (int)fp,(int)dspcntr,(int)pipeconf);
-+#if 0
-+ if (!xf86ModesEqual(mode, adjusted_mode)) {
-+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
-+ "Adjusted mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
-+ xf86PrintModeline(pScrn->scrnIndex, mode);
-+ }
-+ i830PrintPll("chosen", &clock);
-+#endif
-+
-+ if (dpll & DPLL_VCO_ENABLE) {
-+ I915_WRITE(fp_reg, fp);
-+ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
-+ I915_READ(dpll_reg);
-+ udelay(150);
-+ }
-+
-+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
-+ * This is an exception to the general rule that mode_set doesn't turn
-+ * things on.
-+ */
-+ if (is_lvds) {
-+ u32 lvds = I915_READ(LVDS);
-+
-+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
-+ /* Set the B0-B3 data pairs corresponding to whether we're going to
-+ * set the DPLLs for dual-channel mode or not.
-+ */
-+ if (clock.p2 == 7)
-+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
-+ else
-+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-+
-+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
-+ * appropriately here, but we need to look more thoroughly into how
-+ * panels behave in the two modes.
-+ */
-+
-+ I915_WRITE(LVDS, lvds);
-+ I915_READ(LVDS);
-+ }
-+
-+ I915_WRITE(fp_reg, fp);
-+ I915_WRITE(dpll_reg, dpll);
-+ I915_READ(dpll_reg);
-+ /* Wait for the clocks to stabilize. */
-+ udelay(150);
-+
-+ if (IS_I965G(dev)) {
-+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-+ I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
-+ ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
-+ } else {
-+ /* write it again -- the BIOS does, after all */
-+ I915_WRITE(dpll_reg, dpll);
-+ }
-+ I915_READ(dpll_reg);
-+ /* Wait for the clocks to stabilize. */
-+ udelay(150);
-+
-+ I915_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
-+ ((adjusted_mode->crtc_htotal - 1) << 16));
-+ I915_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
-+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
-+ I915_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
-+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
-+ I915_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
-+ ((adjusted_mode->crtc_vtotal - 1) << 16));
-+ I915_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
-+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
-+ I915_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
-+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
-+ I915_WRITE(dspstride_reg, crtc->fb->pitch);
-+ /* pipesrc and dspsize control the size that is scaled from, which should
-+ * always be the user's requested size.
-+ */
-+ I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
-+ I915_WRITE(dsppos_reg, 0);
-+ I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-+ I915_WRITE(pipeconf_reg, pipeconf);
-+ I915_READ(pipeconf_reg);
-+
-+ intel_wait_for_vblank(dev);
-+
-+ I915_WRITE(dspcntr_reg, dspcntr);
-+
-+ /* Flush the plane changes */
-+ intel_pipe_set_base(crtc, x, y);
-+
-+#if 0
-+ intel_set_vblank(dev);
-+#endif
-+
-+ /* Disable the VGA plane that we never use */
-+ I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-+
-+ intel_wait_for_vblank(dev);
-+}
-+
-+/** Loads the palette/gamma unit for the CRTC with the prepared values */
-+void intel_crtc_load_lut(struct drm_crtc *crtc)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
-+ int i;
-+
-+ /* The clocks have to be on to load the palette. */
-+ if (!crtc->enabled)
-+ return;
-+
-+ for (i = 0; i < 256; i++) {
-+ I915_WRITE(palreg + 4 * i,
-+ (intel_crtc->lut_r[i] << 16) |
-+ (intel_crtc->lut_g[i] << 8) |
-+ intel_crtc->lut_b[i]);
-+ }
-+}
-+
-+/** Sets the color ramps on behalf of RandR */
-+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-+ u16 blue, int regno)
-+{
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+
-+ intel_crtc->lut_r[regno] = red >> 8;
-+ intel_crtc->lut_g[regno] = green >> 8;
-+ intel_crtc->lut_b[regno] = blue >> 8;
-+}
-+
-+/* Returns the clock of the currently programmed mode of the given pipe. */
-+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
-+{
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ int pipe = intel_crtc->pipe;
-+ u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
-+ u32 fp;
-+ intel_clock_t clock;
-+
-+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-+ fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
-+ else
-+ fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
-+
-+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
-+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
-+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
-+ if (IS_I9XX(dev)) {
-+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
-+ DPLL_FPA01_P1_POST_DIV_SHIFT);
-+
-+ switch (dpll & DPLL_MODE_MASK) {
-+ case DPLLB_MODE_DAC_SERIAL:
-+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
-+ 5 : 10;
-+ break;
-+ case DPLLB_MODE_LVDS:
-+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
-+ 7 : 14;
-+ break;
-+ default:
-+ DRM_DEBUG("Unknown DPLL mode %08x in programmed "
-+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
-+ return 0;
-+ }
-+
-+ /* XXX: Handle the 100Mhz refclk */
-+ i9xx_clock(96000, &clock);
-+ } else {
-+ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
-+
-+ if (is_lvds) {
-+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
-+ DPLL_FPA01_P1_POST_DIV_SHIFT);
-+ clock.p2 = 14;
-+
-+ if ((dpll & PLL_REF_INPUT_MASK) ==
-+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
-+ /* XXX: might not be 66MHz */
-+ i8xx_clock(66000, &clock);
-+ } else
-+ i8xx_clock(48000, &clock);
-+ } else {
-+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
-+ clock.p1 = 2;
-+ else {
-+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
-+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
-+ }
-+ if (dpll & PLL_P2_DIVIDE_BY_4)
-+ clock.p2 = 4;
-+ else
-+ clock.p2 = 2;
-+
-+ i8xx_clock(48000, &clock);
-+ }
-+ }
-+
-+ /* XXX: It would be nice to validate the clocks, but we can't reuse
-+ * i830PllIsValid() because it relies on the xf86_config output
-+ * configuration being accurate, which it isn't necessarily.
-+ */
-+
-+ return clock.dot;
-+}
-+
-+/** Returns the currently programmed mode of the given pipe. */
-+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-+ struct drm_crtc *crtc)
-+{
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ int pipe = intel_crtc->pipe;
-+ struct drm_display_mode *mode;
-+ int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
-+ int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
-+ int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
-+ int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
-+
-+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
-+ if (!mode)
-+ return NULL;
-+
-+ mode->clock = intel_crtc_clock_get(dev, crtc);
-+ mode->hdisplay = (htot & 0xffff) + 1;
-+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
-+ mode->hsync_start = (hsync & 0xffff) + 1;
-+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
-+ mode->vdisplay = (vtot & 0xffff) + 1;
-+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
-+ mode->vsync_start = (vsync & 0xffff) + 1;
-+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
-+
-+ drm_mode_set_name(mode);
-+ drm_mode_set_crtcinfo(mode, 0);
-+
-+ return mode;
-+}
-+
-+static const struct drm_crtc_funcs intel_crtc_funcs = {
-+ .dpms = intel_crtc_dpms,
-+ .lock = intel_crtc_lock,
-+ .unlock = intel_crtc_unlock,
-+ .mode_fixup = intel_crtc_mode_fixup,
-+ .mode_set = intel_crtc_mode_set,
-+ .gamma_set = intel_crtc_gamma_set,
-+ .prepare = intel_crtc_prepare,
-+ .commit = intel_crtc_commit,
-+};
-+
-+
-+void intel_crtc_init(struct drm_device *dev, int pipe)
-+{
-+ struct drm_crtc *crtc;
-+ struct intel_crtc *intel_crtc;
-+ int i;
-+
-+ crtc = drm_crtc_create(dev, &intel_crtc_funcs);
-+ if (crtc == NULL)
-+ return;
-+
-+ intel_crtc = kzalloc(sizeof(struct intel_crtc), GFP_KERNEL);
-+ if (intel_crtc == NULL) {
-+ kfree(crtc);
-+ return;
-+ }
-+
-+ intel_crtc->pipe = pipe;
-+ for (i = 0; i < 256; i++) {
-+ intel_crtc->lut_r[i] = i;
-+ intel_crtc->lut_g[i] = i;
-+ intel_crtc->lut_b[i] = i;
-+ }
-+
-+ crtc->driver_private = intel_crtc;
-+}
-+
-+struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-+{
-+ struct drm_crtc *crtc = NULL;
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ if (intel_crtc->pipe == pipe)
-+ break;
-+ }
-+ return crtc;
-+}
-+
-+int intel_output_clones(struct drm_device *dev, int type_mask)
-+{
-+ int index_mask = 0;
-+ struct drm_output *output;
-+ int entry = 0;
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ struct intel_output *intel_output = output->driver_private;
-+ if (type_mask & (1 << intel_output->type))
-+ index_mask |= (1 << entry);
-+ entry++;
-+ }
-+ return index_mask;
-+}
-+
-+
-+static void intel_setup_outputs(struct drm_device *dev)
-+{
-+ struct drm_output *output;
-+
-+ if (!IS_POULSBO(dev))
-+ intel_crt_init(dev);
-+
-+ /* Set up integrated LVDS */
-+ if (IS_MOBILE(dev) && !IS_I830(dev))
-+ intel_lvds_init(dev);
-+
-+ if (IS_I9XX(dev)) {
-+ intel_sdvo_init(dev, SDVOB);
-+ intel_sdvo_init(dev, SDVOC);
-+ }
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ struct intel_output *intel_output = output->driver_private;
-+ int crtc_mask = 0, clone_mask = 0;
-+
-+ /* valid crtcs */
-+ switch(intel_output->type) {
-+ case INTEL_OUTPUT_DVO:
-+ case INTEL_OUTPUT_SDVO:
-+ crtc_mask = ((1 << 0)|
-+ (1 << 1));
-+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
-+ (1 << INTEL_OUTPUT_DVO) |
-+ (1 << INTEL_OUTPUT_SDVO));
-+ break;
-+ case INTEL_OUTPUT_ANALOG:
-+ crtc_mask = ((1 << 0)|
-+ (1 << 1));
-+ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
-+ (1 << INTEL_OUTPUT_DVO) |
-+ (1 << INTEL_OUTPUT_SDVO));
-+ break;
-+ case INTEL_OUTPUT_LVDS:
-+ crtc_mask = (1 << 1);
-+ clone_mask = (1 << INTEL_OUTPUT_LVDS);
-+ break;
-+ case INTEL_OUTPUT_TVOUT:
-+ crtc_mask = ((1 << 0) |
-+ (1 << 1));
-+ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
-+ break;
-+ }
-+ output->possible_crtcs = crtc_mask;
-+ output->possible_clones = intel_output_clones(dev, clone_mask);
-+ }
-+}
-+
-+void intel_modeset_init(struct drm_device *dev)
-+{
-+ int num_pipe;
-+ int i;
-+
-+ drm_mode_config_init(dev);
-+
-+ dev->mode_config.min_width = 0;
-+ dev->mode_config.min_height = 0;
-+
-+ dev->mode_config.max_width = 4096;
-+ dev->mode_config.max_height = 4096;
-+
-+ /* set memory base */
-+ if (IS_I9XX(dev))
-+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
-+ else
-+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
-+
-+ if (IS_MOBILE(dev) || IS_I9XX(dev))
-+ num_pipe = 2;
-+ else
-+ num_pipe = 1;
-+ DRM_DEBUG("%d display pipe%s available.\n",
-+ num_pipe, num_pipe > 1 ? "s" : "");
-+
-+ for (i = 0; i < num_pipe; i++) {
-+ intel_crtc_init(dev, i);
-+ }
-+
-+ intel_setup_outputs(dev);
-+
-+ //drm_initial_config(dev, false);
-+}
-+
-+void intel_modeset_cleanup(struct drm_device *dev)
-+{
-+ drm_mode_config_cleanup(dev);
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/intel_drv.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/intel_drv.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,91 @@
-+/*
-+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
-+ * Copyright (c) 2007 Intel Corporation
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+#ifndef __INTEL_DRV_H__
-+#define __INTEL_DRV_H__
-+
-+#include <linux/i2c.h>
-+#include <linux/i2c-id.h>
-+#include <linux/i2c-algo-bit.h>
-+#include "drm_crtc.h"
-+
-+/*
-+ * Display related stuff
-+ */
-+
-+/* store information about an Ixxx DVO */
-+/* The i830->i865 use multiple DVOs with multiple i2cs */
-+/* the i915, i945 have a single sDVO i2c bus - which is different */
-+#define MAX_OUTPUTS 6
-+
-+#define INTEL_I2C_BUS_DVO 1
-+#define INTEL_I2C_BUS_SDVO 2
-+
-+/* these are outputs from the chip - integrated only
-+ external chips are via DVO or SDVO output */
-+#define INTEL_OUTPUT_UNUSED 0
-+#define INTEL_OUTPUT_ANALOG 1
-+#define INTEL_OUTPUT_DVO 2
-+#define INTEL_OUTPUT_SDVO 3
-+#define INTEL_OUTPUT_LVDS 4
-+#define INTEL_OUTPUT_TVOUT 5
-+
-+#define INTEL_DVO_CHIP_NONE 0
-+#define INTEL_DVO_CHIP_LVDS 1
-+#define INTEL_DVO_CHIP_TMDS 2
-+#define INTEL_DVO_CHIP_TVOUT 4
-+
-+struct intel_i2c_chan {
-+ struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
-+ u32 reg; /* GPIO reg */
-+ struct i2c_adapter adapter;
-+ struct i2c_algo_bit_data algo;
-+ u8 slave_addr;
-+};
-+
-+struct intel_output {
-+ int type;
-+ struct intel_i2c_chan *i2c_bus; /* for control functions */
-+ struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
-+ bool load_detect_tmp;
-+ void *dev_priv;
-+};
-+
-+struct intel_crtc {
-+ int pipe;
-+ u8 lut_r[256], lut_g[256], lut_b[256];
-+};
-+
-+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
-+ const char *name);
-+void intel_i2c_destroy(struct intel_i2c_chan *chan);
-+int intel_ddc_get_modes(struct drm_output *output);
-+extern bool intel_ddc_probe(struct drm_output *output);
-+
-+extern void intel_crt_init(struct drm_device *dev);
-+extern void intel_sdvo_init(struct drm_device *dev, int output_device);
-+extern void intel_lvds_init(struct drm_device *dev);
-+
-+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-+extern void intel_output_prepare (struct drm_output *output);
-+extern void intel_output_commit (struct drm_output *output);
-+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-+ struct drm_crtc *crtc);
-+extern void intel_wait_for_vblank(struct drm_device *dev);
-+extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
-+
-+extern int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
-+extern int intelfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
-+
-+extern void intel_modeset_init(struct drm_device *dev);
-+extern void intel_modeset_cleanup(struct drm_device *dev);
-+
-+#define WA_NO_FB_GARBAGE_DISPLAY
-+#ifdef WA_NO_FB_GARBAGE_DISPLAY
-+extern void intel_crtc_mode_restore(struct drm_crtc *crtc);
-+extern void intel_crtc_mode_save(struct drm_crtc *crtc);
-+#endif
-+
-+#endif /* __INTEL_DRV_H__ */
-Index: linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,913 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ * Dave Airlie <airlied@linux.ie>
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+
-+#include <linux/i2c.h>
-+#include <linux/backlight.h>
-+#include "drm_crtc.h"
-+#include "drm_edid.h"
-+#include "intel_lvds.h"
-+
-+#include <acpi/acpi_drivers.h>
-+
-+int drm_intel_ignore_acpi = 0;
-+MODULE_PARM_DESC(ignore_acpi, "Ignore ACPI");
-+module_param_named(ignore_acpi, drm_intel_ignore_acpi, int, 0600);
-+
-+uint8_t blc_type;
-+uint8_t blc_pol;
-+uint8_t blc_freq;
-+uint8_t blc_minbrightness;
-+uint8_t blc_i2caddr;
-+uint8_t blc_brightnesscmd;
-+int lvds_backlight; /* restore backlight to this value */
-+
-+struct intel_i2c_chan *lvds_i2c_bus;
-+u32 CoreClock;
-+u32 PWMControlRegFreq;
-+
-+unsigned char * dev_OpRegion = NULL;
-+unsigned int dev_OpRegionSize;
-+
-+#define PCI_PORT5_REG80_FFUSE 0xD0058000
-+#define PCI_PORT5_REG80_MAXRES_INT_EN 0x0040
-+#define MAX_HDISPLAY 800
-+#define MAX_VDISPLAY 480
-+bool sku_bMaxResEnableInt = false;
-+
-+/** Set BLC through I2C*/
-+static int
-+LVDSI2CSetBacklight(struct drm_device *dev, unsigned char ch)
-+{
-+ u8 out_buf[2];
-+ struct i2c_msg msgs[] = {
-+ {
-+ .addr = lvds_i2c_bus->slave_addr,
-+ .flags = 0,
-+ .len = 2,
-+ .buf = out_buf,
-+ }
-+ };
-+
-+ DRM_INFO("LVDSI2CSetBacklight: the slave_addr is 0x%x, the backlight value is %d\n", lvds_i2c_bus->slave_addr, ch);
-+
-+ out_buf[0] = blc_brightnesscmd;
-+ out_buf[1] = ch;
-+
-+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
-+ {
-+ DRM_INFO("LVDSI2CSetBacklight: i2c_transfer done\n");
-+ return true;
-+ }
-+
-+ DRM_ERROR("msg: i2c_transfer error\n");
-+ return false;
-+}
-+
-+/**
-+ * Calculate PWM control register value.
-+ */
-+static int
-+LVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
-+{
-+ unsigned long value = 0;
-+
-+ DRM_INFO("Enter LVDSCalculatePWMCtrlRegFreq.\n");
-+ if (blc_freq == 0) {
-+ DRM_ERROR("LVDSCalculatePWMCtrlRegFreq: Frequency Requested is 0.\n");
-+ return FALSE;
-+ }
-+ value = (CoreClock * MHz);
-+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
-+ value = (value * BLC_PWM_PRECISION_FACTOR);
-+ value = (value / blc_freq);
-+ value = (value / BLC_PWM_PRECISION_FACTOR);
-+
-+ if (value > (unsigned long)BLC_MAX_PWM_REG_FREQ ||
-+ value < (unsigned long)BLC_MIN_PWM_REG_FREQ) {
-+ return FALSE;
-+ } else {
-+ PWMControlRegFreq = ((u32)value & ~BLC_PWM_LEGACY_MODE_ENABLE);
-+ return TRUE;
-+ }
-+}
-+
-+/**
-+ * Returns the maximum level of the backlight duty cycle field.
-+ */
-+static u32
-+LVDSGetPWMMaxBacklight(struct drm_device *dev)
-+{
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ u32 max_pwm_blc = 0;
-+
-+ max_pwm_blc = ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> \
-+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-+
-+ if (!(max_pwm_blc & BLC_MAX_PWM_REG_FREQ)) {
-+ if (LVDSCalculatePWMCtrlRegFreq(dev)) {
-+ max_pwm_blc = PWMControlRegFreq;
-+ }
-+ }
-+
-+ DRM_INFO("LVDSGetPWMMaxBacklight: the max_pwm_blc is %d.\n", max_pwm_blc);
-+ return max_pwm_blc;
-+}
-+
-+
-+/**
-+ * Sets the backlight level.
-+ *
-+ * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
-+ */
-+static void intel_lvds_set_backlight(struct drm_device *dev, int level)
-+{
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ //u32 blc_pwm_ctl;
-+
-+ /*
-+ blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-+ I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
-+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
-+ */
-+ u32 newbacklight = 0;
-+
-+ DRM_INFO("intel_lvds_set_backlight: the level is %d\n", level);
-+
-+ if(blc_type == BLC_I2C_TYPE){
-+ newbacklight = BRIGHTNESS_MASK & ((unsigned long)level * \
-+ BRIGHTNESS_MASK /BRIGHTNESS_MAX_LEVEL);
-+
-+ if (blc_pol == BLC_POLARITY_INVERSE) {
-+ newbacklight = BRIGHTNESS_MASK - newbacklight;
-+ }
-+
-+ LVDSI2CSetBacklight(dev, newbacklight);
-+
-+ } else if (blc_type == BLC_PWM_TYPE) {
-+ u32 max_pwm_blc = LVDSGetPWMMaxBacklight(dev);
-+
-+ u32 blc_pwm_duty_cycle;
-+
-+ /* Provent LVDS going to total black */
-+ if ( level < 20) {
-+ level = 20;
-+ }
-+ blc_pwm_duty_cycle = level * max_pwm_blc/BRIGHTNESS_MAX_LEVEL;
-+
-+ if (blc_pol == BLC_POLARITY_INVERSE) {
-+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
-+ }
-+
-+ blc_pwm_duty_cycle &= BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
-+
-+ I915_WRITE(BLC_PWM_CTL,
-+ (max_pwm_blc << BACKLIGHT_PWM_CTL_SHIFT)| (blc_pwm_duty_cycle));
-+ }
-+}
-+
-+/**
-+ * Returns the maximum level of the backlight duty cycle field.
-+ */
-+static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
-+{
-+ return BRIGHTNESS_MAX_LEVEL;
-+ /*
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+ return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
-+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-+ */
-+}
-+
-+/**
-+ * Sets the power state for the panel.
-+ */
-+static void intel_lvds_set_power(struct drm_device *dev, bool on)
-+{
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ u32 pp_status;
-+
-+ DRM_INFO("intel_lvds_set_power: %d\n", on);
-+ if (on) {
-+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
-+ POWER_TARGET_ON);
-+ do {
-+ pp_status = I915_READ(PP_STATUS);
-+ } while ((pp_status & PP_ON) == 0);
-+
-+ intel_lvds_set_backlight(dev, lvds_backlight);
-+ } else {
-+ intel_lvds_set_backlight(dev, 0);
-+
-+ I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) &
-+ ~POWER_TARGET_ON);
-+ do {
-+ pp_status = I915_READ(PP_STATUS);
-+ } while (pp_status & PP_ON);
-+ }
-+}
-+
-+static void intel_lvds_dpms(struct drm_output *output, int mode)
-+{
-+ struct drm_device *dev = output->dev;
-+
-+ DRM_INFO("intel_lvds_dpms: the mode is %d\n", mode);
-+ if (mode == DPMSModeOn)
-+ intel_lvds_set_power(dev, true);
-+ else
-+ intel_lvds_set_power(dev, false);
-+
-+ /* XXX: We never power down the LVDS pairs. */
-+}
-+
-+static void intel_lvds_save(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+ dev_priv->savePP_ON = I915_READ(LVDSPP_ON);
-+ dev_priv->savePP_OFF = I915_READ(LVDSPP_OFF);
-+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-+ dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
-+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-+ BACKLIGHT_DUTY_CYCLE_MASK);
-+
-+ /*
-+ * If the light is off at server startup, just make it full brightness
-+ */
-+ if (dev_priv->backlight_duty_cycle == 0)
-+ lvds_backlight=
-+ intel_lvds_get_max_backlight(dev);
-+}
-+
-+static void intel_lvds_restore(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-+ I915_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
-+ I915_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
-+ I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
-+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
-+ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
-+ intel_lvds_set_power(dev, true);
-+ else
-+ intel_lvds_set_power(dev, false);
-+}
-+
-+static int intel_lvds_mode_valid(struct drm_output *output,
-+ struct drm_display_mode *mode)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
-+
-+ if (fixed_mode) {
-+ if (mode->hdisplay > fixed_mode->hdisplay)
-+ return MODE_PANEL;
-+ if (mode->vdisplay > fixed_mode->vdisplay)
-+ return MODE_PANEL;
-+ }
-+
-+ if (IS_POULSBO(dev) && sku_bMaxResEnableInt) {
-+ if (mode->hdisplay > MAX_HDISPLAY)
-+ return MODE_PANEL;
-+ if (mode->vdisplay > MAX_VDISPLAY)
-+ return MODE_PANEL;
-+ }
-+
-+ return MODE_OK;
-+}
-+
-+static bool intel_lvds_mode_fixup(struct drm_output *output,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = output->crtc->driver_private;
-+ struct drm_output *tmp_output;
-+
-+ /* Should never happen!! */
-+ if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
-+ DRM_ERROR(KERN_ERR "Can't support LVDS on pipe A\n");
-+ return false;
-+ }
-+
-+ /* Should never happen!! */
-+ list_for_each_entry(tmp_output, &dev->mode_config.output_list, head) {
-+ if (tmp_output != output && tmp_output->crtc == output->crtc) {
-+ DRM_ERROR("Can't enable LVDS and another "
-+ "output on the same pipe\n");
-+ return false;
-+ }
-+ }
-+
-+ /*
-+ * If we have timings from the BIOS for the panel, put them in
-+ * to the adjusted mode. The CRTC will be set up for this mode,
-+ * with the panel scaling set up to source from the H/VDisplay
-+ * of the original mode.
-+ */
-+ if (dev_priv->panel_fixed_mode != NULL) {
-+ adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
-+ adjusted_mode->hsync_start =
-+ dev_priv->panel_fixed_mode->hsync_start;
-+ adjusted_mode->hsync_end =
-+ dev_priv->panel_fixed_mode->hsync_end;
-+ adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
-+ adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
-+ adjusted_mode->vsync_start =
-+ dev_priv->panel_fixed_mode->vsync_start;
-+ adjusted_mode->vsync_end =
-+ dev_priv->panel_fixed_mode->vsync_end;
-+ adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
-+ adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
-+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-+ }
-+
-+ /*
-+ * XXX: It would be nice to support lower refresh rates on the
-+ * panels to reduce power consumption, and perhaps match the
-+ * user's requested refresh rate.
-+ */
-+
-+ return true;
-+}
-+
-+static void intel_lvds_prepare(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+ DRM_INFO("intel_lvds_prepare\n");
-+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-+ BACKLIGHT_DUTY_CYCLE_MASK);
-+
-+ intel_lvds_set_power(dev, false);
-+}
-+
-+static void intel_lvds_commit( struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+ DRM_INFO("intel_lvds_commit\n");
-+ if (dev_priv->backlight_duty_cycle == 0)
-+ //dev_priv->backlight_duty_cycle =
-+ lvds_backlight =
-+ intel_lvds_get_max_backlight(dev);
-+
-+ intel_lvds_set_power(dev, true);
-+}
-+
-+static void intel_lvds_mode_set(struct drm_output *output,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_crtc *intel_crtc = output->crtc->driver_private;
-+ u32 pfit_control;
-+
-+ /*
-+ * The LVDS pin pair will already have been turned on in the
-+ * intel_crtc_mode_set since it has a large impact on the DPLL
-+ * settings.
-+ */
-+
-+ /*
-+ * Enable automatic panel scaling so that non-native modes fill the
-+ * screen. Should be enabled before the pipe is enabled, according to
-+ * register description and PRM.
-+ */
-+ if (mode->hdisplay != adjusted_mode->hdisplay ||
-+ mode->vdisplay != adjusted_mode->vdisplay)
-+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
-+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
-+ HORIZ_INTERP_BILINEAR);
-+ else
-+ pfit_control = 0;
-+
-+ if (!IS_I965G(dev)) {
-+ if (dev_priv->panel_wants_dither)
-+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-+ }
-+ else
-+ pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
-+
-+ I915_WRITE(PFIT_CONTROL, pfit_control);
-+}
-+
-+/**
-+ * Detect the LVDS connection.
-+ *
-+ * This always returns OUTPUT_STATUS_CONNECTED. This output should only have
-+ * been set up if the LVDS was actually connected anyway.
-+ */
-+static enum drm_output_status intel_lvds_detect(struct drm_output *output)
-+{
-+ return output_status_connected;
-+}
-+
-+/**
-+ * Return the list of DDC modes if available.
-+ */
-+static int intel_lvds_get_modes(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ struct intel_output *intel_output = output->driver_private;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct edid *edid;
-+
-+ /* Try reading DDC from the adapter */
-+ edid = (struct edid *)drm_ddc_read(&intel_output->ddc_bus->adapter);
-+
-+ if (!edid) {
-+ DRM_INFO("%s: no EDID data from device, reading ACPI _DDC data.\n",
-+ output->name);
-+ edid = kzalloc(sizeof(struct edid), GFP_KERNEL);
-+ drm_get_acpi_edid(ACPI_EDID_LCD, (char*)edid, 128);
-+ }
-+
-+ if (edid)
-+ drm_add_edid_modes(output, edid);
-+
-+ /* Didn't get an EDID */
-+ if (!output->monitor_info) {
-+ struct drm_display_info *dspinfo;
-+ dspinfo = kzalloc(sizeof(*output->monitor_info), GFP_KERNEL);
-+ if (!dspinfo)
-+ goto out;
-+
-+ /* Set wide sync ranges so we get all modes
-+ * handed to valid_mode for checking
-+ */
-+ dspinfo->min_vfreq = 0;
-+ dspinfo->max_vfreq = 200;
-+ dspinfo->min_hfreq = 0;
-+ dspinfo->max_hfreq = 200;
-+ output->monitor_info = dspinfo;
-+ }
-+
-+out:
-+ if (dev_priv->panel_fixed_mode != NULL) {
-+ struct drm_display_mode *mode =
-+ drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
-+ drm_mode_probed_add(output, mode);
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+/* added by alek du to add /sys/class/backlight interface */
-+static int update_bl_status(struct backlight_device *bd)
-+{
-+ int value = bd->props.brightness;
-+
-+ struct drm_device *dev = bl_get_data(bd);
-+
-+ lvds_backlight = value;
-+ intel_lvds_set_backlight(dev, value);
-+ /*value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
-+ intel_lvds_set_power(dev,value);*/
-+ return 0;
-+}
-+
-+static int read_brightness(struct backlight_device *bd)
-+{
-+ return bd->props.brightness;
-+}
-+
-+static struct backlight_device *psbbl_device = NULL;
-+static struct backlight_ops psbbl_ops = {
-+ .get_brightness = read_brightness,
-+ .update_status = update_bl_status,
-+};
-+
-+/**
-+ * intel_lvds_destroy - unregister and free LVDS structures
-+ * @output: output to free
-+ *
-+ * Unregister the DDC bus for this output then free the driver private
-+ * structure.
-+ */
-+static void intel_lvds_destroy(struct drm_output *output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+
-+ if (psbbl_device){
-+ backlight_device_unregister(psbbl_device);
-+ }
-+ if(dev_OpRegion != NULL)
-+ iounmap(dev_OpRegion);
-+ intel_i2c_destroy(intel_output->ddc_bus);
-+ intel_i2c_destroy(lvds_i2c_bus);
-+ kfree(output->driver_private);
-+}
-+
-+static const struct drm_output_funcs intel_lvds_output_funcs = {
-+ .dpms = intel_lvds_dpms,
-+ .save = intel_lvds_save,
-+ .restore = intel_lvds_restore,
-+ .mode_valid = intel_lvds_mode_valid,
-+ .mode_fixup = intel_lvds_mode_fixup,
-+ .prepare = intel_lvds_prepare,
-+ .mode_set = intel_lvds_mode_set,
-+ .commit = intel_lvds_commit,
-+ .detect = intel_lvds_detect,
-+ .get_modes = intel_lvds_get_modes,
-+ .cleanup = intel_lvds_destroy
-+};
-+
-+int intel_get_acpi_dod(char *method)
-+{
-+ int status;
-+ int found = 0;
-+ int i;
-+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-+ union acpi_object *dod = NULL;
-+ union acpi_object *obj;
-+
-+ status = acpi_evaluate_object(NULL, method, NULL, &buffer);
-+ if (ACPI_FAILURE(status))
-+ return -ENODEV;
-+
-+ dod = buffer.pointer;
-+ if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
-+ status = -EFAULT;
-+ goto out;
-+ }
-+
-+ DRM_DEBUG("Found %d video heads in _DOD\n", dod->package.count);
-+
-+ for (i = 0; i < dod->package.count; i++) {
-+ obj = &dod->package.elements[i];
-+
-+ if (obj->type != ACPI_TYPE_INTEGER) {
-+ DRM_DEBUG("Invalid _DOD data\n");
-+ } else {
-+ DRM_DEBUG("dod element[%d] = 0x%x\n", i,
-+ (int)obj->integer.value);
-+
-+ /* look for an LVDS type */
-+ if (obj->integer.value & 0x00000400)
-+ found = 1;
-+ }
-+ }
-+ out:
-+ kfree(buffer.pointer);
-+ return found;
-+}
-+/**
-+ * intel_lvds_init - setup LVDS outputs on this device
-+ * @dev: drm device
-+ *
-+ * Create the output, register the LVDS DDC bus, and try to figure out what
-+ * modes we can display on the LVDS panel (if present).
-+ */
-+void intel_lvds_init(struct drm_device *dev)
-+{
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct drm_output *output;
-+ struct intel_output *intel_output;
-+ struct drm_display_mode *scan; /* *modes, *bios_mode; */
-+ struct drm_crtc *crtc;
-+ u32 lvds;
-+ int pipe;
-+
-+ if (!drm_intel_ignore_acpi && !intel_get_acpi_dod(ACPI_DOD))
-+ return;
-+
-+ output = drm_output_create(dev, &intel_lvds_output_funcs, "LVDS");
-+ if (!output)
-+ return;
-+
-+ intel_output = kmalloc(sizeof(struct intel_output), GFP_KERNEL);
-+ if (!intel_output) {
-+ drm_output_destroy(output);
-+ return;
-+ }
-+
-+ intel_output->type = INTEL_OUTPUT_LVDS;
-+ output->driver_private = intel_output;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ output->interlace_allowed = FALSE;
-+ output->doublescan_allowed = FALSE;
-+
-+ //initialize the I2C bus and BLC data
-+ lvds_i2c_bus = intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
-+ if (!lvds_i2c_bus) {
-+ dev_printk(KERN_ERR, &dev->pdev->dev, "i2c bus registration "
-+ "failed.\n");
-+ return;
-+ }
-+ lvds_i2c_bus->slave_addr = 0x2c;//0x58;
-+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
-+ blc_type = 0;
-+ blc_pol = 0;
-+
-+ if (1) { //get the BLC init data from VBT
-+ u32 OpRegion_Phys;
-+ unsigned int OpRegion_Size = 0x100;
-+ OpRegionPtr OpRegion;
-+ char *OpRegion_String = "IntelGraphicsMem";
-+
-+ struct vbt_header *vbt;
-+ struct bdb_header *bdb;
-+ int vbt_off, bdb_off, bdb_block_off, block_size;
-+ int panel_type = -1;
-+ unsigned char *bios;
-+ unsigned char *vbt_buf;
-+
-+ pci_read_config_dword(dev->pdev, 0xFC, &OpRegion_Phys);
-+
-+ //dev_OpRegion = phys_to_virt(OpRegion_Phys);
-+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_Size);
-+ dev_OpRegionSize = OpRegion_Size;
-+
-+ OpRegion = (OpRegionPtr) dev_OpRegion;
-+
-+ if (!memcmp(OpRegion->sign, OpRegion_String, 16)) {
-+ unsigned int OpRegion_NewSize;
-+
-+ OpRegion_NewSize = OpRegion->size * 1024;
-+
-+ dev_OpRegionSize = OpRegion_NewSize;
-+
-+ iounmap(dev_OpRegion);
-+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_NewSize);
-+ } else {
-+ iounmap(dev_OpRegion);
-+ dev_OpRegion = NULL;
-+ }
-+
-+ if((dev_OpRegion != NULL)&&(dev_OpRegionSize >= OFFSET_OPREGION_VBT)) {
-+ DRM_INFO("intel_lvds_init: OpRegion has the VBT address\n");
-+ vbt_buf = dev_OpRegion + OFFSET_OPREGION_VBT;
-+ vbt = (struct vbt_header *)(dev_OpRegion + OFFSET_OPREGION_VBT);
-+ } else {
-+ DRM_INFO("intel_lvds_init: No OpRegion, use the bios at fixed address 0xc0000\n");
-+ bios = phys_to_virt(0xC0000);
-+ if(*((u16 *)bios) != 0xAA55){
-+ bios = NULL;
-+ DRM_ERROR("the bios is incorrect\n");
-+ goto blc_out;
-+ }
-+ vbt_off = bios[0x1a] | (bios[0x1a + 1] << 8);
-+ DRM_INFO("intel_lvds_init: the vbt off is %x\n", vbt_off);
-+ vbt_buf = bios + vbt_off;
-+ vbt = (struct vbt_header *)(bios + vbt_off);
-+ }
-+
-+ bdb_off = vbt->bdb_offset;
-+ bdb = (struct bdb_header *)(vbt_buf + bdb_off);
-+
-+ DRM_INFO("intel_lvds_init: The bdb->signature is %s, the bdb_off is %d\n",bdb->signature, bdb_off);
-+
-+ if (memcmp(bdb->signature, "BIOS_DATA_BLOCK ", 16) != 0) {
-+ DRM_ERROR("the vbt is error\n");
-+ goto blc_out;
-+ }
-+
-+ for (bdb_block_off = bdb->header_size; bdb_block_off < bdb->bdb_size;
-+ bdb_block_off += block_size) {
-+ int start = bdb_off + bdb_block_off;
-+ int id, num_entries;
-+ struct lvds_bdb_1 *lvds1;
-+ struct lvds_blc *lvdsblc;
-+ struct lvds_bdb_blc *bdbblc;
-+
-+ id = vbt_buf[start];
-+ block_size = (vbt_buf[start + 1] | (vbt_buf[start + 2] << 8)) + 3;
-+ switch (id) {
-+ case 40:
-+ lvds1 = (struct lvds_bdb_1 *)(vbt_buf+ start);
-+ panel_type = lvds1->panel_type;
-+ //if (lvds1->caps & LVDS_CAP_DITHER)
-+ // *panelWantsDither = TRUE;
-+ break;
-+
-+ case 43:
-+ bdbblc = (struct lvds_bdb_blc *)(vbt_buf + start);
-+ num_entries = bdbblc->table_size? (bdbblc->size - \
-+ sizeof(bdbblc->table_size))/bdbblc->table_size : 0;
-+ if (num_entries << 16 && bdbblc->table_size == sizeof(struct lvds_blc)) {
-+ lvdsblc = (struct lvds_blc *)(vbt_buf + start + sizeof(struct lvds_bdb_blc));
-+ lvdsblc += panel_type;
-+ blc_type = lvdsblc->type;
-+ blc_pol = lvdsblc->pol;
-+ blc_freq = lvdsblc->freq;
-+ blc_minbrightness = lvdsblc->minbrightness;
-+ blc_i2caddr = lvdsblc->i2caddr;
-+ blc_brightnesscmd = lvdsblc->brightnesscmd;
-+ DRM_INFO("intel_lvds_init: BLC Data in BIOS VBT tables: datasize=%d paneltype=%d \
-+ type=0x%02x pol=0x%02x freq=0x%04x minlevel=0x%02x \
-+ i2caddr=0x%02x cmd=0x%02x \n",
-+ 0,
-+ panel_type,
-+ lvdsblc->type,
-+ lvdsblc->pol,
-+ lvdsblc->freq,
-+ lvdsblc->minbrightness,
-+ lvdsblc->i2caddr,
-+ lvdsblc->brightnesscmd);
-+ }
-+ break;
-+ }
-+ }
-+
-+ }
-+
-+ if(1){
-+ //get the Core Clock for calculating MAX PWM value
-+ //check whether the MaxResEnableInt is
-+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
-+ u32 clock;
-+ u32 sku_value = 0;
-+ unsigned int CoreClocks[] = {
-+ 100,
-+ 133,
-+ 150,
-+ 178,
-+ 200,
-+ 266,
-+ 266,
-+ 266
-+ };
-+ if(pci_root)
-+ {
-+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
-+ pci_read_config_dword(pci_root, 0xD4, &clock);
-+ CoreClock = CoreClocks[clock & 0x07];
-+ DRM_INFO("intel_lvds_init: the CoreClock is %d\n", CoreClock);
-+
-+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
-+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
-+ sku_bMaxResEnableInt = (sku_value & PCI_PORT5_REG80_MAXRES_INT_EN)? true : false;
-+ DRM_INFO("intel_lvds_init: sku_value is 0x%08x\n", sku_value);
-+ DRM_INFO("intel_lvds_init: sku_bMaxResEnableInt is %d\n", sku_bMaxResEnableInt);
-+ }
-+ }
-+
-+ if ((blc_type == BLC_I2C_TYPE) || (blc_type == BLC_PWM_TYPE)){
-+ /* add /sys/class/backlight interface as standard */
-+ psbbl_device = backlight_device_register("psblvds", &dev->pdev->dev, dev, &psbbl_ops);
-+ if (psbbl_device){
-+ psbbl_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
-+ psbbl_device->props.brightness = lvds_backlight;
-+ psbbl_device->props.power = FB_BLANK_UNBLANK;
-+ backlight_update_status(psbbl_device);
-+ }
-+ }
-+
-+blc_out:
-+
-+ /* Set up the DDC bus. */
-+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
-+ if (!intel_output->ddc_bus) {
-+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-+ "failed.\n");
-+ intel_i2c_destroy(lvds_i2c_bus);
-+ return;
-+ }
-+
-+ /*
-+ * Attempt to get the fixed panel mode from DDC. Assume that the
-+ * preferred mode is the right one.
-+ */
-+ intel_lvds_get_modes(output);
-+
-+ list_for_each_entry(scan, &output->probed_modes, head) {
-+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-+ dev_priv->panel_fixed_mode =
-+ drm_mode_duplicate(dev, scan);
-+ goto out; /* FIXME: check for quirks */
-+ }
-+ }
-+
-+ /*
-+ * If we didn't get EDID, try checking if the panel is already turned
-+ * on. If so, assume that whatever is currently programmed is the
-+ * correct mode.
-+ */
-+ lvds = I915_READ(LVDS);
-+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
-+ crtc = intel_get_crtc_from_pipe(dev, pipe);
-+
-+ if (crtc && (lvds & LVDS_PORT_EN)) {
-+ dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
-+ if (dev_priv->panel_fixed_mode) {
-+ dev_priv->panel_fixed_mode->type |=
-+ DRM_MODE_TYPE_PREFERRED;
-+ goto out; /* FIXME: check for quirks */
-+ }
-+ }
-+
-+ /* If we still don't have a mode after all that, give up. */
-+ if (!dev_priv->panel_fixed_mode)
-+ goto failed;
-+
-+ /* FIXME: probe the BIOS for modes and check for LVDS quirks */
-+#if 0
-+ /* Get the LVDS fixed mode out of the BIOS. We should support LVDS
-+ * with the BIOS being unavailable or broken, but lack the
-+ * configuration options for now.
-+ */
-+ bios_mode = intel_bios_get_panel_mode(pScrn);
-+ if (bios_mode != NULL) {
-+ if (dev_priv->panel_fixed_mode != NULL) {
-+ if (dev_priv->debug_modes &&
-+ !xf86ModesEqual(dev_priv->panel_fixed_mode,
-+ bios_mode))
-+ {
-+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
-+ "BIOS panel mode data doesn't match probed data, "
-+ "continuing with probed.\n");
-+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, "BIOS mode:\n");
-+ xf86PrintModeline(pScrn->scrnIndex, bios_mode);
-+ xf86DrvMsg(pScrn->scrnIndex, X_INFO, "probed mode:\n");
-+ xf86PrintModeline(pScrn->scrnIndex, dev_priv->panel_fixed_mode);
-+ xfree(bios_mode->name);
-+ xfree(bios_mode);
-+ }
-+ } else {
-+ dev_priv->panel_fixed_mode = bios_mode;
-+ }
-+ } else {
-+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
-+ "Couldn't detect panel mode. Disabling panel\n");
-+ goto disable_exit;
-+ }
-+
-+ /*
-+ * Blacklist machines with BIOSes that list an LVDS panel without
-+ * actually having one.
-+ */
-+ if (dev_priv->PciInfo->chipType == PCI_CHIP_I945_GM) {
-+ /* aopen mini pc */
-+ if (dev_priv->PciInfo->subsysVendor == 0xa0a0)
-+ goto disable_exit;
-+
-+ if ((dev_priv->PciInfo->subsysVendor == 0x8086) &&
-+ (dev_priv->PciInfo->subsysCard == 0x7270)) {
-+ /* It's a Mac Mini or Macbook Pro.
-+ *
-+ * Apple hardware is out to get us. The macbook pro
-+ * has a real LVDS panel, but the mac mini does not,
-+ * and they have the same device IDs. We'll
-+ * distinguish by panel size, on the assumption
-+ * that Apple isn't about to make any machines with an
-+ * 800x600 display.
-+ */
-+
-+ if (dev_priv->panel_fixed_mode != NULL &&
-+ dev_priv->panel_fixed_mode->HDisplay == 800 &&
-+ dev_priv->panel_fixed_mode->VDisplay == 600)
-+ {
-+ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
-+ "Suspected Mac Mini, ignoring the LVDS\n");
-+ goto disable_exit;
-+ }
-+ }
-+ }
-+
-+#endif
-+
-+out:
-+ return;
-+
-+failed:
-+ DRM_DEBUG("No LVDS modes found, disabling.\n");
-+ drm_output_destroy(output); /* calls intel_lvds_destroy above */
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/intel_lvds.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,174 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ */
-+
-+/**
-+ * @file lvds definitions and structures.
-+ */
-+
-+#define BLC_I2C_TYPE 0x01
-+#define BLC_PWM_TYPE 0x02
-+#define BRIGHTNESS_MASK 0xff
-+#define BRIGHTNESS_MAX_LEVEL 100
-+#define BLC_POLARITY_NORMAL 0
-+#define BLC_POLARITY_INVERSE 1
-+#define BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xfffe)
-+#define BACKLIGHT_PWM_CTL_SHIFT (16)
-+#define BLC_MAX_PWM_REG_FREQ 0xfffe
-+#define BLC_MIN_PWM_REG_FREQ 0x2
-+#define BLC_PWM_LEGACY_MODE_ENABLE 0x0001
-+#define BLC_PWM_PRECISION_FACTOR 10//10000000
-+#define BLC_PWM_FREQ_CALC_CONSTANT 32
-+#define MHz 1000000
-+#define OFFSET_OPREGION_VBT 0x400
-+
-+typedef struct OpRegion_Header
-+{
-+ char sign[16];
-+ u32 size;
-+ u32 over;
-+ char sver[32];
-+ char vver[16];
-+ char gver[16];
-+ u32 mbox;
-+ char rhd1[164];
-+} OpRegionRec, *OpRegionPtr;
-+
-+struct vbt_header
-+{
-+ char signature[20]; /**< Always starts with 'VBT$' */
-+ u16 version; /**< decimal */
-+ u16 header_size; /**< in bytes */
-+ u16 vbt_size; /**< in bytes */
-+ u8 vbt_checksum;
-+ u8 reserved0;
-+ u32 bdb_offset; /**< from beginning of VBT */
-+ u32 aim1_offset; /**< from beginning of VBT */
-+ u32 aim2_offset; /**< from beginning of VBT */
-+ u32 aim3_offset; /**< from beginning of VBT */
-+ u32 aim4_offset; /**< from beginning of VBT */
-+} __attribute__ ((packed));
-+
-+struct bdb_header
-+{
-+ char signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
-+ u16 version; /**< decimal */
-+ u16 header_size; /**< in bytes */
-+ u16 bdb_size; /**< in bytes */
-+} __attribute__ ((packed));
-+
-+#define LVDS_CAP_EDID (1 << 6)
-+#define LVDS_CAP_DITHER (1 << 5)
-+#define LVDS_CAP_PFIT_AUTO_RATIO (1 << 4)
-+#define LVDS_CAP_PFIT_GRAPHICS_MODE (1 << 3)
-+#define LVDS_CAP_PFIT_TEXT_MODE (1 << 2)
-+#define LVDS_CAP_PFIT_GRAPHICS (1 << 1)
-+#define LVDS_CAP_PFIT_TEXT (1 << 0)
-+struct lvds_bdb_1
-+{
-+ u8 id; /**< 40 */
-+ u16 size;
-+ u8 panel_type;
-+ u8 reserved0;
-+ u16 caps;
-+} __attribute__ ((packed));
-+
-+struct lvds_bdb_2_fp_params
-+{
-+ u16 x_res;
-+ u16 y_res;
-+ u32 lvds_reg;
-+ u32 lvds_reg_val;
-+ u32 pp_on_reg;
-+ u32 pp_on_reg_val;
-+ u32 pp_off_reg;
-+ u32 pp_off_reg_val;
-+ u32 pp_cycle_reg;
-+ u32 pp_cycle_reg_val;
-+ u32 pfit_reg;
-+ u32 pfit_reg_val;
-+ u16 terminator;
-+} __attribute__ ((packed));
-+
-+struct lvds_bdb_2_fp_edid_dtd
-+{
-+ u16 dclk; /**< In 10khz */
-+ u8 hactive;
-+ u8 hblank;
-+ u8 high_h; /**< 7:4 = hactive 11:8, 3:0 = hblank 11:8 */
-+ u8 vactive;
-+ u8 vblank;
-+ u8 high_v; /**< 7:4 = vactive 11:8, 3:0 = vblank 11:8 */
-+ u8 hsync_off;
-+ u8 hsync_pulse_width;
-+ u8 vsync_off;
-+ u8 high_hsync_off; /**< 7:6 = hsync off 9:8 */
-+ u8 h_image;
-+ u8 v_image;
-+ u8 max_hv;
-+ u8 h_border;
-+ u8 v_border;
-+ u8 flags;
-+#define FP_EDID_FLAG_VSYNC_POSITIVE (1 << 2)
-+#define FP_EDID_FLAG_HSYNC_POSITIVE (1 << 1)
-+} __attribute__ ((packed));
-+
-+struct lvds_bdb_2_entry
-+{
-+ u16 fp_params_offset; /**< From beginning of BDB */
-+ u8 fp_params_size;
-+ u16 fp_edid_dtd_offset;
-+ u8 fp_edid_dtd_size;
-+ u16 fp_edid_pid_offset;
-+ u8 fp_edid_pid_size;
-+} __attribute__ ((packed));
-+
-+struct lvds_bdb_2
-+{
-+ u8 id; /**< 41 */
-+ u16 size;
-+ u8 table_size; /* not sure on this one */
-+ struct lvds_bdb_2_entry panels[16];
-+} __attribute__ ((packed));
-+
-+
-+struct lvds_bdb_blc
-+{
-+ u8 id; /**< 43 */
-+ u16 size;
-+ u8 table_size;
-+} __attribute__ ((packed));
-+
-+struct lvds_blc
-+{
-+ u8 type:2;
-+ u8 pol:1;
-+ u8 gpio:3;
-+ u8 gmbus:2;
-+ u16 freq;
-+ u8 minbrightness;
-+ u8 i2caddr;
-+ u8 brightnesscmd;
-+ /* more... */
-+} __attribute__ ((packed));
-+
-Index: linux-2.6.27/drivers/gpu/drm/psb/intel_modes.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/intel_modes.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,60 @@
-+/*
-+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
-+ * Copyright (c) 2007 Intel Corporation
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+
-+#include <linux/i2c.h>
-+#include <linux/fb.h>
-+
-+/**
-+ * intel_ddc_probe
-+ *
-+ */
-+bool intel_ddc_probe(struct drm_output *output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ u8 out_buf[] = { 0x0, 0x0};
-+ u8 buf[2];
-+ int ret;
-+ struct i2c_msg msgs[] = {
-+ {
-+ .addr = 0x50,
-+ .flags = 0,
-+ .len = 1,
-+ .buf = out_buf,
-+ },
-+ {
-+ .addr = 0x50,
-+ .flags = I2C_M_RD,
-+ .len = 1,
-+ .buf = buf,
-+ }
-+ };
-+
-+ ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
-+ if (ret == 2)
-+ return true;
-+
-+ return false;
-+}
-+
-+/**
-+ * intel_ddc_get_modes - get modelist from monitor
-+ * @output: DRM output device to use
-+ *
-+ * Fetch the EDID information from @output using the DDC bus.
-+ */
-+int intel_ddc_get_modes(struct drm_output *output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct edid *edid;
-+ int ret = 0;
-+
-+ edid = drm_get_edid(output, &intel_output->ddc_bus->adapter);
-+ if (edid) {
-+ ret = drm_add_edid_modes(output, edid);
-+ kfree(edid);
-+ }
-+ return ret;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,3973 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ */
-+/*
-+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+
-+#include <linux/i2c.h>
-+#include <linux/delay.h>
-+#include "drm_crtc.h"
-+#include "intel_sdvo_regs.h"
-+
-+#define MAX_VAL 1000
-+#define DPLL_CLOCK_PHASE_9 (1<<9 | 1<<12)
-+
-+#define PCI_PORT5_REG80_FFUSE 0xD0058000
-+#define PCI_PORT5_REG80_SDVO_DISABLE 0x0020
-+
-+#define SII_1392_WA
-+#ifdef SII_1392_WA
-+int SII_1392=0;
-+extern int drm_psb_no_fb;
-+#endif
-+
-+typedef struct _EXTVDATA
-+{
-+ u32 Value;
-+ u32 Default;
-+ u32 Min;
-+ u32 Max;
-+ u32 Step; // arbitrary unit (e.g. pixel, percent) returned during VP_COMMAND_GET
-+} EXTVDATA, *PEXTVDATA;
-+
-+typedef struct _sdvo_display_params
-+{
-+ EXTVDATA FlickerFilter; /* Flicker Filter : for TV onl */
-+ EXTVDATA AdaptiveFF; /* Adaptive Flicker Filter : for TV onl */
-+ EXTVDATA TwoD_FlickerFilter; /* 2D Flicker Filter : for TV onl */
-+ EXTVDATA Brightness; /* Brightness : for TV & CRT onl */
-+ EXTVDATA Contrast; /* Contrast : for TV & CRT onl */
-+ EXTVDATA PositionX; /* Horizontal Position : for all device */
-+ EXTVDATA PositionY; /* Vertical Position : for all device */
-+ /*EXTVDATA OverScanX; Horizontal Overscan : for TV onl */
-+ EXTVDATA DotCrawl; /* Dot crawl value : for TV onl */
-+ EXTVDATA ChromaFilter; /* Chroma Filter : for TV onl */
-+ /* EXTVDATA OverScanY; Vertical Overscan : for TV onl */
-+ EXTVDATA LumaFilter; /* Luma Filter : for TV only */
-+ EXTVDATA Sharpness; /* Sharpness : for TV & CRT onl */
-+ EXTVDATA Saturation; /* Saturation : for TV & CRT onl */
-+ EXTVDATA Hue; /* Hue : for TV & CRT onl */
-+ EXTVDATA Dither; /* Dither : For LVDS onl */
-+} sdvo_display_params;
-+
-+typedef enum _SDVO_PICTURE_ASPECT_RATIO_T
-+{
-+ UAIM_PAR_NO_DATA = 0x00000000,
-+ UAIM_PAR_4_3 = 0x00000100,
-+ UAIM_PAR_16_9 = 0x00000200,
-+ UAIM_PAR_FUTURE = 0x00000300,
-+ UAIM_PAR_MASK = 0x00000300,
-+} SDVO_PICTURE_ASPECT_RATIO_T;
-+
-+typedef enum _SDVO_FORMAT_ASPECT_RATIO_T
-+{
-+ UAIM_FAR_NO_DATA = 0x00000000,
-+ UAIM_FAR_SAME_AS_PAR = 0x00002000,
-+ UAIM_FAR_4_BY_3_CENTER = 0x00002400,
-+ UAIM_FAR_16_BY_9_CENTER = 0x00002800,
-+ UAIM_FAR_14_BY_9_CENTER = 0x00002C00,
-+ UAIM_FAR_16_BY_9_LETTERBOX_TOP = 0x00000800,
-+ UAIM_FAR_14_BY_9_LETTERBOX_TOP = 0x00000C00,
-+ UAIM_FAR_GT_16_BY_9_LETTERBOX_CENTER = 0x00002000,
-+ UAIM_FAR_4_BY_3_SNP_14_BY_9_CENTER = 0x00003400, /* With shoot and protect 14:9 cente */
-+ UAIM_FAR_16_BY_9_SNP_14_BY_9_CENTER = 0x00003800, /* With shoot and protect 14:9 cente */
-+ UAIM_FAR_16_BY_9_SNP_4_BY_3_CENTER = 0x00003C00, /* With shoot and protect 4:3 cente */
-+ UAIM_FAR_MASK = 0x00003C00,
-+} SDVO_FORMAT_ASPECT_RATIO_T;
-+
-+// TV image aspect ratio
-+typedef enum _CP_IMAGE_ASPECT_RATIO
-+{
-+ CP_ASPECT_RATIO_FF_4_BY_3 = 0,
-+ CP_ASPECT_RATIO_14_BY_9_CENTER = 1,
-+ CP_ASPECT_RATIO_14_BY_9_TOP = 2,
-+ CP_ASPECT_RATIO_16_BY_9_CENTER = 3,
-+ CP_ASPECT_RATIO_16_BY_9_TOP = 4,
-+ CP_ASPECT_RATIO_GT_16_BY_9_CENTER = 5,
-+ CP_ASPECT_RATIO_FF_4_BY_3_PROT_CENTER = 6,
-+ CP_ASPECT_RATIO_FF_16_BY_9_ANAMORPHIC = 7,
-+} CP_IMAGE_ASPECT_RATIO;
-+
-+typedef struct _SDVO_ANCILLARY_INFO_T
-+{
-+ CP_IMAGE_ASPECT_RATIO AspectRatio;
-+ u32 RedistCtrlFlag; /* Redistribution control flag (get and set */
-+} SDVO_ANCILLARY_INFO_T, *PSDVO_ANCILLARY_INFO_T;
-+
-+struct intel_sdvo_priv {
-+ struct intel_i2c_chan *i2c_bus;
-+ int slaveaddr;
-+ int output_device;
-+
-+ u16 active_outputs;
-+
-+ struct intel_sdvo_caps caps;
-+ int pixel_clock_min, pixel_clock_max;
-+
-+ int save_sdvo_mult;
-+ u16 save_active_outputs;
-+ struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
-+ struct intel_sdvo_dtd save_output_dtd[16];
-+ u32 save_SDVOX;
-+ /**
-+ * SDVO TV encoder support
-+ */
-+ u32 ActiveDevice; /* CRT, TV, LVDS, TMDS */
-+ u32 TVStandard; /* PAL, NTSC */
-+ int TVOutput; /* S-Video, CVBS,YPbPr,RGB */
-+ int TVMode; /* SDTV/HDTV/SECAM mod */
-+ u32 TVStdBitmask;
-+ u32 dwSDVOHDTVBitMask;
-+ u32 dwSDVOSDTVBitMask;
-+ u8 byInputWiring;
-+ bool bGetClk;
-+ u32 dwMaxDotClk;
-+ u32 dwMinDotClk;
-+
-+ u32 dwMaxInDotClk;
-+ u32 dwMinInDotClk;
-+
-+ u32 dwMaxOutDotClk;
-+ u32 dwMinOutDotClk;
-+ u32 dwSupportedEnhancements;
-+ EXTVDATA OverScanY; /* Vertical Overscan : for TV onl */
-+ EXTVDATA OverScanX; /* Horizontal Overscan : for TV onl */
-+ sdvo_display_params dispParams;
-+ SDVO_ANCILLARY_INFO_T AncillaryInfo;
-+};
-+
-+/* Define TV mode type */
-+/* The full set are defined in xf86str.h*/
-+#define M_T_TV 0x80
-+
-+typedef struct _tv_mode_t
-+{
-+ /* the following data is detailed mode information as it would be passed to the hardware: */
-+ struct drm_display_mode mode_entry;
-+ u32 dwSupportedSDTVvss;
-+ u32 dwSupportedHDTVvss;
-+ bool m_preferred;
-+ bool isTVMode;
-+} tv_mode_t;
-+
-+static tv_mode_t tv_modes[] = {
-+ {
-+ .mode_entry =
-+ {DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x2625a00 / 1000, 800, 840, 968, 1056, 0,
-+ 600, 601,
-+ 604, 628, 0, V_PHSYNC | V_PVSYNC)},
-+ .dwSupportedSDTVvss = TVSTANDARD_SDTV_ALL,
-+ .dwSupportedHDTVvss = TVSTANDARD_HDTV_ALL,
-+ .m_preferred = TRUE,
-+ .isTVMode = TRUE,
-+ },
-+ {
-+ .mode_entry =
-+ {DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x3dfd240 / 1000, 1024, 0x418, 0x49f, 0x540,
-+ 0, 768,
-+ 0x303, 0x308, 0x325, 0, V_PHSYNC | V_PVSYNC)},
-+ .dwSupportedSDTVvss = TVSTANDARD_SDTV_ALL,
-+ .dwSupportedHDTVvss = TVSTANDARD_HDTV_ALL,
-+ .m_preferred = FALSE,
-+ .isTVMode = TRUE,
-+ },
-+ {
-+ .mode_entry =
-+ {DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x1978ff0 / 1000, 720, 0x2e1, 0x326, 0x380, 0,
-+ 480,
-+ 0x1f0, 0x1e1, 0x1f1, 0, V_PHSYNC | V_PVSYNC)},
-+ .dwSupportedSDTVvss =
-+ TVSTANDARD_NTSC_M | TVSTANDARD_NTSC_M_J | TVSTANDARD_NTSC_433,
-+ .dwSupportedHDTVvss = 0x0,
-+ .m_preferred = FALSE,
-+ .isTVMode = TRUE,
-+ },
-+ {
-+ /*Modeline "720x576_SDVO" 0.96 720 756 788 864 576 616 618 700 +vsync */
-+ .mode_entry =
-+ {DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER | M_T_TV, 0x1f25a20 / 1000, 720, 756, 788, 864, 0, 576,
-+ 616,
-+ 618, 700, 0, V_PHSYNC | V_PVSYNC)},
-+ .dwSupportedSDTVvss =
-+ (TVSTANDARD_PAL_B | TVSTANDARD_PAL_D | TVSTANDARD_PAL_H |
-+ TVSTANDARD_PAL_I | TVSTANDARD_PAL_N | TVSTANDARD_SECAM_B |
-+ TVSTANDARD_SECAM_D | TVSTANDARD_SECAM_G | TVSTANDARD_SECAM_H |
-+ TVSTANDARD_SECAM_K | TVSTANDARD_SECAM_K1 | TVSTANDARD_SECAM_L |
-+ TVSTANDARD_PAL_G | TVSTANDARD_SECAM_L1),
-+ .dwSupportedHDTVvss = 0x0,
-+ .m_preferred = FALSE,
-+ .isTVMode = TRUE,
-+ },
-+ {
-+ .mode_entry =
-+ {DRM_MODE("1280x720@60",DRM_MODE_TYPE_DRIVER | M_T_TV, 74250000 / 1000, 1280, 1390, 1430, 1650, 0,
-+ 720,
-+ 725, 730, 750, 0, V_PHSYNC | V_PVSYNC)},
-+ .dwSupportedSDTVvss = 0x0,
-+ .dwSupportedHDTVvss = HDTV_SMPTE_296M_720p60,
-+ .m_preferred = FALSE,
-+ .isTVMode = TRUE,
-+ },
-+ {
-+ .mode_entry =
-+ {DRM_MODE("1280x720@50", DRM_MODE_TYPE_DRIVER | M_T_TV, 74250000 / 1000, 1280, 1720, 1759, 1980, 0,
-+ 720,
-+ 725, 730, 750, 0, V_PHSYNC | V_PVSYNC)},
-+ .dwSupportedSDTVvss = 0x0,
-+ .dwSupportedHDTVvss = HDTV_SMPTE_296M_720p50,
-+ .m_preferred = FALSE,
-+ .isTVMode = TRUE,
-+ },
-+ {
-+ .mode_entry =
-+ {DRM_MODE("1920x1080@60", DRM_MODE_TYPE_DRIVER | M_T_TV, 148500000 / 1000, 1920, 2008, 2051, 2200, 0,
-+ 1080,
-+ 1084, 1088, 1124, 0, V_PHSYNC | V_PVSYNC)},
-+ .dwSupportedSDTVvss = 0x0,
-+ .dwSupportedHDTVvss = HDTV_SMPTE_274M_1080i60,
-+ .m_preferred = FALSE,
-+ .isTVMode = TRUE,
-+ },
-+};
-+
-+#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0])
-+
-+typedef struct {
-+ /* given values */
-+ int n;
-+ int m1, m2;
-+ int p1, p2;
-+ /* derived values */
-+ int dot;
-+ int vco;
-+ int m;
-+ int p;
-+} ex_intel_clock_t;
-+
-+
-+/**
-+ * Writes the SDVOB or SDVOC with the given value, but always writes both
-+ * SDVOB and SDVOC to work around apparent hardware issues (according to
-+ * comments in the BIOS).
-+ */
-+static void intel_sdvo_write_sdvox(struct drm_output *output, u32 val)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ u32 bval = val, cval = val;
-+ int i;
-+
-+ if (sdvo_priv->output_device == SDVOB)
-+ cval = I915_READ(SDVOC);
-+ else
-+ bval = I915_READ(SDVOB);
-+ /*
-+ * Write the registers twice for luck. Sometimes,
-+ * writing them only once doesn't appear to 'stick'.
-+ * The BIOS does this too. Yay, magic
-+ */
-+ for (i = 0; i < 2; i++)
-+ {
-+ I915_WRITE(SDVOB, bval);
-+ I915_READ(SDVOB);
-+ I915_WRITE(SDVOC, cval);
-+ I915_READ(SDVOC);
-+ }
-+}
-+
-+static bool intel_sdvo_read_byte(struct drm_output *output, u8 addr,
-+ u8 *ch)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ u8 out_buf[2];
-+ u8 buf[2];
-+ int ret;
-+
-+ struct i2c_msg msgs[] = {
-+ {
-+ .addr = sdvo_priv->i2c_bus->slave_addr,
-+ .flags = 0,
-+ .len = 1,
-+ .buf = out_buf,
-+ },
-+ {
-+ .addr = sdvo_priv->i2c_bus->slave_addr,
-+ .flags = I2C_M_RD,
-+ .len = 1,
-+ .buf = buf,
-+ }
-+ };
-+
-+ out_buf[0] = addr;
-+ out_buf[1] = 0;
-+
-+ if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
-+ {
-+// DRM_DEBUG("got back from addr %02X = %02x\n", out_buf[0], buf[0]);
-+ *ch = buf[0];
-+ return true;
-+ }
-+
-+ DRM_DEBUG("i2c transfer returned %d\n", ret);
-+ return false;
-+}
-+
-+
-+#if 0
-+static bool intel_sdvo_read_byte_quiet(struct drm_output *output, int addr,
-+ u8 *ch)
-+{
-+ return true;
-+
-+}
-+#endif
-+
-+static bool intel_sdvo_write_byte(struct drm_output *output, int addr,
-+ u8 ch)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ u8 out_buf[2];
-+ struct i2c_msg msgs[] = {
-+ {
-+ .addr = intel_output->i2c_bus->slave_addr,
-+ .flags = 0,
-+ .len = 2,
-+ .buf = out_buf,
-+ }
-+ };
-+
-+ out_buf[0] = addr;
-+ out_buf[1] = ch;
-+
-+ if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
-+ {
-+ return true;
-+ }
-+ return false;
-+}
-+
-+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
-+/** Mapping of command numbers to names, for debug output */
-+const static struct _sdvo_cmd_name {
-+ u8 cmd;
-+ char *name;
-+} sdvo_cmd_names[] = {
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
-+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
-+};
-+
-+#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
-+#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
-+
-+static void intel_sdvo_write_cmd(struct drm_output *output, u8 cmd,
-+ void *args, int args_len)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ int i;
-+
-+ if (drm_debug) {
-+ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
-+ for (i = 0; i < args_len; i++)
-+ printk("%02X ", ((u8 *)args)[i]);
-+ for (; i < 8; i++)
-+ printk(" ");
-+ for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
-+ if (cmd == sdvo_cmd_names[i].cmd) {
-+ printk("(%s)", sdvo_cmd_names[i].name);
-+ break;
-+ }
-+ }
-+ if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0]))
-+ printk("(%02X)",cmd);
-+ printk("\n");
-+ }
-+
-+ for (i = 0; i < args_len; i++) {
-+ intel_sdvo_write_byte(output, SDVO_I2C_ARG_0 - i, ((u8*)args)[i]);
-+ }
-+
-+ intel_sdvo_write_byte(output, SDVO_I2C_OPCODE, cmd);
-+}
-+
-+static const char *cmd_status_names[] = {
-+ "Power on",
-+ "Success",
-+ "Not supported",
-+ "Invalid arg",
-+ "Pending",
-+ "Target not specified",
-+ "Scaling not supported"
-+};
-+
-+static u8 intel_sdvo_read_response(struct drm_output *output, void *response,
-+ int response_len)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ int i;
-+ u8 status;
-+ u8 retry = 50;
-+
-+ while (retry--) {
-+ /* Read the command response */
-+ for (i = 0; i < response_len; i++) {
-+ intel_sdvo_read_byte(output, SDVO_I2C_RETURN_0 + i,
-+ &((u8 *)response)[i]);
-+ }
-+
-+ /* read the return status */
-+ intel_sdvo_read_byte(output, SDVO_I2C_CMD_STATUS, &status);
-+
-+ if (drm_debug) {
-+ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
-+ for (i = 0; i < response_len; i++)
-+ printk("%02X ", ((u8 *)response)[i]);
-+ for (; i < 8; i++)
-+ printk(" ");
-+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
-+ printk("(%s)", cmd_status_names[status]);
-+ else
-+ printk("(??? %d)", status);
-+ printk("\n");
-+ }
-+
-+ if (status != SDVO_CMD_STATUS_PENDING)
-+ return status;
-+
-+ mdelay(50);
-+ }
-+
-+ return status;
-+}
-+
-+int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
-+{
-+ if (mode->clock >= 100000)
-+ return 1;
-+ else if (mode->clock >= 50000)
-+ return 2;
-+ else
-+ return 4;
-+}
-+
-+/**
-+ * Don't check status code from this as it switches the bus back to the
-+ * SDVO chips which defeats the purpose of doing a bus switch in the first
-+ * place.
-+ */
-+void intel_sdvo_set_control_bus_switch(struct drm_output *output, u8 target)
-+{
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
-+}
-+
-+static bool intel_sdvo_set_target_input(struct drm_output *output, bool target_0, bool target_1)
-+{
-+ struct intel_sdvo_set_target_input_args targets = {0};
-+ u8 status;
-+
-+ if (target_0 && target_1)
-+ return SDVO_CMD_STATUS_NOTSUPP;
-+
-+ if (target_1)
-+ targets.target_1 = 1;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_INPUT, &targets,
-+ sizeof(targets));
-+
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ return (status == SDVO_CMD_STATUS_SUCCESS);
-+}
-+
-+/**
-+ * Return whether each input is trained.
-+ *
-+ * This function is making an assumption about the layout of the response,
-+ * which should be checked against the docs.
-+ */
-+static bool intel_sdvo_get_trained_inputs(struct drm_output *output, bool *input_1, bool *input_2)
-+{
-+ struct intel_sdvo_get_trained_inputs_response response;
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
-+ status = intel_sdvo_read_response(output, &response, sizeof(response));
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ *input_1 = response.input0_trained;
-+ *input_2 = response.input1_trained;
-+ return true;
-+}
-+
-+static bool intel_sdvo_get_active_outputs(struct drm_output *output,
-+ u16 *outputs)
-+{
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
-+ status = intel_sdvo_read_response(output, outputs, sizeof(*outputs));
-+
-+ return (status == SDVO_CMD_STATUS_SUCCESS);
-+}
-+
-+static bool intel_sdvo_set_active_outputs(struct drm_output *output,
-+ u16 outputs)
-+{
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
-+ sizeof(outputs));
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+ return (status == SDVO_CMD_STATUS_SUCCESS);
-+}
-+
-+static bool intel_sdvo_set_encoder_power_state(struct drm_output *output,
-+ int mode)
-+{
-+ u8 status, state = SDVO_ENCODER_STATE_ON;
-+
-+ switch (mode) {
-+ case DPMSModeOn:
-+ state = SDVO_ENCODER_STATE_ON;
-+ break;
-+ case DPMSModeStandby:
-+ state = SDVO_ENCODER_STATE_STANDBY;
-+ break;
-+ case DPMSModeSuspend:
-+ state = SDVO_ENCODER_STATE_SUSPEND;
-+ break;
-+ case DPMSModeOff:
-+ state = SDVO_ENCODER_STATE_OFF;
-+ break;
-+ }
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
-+ sizeof(state));
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ return (status == SDVO_CMD_STATUS_SUCCESS);
-+}
-+
-+static bool intel_sdvo_get_input_pixel_clock_range(struct drm_output *output,
-+ int *clock_min,
-+ int *clock_max)
-+{
-+ struct intel_sdvo_pixel_clock_range clocks;
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
-+ NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, &clocks, sizeof(clocks));
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ /* Convert the values from units of 10 kHz to kHz. */
-+ *clock_min = clocks.min * 10;
-+ *clock_max = clocks.max * 10;
-+
-+ return true;
-+}
-+
-+static bool intel_sdvo_set_target_output(struct drm_output *output,
-+ u16 outputs)
-+{
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
-+ sizeof(outputs));
-+
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+ return (status == SDVO_CMD_STATUS_SUCCESS);
-+}
-+
-+static bool intel_sdvo_get_timing(struct drm_output *output, u8 cmd,
-+ struct intel_sdvo_dtd *dtd)
-+{
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, cmd, NULL, 0);
-+ status = intel_sdvo_read_response(output, &dtd->part1,
-+ sizeof(dtd->part1));
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ intel_sdvo_write_cmd(output, cmd + 1, NULL, 0);
-+ status = intel_sdvo_read_response(output, &dtd->part2,
-+ sizeof(dtd->part2));
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ return true;
-+}
-+
-+static bool intel_sdvo_get_input_timing(struct drm_output *output,
-+ struct intel_sdvo_dtd *dtd)
-+{
-+ return intel_sdvo_get_timing(output,
-+ SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
-+}
-+
-+static bool intel_sdvo_get_output_timing(struct drm_output *output,
-+ struct intel_sdvo_dtd *dtd)
-+{
-+ return intel_sdvo_get_timing(output,
-+ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
-+}
-+
-+static bool intel_sdvo_set_timing(struct drm_output *output, u8 cmd,
-+ struct intel_sdvo_dtd *dtd)
-+{
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, cmd, &dtd->part1, sizeof(dtd->part1));
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ intel_sdvo_write_cmd(output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ return true;
-+}
-+
-+static bool intel_sdvo_set_input_timing(struct drm_output *output,
-+ struct intel_sdvo_dtd *dtd)
-+{
-+ return intel_sdvo_set_timing(output,
-+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
-+}
-+
-+static bool intel_sdvo_set_output_timing(struct drm_output *output,
-+ struct intel_sdvo_dtd *dtd)
-+{
-+ return intel_sdvo_set_timing(output,
-+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
-+}
-+
-+#if 0
-+static bool intel_sdvo_get_preferred_input_timing(struct drm_output *output,
-+ struct intel_sdvo_dtd *dtd)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
-+ NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, &dtd->part1,
-+ sizeof(dtd->part1));
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
-+ NULL, 0);
-+ status = intel_sdvo_read_response(output, &dtd->part2,
-+ sizeof(dtd->part2));
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ return true;
-+}
-+#endif
-+
-+static int intel_sdvo_get_clock_rate_mult(struct drm_output *output)
-+{
-+ u8 response, status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
-+ status = intel_sdvo_read_response(output, &response, 1);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS) {
-+ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
-+ return SDVO_CLOCK_RATE_MULT_1X;
-+ } else {
-+ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
-+ }
-+
-+ return response;
-+}
-+
-+static bool intel_sdvo_set_clock_rate_mult(struct drm_output *output, u8 val)
-+{
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ return true;
-+}
-+
-+static bool intel_sdvo_mode_fixup(struct drm_output *output,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
-+ * device will be told of the multiplier during mode_set.
-+ */
-+ DRM_DEBUG("xxintel_sdvo_fixup\n");
-+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
-+ return true;
-+}
-+
-+#if 0
-+static void i830_sdvo_map_hdtvstd_bitmask(struct drm_output * output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ switch (sdvo_priv->TVStandard) {
-+ case HDTV_SMPTE_274M_1080i50:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i50;
-+ break;
-+
-+ case HDTV_SMPTE_274M_1080i59:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i59;
-+ break;
-+
-+ case HDTV_SMPTE_274M_1080i60:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080i60;
-+ break;
-+ case HDTV_SMPTE_274M_1080p60:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_274M_1080p60;
-+ break;
-+ case HDTV_SMPTE_296M_720p59:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p59;
-+ break;
-+
-+ case HDTV_SMPTE_296M_720p60:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p60;
-+ break;
-+
-+ case HDTV_SMPTE_296M_720p50:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_296M_720p50;
-+ break;
-+
-+ case HDTV_SMPTE_293M_480p59:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_293M_480p59;
-+ break;
-+
-+ case HDTV_SMPTE_293M_480p60:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_EIA_7702A_480p60;
-+ break;
-+
-+ case HDTV_SMPTE_170M_480i59:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_170M_480i59;
-+ break;
-+
-+ case HDTV_ITURBT601_576i50:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_ITURBT601_576i50;
-+ break;
-+
-+ case HDTV_ITURBT601_576p50:
-+ sdvo_priv->TVStdBitmask = SDVO_HDTV_STD_ITURBT601_576p50;
-+ break;
-+ default:
-+ DRM_DEBUG("ERROR: Unknown TV Standard!!!\n");
-+ /*Invalid return 0 */
-+ sdvo_priv->TVStdBitmask = 0;
-+ }
-+
-+}
-+
-+static void i830_sdvo_map_sdtvstd_bitmask(struct drm_output * output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ switch (sdvo_priv->TVStandard) {
-+ case TVSTANDARD_NTSC_M:
-+ sdvo_priv->TVStdBitmask = SDVO_NTSC_M;
-+ break;
-+
-+ case TVSTANDARD_NTSC_M_J:
-+ sdvo_priv->TVStdBitmask = SDVO_NTSC_M_J;
-+ break;
-+
-+ case TVSTANDARD_NTSC_433:
-+ sdvo_priv->TVStdBitmask = SDVO_NTSC_433;
-+ break;
-+
-+ case TVSTANDARD_PAL_B:
-+ sdvo_priv->TVStdBitmask = SDVO_PAL_B;
-+ break;
-+
-+ case TVSTANDARD_PAL_D:
-+ sdvo_priv->TVStdBitmask = SDVO_PAL_D;
-+ break;
-+
-+ case TVSTANDARD_PAL_G:
-+ sdvo_priv->TVStdBitmask = SDVO_PAL_G;
-+ break;
-+
-+ case TVSTANDARD_PAL_H:
-+ sdvo_priv->TVStdBitmask = SDVO_PAL_H;
-+ break;
-+
-+ case TVSTANDARD_PAL_I:
-+ sdvo_priv->TVStdBitmask = SDVO_PAL_I;
-+ break;
-+
-+ case TVSTANDARD_PAL_M:
-+ sdvo_priv->TVStdBitmask = SDVO_PAL_M;
-+ break;
-+
-+ case TVSTANDARD_PAL_N:
-+ sdvo_priv->TVStdBitmask = SDVO_PAL_N;
-+ break;
-+
-+ case TVSTANDARD_PAL_60:
-+ sdvo_priv->TVStdBitmask = SDVO_PAL_60;
-+ break;
-+
-+ case TVSTANDARD_SECAM_B:
-+ sdvo_priv->TVStdBitmask = SDVO_SECAM_B;
-+ break;
-+
-+ case TVSTANDARD_SECAM_D:
-+ sdvo_priv->TVStdBitmask = SDVO_SECAM_D;
-+ break;
-+
-+ case TVSTANDARD_SECAM_G:
-+ sdvo_priv->TVStdBitmask = SDVO_SECAM_G;
-+ break;
-+
-+ case TVSTANDARD_SECAM_K:
-+ sdvo_priv->TVStdBitmask = SDVO_SECAM_K;
-+ break;
-+
-+ case TVSTANDARD_SECAM_K1:
-+ sdvo_priv->TVStdBitmask = SDVO_SECAM_K1;
-+ break;
-+
-+ case TVSTANDARD_SECAM_L:
-+ sdvo_priv->TVStdBitmask = SDVO_SECAM_L;
-+ break;
-+
-+ case TVSTANDARD_SECAM_L1:
-+ DRM_DEBUG("TVSTANDARD_SECAM_L1 not supported by encoder\n");
-+ break;
-+
-+ case TVSTANDARD_SECAM_H:
-+ DRM_DEBUG("TVSTANDARD_SECAM_H not supported by encoder\n");
-+ break;
-+
-+ default:
-+ DRM_DEBUG("ERROR: Unknown TV Standard\n");
-+ /*Invalid return 0 */
-+ sdvo_priv->TVStdBitmask = 0;
-+ break;
-+ }
-+}
-+#endif
-+
-+static bool i830_sdvo_set_tvoutputs_formats(struct drm_output * output)
-+{
-+ u8 byArgs[6];
-+ u8 status;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ if (sdvo_priv->TVMode & (TVMODE_SDTV)) {
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (sdvo_priv->TVStdBitmask & 0xFF);
-+ byArgs[1] = (u8) ((sdvo_priv->TVStdBitmask >> 8) & 0xFF);
-+ byArgs[2] = (u8) ((sdvo_priv->TVStdBitmask >> 16) & 0xFF);
-+ } else {
-+ /* Fill up the arguement value */
-+ byArgs[0] = 0;
-+ byArgs[1] = 0;
-+ byArgs[2] = (u8) ((sdvo_priv->TVStdBitmask & 0xFF));
-+ byArgs[3] = (u8) ((sdvo_priv->TVStdBitmask >> 8) & 0xFF);
-+ byArgs[4] = (u8) ((sdvo_priv->TVStdBitmask >> 16) & 0xFF);
-+ byArgs[5] = (u8) ((sdvo_priv->TVStdBitmask >> 24) & 0xFF);
-+ }
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMATS, byArgs, 6);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+
-+}
-+
-+static bool i830_sdvo_create_preferred_input_timing(struct drm_output * output,
-+ struct drm_display_mode * mode)
-+{
-+ u8 byArgs[7];
-+ u8 status;
-+ u32 dwClk;
-+ u32 dwHActive, dwVActive;
-+ bool bIsInterlaced, bIsScaled;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement values */
-+ dwHActive = mode->crtc_hdisplay;
-+ dwVActive = mode->crtc_vdisplay;
-+
-+ dwClk = mode->clock * 1000 / 10000;
-+ byArgs[0] = (u8) (dwClk & 0xFF);
-+ byArgs[1] = (u8) ((dwClk >> 8) & 0xFF);
-+
-+ /* HActive & VActive should not exceed 12 bits each. So check it */
-+ if ((dwHActive > 0xFFF) || (dwVActive > 0xFFF))
-+ return FALSE;
-+
-+ byArgs[2] = (u8) (dwHActive & 0xFF);
-+ byArgs[3] = (u8) ((dwHActive >> 8) & 0xF);
-+ byArgs[4] = (u8) (dwVActive & 0xFF);
-+ byArgs[5] = (u8) ((dwVActive >> 8) & 0xF);
-+
-+ bIsInterlaced = 1;
-+ bIsScaled = 0;
-+
-+ byArgs[6] = bIsInterlaced ? 1 : 0;
-+ byArgs[6] |= bIsScaled ? 2 : 0;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMINGS,
-+ byArgs, 7);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+
-+}
-+
-+static bool i830_sdvo_get_preferred_input_timing(struct drm_output * output,
-+ struct intel_sdvo_dtd *output_dtd)
-+{
-+ return intel_sdvo_get_timing(output,
-+ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
-+ output_dtd);
-+}
-+
-+static bool i830_sdvo_set_current_inoutmap(struct drm_output * output, u32 in0outputmask,
-+ u32 in1outputmask)
-+{
-+ u8 byArgs[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement values; */
-+ byArgs[0] = (u8) (in0outputmask & 0xFF);
-+ byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
-+ byArgs[2] = (u8) (in1outputmask & 0xFF);
-+ byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+
-+}
-+
-+void i830_sdvo_set_iomap(struct drm_output * output)
-+{
-+ u32 dwCurrentSDVOIn0 = 0;
-+ u32 dwCurrentSDVOIn1 = 0;
-+ u32 dwDevMask = 0;
-+
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ /* Please DO NOT change the following code. */
-+ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
-+ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
-+ if (sdvo_priv->byInputWiring & (SDVOB_IN0 | SDVOC_IN0)) {
-+ switch (sdvo_priv->ActiveDevice) {
-+ case SDVO_DEVICE_LVDS:
-+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
-+ break;
-+
-+ case SDVO_DEVICE_TMDS:
-+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
-+ break;
-+
-+ case SDVO_DEVICE_TV:
-+ dwDevMask =
-+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
-+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
-+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
-+ break;
-+
-+ case SDVO_DEVICE_CRT:
-+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
-+ break;
-+ }
-+ dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
-+ } else if (sdvo_priv->byInputWiring & (SDVOB_IN1 | SDVOC_IN1)) {
-+ switch (sdvo_priv->ActiveDevice) {
-+ case SDVO_DEVICE_LVDS:
-+ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
-+ break;
-+
-+ case SDVO_DEVICE_TMDS:
-+ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
-+ break;
-+
-+ case SDVO_DEVICE_TV:
-+ dwDevMask =
-+ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0 |
-+ SDVO_OUTPUT_YPRPB1 | SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
-+ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
-+ break;
-+
-+ case SDVO_DEVICE_CRT:
-+ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
-+ break;
-+ }
-+ dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
-+ }
-+
-+ i830_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
-+ dwCurrentSDVOIn1);
-+}
-+
-+static bool i830_sdvo_get_input_output_pixelclock_range(struct drm_output * output,
-+ bool direction)
-+{
-+ u8 byRets[4];
-+ u8 status;
-+
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+ if (direction) /* output pixel clock */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE,
-+ NULL, 0);
-+ else
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
-+ NULL, 0);
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ if (direction) {
-+ /* Fill up the return values. */
-+ sdvo_priv->dwMinOutDotClk =
-+ (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ sdvo_priv->dwMaxOutDotClk =
-+ (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ /* Multiply 10000 with the clocks obtained */
-+ sdvo_priv->dwMinOutDotClk = (sdvo_priv->dwMinOutDotClk) * 10000;
-+ sdvo_priv->dwMaxOutDotClk = (sdvo_priv->dwMaxOutDotClk) * 10000;
-+
-+ } else {
-+ /* Fill up the return values. */
-+ sdvo_priv->dwMinInDotClk = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ sdvo_priv->dwMaxInDotClk = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ /* Multiply 10000 with the clocks obtained */
-+ sdvo_priv->dwMinInDotClk = (sdvo_priv->dwMinInDotClk) * 10000;
-+ sdvo_priv->dwMaxInDotClk = (sdvo_priv->dwMaxInDotClk) * 10000;
-+ }
-+ DRM_DEBUG("MinDotClk = 0x%x\n", sdvo_priv->dwMinInDotClk);
-+ DRM_DEBUG("MaxDotClk = 0x%x\n", sdvo_priv->dwMaxInDotClk);
-+
-+ return TRUE;
-+
-+}
-+
-+static bool i830_sdvo_get_supported_tvoutput_formats(struct drm_output * output,
-+ u32 * pTVStdMask,
-+ u32 * pHDTVStdMask, u32 *pTVStdFormat)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ u8 byRets[6];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 6);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values; */
-+ *pTVStdMask = (((u32) byRets[0]) |
-+ ((u32) byRets[1] << 8) |
-+ ((u32) (byRets[2] & 0x7) << 16));
-+
-+ *pHDTVStdMask = (((u32) byRets[2] & 0xF8) |
-+ ((u32) byRets[3] << 8) |
-+ ((u32) byRets[4] << 16) | ((u32) byRets[5] << 24));
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMATS, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 6);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values; */
-+ if(sdvo_priv->TVMode == TVMODE_SDTV)
-+ *pTVStdFormat = (((u32) byRets[0]) |
-+ ((u32) byRets[1] << 8) |
-+ ((u32) (byRets[2] & 0x7) << 16));
-+ else
-+ *pTVStdFormat = (((u32) byRets[2] & 0xF8) |
-+ ((u32) byRets[3] << 8) |
-+ ((u32) byRets[4] << 16) | ((u32) byRets[5] << 24));
-+ DRM_DEBUG("BIOS TV format is %d\n",*pTVStdFormat);
-+ return TRUE;
-+
-+}
-+
-+static bool i830_sdvo_get_supported_enhancements(struct drm_output * output,
-+ u32 * psupported_enhancements)
-+{
-+
-+ u8 status;
-+ u8 byRets[2];
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 2);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ sdvo_priv->dwSupportedEnhancements = *psupported_enhancements =
-+ ((u32) byRets[0] | ((u32) byRets[1] << 8));
-+ return TRUE;
-+
-+}
-+
-+static bool i830_sdvo_get_max_horizontal_overscan(struct drm_output * output, u32 * pMaxVal,
-+ u32 * pDefaultVal)
-+{
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HORIZONTAL_OVERSCAN, NULL,
-+ 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_vertical_overscan(struct drm_output * output, u32 * pMaxVal,
-+ u32 * pDefaultVal)
-+{
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_VERTICAL_OVERSCAN, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_horizontal_position(struct drm_output * output, u32 * pMaxVal,
-+ u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HORIZONTAL_POSITION, NULL,
-+ 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_vertical_position(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_VERTICAL_POSITION, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_flickerfilter(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_FLICKER_FILTER, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_brightness(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_contrast(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_sharpness(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_SHARPNESS, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_hue(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_HUE, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_saturation(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
-+
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_adaptive_flickerfilter(struct drm_output * output,
-+ u32 * pMaxVal,
-+ u32 * pDefaultVal)
-+{
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FILTER,
-+ NULL, 0);
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_lumafilter(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_TV_LUMA_FILTER, NULL, 0);
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_chromafilter(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_TV_CHROMA_FILTER, NULL, 0);
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_dotcrawl(struct drm_output * output,
-+ u32 * pCurrentVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_DOT_CRAWL, NULL, 0);
-+ status = intel_sdvo_read_response(output, byRets, 2);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Tibet issue 1603772: Dot crawl do not persist after reboot/Hibernate */
-+ /* Details : Bit0 is considered as DotCrawl Max value. But according to EDS, Bit0 */
-+ /* represents the Current DotCrawl value. */
-+ /* Fix : The current value is updated with Bit0. */
-+
-+ /* Fill up the return values. */
-+ *pCurrentVal = (u32) (byRets[0] & 0x1);
-+ *pDefaultVal = (u32) ((byRets[0] >> 1) & 0x1);
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_get_max_2D_flickerfilter(struct drm_output * output,
-+ u32 * pMaxVal, u32 * pDefaultVal)
-+{
-+
-+ u8 byRets[4];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byRets, 0, sizeof(byRets));
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_2D_FLICKER_FILTER, NULL, 0);
-+ status = intel_sdvo_read_response(output, byRets, 4);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ /* Fill up the return values. */
-+ *pMaxVal = (u32) byRets[0] | ((u32) byRets[1] << 8);
-+ *pDefaultVal = (u32) byRets[2] | ((u32) byRets[3] << 8);
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_horizontal_overscan(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HORIZONTAL_OVERSCAN, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_vertical_overscan(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_VERTICAL_OVERSCAN, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_horizontal_position(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HORIZONTAL_POSITION, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_vertical_position(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_VERTICAL_POSITION, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+
-+}
-+
-+static bool i830_sdvo_set_flickerilter(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_FLICKER_FILTER, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_brightness(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_BRIGHTNESS, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_contrast(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_CONTRAST, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_sharpness(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_SHARPNESS, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_hue(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_HUE, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_saturation(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_SATURATION, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_adaptive_flickerfilter(struct drm_output * output, u32 dwVal)
-+{
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ADAPTIVE_FLICKER_FILTER, byArgs,
-+ 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+
-+}
-+
-+static bool i830_sdvo_set_lumafilter(struct drm_output * output, u32 dwVal)
-+{
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_LUMA_FILTER, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_chromafilter(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_CHROMA_FILTER, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_dotcrawl(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_DOT_CRAWL, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+static bool i830_sdvo_set_2D_flickerfilter(struct drm_output * output, u32 dwVal)
-+{
-+
-+ u8 byArgs[2];
-+ u8 status;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Fill up the arguement value */
-+ byArgs[0] = (u8) (dwVal & 0xFF);
-+ byArgs[1] = (u8) ((dwVal >> 8) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_2D_FLICKER_FILTER, byArgs, 2);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+}
-+
-+#if 0
-+static bool i830_sdvo_set_ancillary_video_information(struct drm_output * output)
-+{
-+
-+ u8 status;
-+ u8 byArgs[4];
-+ u32 dwAncillaryBits = 0;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ PSDVO_ANCILLARY_INFO_T pAncillaryInfo = &sdvo_priv->AncillaryInfo;
-+
-+ /* Make all fields of the args/ret to zero */
-+ memset(byArgs, 0, sizeof(byArgs));
-+
-+ /* Handle picture aspect ratio (bits 8, 9) and */
-+ /* active format aspect ratio (bits 10, 13) */
-+ switch (pAncillaryInfo->AspectRatio) {
-+ case CP_ASPECT_RATIO_FF_4_BY_3:
-+ dwAncillaryBits |= UAIM_PAR_4_3;
-+ dwAncillaryBits |= UAIM_FAR_4_BY_3_CENTER;
-+ break;
-+ case CP_ASPECT_RATIO_14_BY_9_CENTER:
-+ dwAncillaryBits |= UAIM_FAR_14_BY_9_CENTER;
-+ break;
-+ case CP_ASPECT_RATIO_14_BY_9_TOP:
-+ dwAncillaryBits |= UAIM_FAR_14_BY_9_LETTERBOX_TOP;
-+ break;
-+ case CP_ASPECT_RATIO_16_BY_9_CENTER:
-+ dwAncillaryBits |= UAIM_PAR_16_9;
-+ dwAncillaryBits |= UAIM_FAR_16_BY_9_CENTER;
-+ break;
-+ case CP_ASPECT_RATIO_16_BY_9_TOP:
-+ dwAncillaryBits |= UAIM_PAR_16_9;
-+ dwAncillaryBits |= UAIM_FAR_16_BY_9_LETTERBOX_TOP;
-+ break;
-+ case CP_ASPECT_RATIO_GT_16_BY_9_CENTER:
-+ dwAncillaryBits |= UAIM_PAR_16_9;
-+ dwAncillaryBits |= UAIM_FAR_GT_16_BY_9_LETTERBOX_CENTER;
-+ break;
-+ case CP_ASPECT_RATIO_FF_4_BY_3_PROT_CENTER:
-+ dwAncillaryBits |= UAIM_FAR_4_BY_3_SNP_14_BY_9_CENTER;
-+ break;
-+ case CP_ASPECT_RATIO_FF_16_BY_9_ANAMORPHIC:
-+ dwAncillaryBits |= UAIM_PAR_16_9;
-+ break;
-+ default:
-+ DRM_DEBUG("fail to set ancillary video info\n");
-+ return FALSE;
-+
-+ }
-+
-+ /* Fill up the argument value */
-+ byArgs[0] = (u8) ((dwAncillaryBits >> 0) & 0xFF);
-+ byArgs[1] = (u8) ((dwAncillaryBits >> 8) & 0xFF);
-+ byArgs[2] = (u8) ((dwAncillaryBits >> 16) & 0xFF);
-+ byArgs[3] = (u8) ((dwAncillaryBits >> 24) & 0xFF);
-+
-+ /* Send the arguements & SDVO opcode to the h/w */
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_SET_ANCILLARY_VIDEO_INFORMATION,
-+ byArgs, 4);
-+ status = intel_sdvo_read_response(output, NULL, 0);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return FALSE;
-+
-+ return TRUE;
-+
-+}
-+#endif
-+static bool i830_tv_program_display_params(struct drm_output * output)
-+
-+{
-+ u8 status;
-+ u32 dwMaxVal = 0;
-+ u32 dwDefaultVal = 0;
-+ u32 dwCurrentVal = 0;
-+
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ /* X & Y Positions */
-+
-+ /* Horizontal postition */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_HORIZONTAL_POSITION) {
-+ status =
-+ i830_sdvo_get_max_horizontal_position(output, &dwMaxVal,
-+ &dwDefaultVal);
-+
-+ if (status) {
-+ /*Tibet issue 1596943: After changing mode from 8x6 to 10x7 open CUI and press Restore Defaults */
-+ /*Position changes. */
-+
-+ /* Tibet:1629992 : can't keep previous TV setting status if re-boot system after TV setting(screen position & size) of CUI */
-+ /* Fix : compare whether current postion is greater than max value and then assign the default value. Earlier the check was */
-+ /* against the pAim->PositionX.Max value to dwMaxVal. When we boot the PositionX.Max value is 0 and so after every reboot, */
-+ /* position is set to default. */
-+
-+ if (sdvo_priv->dispParams.PositionX.Value > dwMaxVal)
-+ sdvo_priv->dispParams.PositionX.Value = dwDefaultVal;
-+
-+ status =
-+ i830_sdvo_set_horizontal_position(output,
-+ sdvo_priv->dispParams.PositionX.
-+ Value);
-+
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.PositionX.Max = dwMaxVal;
-+ sdvo_priv->dispParams.PositionX.Min = 0;
-+ sdvo_priv->dispParams.PositionX.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.PositionX.Step = 1;
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ /* Vertical position */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_VERTICAL_POSITION) {
-+ status =
-+ i830_sdvo_get_max_vertical_position(output, &dwMaxVal,
-+ &dwDefaultVal);
-+
-+ if (status) {
-+
-+ /*Tibet issue 1596943: After changing mode from 8x6 to 10x7 open CUI and press Restore Defaults */
-+ /*Position changes. */
-+ /*currently if we are out of range get back to default */
-+
-+ /* Tibet:1629992 : can't keep previous TV setting status if re-boot system after TV setting(screen position & size) of CUI */
-+ /* Fix : compare whether current postion is greater than max value and then assign the default value. Earlier the check was */
-+ /* against the pAim->PositionY.Max value to dwMaxVal. When we boot the PositionX.Max value is 0 and so after every reboot, */
-+ /* position is set to default. */
-+
-+ if (sdvo_priv->dispParams.PositionY.Value > dwMaxVal)
-+ sdvo_priv->dispParams.PositionY.Value = dwDefaultVal;
-+
-+ status =
-+ i830_sdvo_set_vertical_position(output,
-+ sdvo_priv->dispParams.PositionY.
-+ Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.PositionY.Max = dwMaxVal;
-+ sdvo_priv->dispParams.PositionY.Min = 0;
-+ sdvo_priv->dispParams.PositionY.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.PositionY.Step = 1;
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ /* Flicker Filter */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_FLICKER_FILTER) {
-+ status =
-+ i830_sdvo_get_max_flickerfilter(output, &dwMaxVal, &dwDefaultVal);
-+
-+ if (status) {
-+ /*currently if we are out of range get back to default */
-+ if (sdvo_priv->dispParams.FlickerFilter.Value > dwMaxVal)
-+ sdvo_priv->dispParams.FlickerFilter.Value = dwDefaultVal;
-+
-+ status =
-+ i830_sdvo_set_flickerilter(output,
-+ sdvo_priv->dispParams.FlickerFilter.
-+ Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.FlickerFilter.Max = dwMaxVal;
-+ sdvo_priv->dispParams.FlickerFilter.Min = 0;
-+ sdvo_priv->dispParams.FlickerFilter.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.FlickerFilter.Step = 1;
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ /* Brightness */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_BRIGHTNESS) {
-+
-+ status =
-+ i830_sdvo_get_max_brightness(output, &dwMaxVal, &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.Brightness.Value > dwMaxVal)
-+ sdvo_priv->dispParams.Brightness.Value = dwDefaultVal;
-+
-+ /* Program the device */
-+ status =
-+ i830_sdvo_set_brightness(output,
-+ sdvo_priv->dispParams.Brightness.Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.Brightness.Max = dwMaxVal;
-+ sdvo_priv->dispParams.Brightness.Min = 0;
-+ sdvo_priv->dispParams.Brightness.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.Brightness.Step = 1;
-+ } else {
-+ return status;
-+ }
-+
-+ }
-+
-+ /* Contrast */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_CONTRAST) {
-+
-+ status = i830_sdvo_get_max_contrast(output, &dwMaxVal, &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.Contrast.Value > dwMaxVal)
-+ sdvo_priv->dispParams.Contrast.Value = dwDefaultVal;
-+
-+ /* Program the device */
-+ status =
-+ i830_sdvo_set_contrast(output,
-+ sdvo_priv->dispParams.Contrast.Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.Contrast.Max = dwMaxVal;
-+ sdvo_priv->dispParams.Contrast.Min = 0;
-+ sdvo_priv->dispParams.Contrast.Default = dwDefaultVal;
-+
-+ sdvo_priv->dispParams.Contrast.Step = 1;
-+
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ /* Sharpness */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_SHARPNESS) {
-+
-+ status =
-+ i830_sdvo_get_max_sharpness(output, &dwMaxVal, &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.Sharpness.Value > dwMaxVal)
-+ sdvo_priv->dispParams.Sharpness.Value = dwDefaultVal;
-+
-+ /* Program the device */
-+ status =
-+ i830_sdvo_set_sharpness(output,
-+ sdvo_priv->dispParams.Sharpness.Value);
-+ if (!status)
-+ return status;
-+ sdvo_priv->dispParams.Sharpness.Max = dwMaxVal;
-+ sdvo_priv->dispParams.Sharpness.Min = 0;
-+ sdvo_priv->dispParams.Sharpness.Default = dwDefaultVal;
-+
-+ sdvo_priv->dispParams.Sharpness.Step = 1;
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ /* Hue */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_HUE) {
-+
-+ status = i830_sdvo_get_max_hue(output, &dwMaxVal, &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.Hue.Value > dwMaxVal)
-+ sdvo_priv->dispParams.Hue.Value = dwDefaultVal;
-+
-+ /* Program the device */
-+ status = i830_sdvo_set_hue(output, sdvo_priv->dispParams.Hue.Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.Hue.Max = dwMaxVal;
-+ sdvo_priv->dispParams.Hue.Min = 0;
-+ sdvo_priv->dispParams.Hue.Default = dwDefaultVal;
-+
-+ sdvo_priv->dispParams.Hue.Step = 1;
-+
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ /* Saturation */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_SATURATION) {
-+ status =
-+ i830_sdvo_get_max_saturation(output, &dwMaxVal, &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.Saturation.Value > dwMaxVal)
-+ sdvo_priv->dispParams.Saturation.Value = dwDefaultVal;
-+
-+ /* Program the device */
-+ status =
-+ i830_sdvo_set_saturation(output,
-+ sdvo_priv->dispParams.Saturation.Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.Saturation.Max = dwMaxVal;
-+ sdvo_priv->dispParams.Saturation.Min = 0;
-+ sdvo_priv->dispParams.Saturation.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.Saturation.Step = 1;
-+ } else {
-+ return status;
-+ }
-+
-+ }
-+
-+ /* Adaptive Flicker filter */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_ADAPTIVE_FLICKER_FILTER) {
-+ status =
-+ i830_sdvo_get_max_adaptive_flickerfilter(output, &dwMaxVal,
-+ &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.AdaptiveFF.Value > dwMaxVal)
-+ sdvo_priv->dispParams.AdaptiveFF.Value = dwDefaultVal;
-+
-+ status =
-+ i830_sdvo_set_adaptive_flickerfilter(output,
-+ sdvo_priv->dispParams.
-+ AdaptiveFF.Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.AdaptiveFF.Max = dwMaxVal;
-+ sdvo_priv->dispParams.AdaptiveFF.Min = 0;
-+ sdvo_priv->dispParams.AdaptiveFF.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.AdaptiveFF.Step = 1;
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ /* 2D Flicker filter */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_2D_FLICKER_FILTER) {
-+
-+ status =
-+ i830_sdvo_get_max_2D_flickerfilter(output, &dwMaxVal,
-+ &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.TwoD_FlickerFilter.Value > dwMaxVal)
-+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = dwDefaultVal;
-+
-+ status =
-+ i830_sdvo_set_2D_flickerfilter(output,
-+ sdvo_priv->dispParams.
-+ TwoD_FlickerFilter.Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.TwoD_FlickerFilter.Max = dwMaxVal;
-+ sdvo_priv->dispParams.TwoD_FlickerFilter.Min = 0;
-+ sdvo_priv->dispParams.TwoD_FlickerFilter.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.TwoD_FlickerFilter.Step = 1;
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ /* Luma Filter */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_TV_MAX_LUMA_FILTER) {
-+ status =
-+ i830_sdvo_get_max_lumafilter(output, &dwMaxVal, &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.LumaFilter.Value > dwMaxVal)
-+ sdvo_priv->dispParams.LumaFilter.Value = dwDefaultVal;
-+
-+ /* Program the device */
-+ status =
-+ i830_sdvo_set_lumafilter(output,
-+ sdvo_priv->dispParams.LumaFilter.Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.LumaFilter.Max = dwMaxVal;
-+ sdvo_priv->dispParams.LumaFilter.Min = 0;
-+ sdvo_priv->dispParams.LumaFilter.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.LumaFilter.Step = 1;
-+
-+ } else {
-+ return status;
-+ }
-+
-+ }
-+
-+ /* Chroma Filter */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_MAX_TV_CHROMA_FILTER) {
-+
-+ status =
-+ i830_sdvo_get_max_chromafilter(output, &dwMaxVal, &dwDefaultVal);
-+
-+ if (status) {
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+ if (sdvo_priv->dispParams.ChromaFilter.Value > dwMaxVal)
-+ sdvo_priv->dispParams.ChromaFilter.Value = dwDefaultVal;
-+
-+ /* Program the device */
-+ status =
-+ i830_sdvo_set_chromafilter(output,
-+ sdvo_priv->dispParams.ChromaFilter.
-+ Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.ChromaFilter.Max = dwMaxVal;
-+ sdvo_priv->dispParams.ChromaFilter.Min = 0;
-+ sdvo_priv->dispParams.ChromaFilter.Default = dwDefaultVal;
-+ sdvo_priv->dispParams.ChromaFilter.Step = 1;
-+ } else {
-+ return status;
-+ }
-+
-+ }
-+
-+ /* Dot Crawl */
-+ if (sdvo_priv->dwSupportedEnhancements & SDVO_DOT_CRAWL) {
-+ status = i830_sdvo_get_dotcrawl(output, &dwCurrentVal, &dwDefaultVal);
-+
-+ if (status) {
-+
-+ dwMaxVal = 1;
-+ /*check whether the value is beyond the max value, min value as per EDS is always 0 so */
-+ /*no need to check it. */
-+
-+ /* Tibet issue 1603772: Dot crawl do not persist after reboot/Hibernate */
-+ /* Details : "Dotcrawl.value" is compared with "dwDefaultVal". Since */
-+ /* dwDefaultVal is always 0, dotCrawl value is always set to 0. */
-+ /* Fix : Compare the current dotCrawl value with dwMaxValue. */
-+
-+ if (sdvo_priv->dispParams.DotCrawl.Value > dwMaxVal)
-+
-+ sdvo_priv->dispParams.DotCrawl.Value = dwMaxVal;
-+
-+ status =
-+ i830_sdvo_set_dotcrawl(output,
-+ sdvo_priv->dispParams.DotCrawl.Value);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->dispParams.DotCrawl.Max = dwMaxVal;
-+ sdvo_priv->dispParams.DotCrawl.Min = 0;
-+ sdvo_priv->dispParams.DotCrawl.Default = dwMaxVal;
-+ sdvo_priv->dispParams.DotCrawl.Step = 1;
-+ } else {
-+ return status;
-+ }
-+ }
-+
-+ return TRUE;
-+}
-+
-+static bool i830_tv_set_overscan_parameters(struct drm_output * output)
-+{
-+ u8 status;
-+
-+ u32 dwDefaultVal = 0;
-+ u32 dwMaxVal = 0;
-+ u32 dwPercentageValue = 0;
-+ u32 dwDefOverscanXValue = 0;
-+ u32 dwDefOverscanYValue = 0;
-+ u32 dwOverscanValue = 0;
-+ u32 dwSupportedEnhancements;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ /* Get supported picture enhancements */
-+ status =
-+ i830_sdvo_get_supported_enhancements(output,
-+ &dwSupportedEnhancements);
-+ if (!status)
-+ return status;
-+
-+ /* Horizontal Overscan */
-+ if (dwSupportedEnhancements & SDVO_HORIZONTAL_OVERSCAN) {
-+ status =
-+ i830_sdvo_get_max_horizontal_overscan(output, &dwMaxVal,
-+ &dwDefaultVal);
-+ if (!status)
-+ return status;
-+
-+ /*Calculate the default value in terms of percentage */
-+ dwDefOverscanXValue = ((dwDefaultVal * 100) / dwMaxVal);
-+
-+ /*Calculate the default value in 0-1000 range */
-+ dwDefOverscanXValue = (dwDefOverscanXValue * 10);
-+
-+ /*Overscan is in the range of 0 to 10000 as per MS spec */
-+ if (sdvo_priv->OverScanX.Value > MAX_VAL)
-+ sdvo_priv->OverScanX.Value = dwDefOverscanXValue;
-+
-+ /*Calculate the percentage(0-100%) of the overscan value */
-+ dwPercentageValue = (sdvo_priv->OverScanX.Value * 100) / 1000;
-+
-+ /* Now map the % value to absolute value to be programed to the encoder */
-+ dwOverscanValue = (dwMaxVal * dwPercentageValue) / 100;
-+
-+ status = i830_sdvo_set_horizontal_overscan(output, dwOverscanValue);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->OverScanX.Max = 1000;
-+ sdvo_priv->OverScanX.Min = 0;
-+ sdvo_priv->OverScanX.Default = dwDefOverscanXValue;
-+ sdvo_priv->OverScanX.Step = 20;
-+ }
-+
-+ /* Horizontal Overscan */
-+ /* vertical Overscan */
-+ if (dwSupportedEnhancements & SDVO_VERTICAL_OVERSCAN) {
-+ status =
-+ i830_sdvo_get_max_vertical_overscan(output, &dwMaxVal,
-+ &dwDefaultVal);
-+ if (!status)
-+ return status;
-+
-+ /*Calculate the default value in terms of percentage */
-+ dwDefOverscanYValue = ((dwDefaultVal * 100) / dwMaxVal);
-+
-+ /*Calculate the default value in 0-1000 range */
-+ dwDefOverscanYValue = (dwDefOverscanYValue * 10);
-+
-+ /*Overscan is in the range of 0 to 10000 as per MS spec */
-+ if (sdvo_priv->OverScanY.Value > MAX_VAL)
-+ sdvo_priv->OverScanY.Value = dwDefOverscanYValue;
-+
-+ /*Calculate the percentage(0-100%) of the overscan value */
-+ dwPercentageValue = (sdvo_priv->OverScanY.Value * 100) / 1000;
-+
-+ /* Now map the % value to absolute value to be programed to the encoder */
-+ dwOverscanValue = (dwMaxVal * dwPercentageValue) / 100;
-+
-+ status = i830_sdvo_set_vertical_overscan(output, dwOverscanValue);
-+ if (!status)
-+ return status;
-+
-+ sdvo_priv->OverScanY.Max = 1000;
-+ sdvo_priv->OverScanY.Min = 0;
-+ sdvo_priv->OverScanY.Default = dwDefOverscanYValue;
-+ sdvo_priv->OverScanY.Step = 20;
-+
-+ }
-+ /* vertical Overscan */
-+ return TRUE;
-+}
-+
-+static bool i830_translate_dtd2timing(struct drm_display_mode * pTimingInfo,
-+ struct intel_sdvo_dtd *pDTD)
-+{
-+
-+ u32 dwHBLHigh = 0;
-+ u32 dwVBLHigh = 0;
-+ u32 dwHSHigh1 = 0;
-+ u32 dwHSHigh2 = 0;
-+ u32 dwVSHigh1 = 0;
-+ u32 dwVSHigh2 = 0;
-+ u32 dwVPWLow = 0;
-+ bool status = FALSE;
-+
-+ if ((pDTD == NULL) || (pTimingInfo == NULL)) {
-+ return status;
-+ }
-+
-+ pTimingInfo->clock= pDTD->part1.clock * 10000 / 1000; /*fix me if i am wrong */
-+
-+ pTimingInfo->hdisplay = pTimingInfo->crtc_hdisplay =
-+ (u32) pDTD->part1.
-+ h_active | ((u32) (pDTD->part1.h_high & 0xF0) << 4);
-+
-+ pTimingInfo->vdisplay = pTimingInfo->crtc_vdisplay =
-+ (u32) pDTD->part1.
-+ v_active | ((u32) (pDTD->part1.v_high & 0xF0) << 4);
-+
-+ pTimingInfo->crtc_hblank_start = pTimingInfo->crtc_hdisplay;
-+
-+ /* Horizontal Total = Horizontal Active + Horizontal Blanking */
-+ dwHBLHigh = (u32) (pDTD->part1.h_high & 0x0F);
-+ pTimingInfo->htotal = pTimingInfo->crtc_htotal =
-+ pTimingInfo->crtc_hdisplay + (u32) pDTD->part1.h_blank +
-+ (dwHBLHigh << 8);
-+
-+ pTimingInfo->crtc_hblank_end = pTimingInfo->crtc_htotal - 1;
-+
-+ /* Vertical Total = Vertical Active + Vertical Blanking */
-+ dwVBLHigh = (u32) (pDTD->part1.v_high & 0x0F);
-+ pTimingInfo->vtotal = pTimingInfo->crtc_vtotal =
-+ pTimingInfo->crtc_vdisplay + (u32) pDTD->part1.v_blank +
-+ (dwVBLHigh << 8);
-+ pTimingInfo->crtc_vblank_start = pTimingInfo->crtc_vdisplay;
-+ pTimingInfo->crtc_vblank_end = pTimingInfo->crtc_vtotal - 1;
-+
-+ /* Horz Sync Start = Horz Blank Start + Horz Sync Offset */
-+ dwHSHigh1 = (u32) (pDTD->part2.sync_off_width_high & 0xC0);
-+ pTimingInfo->hsync_start = pTimingInfo->crtc_hsync_start =
-+ pTimingInfo->crtc_hblank_start + (u32) pDTD->part2.h_sync_off +
-+ (dwHSHigh1 << 2);
-+
-+ /* Horz Sync End = Horz Sync Start + Horz Sync Pulse Width */
-+ dwHSHigh2 = (u32) (pDTD->part2.sync_off_width_high & 0x30);
-+ pTimingInfo->hsync_end = pTimingInfo->crtc_hsync_end =
-+ pTimingInfo->crtc_hsync_start + (u32) pDTD->part2.h_sync_width +
-+ (dwHSHigh2 << 4) - 1;
-+
-+ /* Vert Sync Start = Vert Blank Start + Vert Sync Offset */
-+ dwVSHigh1 = (u32) (pDTD->part2.sync_off_width_high & 0x0C);
-+ dwVPWLow = (u32) (pDTD->part2.v_sync_off_width & 0xF0);
-+
-+ pTimingInfo->vsync_start = pTimingInfo->crtc_vsync_start =
-+ pTimingInfo->crtc_vblank_start + (dwVPWLow >> 4) + (dwVSHigh1 << 2);
-+
-+ /* Vert Sync End = Vert Sync Start + Vert Sync Pulse Width */
-+ dwVSHigh2 = (u32) (pDTD->part2.sync_off_width_high & 0x03);
-+ pTimingInfo->vsync_end = pTimingInfo->crtc_vsync_end =
-+ pTimingInfo->crtc_vsync_start +
-+ (u32) (pDTD->part2.v_sync_off_width & 0x0F) + (dwVSHigh2 << 4) - 1;
-+
-+ /* Fillup flags */
-+ status = TRUE;
-+
-+ return status;
-+}
-+
-+static void i830_translate_timing2dtd(struct drm_display_mode * mode, struct intel_sdvo_dtd *dtd)
-+{
-+ u16 width, height;
-+ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
-+ u16 h_sync_offset, v_sync_offset;
-+
-+ width = mode->crtc_hdisplay;
-+ height = mode->crtc_vdisplay;
-+
-+ /* do some mode translations */
-+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
-+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
-+
-+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
-+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
-+
-+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
-+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
-+
-+ dtd->part1.clock = mode->clock * 1000 / 10000; /*xiaolin, fixme, do i need to by 1k hz */
-+ dtd->part1.h_active = width & 0xff;
-+ dtd->part1.h_blank = h_blank_len & 0xff;
-+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
-+ ((h_blank_len >> 8) & 0xf);
-+ dtd->part1.v_active = height & 0xff;
-+ dtd->part1.v_blank = v_blank_len & 0xff;
-+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
-+ ((v_blank_len >> 8) & 0xf);
-+
-+ dtd->part2.h_sync_off = h_sync_offset;
-+ dtd->part2.h_sync_width = h_sync_len & 0xff;
-+ dtd->part2.v_sync_off_width = ((v_sync_offset & 0xf) << 4 |
-+ (v_sync_len & 0xf)) + 1;
-+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
-+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
-+ ((v_sync_len & 0x30) >> 4);
-+
-+ dtd->part2.dtd_flags = 0x18;
-+ if (mode->flags & V_PHSYNC)
-+ dtd->part2.dtd_flags |= 0x2;
-+ if (mode->flags & V_PVSYNC)
-+ dtd->part2.dtd_flags |= 0x4;
-+
-+ dtd->part2.sdvo_flags = 0;
-+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
-+ dtd->part2.reserved = 0;
-+
-+}
-+
-+static bool i830_tv_set_target_io(struct drm_output* output)
-+{
-+ bool status;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ status = intel_sdvo_set_target_input(output, TRUE, FALSE);
-+ if (status)
-+ status = intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
-+
-+ return status;
-+}
-+
-+static bool i830_tv_get_max_min_dotclock(struct drm_output* output)
-+{
-+ u32 dwMaxClkRateMul = 1;
-+ u32 dwMinClkRateMul = 1;
-+ u8 status;
-+
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ /* Set Target Input/Outputs */
-+ status = i830_tv_set_target_io(output);
-+ if (!status) {
-+ DRM_DEBUG("SetTargetIO function FAILED!!! \n");
-+ return status;
-+ }
-+
-+ /* Get the clock rate multiplies supported by the encoder */
-+ dwMinClkRateMul = 1;
-+#if 0
-+ /* why we need do this, some time, tv can't bring up for the wrong setting in the last time */
-+ dwClkRateMulMask = i830_sdvo_get_clock_rate_mult(output);
-+
-+ /* Find the minimum clock rate multiplier supported */
-+
-+ if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_1X)
-+ dwMinClkRateMul = 1;
-+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_2X)
-+ dwMinClkRateMul = 2;
-+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_3X)
-+ dwMinClkRateMul = 3;
-+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_4X)
-+ dwMinClkRateMul = 4;
-+ else if (dwClkRateMulMask & SDVO_CLOCK_RATE_MULT_5X)
-+ dwMinClkRateMul = 5;
-+ else
-+ return FALSE;
-+#endif
-+ /* Get the min and max input Dot Clock supported by the encoder */
-+ status = i830_sdvo_get_input_output_pixelclock_range(output, FALSE); /* input */
-+
-+ if (!status) {
-+ DRM_DEBUG("SDVOGetInputPixelClockRange() FAILED!!! \n");
-+ return status;
-+ }
-+
-+ /* Get the min and max output Dot Clock supported by the encoder */
-+ status = i830_sdvo_get_input_output_pixelclock_range(output, TRUE); /* output */
-+
-+ if (!status) {
-+ DRM_DEBUG("SDVOGetOutputPixelClockRange() FAILED!!! \n");
-+ return status;
-+ }
-+
-+ /* Maximum Dot Clock supported should be the minimum of the maximum */
-+ /* dot clock supported by the encoder & the SDVO bus clock rate */
-+ sdvo_priv->dwMaxDotClk =
-+ ((sdvo_priv->dwMaxInDotClk * dwMaxClkRateMul) <
-+ (sdvo_priv->dwMaxOutDotClk)) ? (sdvo_priv->dwMaxInDotClk *
-+ dwMaxClkRateMul) : (sdvo_priv->dwMaxOutDotClk);
-+
-+ /* Minimum Dot Clock supported should be the maximum of the minimum */
-+ /* dot clocks supported by the input & output */
-+ sdvo_priv->dwMinDotClk =
-+ ((sdvo_priv->dwMinInDotClk * dwMinClkRateMul) >
-+ (sdvo_priv->dwMinOutDotClk)) ? (sdvo_priv->dwMinInDotClk *
-+ dwMinClkRateMul) : (sdvo_priv->dwMinOutDotClk);
-+
-+ DRM_DEBUG("leave, i830_tv_get_max_min_dotclock() !!! \n");
-+
-+ return TRUE;
-+
-+}
-+
-+bool i830_tv_mode_check_support(struct drm_output* output, struct drm_display_mode* pMode)
-+{
-+ u32 dwDotClk = 0;
-+ bool status;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ dwDotClk = pMode->clock * 1000;
-+
-+ /*TODO: Need to fix this from SoftBios side........ */
-+ if (sdvo_priv->TVMode == TVMODE_HDTV) {
-+ if (((pMode->hdisplay == 1920) && (pMode->vdisplay== 1080)) ||
-+ ((pMode->hdisplay== 1864) && (pMode->vdisplay== 1050)) ||
-+ ((pMode->hdisplay== 1704) && (pMode->vdisplay== 960)) ||
-+ ((pMode->hdisplay== 640) && (pMode->vdisplay== 448)))
-+ return true;
-+ }
-+
-+ if (sdvo_priv->bGetClk) {
-+ status = i830_tv_get_max_min_dotclock(output);
-+ if (!status) {
-+ DRM_DEBUG("get max min dotclok failed\n");
-+ return status;
-+ }
-+ sdvo_priv->bGetClk = false;
-+ }
-+
-+ /* Check the Dot clock first. If the requested Dot Clock should fall */
-+ /* in the supported range for the mode to be supported */
-+ if ((dwDotClk <= sdvo_priv->dwMinDotClk) || (dwDotClk >= sdvo_priv->dwMaxDotClk)) {
-+ DRM_DEBUG("dwDotClk value is out of range\n");
-+ /*TODO: now consider VBT add and Remove mode. */
-+ /* This mode can't be supported */
-+ return false;
-+ }
-+ DRM_DEBUG("i830_tv_mode_check_support leave\n");
-+ return true;
-+
-+}
-+
-+void print_Pll(char *prefix, ex_intel_clock_t * clock)
-+{
-+ DRM_DEBUG("%s: dotclock %d vco %d ((m %d, m1 %d, m2 %d), n %d, (p %d, p1 %d, p2 %d))\n",
-+ prefix, clock->dot, clock->vco, clock->m, clock->m1, clock->m2,
-+ clock->n, clock->p, clock->p1, clock->p2);
-+}
-+
-+extern int intel_panel_fitter_pipe (struct drm_device *dev);
-+extern int intel_get_core_clock_speed(struct drm_device *dev);
-+
-+void i830_sdvo_tv_settiming(struct drm_crtc *crtc, struct drm_display_mode * mode,
-+ struct drm_display_mode * adjusted_mode)
-+{
-+
-+ struct drm_device *dev = crtc->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+
-+ int pipe = 0;
-+ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
-+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
-+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
-+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
-+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
-+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
-+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
-+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
-+ int dspstride_reg = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
-+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
-+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
-+ ex_intel_clock_t clock;
-+ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
-+ bool ok, is_sdvo = FALSE;
-+ int centerX = 0, centerY = 0;
-+ u32 ulPortMultiplier, ulTemp, ulDotClock;
-+ int sdvo_pixel_multiply;
-+ u32 dotclock;
-+
-+ /* Set up some convenient bools for what outputs are connected to
-+ * our pipe, used in DPLL setup.
-+ */
-+ if (!crtc->fb) {
-+ DRM_ERROR("Can't set mode without attached fb\n");
-+ return;
-+ }
-+ is_sdvo = TRUE;
-+ ok = TRUE;
-+ ulDotClock = mode->clock * 1000 / 1000; /*xiaolin, fixme, do i need to by 1k hz */
-+ for (ulPortMultiplier = 1; ulPortMultiplier <= 5; ulPortMultiplier++) {
-+ ulTemp = ulDotClock * ulPortMultiplier;
-+ if ((ulTemp >= 100000) && (ulTemp <= 200000)) {
-+ if ((ulPortMultiplier == 3) || (ulPortMultiplier == 5))
-+ continue;
-+ else
-+ break;
-+ }
-+ }
-+ /* ulPortMultiplier is 2, dotclok is 1babc, fall into the first one case */
-+ /* add two to each m and n value -- optimizes (slightly) the search algo. */
-+ dotclock = ulPortMultiplier * (mode->clock * 1000) / 1000;
-+ DRM_DEBUG("mode->clock is %x, dotclock is %x,!\n", mode->clock,dotclock);
-+
-+ if ((dotclock >= 100000) && (dotclock < 140500)) {
-+ DRM_DEBUG("dotclock is between 10000 and 140500!\n");
-+ clock.p1 = 0x2;
-+ clock.p2 = 0x00;
-+ clock.n = 0x3;
-+ clock.m1 = 0x10;
-+ clock.m2 = 0x8;
-+ } else if ((dotclock >= 140500) && (dotclock <= 200000)) {
-+
-+ DRM_DEBUG("dotclock is between 140500 and 200000!\n");
-+ clock.p1 = 0x1;
-+ /*CG was using 0x10 from spreadsheet it should be 0 */
-+ /*pClock_Data->Clk_P2 = 0x10; */
-+ clock.p2 = 0x00;
-+ clock.n = 0x6;
-+ clock.m1 = 0xC;
-+ clock.m2 = 0x8;
-+ } else
-+ ok = FALSE;
-+
-+ if (!ok)
-+ DRM_DEBUG("Couldn't find PLL settings for mode!\n");
-+
-+ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-+
-+ dpll = DPLL_VGA_MODE_DIS | DPLL_CLOCK_PHASE_9;
-+
-+ dpll |= DPLLB_MODE_DAC_SERIAL;
-+
-+ sdvo_pixel_multiply = ulPortMultiplier;
-+ dpll |= DPLL_DVO_HIGH_SPEED;
-+ dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
-+
-+ /* compute bitmask from p1 value */
-+ dpll |= (clock.p1 << 16);
-+ dpll |= (clock.p2 << 24);
-+
-+ dpll |= PLL_REF_INPUT_TVCLKINBC;
-+
-+ /* Set up the display plane register */
-+ dspcntr = DISPPLANE_GAMMA_ENABLE;
-+ switch (crtc->fb->bits_per_pixel) {
-+ case 8:
-+ dspcntr |= DISPPLANE_8BPP;
-+ break;
-+ case 16:
-+ if (crtc->fb->depth == 15)
-+ dspcntr |= DISPPLANE_15_16BPP;
-+ else
-+ dspcntr |= DISPPLANE_16BPP;
-+ break;
-+ case 32:
-+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-+ break;
-+ default:
-+ DRM_DEBUG("unknown display bpp\n");
-+ }
-+
-+ if (pipe == 0)
-+ dspcntr |= DISPPLANE_SEL_PIPE_A;
-+ else
-+ dspcntr |= DISPPLANE_SEL_PIPE_B;
-+
-+ pipeconf = I915_READ(pipeconf_reg);
-+ if (pipe == 0) {
-+ /* Enable pixel doubling when the dot clock is > 90% of the (display)
-+ * core speed.
-+ *
-+ * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
-+ * pipe == 0 check?
-+ */
-+ if (mode->clock * 1000 > (intel_get_core_clock_speed(dev)) * 9 / 10) /*xiaolin, fixme, do i need to by 1k hz */
-+ { pipeconf |= PIPEACONF_DOUBLE_WIDE; DRM_DEBUG("PIPEACONF_DOUBLE_WIDE\n");}
-+ else
-+ { pipeconf &= ~PIPEACONF_DOUBLE_WIDE; DRM_DEBUG("non PIPEACONF_DOUBLE_WIDE\n");}
-+ }
-+
-+ dspcntr |= DISPLAY_PLANE_ENABLE;
-+ pipeconf |= PIPEACONF_ENABLE;
-+ dpll |= DPLL_VCO_ENABLE;
-+
-+ /* Disable the panel fitter if it was on our pipe */
-+ if (intel_panel_fitter_pipe(dev) == pipe)
-+ I915_WRITE(PFIT_CONTROL, 0);
-+
-+ print_Pll("chosen", &clock);
-+ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
-+ drm_mode_debug_printmodeline(dev, mode);
-+ DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d\n",
-+ mode->mode_id, mode->name, mode->crtc_htotal, mode->crtc_hdisplay,
-+ mode->crtc_hblank_end, mode->crtc_hblank_start,
-+ mode->crtc_vtotal, mode->crtc_vdisplay,
-+ mode->crtc_vblank_end, mode->crtc_vblank_start);
-+ DRM_DEBUG("clock regs: 0x%08x, 0x%08x,dspntr is 0x%8x, pipeconf is 0x%8x\n", (int)dpll,
-+ (int)fp,(int)dspcntr,(int)pipeconf);
-+
-+ if (dpll & DPLL_VCO_ENABLE) {
-+ I915_WRITE(fp_reg, fp);
-+ I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
-+ (void)I915_READ(dpll_reg);
-+ udelay(150);
-+ }
-+ I915_WRITE(fp_reg, fp);
-+ I915_WRITE(dpll_reg, dpll);
-+ (void)I915_READ(dpll_reg);
-+ /* Wait for the clocks to stabilize. */
-+ udelay(150);
-+
-+ /* write it again -- the BIOS does, after all */
-+ I915_WRITE(dpll_reg, dpll);
-+ I915_READ(dpll_reg);
-+ /* Wait for the clocks to stabilize. */
-+ udelay(150);
-+
-+ I915_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
-+ ((mode->crtc_htotal - 1) << 16));
-+ I915_WRITE(hblank_reg, (mode->crtc_hblank_start - 1) |
-+ ((mode->crtc_hblank_end - 1) << 16));
-+ I915_WRITE(hsync_reg, (mode->crtc_hsync_start - 1) |
-+ ((mode->crtc_hsync_end - 1) << 16));
-+ I915_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
-+ ((mode->crtc_vtotal - 1) << 16));
-+ I915_WRITE(vblank_reg, (mode->crtc_vblank_start - 1) |
-+ ((mode->crtc_vblank_end - 1) << 16));
-+ I915_WRITE(vsync_reg, (mode->crtc_vsync_start - 1) |
-+ ((mode->crtc_vsync_end - 1) << 16));
-+ I915_WRITE(dspstride_reg, crtc->fb->pitch);
-+
-+ if (0) {
-+
-+ centerX = (adjusted_mode->crtc_hdisplay - mode->hdisplay) / 2;
-+ centerY = (adjusted_mode->crtc_vdisplay - mode->vdisplay) / 2;
-+ I915_WRITE(dspsize_reg,
-+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
-+
-+ I915_WRITE(dsppos_reg, centerY << 16 | centerX);
-+ I915_WRITE(pipesrc_reg,
-+ ((adjusted_mode->crtc_hdisplay -
-+ 1) << 16) | (adjusted_mode->crtc_vdisplay - 1));
-+ } else {
-+ /* pipesrc and dspsize control the size that is scaled from, which should
-+ * always be the user's requested size.
-+ */
-+ I915_WRITE(dspsize_reg,
-+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
-+ I915_WRITE(dsppos_reg, 0);
-+ I915_WRITE(pipesrc_reg,
-+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
-+
-+ }
-+ I915_WRITE(pipeconf_reg, pipeconf);
-+ I915_READ(pipeconf_reg);
-+
-+ intel_wait_for_vblank(dev);
-+
-+ I915_WRITE(dspcntr_reg, dspcntr);
-+ /* Flush the plane changes */
-+ //intel_pipe_set_base(crtc, 0, 0);
-+ /* Disable the VGA plane that we never use */
-+ //I915_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-+ //intel_wait_for_vblank(dev);
-+
-+}
-+
-+static void intel_sdvo_mode_set(struct drm_output *output,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct drm_crtc *crtc = output->crtc;
-+ struct intel_crtc *intel_crtc = crtc->driver_private;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ u32 sdvox;
-+ struct intel_sdvo_dtd output_dtd;
-+ int sdvo_pixel_multiply;
-+ bool success;
-+ struct drm_display_mode * save_mode;
-+ DRM_DEBUG("xxintel_sdvo_mode_set\n");
-+
-+ if (!mode)
-+ return;
-+
-+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
-+ if (!i830_tv_mode_check_support(output, mode)) {
-+ DRM_DEBUG("mode setting failed, use the forced mode\n");
-+ mode = &tv_modes[0].mode_entry;
-+ drm_mode_set_crtcinfo(mode, 0);
-+ }
-+ }
-+ save_mode = mode;
-+#if 0
-+ width = mode->crtc_hdisplay;
-+ height = mode->crtc_vdisplay;
-+
-+ /* do some mode translations */
-+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
-+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
-+
-+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
-+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
-+
-+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
-+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
-+
-+ output_dtd.part1.clock = mode->clock / 10;
-+ output_dtd.part1.h_active = width & 0xff;
-+ output_dtd.part1.h_blank = h_blank_len & 0xff;
-+ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
-+ ((h_blank_len >> 8) & 0xf);
-+ output_dtd.part1.v_active = height & 0xff;
-+ output_dtd.part1.v_blank = v_blank_len & 0xff;
-+ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
-+ ((v_blank_len >> 8) & 0xf);
-+
-+ output_dtd.part2.h_sync_off = h_sync_offset;
-+ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
-+ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
-+ (v_sync_len & 0xf);
-+ output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
-+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
-+ ((v_sync_len & 0x30) >> 4);
-+
-+ output_dtd.part2.dtd_flags = 0x18;
-+ if (mode->flags & V_PHSYNC)
-+ output_dtd.part2.dtd_flags |= 0x2;
-+ if (mode->flags & V_PVSYNC)
-+ output_dtd.part2.dtd_flags |= 0x4;
-+
-+ output_dtd.part2.sdvo_flags = 0;
-+ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
-+ output_dtd.part2.reserved = 0;
-+#else
-+ /* disable and enable the display output */
-+ intel_sdvo_set_target_output(output, 0);
-+
-+ //intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);
-+ memset(&output_dtd, 0, sizeof(struct intel_sdvo_dtd));
-+ /* check if this mode can be supported or not */
-+
-+ i830_translate_timing2dtd(mode, &output_dtd);
-+#endif
-+ intel_sdvo_set_target_output(output, 0);
-+ /* set the target input & output first */
-+ /* Set the input timing to the screen. Assume always input 0. */
-+ intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
-+ intel_sdvo_set_output_timing(output, &output_dtd);
-+ intel_sdvo_set_target_input(output, true, false);
-+
-+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
-+ i830_tv_set_overscan_parameters(output);
-+ /* Set TV standard */
-+ #if 0
-+ if (sdvo_priv->TVMode == TVMODE_HDTV)
-+ i830_sdvo_map_hdtvstd_bitmask(output);
-+ else
-+ i830_sdvo_map_sdtvstd_bitmask(output);
-+ #endif
-+ /* Set TV format */
-+ i830_sdvo_set_tvoutputs_formats(output);
-+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
-+ * provide the device with a timing it can support, if it supports that
-+ * feature. However, presumably we would need to adjust the CRTC to output
-+ * the preferred timing, and we don't support that currently.
-+ */
-+ success = i830_sdvo_create_preferred_input_timing(output, mode);
-+ if (success) {
-+ i830_sdvo_get_preferred_input_timing(output, &output_dtd);
-+ }
-+ /* Set the overscan values now as input timing is dependent on overscan values */
-+
-+ }
-+
-+
-+ /* We would like to use i830_sdvo_create_preferred_input_timing() to
-+ * provide the device with a timing it can support, if it supports that
-+ * feature. However, presumably we would need to adjust the CRTC to
-+ * output the preferred timing, and we don't support that currently.
-+ */
-+#if 0
-+ success = intel_sdvo_create_preferred_input_timing(output, clock,
-+ width, height);
-+ if (success) {
-+ struct intel_sdvo_dtd *input_dtd;
-+
-+ intel_sdvo_get_preferred_input_timing(output, &input_dtd);
-+ intel_sdvo_set_input_timing(output, &input_dtd);
-+ }
-+#else
-+ /* Set input timing (in DTD) */
-+ intel_sdvo_set_input_timing(output, &output_dtd);
-+#endif
-+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
-+
-+ DRM_DEBUG("xxintel_sdvo_mode_set tv path\n");
-+ i830_tv_program_display_params(output);
-+ /* translate dtd 2 timing */
-+ i830_translate_dtd2timing(mode, &output_dtd);
-+ /* Program clock rate multiplier, 2x,clock is = 0x360b730 */
-+ if ((mode->clock * 1000 >= 24000000)
-+ && (mode->clock * 1000 < 50000000)) {
-+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_4X);
-+ } else if ((mode->clock * 1000 >= 50000000)
-+ && (mode->clock * 1000 < 100000000)) {
-+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_2X);
-+ } else if ((mode->clock * 1000 >= 100000000)
-+ && (mode->clock * 1000 < 200000000)) {
-+ intel_sdvo_set_clock_rate_mult(output, SDVO_CLOCK_RATE_MULT_1X);
-+ } else
-+ DRM_DEBUG("i830_sdvo_set_clock_rate is failed\n");
-+
-+ i830_sdvo_tv_settiming(output->crtc, mode, adjusted_mode);
-+ //intel_crtc_mode_set(output->crtc, mode,adjusted_mode,0,0);
-+ mode = save_mode;
-+ } else {
-+ DRM_DEBUG("xxintel_sdvo_mode_set - non tv path\n");
-+ switch (intel_sdvo_get_pixel_multiplier(mode)) {
-+ case 1:
-+ intel_sdvo_set_clock_rate_mult(output,
-+ SDVO_CLOCK_RATE_MULT_1X);
-+ break;
-+ case 2:
-+ intel_sdvo_set_clock_rate_mult(output,
-+ SDVO_CLOCK_RATE_MULT_2X);
-+ break;
-+ case 4:
-+ intel_sdvo_set_clock_rate_mult(output,
-+ SDVO_CLOCK_RATE_MULT_4X);
-+ break;
-+ }
-+ }
-+ /* Set the SDVO control regs. */
-+ if (0/*IS_I965GM(dev)*/) {
-+ sdvox = SDVO_BORDER_ENABLE;
-+ } else {
-+ sdvox = I915_READ(sdvo_priv->output_device);
-+ switch (sdvo_priv->output_device) {
-+ case SDVOB:
-+ sdvox &= SDVOB_PRESERVE_MASK;
-+ break;
-+ case SDVOC:
-+ sdvox &= SDVOC_PRESERVE_MASK;
-+ break;
-+ }
-+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
-+ }
-+ if (intel_crtc->pipe == 1)
-+ sdvox |= SDVO_PIPE_B_SELECT;
-+
-+ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
-+ if (IS_I965G(dev)) {
-+ /* done in crtc_mode_set as the dpll_md reg must be written
-+ early */
-+ } else if (IS_POULSBO(dev) || IS_I945G(dev) || IS_I945GM(dev)) {
-+ /* done in crtc_mode_set as it lives inside the
-+ dpll register */
-+ } else {
-+ sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
-+ }
-+
-+ intel_sdvo_write_sdvox(output, sdvox);
-+ i830_sdvo_set_iomap(output);
-+}
-+
-+static void intel_sdvo_dpms(struct drm_output *output, int mode)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ u32 temp;
-+
-+ DRM_DEBUG("xxintel_sdvo_dpms, dpms mode is %d, active output is %d\n",mode,sdvo_priv->active_outputs);
-+
-+#ifdef SII_1392_WA
-+ if((SII_1392==1) && (drm_psb_no_fb ==1)) {
-+ DRM_DEBUG("don't touch 1392 card when no_fb=1\n");
-+ return;
-+ }
-+#endif
-+
-+ if (mode != DPMSModeOn) {
-+ intel_sdvo_set_active_outputs(output, sdvo_priv->output_device);
-+ if (0)
-+ intel_sdvo_set_encoder_power_state(output, mode);
-+
-+ if (mode == DPMSModeOff) {
-+ temp = I915_READ(sdvo_priv->output_device);
-+ if ((temp & SDVO_ENABLE) != 0) {
-+ intel_sdvo_write_sdvox(output, temp & ~SDVO_ENABLE);
-+ }
-+ }
-+ } else {
-+ bool input1, input2;
-+ int i;
-+ u8 status;
-+
-+ temp = I915_READ(sdvo_priv->output_device);
-+ if ((temp & SDVO_ENABLE) == 0)
-+ intel_sdvo_write_sdvox(output, temp | SDVO_ENABLE);
-+ for (i = 0; i < 2; i++)
-+ intel_wait_for_vblank(dev);
-+
-+ status = intel_sdvo_get_trained_inputs(output, &input1,
-+ &input2);
-+
-+
-+ /* Warn if the device reported failure to sync.
-+ * A lot of SDVO devices fail to notify of sync, but it's
-+ * a given it the status is a success, we succeeded.
-+ */
-+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
-+ DRM_DEBUG("First %s output reported failure to sync\n",
-+ SDVO_NAME(sdvo_priv));
-+ }
-+
-+ if (0)
-+ intel_sdvo_set_encoder_power_state(output, mode);
-+
-+ DRM_DEBUG("xiaolin active output is %d\n",sdvo_priv->active_outputs);
-+ intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);
-+ }
-+ return;
-+}
-+
-+static void intel_sdvo_save(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ DRM_DEBUG("xxintel_sdvo_save\n");
-+
-+ sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(output);
-+ intel_sdvo_get_active_outputs(output, &sdvo_priv->save_active_outputs);
-+
-+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
-+ intel_sdvo_set_target_input(output, true, false);
-+ intel_sdvo_get_input_timing(output,
-+ &sdvo_priv->save_input_dtd_1);
-+ }
-+
-+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
-+ intel_sdvo_set_target_input(output, false, true);
-+ intel_sdvo_get_input_timing(output,
-+ &sdvo_priv->save_input_dtd_2);
-+ }
-+
-+ intel_sdvo_set_target_output(output, sdvo_priv->active_outputs);
-+ intel_sdvo_get_output_timing(output,
-+ &sdvo_priv->save_output_dtd[sdvo_priv->active_outputs]);
-+ sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
-+}
-+
-+static void intel_sdvo_restore(struct drm_output *output)
-+{
-+ struct drm_device *dev = output->dev;
-+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ int i;
-+ bool input1, input2;
-+ u8 status;
-+ DRM_DEBUG("xxintel_sdvo_restore\n");
-+
-+ intel_sdvo_set_active_outputs(output, 0);
-+
-+ intel_sdvo_set_target_output(output, sdvo_priv->save_active_outputs);
-+ intel_sdvo_set_output_timing(output,
-+ &sdvo_priv->save_output_dtd[sdvo_priv->save_active_outputs]);
-+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
-+ intel_sdvo_set_target_input(output, true, false);
-+ intel_sdvo_set_input_timing(output, &sdvo_priv->save_input_dtd_1);
-+ }
-+
-+ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
-+ intel_sdvo_set_target_input(output, false, true);
-+ intel_sdvo_set_input_timing(output, &sdvo_priv->save_input_dtd_2);
-+ }
-+
-+ intel_sdvo_set_clock_rate_mult(output, sdvo_priv->save_sdvo_mult);
-+
-+ I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
-+
-+ if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
-+ {
-+ for (i = 0; i < 2; i++)
-+ intel_wait_for_vblank(dev);
-+ status = intel_sdvo_get_trained_inputs(output, &input1, &input2);
-+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
-+ DRM_DEBUG("First %s output reported failure to sync\n",
-+ SDVO_NAME(sdvo_priv));
-+ }
-+
-+ i830_sdvo_set_iomap(output);
-+ intel_sdvo_set_active_outputs(output, sdvo_priv->save_active_outputs);
-+}
-+
-+static bool i830_tv_mode_find(struct drm_output * output,struct drm_display_mode * pMode)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ bool find = FALSE;
-+ int i;
-+
-+ DRM_DEBUG("i830_tv_mode_find,0x%x\n", sdvo_priv->TVStandard);
-+
-+ for (i = 0; i < NUM_TV_MODES; i++)
-+ {
-+ const tv_mode_t *tv_mode = &tv_modes[i];
-+ if (strcmp (tv_mode->mode_entry.name, pMode->name) == 0
-+ && (pMode->type & M_T_TV)) {
-+ find = TRUE;
-+ break;
-+ }
-+ }
-+ return find;
-+}
-+
-+
-+static int intel_sdvo_mode_valid(struct drm_output *output,
-+ struct drm_display_mode *mode)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ bool status = TRUE;
-+ DRM_DEBUG("xxintel_sdvo_mode_valid\n");
-+
-+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
-+ status = i830_tv_mode_check_support(output, mode);
-+ if (status) {
-+ if(i830_tv_mode_find(output,mode)) {
-+ DRM_DEBUG("%s is ok\n", mode->name);
-+ return MODE_OK;
-+ }
-+ else
-+ return MODE_CLOCK_RANGE;
-+ } else {
-+ DRM_DEBUG("%s is failed\n",
-+ mode->name);
-+ return MODE_CLOCK_RANGE;
-+ }
-+ }
-+
-+ if (mode->flags & V_DBLSCAN)
-+ return MODE_NO_DBLESCAN;
-+
-+ if (sdvo_priv->pixel_clock_min > mode->clock)
-+ return MODE_CLOCK_LOW;
-+
-+ if (sdvo_priv->pixel_clock_max < mode->clock)
-+ return MODE_CLOCK_HIGH;
-+
-+ return MODE_OK;
-+}
-+
-+static bool intel_sdvo_get_capabilities(struct drm_output *output, struct intel_sdvo_caps *caps)
-+{
-+ u8 status;
-+
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
-+ status = intel_sdvo_read_response(output, caps, sizeof(*caps));
-+ if (status != SDVO_CMD_STATUS_SUCCESS)
-+ return false;
-+
-+ return true;
-+}
-+
-+void i830_tv_get_default_params(struct drm_output * output)
-+{
-+ u32 dwSupportedSDTVBitMask = 0;
-+ u32 dwSupportedHDTVBitMask = 0;
-+ u32 dwTVStdBitmask = 0;
-+
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+
-+ /* Get supported TV Standard */
-+ i830_sdvo_get_supported_tvoutput_formats(output, &dwSupportedSDTVBitMask,
-+ &dwSupportedHDTVBitMask,&dwTVStdBitmask);
-+
-+ sdvo_priv->dwSDVOSDTVBitMask = dwSupportedSDTVBitMask;
-+ sdvo_priv->dwSDVOHDTVBitMask = dwSupportedHDTVBitMask;
-+ sdvo_priv->TVStdBitmask = dwTVStdBitmask;
-+
-+}
-+
-+static enum drm_output_status intel_sdvo_detect(struct drm_output *output)
-+{
-+ u8 response[2];
-+ u8 status;
-+ u8 count = 5;
-+
-+ char deviceName[256];
-+ char *name_suffix;
-+ char *name_prefix;
-+ unsigned char bytes[2];
-+
-+ struct drm_device *dev = output->dev;
-+
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ DRM_DEBUG("xxintel_sdvo_detect\n");
-+ intel_sdvo_dpms(output, DPMSModeOn);
-+
-+ if (!intel_sdvo_get_capabilities(output, &sdvo_priv->caps)) {
-+ /*No SDVO support, power down the pipe */
-+ intel_sdvo_dpms(output, DPMSModeOff);
-+ return output_status_disconnected;
-+ }
-+
-+#ifdef SII_1392_WA
-+ if ((sdvo_priv->caps.vendor_id == 0x04) && (sdvo_priv->caps.device_id==0xAE)){
-+ /*Leave the control of 1392 to X server*/
-+ SII_1392=1;
-+ printk("%s: detect 1392 card, leave the setting to up level\n", __FUNCTION__);
-+ if (drm_psb_no_fb == 0)
-+ intel_sdvo_dpms(output, DPMSModeOff);
-+ return output_status_disconnected;
-+ }
-+#endif
-+ while (count--) {
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
-+ status = intel_sdvo_read_response(output, &response, 2);
-+
-+ if(count >3 && status == SDVO_CMD_STATUS_PENDING) {
-+ intel_sdvo_write_cmd(output,SDVO_CMD_RESET,NULL,0);
-+ intel_sdvo_read_response(output, &response, 2);
-+ continue;
-+ }
-+
-+ if ((status != SDVO_CMD_STATUS_SUCCESS) || (response[0] == 0 && response[1] == 0)) {
-+ udelay(500);
-+ continue;
-+ } else
-+ break;
-+ }
-+ if (response[0] != 0 || response[1] != 0) {
-+ /*Check what device types are connected to the hardware CRT/HDTV/S-Video/Composite */
-+ /*in case of CRT and multiple TV's attached give preference in the order mentioned below */
-+ /* 1. RGB */
-+ /* 2. HDTV */
-+ /* 3. S-Video */
-+ /* 4. composite */
-+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "TMDS";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
-+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "TMDS";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
-+ } else if (response[0] & SDVO_OUTPUT_RGB0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "RGB0";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_RGB1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "RGB1";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
-+ } else if (response[0] & SDVO_OUTPUT_YPRPB0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB0;
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_YPRPB1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB1;
-+ }
-+ /* SCART is given Second preference */
-+ else if (response[0] & SDVO_OUTPUT_SCART0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART0;
-+
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SCART1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART1;
-+ }
-+ /* if S-Video type TV is connected along with Composite type TV give preference to S-Video */
-+ else if (response[0] & SDVO_OUTPUT_SVID0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID0;
-+
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SVID1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID1;
-+ }
-+ /* Composite is given least preference */
-+ else if (response[0] & SDVO_OUTPUT_CVBS0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS0;
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_CVBS1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS1;
-+ } else {
-+ DRM_DEBUG("no display attached\n");
-+
-+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
-+ DRM_DEBUG("%s: No active TMDS or RGB outputs (0x%02x%02x) 0x%08x\n",
-+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1],
-+ sdvo_priv->caps.output_flags);
-+ name_prefix = "Unknown";
-+ }
-+
-+ /* init para for TV connector */
-+ if (sdvo_priv->active_outputs & SDVO_OUTPUT_TV0) {
-+ DRM_INFO("TV is attaced\n");
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "TV0";
-+ /* Init TV mode setting para */
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TV;
-+ sdvo_priv->bGetClk = TRUE;
-+ if (sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB0 ||
-+ sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB1) {
-+ /*sdvo_priv->TVStandard = HDTV_SMPTE_274M_1080i60;*/
-+ sdvo_priv->TVMode = TVMODE_HDTV;
-+ } else {
-+ /*sdvo_priv->TVStandard = TVSTANDARD_NTSC_M;*/
-+ sdvo_priv->TVMode = TVMODE_SDTV;
-+ }
-+
-+ /*intel_output->pDevice->TVEnabled = TRUE;*/
-+
-+ i830_tv_get_default_params(output);
-+ /*Init Display parameter for TV */
-+ sdvo_priv->OverScanX.Value = 0xffffffff;
-+ sdvo_priv->OverScanY.Value = 0xffffffff;
-+ sdvo_priv->dispParams.Brightness.Value = 0x80;
-+ sdvo_priv->dispParams.FlickerFilter.Value = 0xffffffff;
-+ sdvo_priv->dispParams.AdaptiveFF.Value = 7;
-+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = 0xffffffff;
-+ sdvo_priv->dispParams.Contrast.Value = 0x40;
-+ sdvo_priv->dispParams.PositionX.Value = 0x200;
-+ sdvo_priv->dispParams.PositionY.Value = 0x200;
-+ sdvo_priv->dispParams.DotCrawl.Value = 1;
-+ sdvo_priv->dispParams.ChromaFilter.Value = 1;
-+ sdvo_priv->dispParams.LumaFilter.Value = 2;
-+ sdvo_priv->dispParams.Sharpness.Value = 4;
-+ sdvo_priv->dispParams.Saturation.Value = 0x45;
-+ sdvo_priv->dispParams.Hue.Value = 0x40;
-+ sdvo_priv->dispParams.Dither.Value = 0;
-+
-+ }
-+ else {
-+ name_prefix = "RGB0";
-+ DRM_INFO("non TV is attaced\n");
-+ }
-+ if (sdvo_priv->output_device == SDVOB) {
-+ name_suffix = "-1";
-+ } else {
-+ name_suffix = "-2";
-+ }
-+
-+ strcpy(deviceName, name_prefix);
-+ strcat(deviceName, name_suffix);
-+
-+ if(output->name && (strcmp(output->name,deviceName) != 0)){
-+ DRM_DEBUG("change the output name to %s\n", deviceName);
-+ if (!drm_output_rename(output, deviceName)) {
-+ drm_output_destroy(output);
-+ return output_status_disconnected;
-+ }
-+
-+ }
-+ i830_sdvo_set_iomap(output);
-+
-+ DRM_INFO("get attached displays=0x%x,0x%x,connectedouputs=0x%x\n",
-+ response[0], response[1], sdvo_priv->active_outputs);
-+ return output_status_connected;
-+ } else {
-+ /*No SDVO display device attached */
-+ intel_sdvo_dpms(output, DPMSModeOff);
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
-+ return output_status_disconnected;
-+ }
-+}
-+
-+static int i830_sdvo_get_tvmode_from_table(struct drm_output *output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+ struct drm_device *dev = output->dev;
-+
-+ int i, modes = 0;
-+
-+ for (i = 0; i < NUM_TV_MODES; i++)
-+ if (((sdvo_priv->TVMode == TVMODE_HDTV) && /*hdtv mode list */
-+ (tv_modes[i].dwSupportedHDTVvss & TVSTANDARD_HDTV_ALL)) ||
-+ ((sdvo_priv->TVMode == TVMODE_SDTV) && /*sdtv mode list */
-+ (tv_modes[i].dwSupportedSDTVvss & TVSTANDARD_SDTV_ALL))) {
-+ struct drm_display_mode *newmode;
-+ newmode = drm_mode_duplicate(dev, &tv_modes[i].mode_entry);
-+ drm_mode_set_crtcinfo(newmode,0);
-+ drm_mode_probed_add(output, newmode);
-+ modes++;
-+ }
-+
-+ return modes;
-+
-+}
-+
-+static int intel_sdvo_get_modes(struct drm_output *output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-+
-+ DRM_DEBUG("xxintel_sdvo_get_modes\n");
-+
-+ if (sdvo_priv->ActiveDevice == SDVO_DEVICE_TV) {
-+ DRM_DEBUG("SDVO_DEVICE_TV\n");
-+ i830_sdvo_get_tvmode_from_table(output);
-+ if (list_empty(&output->probed_modes))
-+ return 0;
-+ return 1;
-+
-+ } else {
-+ /* set the bus switch and get the modes */
-+ intel_sdvo_set_control_bus_switch(output, SDVO_CONTROL_BUS_DDC2);
-+ intel_ddc_get_modes(output);
-+
-+ if (list_empty(&output->probed_modes))
-+ return 0;
-+ return 1;
-+ }
-+#if 0
-+ /* Mac mini hack. On this device, I get DDC through the analog, which
-+ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
-+ * but it does load-detect as connected. So, just steal the DDC bits
-+ * from analog when we fail at finding it the right way.
-+ */
-+ /* TODO */
-+ return NULL;
-+
-+ return NULL;
-+#endif
-+}
-+
-+static void intel_sdvo_destroy(struct drm_output *output)
-+{
-+ struct intel_output *intel_output = output->driver_private;
-+ DRM_DEBUG("xxintel_sdvo_destroy\n");
-+
-+ if (intel_output->i2c_bus)
-+ intel_i2c_destroy(intel_output->i2c_bus);
-+
-+ if (intel_output) {
-+ kfree(intel_output);
-+ output->driver_private = NULL;
-+ }
-+}
-+
-+static const struct drm_output_funcs intel_sdvo_output_funcs = {
-+ .dpms = intel_sdvo_dpms,
-+ .save = intel_sdvo_save,
-+ .restore = intel_sdvo_restore,
-+ .mode_valid = intel_sdvo_mode_valid,
-+ .mode_fixup = intel_sdvo_mode_fixup,
-+ .prepare = intel_output_prepare,
-+ .mode_set = intel_sdvo_mode_set,
-+ .commit = intel_output_commit,
-+ .detect = intel_sdvo_detect,
-+ .get_modes = intel_sdvo_get_modes,
-+ .cleanup = intel_sdvo_destroy
-+};
-+
-+void intel_sdvo_init(struct drm_device *dev, int output_device)
-+{
-+ struct drm_output *output;
-+ struct intel_output *intel_output;
-+ struct intel_sdvo_priv *sdvo_priv;
-+ struct intel_i2c_chan *i2cbus = NULL;
-+ u8 ch[0x40];
-+ int i;
-+ char name[DRM_OUTPUT_LEN];
-+ char *name_prefix;
-+ char *name_suffix;
-+
-+ int count = 3;
-+ u8 response[2];
-+ u8 status;
-+ unsigned char bytes[2];
-+
-+ DRM_DEBUG("xxintel_sdvo_init\n");
-+
-+ if (IS_POULSBO(dev)) {
-+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
-+ u32 sku_value = 0;
-+ bool sku_bSDVOEnable = true;
-+ if(pci_root)
-+ {
-+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
-+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
-+ sku_bSDVOEnable = (sku_value & PCI_PORT5_REG80_SDVO_DISABLE)?false : true;
-+ DRM_INFO("intel_sdvo_init: sku_value is 0x%08x\n", sku_value);
-+ DRM_INFO("intel_sdvo_init: sku_bSDVOEnable is %d\n", sku_bSDVOEnable);
-+ if (sku_bSDVOEnable == false)
-+ return;
-+ }
-+ }
-+
-+ output = drm_output_create(dev, &intel_sdvo_output_funcs, NULL);
-+ if (!output)
-+ return;
-+
-+ intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
-+ if (!intel_output) {
-+ drm_output_destroy(output);
-+ return;
-+ }
-+
-+ sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
-+ intel_output->type = INTEL_OUTPUT_SDVO;
-+ output->driver_private = intel_output;
-+ output->interlace_allowed = 0;
-+ output->doublescan_allowed = 0;
-+
-+ /* setup the DDC bus. */
-+ if (output_device == SDVOB)
-+ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
-+ else
-+ i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
-+
-+ if (i2cbus == NULL) {
-+ drm_output_destroy(output);
-+ return;
-+ }
-+
-+ sdvo_priv->i2c_bus = i2cbus;
-+
-+ if (output_device == SDVOB) {
-+ name_suffix = "-1";
-+ sdvo_priv->i2c_bus->slave_addr = 0x38;
-+ sdvo_priv->byInputWiring = SDVOB_IN0;
-+ } else {
-+ name_suffix = "-2";
-+ sdvo_priv->i2c_bus->slave_addr = 0x39;
-+ }
-+
-+ sdvo_priv->output_device = output_device;
-+ intel_output->i2c_bus = i2cbus;
-+ intel_output->dev_priv = sdvo_priv;
-+
-+
-+ /* Read the regs to test if we can talk to the device */
-+ for (i = 0; i < 0x40; i++) {
-+ if (!intel_sdvo_read_byte(output, i, &ch[i])) {
-+ DRM_DEBUG("No SDVO device found on SDVO%c\n",
-+ output_device == SDVOB ? 'B' : 'C');
-+ drm_output_destroy(output);
-+ return;
-+ }
-+ }
-+
-+ intel_sdvo_get_capabilities(output, &sdvo_priv->caps);
-+
-+#ifdef SII_1392_WA
-+ if ((sdvo_priv->caps.vendor_id == 0x04) && (sdvo_priv->caps.device_id==0xAE)){
-+ /*Leave the control of 1392 to X server*/
-+ SII_1392=1;
-+ printk("%s: detect 1392 card, leave the setting to up level\n", __FUNCTION__);
-+ if (drm_psb_no_fb == 0)
-+ intel_sdvo_dpms(output, DPMSModeOff);
-+ sdvo_priv->active_outputs = 0;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "SDVO";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
-+ strcpy(name, name_prefix);
-+ strcat(name, name_suffix);
-+ if (!drm_output_rename(output, name)) {
-+ drm_output_destroy(output);
-+ return;
-+ }
-+ return;
-+ }
-+#endif
-+ memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs));
-+
-+ while (count--) {
-+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
-+ status = intel_sdvo_read_response(output, &response, 2);
-+
-+ if (status != SDVO_CMD_STATUS_SUCCESS) {
-+ udelay(1000);
-+ continue;
-+ }
-+ if (status == SDVO_CMD_STATUS_SUCCESS)
-+ break;
-+ }
-+ if (response[0] != 0 || response[1] != 0) {
-+ /*Check what device types are connected to the hardware CRT/HDTV/S-Video/Composite */
-+ /*in case of CRT and multiple TV's attached give preference in the order mentioned below */
-+ /* 1. RGB */
-+ /* 2. HDTV */
-+ /* 3. S-Video */
-+ /* 4. composite */
-+ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "TMDS";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
-+ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "TMDS";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TMDS;
-+ } else if (response[0] & SDVO_OUTPUT_RGB0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "RGB0";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_RGB1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "RGB1";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_CRT;
-+ } else if (response[0] & SDVO_OUTPUT_YPRPB0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB0;
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_YPRPB1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_YPRPB1;
-+ }
-+ /* SCART is given Second preference */
-+ else if (response[0] & SDVO_OUTPUT_SCART0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART0;
-+
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SCART1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_SCART1;
-+ }
-+ /* if S-Video type TV is connected along with Composite type TV give preference to S-Video */
-+ else if (response[0] & SDVO_OUTPUT_SVID0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID0;
-+
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_SVID1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_SVID1;
-+ }
-+ /* Composite is given least preference */
-+ else if (response[0] & SDVO_OUTPUT_CVBS0) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS0;
-+ } else if ((response[1] << 8 | response[0]) & SDVO_OUTPUT_CVBS1) {
-+ sdvo_priv->active_outputs = SDVO_OUTPUT_CVBS1;
-+ } else {
-+ DRM_DEBUG("no display attached\n");
-+
-+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
-+ DRM_INFO("%s: No active TMDS or RGB outputs (0x%02x%02x) 0x%08x\n",
-+ SDVO_NAME(sdvo_priv), bytes[0], bytes[1],
-+ sdvo_priv->caps.output_flags);
-+ name_prefix = "Unknown";
-+ }
-+
-+ /* init para for TV connector */
-+ if (sdvo_priv->active_outputs & SDVO_OUTPUT_TV0) {
-+ DRM_INFO("TV is attaced\n");
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "TV0";
-+ /* Init TV mode setting para */
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_TV;
-+ sdvo_priv->bGetClk = TRUE;
-+ if (sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB0 ||
-+ sdvo_priv->active_outputs == SDVO_OUTPUT_YPRPB1) {
-+ sdvo_priv->TVStandard = HDTV_SMPTE_274M_1080i60;
-+ sdvo_priv->TVMode = TVMODE_HDTV;
-+ } else {
-+ sdvo_priv->TVStandard = TVSTANDARD_NTSC_M;
-+ sdvo_priv->TVMode = TVMODE_SDTV;
-+ }
-+ /*intel_output->pDevice->TVEnabled = TRUE;*/
-+ /*Init Display parameter for TV */
-+ sdvo_priv->OverScanX.Value = 0xffffffff;
-+ sdvo_priv->OverScanY.Value = 0xffffffff;
-+ sdvo_priv->dispParams.Brightness.Value = 0x80;
-+ sdvo_priv->dispParams.FlickerFilter.Value = 0xffffffff;
-+ sdvo_priv->dispParams.AdaptiveFF.Value = 7;
-+ sdvo_priv->dispParams.TwoD_FlickerFilter.Value = 0xffffffff;
-+ sdvo_priv->dispParams.Contrast.Value = 0x40;
-+ sdvo_priv->dispParams.PositionX.Value = 0x200;
-+ sdvo_priv->dispParams.PositionY.Value = 0x200;
-+ sdvo_priv->dispParams.DotCrawl.Value = 1;
-+ sdvo_priv->dispParams.ChromaFilter.Value = 1;
-+ sdvo_priv->dispParams.LumaFilter.Value = 2;
-+ sdvo_priv->dispParams.Sharpness.Value = 4;
-+ sdvo_priv->dispParams.Saturation.Value = 0x45;
-+ sdvo_priv->dispParams.Hue.Value = 0x40;
-+ sdvo_priv->dispParams.Dither.Value = 0;
-+ }
-+ else {
-+ name_prefix = "RGB0";
-+ DRM_INFO("non TV is attaced\n");
-+ }
-+
-+ strcpy(name, name_prefix);
-+ strcat(name, name_suffix);
-+ if (!drm_output_rename(output, name)) {
-+ drm_output_destroy(output);
-+ return;
-+ }
-+ } else {
-+ /*No SDVO display device attached */
-+ intel_sdvo_dpms(output, DPMSModeOff);
-+ sdvo_priv->active_outputs = 0;
-+ output->subpixel_order = SubPixelHorizontalRGB;
-+ name_prefix = "SDVO";
-+ sdvo_priv->ActiveDevice = SDVO_DEVICE_NONE;
-+ strcpy(name, name_prefix);
-+ strcat(name, name_suffix);
-+ if (!drm_output_rename(output, name)) {
-+ drm_output_destroy(output);
-+ return;
-+ }
-+
-+ }
-+
-+ /*(void)intel_sdvo_set_active_outputs(output, sdvo_priv->active_outputs);*/
-+
-+ /* Set the input timing to the screen. Assume always input 0. */
-+ intel_sdvo_set_target_input(output, true, false);
-+
-+ intel_sdvo_get_input_pixel_clock_range(output,
-+ &sdvo_priv->pixel_clock_min,
-+ &sdvo_priv->pixel_clock_max);
-+
-+
-+ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
-+ "clock range %dMHz - %dMHz, "
-+ "input 1: %c, input 2: %c, "
-+ "output 1: %c, output 2: %c\n",
-+ SDVO_NAME(sdvo_priv),
-+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
-+ sdvo_priv->caps.device_rev_id,
-+ sdvo_priv->pixel_clock_min / 1000,
-+ sdvo_priv->pixel_clock_max / 1000,
-+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
-+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
-+ /* check currently supported outputs */
-+ sdvo_priv->caps.output_flags &
-+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
-+ sdvo_priv->caps.output_flags &
-+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
-+
-+ intel_output->ddc_bus = i2cbus;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo_regs.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/intel_sdvo_regs.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,580 @@
-+/*
-+ * Copyright ?2006-2007 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ */
-+
-+/**
-+ * @file SDVO command definitions and structures.
-+ */
-+
-+#define SDVO_OUTPUT_FIRST (0)
-+#define SDVO_OUTPUT_TMDS0 (1 << 0)
-+#define SDVO_OUTPUT_RGB0 (1 << 1)
-+#define SDVO_OUTPUT_CVBS0 (1 << 2)
-+#define SDVO_OUTPUT_SVID0 (1 << 3)
-+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
-+#define SDVO_OUTPUT_SCART0 (1 << 5)
-+#define SDVO_OUTPUT_LVDS0 (1 << 6)
-+#define SDVO_OUTPUT_TMDS1 (1 << 8)
-+#define SDVO_OUTPUT_RGB1 (1 << 9)
-+#define SDVO_OUTPUT_CVBS1 (1 << 10)
-+#define SDVO_OUTPUT_SVID1 (1 << 11)
-+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
-+#define SDVO_OUTPUT_SCART1 (1 << 13)
-+#define SDVO_OUTPUT_LVDS1 (1 << 14)
-+#define SDVO_OUTPUT_LAST (14)
-+
-+struct intel_sdvo_caps {
-+ u8 vendor_id;
-+ u8 device_id;
-+ u8 device_rev_id;
-+ u8 sdvo_version_major;
-+ u8 sdvo_version_minor;
-+ unsigned int sdvo_inputs_mask:2;
-+ unsigned int smooth_scaling:1;
-+ unsigned int sharp_scaling:1;
-+ unsigned int up_scaling:1;
-+ unsigned int down_scaling:1;
-+ unsigned int stall_support:1;
-+ unsigned int pad:1;
-+ u16 output_flags;
-+} __attribute__((packed));
-+
-+/** This matches the EDID DTD structure, more or less */
-+struct intel_sdvo_dtd {
-+ struct {
-+ u16 clock; /**< pixel clock, in 10kHz units */
-+ u8 h_active; /**< lower 8 bits (pixels) */
-+ u8 h_blank; /**< lower 8 bits (pixels) */
-+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
-+ u8 v_active; /**< lower 8 bits (lines) */
-+ u8 v_blank; /**< lower 8 bits (lines) */
-+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
-+ } part1;
-+
-+ struct {
-+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
-+ u8 h_sync_width; /**< lower 8 bits (pixels) */
-+ /** lower 4 bits each vsync offset, vsync width */
-+ u8 v_sync_off_width;
-+ /**
-+ * 2 high bits of hsync offset, 2 high bits of hsync width,
-+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
-+ */
-+ u8 sync_off_width_high;
-+ u8 dtd_flags;
-+ u8 sdvo_flags;
-+ /** bits 6-7 of vsync offset at bits 6-7 */
-+ u8 v_sync_off_high;
-+ u8 reserved;
-+ } part2;
-+} __attribute__((packed));
-+
-+struct intel_sdvo_pixel_clock_range {
-+ u16 min; /**< pixel clock, in 10kHz units */
-+ u16 max; /**< pixel clock, in 10kHz units */
-+} __attribute__((packed));
-+
-+struct intel_sdvo_preferred_input_timing_args {
-+ u16 clock;
-+ u16 width;
-+ u16 height;
-+} __attribute__((packed));
-+
-+/* I2C registers for SDVO */
-+#define SDVO_I2C_ARG_0 0x07
-+#define SDVO_I2C_ARG_1 0x06
-+#define SDVO_I2C_ARG_2 0x05
-+#define SDVO_I2C_ARG_3 0x04
-+#define SDVO_I2C_ARG_4 0x03
-+#define SDVO_I2C_ARG_5 0x02
-+#define SDVO_I2C_ARG_6 0x01
-+#define SDVO_I2C_ARG_7 0x00
-+#define SDVO_I2C_OPCODE 0x08
-+#define SDVO_I2C_CMD_STATUS 0x09
-+#define SDVO_I2C_RETURN_0 0x0a
-+#define SDVO_I2C_RETURN_1 0x0b
-+#define SDVO_I2C_RETURN_2 0x0c
-+#define SDVO_I2C_RETURN_3 0x0d
-+#define SDVO_I2C_RETURN_4 0x0e
-+#define SDVO_I2C_RETURN_5 0x0f
-+#define SDVO_I2C_RETURN_6 0x10
-+#define SDVO_I2C_RETURN_7 0x11
-+#define SDVO_I2C_VENDOR_BEGIN 0x20
-+
-+/* Status results */
-+#define SDVO_CMD_STATUS_POWER_ON 0x0
-+#define SDVO_CMD_STATUS_SUCCESS 0x1
-+#define SDVO_CMD_STATUS_NOTSUPP 0x2
-+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
-+#define SDVO_CMD_STATUS_PENDING 0x4
-+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
-+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
-+
-+/* SDVO commands, argument/result registers */
-+
-+#define SDVO_CMD_RESET 0x01
-+
-+/** Returns a struct intel_sdvo_caps */
-+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
-+
-+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
-+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
-+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
-+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
-+
-+/**
-+ * Reports which inputs are trained (managed to sync).
-+ *
-+ * Devices must have trained within 2 vsyncs of a mode change.
-+ */
-+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
-+struct intel_sdvo_get_trained_inputs_response {
-+ unsigned int input0_trained:1;
-+ unsigned int input1_trained:1;
-+ unsigned int pad:6;
-+} __attribute__((packed));
-+
-+/** Returns a struct intel_sdvo_output_flags of active outputs. */
-+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
-+
-+/**
-+ * Sets the current set of active outputs.
-+ *
-+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
-+ * on multi-output devices.
-+ */
-+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
-+
-+/**
-+ * Returns the current mapping of SDVO inputs to outputs on the device.
-+ *
-+ * Returns two struct intel_sdvo_output_flags structures.
-+ */
-+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
-+
-+/**
-+ * Sets the current mapping of SDVO inputs to outputs on the device.
-+ *
-+ * Takes two struct i380_sdvo_output_flags structures.
-+ */
-+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
-+
-+/**
-+ * Returns a struct intel_sdvo_output_flags of attached displays.
-+ */
-+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
-+
-+/**
-+ * Returns a struct intel_sdvo_ouptut_flags of displays supporting hot plugging.
-+ */
-+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
-+
-+/**
-+ * Takes a struct intel_sdvo_output_flags.
-+ */
-+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
-+
-+/**
-+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
-+ * interrupts enabled.
-+ */
-+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
-+
-+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
-+struct intel_sdvo_get_interrupt_event_source_response {
-+ u16 interrupt_status;
-+ unsigned int ambient_light_interrupt:1;
-+ unsigned int pad:7;
-+} __attribute__((packed));
-+
-+/**
-+ * Selects which input is affected by future input commands.
-+ *
-+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
-+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
-+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
-+ */
-+#define SDVO_CMD_SET_TARGET_INPUT 0x10
-+struct intel_sdvo_set_target_input_args {
-+ unsigned int target_1:1;
-+ unsigned int pad:7;
-+} __attribute__((packed));
-+
-+/**
-+ * Takes a struct intel_sdvo_output_flags of which outputs are targetted by
-+ * future output commands.
-+ *
-+ * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12],
-+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
-+ */
-+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
-+
-+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
-+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
-+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
-+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
-+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
-+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
-+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
-+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
-+/* Part 1 */
-+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
-+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
-+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
-+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
-+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
-+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
-+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
-+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
-+/* Part 2 */
-+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
-+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
-+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
-+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
-+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
-+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
-+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
-+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
-+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
-+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
-+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
-+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
-+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
-+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
-+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
-+
-+/**
-+ * Generates a DTD based on the given width, height, and flags.
-+ *
-+ * This will be supported by any device supporting scaling or interlaced
-+ * modes.
-+ */
-+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
-+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
-+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
-+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
-+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
-+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
-+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
-+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
-+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
-+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
-+
-+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
-+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
-+
-+/** Returns a struct intel_sdvo_pixel_clock_range */
-+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
-+/** Returns a struct intel_sdvo_pixel_clock_range */
-+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
-+
-+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
-+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
-+
-+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
-+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
-+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
-+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
-+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
-+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
-+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
-+
-+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-+
-+#define SDVO_CMD_GET_TV_FORMAT 0x28
-+
-+#define SDVO_CMD_SET_TV_FORMAT 0x29
-+
-+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
-+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
-+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
-+# define SDVO_ENCODER_STATE_ON (1 << 0)
-+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
-+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
-+# define SDVO_ENCODER_STATE_OFF (1 << 3)
-+
-+#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
-+
-+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
-+# define SDVO_CONTROL_BUS_PROM 0x0
-+# define SDVO_CONTROL_BUS_DDC1 0x1
-+# define SDVO_CONTROL_BUS_DDC2 0x2
-+# define SDVO_CONTROL_BUS_DDC3 0x3
-+
-+/* xiaolin, to support add-on SDVO TV Encoder */
-+/* SDVO Bus & SDVO Inputs wiring details*/
-+/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no*/
-+/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no*/
-+/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no*/
-+/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no*/
-+#define SDVOB_IN0 0x01
-+#define SDVOB_IN1 0x02
-+#define SDVOC_IN0 0x04
-+#define SDVOC_IN1 0x08
-+
-+#define SDVO_OUTPUT_TV0 0x003C
-+#define SDVO_OUTPUT_TV1 0x3C00
-+#define SDVO_OUTPUT_LAST (14)
-+
-+#define SDVO_OUTPUT_CRT (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1 )
-+#define SDVO_OUTPUT_TV (SDVO_OUTPUT_TV0 | SDVO_OUTPUT_TV1)
-+#define SDVO_OUTPUT_LVDS (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
-+#define SDVO_OUTPUT_TMDS (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
-+
-+
-+
-+#define SDVO_DEVICE_NONE 0x00
-+#define SDVO_DEVICE_CRT 0x01
-+#define SDVO_DEVICE_TV 0x02
-+#define SDVO_DEVICE_LVDS 0x04
-+#define SDVO_DEVICE_TMDS 0x08
-+
-+/* Different TV mode*/
-+#define TVMODE_OFF 0x0000
-+#define TVMODE_SDTV 0x0001
-+#define TVMODE_HDTV 0x0002
-+
-+#define TVSTANDARD_NONE 0x00
-+#define TVSTANDARD_NTSC_M 0x0001 // 75 IRE Setup
-+#define TVSTANDARD_NTSC_M_J 0x0002 // Japan, 0 IRE Setup
-+#define TVSTANDARD_PAL_B 0x0004
-+#define TVSTANDARD_PAL_D 0x0008
-+#define TVSTANDARD_PAL_H 0x0010
-+#define TVSTANDARD_PAL_I 0x0020
-+#define TVSTANDARD_PAL_M 0x0040
-+#define TVSTANDARD_PAL_N 0x0080
-+#define TVSTANDARD_SECAM_B 0x0100
-+#define TVSTANDARD_SECAM_D 0x0200
-+#define TVSTANDARD_SECAM_G 0x0400
-+#define TVSTANDARD_SECAM_H 0x0800
-+#define TVSTANDARD_SECAM_K 0x1000
-+#define TVSTANDARD_SECAM_K1 0x2000
-+#define TVSTANDARD_SECAM_L 0x4000
-+#define TVSTANDARD_WIN_VGA 0x8000
-+/*and the rest*/
-+#define TVSTANDARD_NTSC_433 0x00010000
-+#define TVSTANDARD_PAL_G 0x00020000
-+#define TVSTANDARD_PAL_60 0x00040000
-+#define TVSTANDARD_SECAM_L1 0x00080000
-+#define TVSTANDARD_SDTV_ALL 0x000FFFFF
-+
-+
-+/*HDTV standard defination added using the unused upper 12 bits of dwTVStandard*/
-+#define HDTV_SMPTE_170M_480i59 0x00100000
-+#define HDTV_SMPTE_293M_480p60 0x00200000
-+#define HDTV_SMPTE_293M_480p59 0x00400000
-+#define HDTV_ITURBT601_576i50 0x00800000
-+#define HDTV_ITURBT601_576p50 0x01000000
-+#define HDTV_SMPTE_296M_720p50 0x02000000
-+#define HDTV_SMPTE_296M_720p59 0x04000000
-+#define HDTV_SMPTE_296M_720p60 0x08000000
-+#define HDTV_SMPTE_274M_1080i50 0x10000000
-+#define HDTV_SMPTE_274M_1080i59 0x20000000
-+#define HDTV_SMPTE_274M_1080i60 0x40000000
-+#define HDTV_SMPTE_274M_1080p60 0x80000000
-+#define TVSTANDARD_HDTV_ALL 0xFFF00000
-+
-+
-+#define TVSTANDARD_NTSC 0x01
-+#define TVSTANDARD_PAL 0x02
-+
-+#define TVOUTPUT_NONE 0x00
-+#define TVOUTPUT_COMPOSITE 0x01
-+#define TVOUTPUT_SVIDEO 0x02
-+#define TVOUTPUT_RGB 0x04
-+#define TVOUTPUT_YCBCR 0x08
-+#define TVOUTPUT_SC 0x16
-+
-+/* Encoder supported TV standard bit mask per SDVO ED*/
-+#define SDVO_NTSC_M 0x00000001
-+#define SDVO_NTSC_M_J 0x00000002
-+#define SDVO_NTSC_433 0x00000004
-+#define SDVO_PAL_B 0x00000008
-+#define SDVO_PAL_D 0x00000010
-+#define SDVO_PAL_G 0x00000020
-+#define SDVO_PAL_H 0x00000040
-+#define SDVO_PAL_I 0x00000080
-+#define SDVO_PAL_M 0x00000100
-+#define SDVO_PAL_N 0x00000200
-+#define SDVO_PAL_NC 0x00000400
-+#define SDVO_PAL_60 0x00000800
-+#define SDVO_SECAM_B 0x00001000
-+#define SDVO_SECAM_D 0x00002000
-+#define SDVO_SECAM_G 0x00004000
-+#define SDVO_SECAM_K 0x00008000
-+#define SDVO_SECAM_K1 0x00010000
-+#define SDVO_SECAM_L 0x00020000
-+#define SDVO_SECAM_60 0x00040000
-+
-+/* Number of SDTV format*/
-+#define SDTV_NUM_STANDARDS 19
-+
-+/* Encoder supported HDTV standard bit mask per SDVO ED*/
-+#define SDVO_HDTV_STD_240M_1080i59 0x00000008
-+#define SDVO_HDTV_STD_240M_1080i60 0x00000010
-+#define SDVO_HDTV_STD_260M_1080i59 0x00000020
-+#define SDVO_HDTV_STD_260M_1080i60 0x00000040
-+#define SDVO_HDTV_STD_274M_1080i50 0x00000080
-+#define SDVO_HDTV_STD_274M_1080i59 0x00000100
-+#define SDVO_HDTV_STD_274M_1080i60 0x00000200
-+#define SDVO_HDTV_STD_274M_1080p23 0x00000400
-+#define SDVO_HDTV_STD_274M_1080p24 0x00000800
-+#define SDVO_HDTV_STD_274M_1080p25 0x00001000
-+#define SDVO_HDTV_STD_274M_1080p29 0x00002000
-+#define SDVO_HDTV_STD_274M_1080p30 0x00004000
-+#define SDVO_HDTV_STD_274M_1080p50 0x00008000
-+#define SDVO_HDTV_STD_274M_1080p59 0x00010000
-+#define SDVO_HDTV_STD_274M_1080p60 0x00020000
-+#define SDVO_HDTV_STD_295M_1080i50 0x00040000
-+#define SDVO_HDTV_STD_295M_1080p50 0x00080000
-+#define SDVO_HDTV_STD_296M_720p59 0x00100000
-+#define SDVO_HDTV_STD_296M_720p60 0x00200000
-+#define SDVO_HDTV_STD_296M_720p50 0x00400000
-+#define SDVO_HDTV_STD_293M_480p59 0x00800000
-+#define SDVO_HDTV_STD_170M_480i59 0x01000000
-+#define SDVO_HDTV_STD_ITURBT601_576i50 0x02000000
-+#define SDVO_HDTV_STD_ITURBT601_576p50 0x04000000
-+#define SDVO_HDTV_STD_EIA_7702A_480i60 0x08000000
-+#define SDVO_HDTV_STD_EIA_7702A_480p60 0x10000000
-+
-+/* SDTV resolution*/
-+#define SDVO_SDTV_320x200 0x00000001
-+#define SDVO_SDTV_320x240 0x00000002
-+#define SDVO_SDTV_400x300 0x00000004
-+#define SDVO_SDTV_640x350 0x00000008
-+#define SDVO_SDTV_640x400 0x00000010
-+#define SDVO_SDTV_640x480 0x00000020
-+#define SDVO_SDTV_704x480 0x00000040
-+#define SDVO_SDTV_704x576 0x00000080
-+#define SDVO_SDTV_720x350 0x00000100
-+#define SDVO_SDTV_720x400 0x00000200
-+#define SDVO_SDTV_720x480 0x00000400
-+#define SDVO_SDTV_720x540 0x00000800
-+#define SDVO_SDTV_720x576 0x00001000
-+#define SDVO_SDTV_768x576 0x00002000
-+#define SDVO_SDTV_800x600 0x00004000
-+#define SDVO_SDTV_832x624 0x00008000
-+#define SDVO_SDTV_920x766 0x00010000
-+#define SDVO_SDTV_1024x768 0x00020000
-+#define SDVO_SDTV_1280x1024 0x00040000
-+
-+
-+#define SDVO_HDTV_640x480 0x00000001
-+#define SDVO_HDTV_800x600 0x00000002
-+#define SDVO_HDTV_1024x768 0x00000004
-+#define SDVO_HDTV_1064x600 0x00020000
-+#define SDVO_HDTV_1280x720 0x00040000
-+#define SDVO_HDTV_1704x960 0x00100000
-+#define SDVO_HDTV_1864x1050 0x00200000
-+#define SDVO_HDTV_1920x1080 0x00400000
-+#define SDVO_HDTV_640x400 0x02000000
-+
-+/* Number of SDTV mode*/
-+#define SDTV_NUM_MODES 19
-+
-+/* sdvo cmd for sdvo tv */
-+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMINGS 0x1A
-+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-+#define SDVO_CMD_GET_TV_FORMATS 0x28
-+#define SDVO_CMD_SET_TV_FORMATS 0x29
-+
-+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
-+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
-+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
-+#define SDVO_ENCODER_STATE_ON (1 << 0)
-+#define SDVO_ENCODER_STATE_STANDBY (1 << 1)
-+#define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
-+#define SDVO_ENCODER_STATE_OFF (1 << 3)
-+
-+/* Bit mask of picture enhancement*/
-+#define SDVO_FLICKER_FILTER 0x00000001
-+#define SDVO_ADAPTIVE_FLICKER_FILTER 0x00000002
-+#define SDVO_2D_FLICKER_FILTER 0x00000004
-+#define SDVO_SATURATION 0x00000008
-+#define SDVO_HUE 0x00000010
-+#define SDVO_BRIGHTNESS 0x00000020
-+#define SDVO_CONTRAST 0x00000040
-+#define SDVO_HORIZONTAL_OVERSCAN 0x00000080
-+#define SDVO_VERTICAL_OVERSCAN 0x00000100
-+#define SDVO_HORIZONTAL_POSITION 0x00000200
-+#define SDVO_VERTICAL_POSITION 0x00000400
-+#define SDVO_SHARPNESS 0x00000800
-+#define SDVO_DOT_CRAWL 0x00001000
-+#define SDVO_DITHER 0x00002000
-+#define SDVO_MAX_TV_CHROMA_FILTER 0x00004000
-+#define SDVO_TV_MAX_LUMA_FILTER 0x00008000
-+
-+#define SDVO_CMD_GET_ANCILLARY_VIDEO_INFORMATION 0x3A
-+#define SDVO_CMD_SET_ANCILLARY_VIDEO_INFORMATION 0x3B
-+
-+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
-+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4D
-+#define SDVO_CMD_GET_FLICKER_FILTER 0x4E
-+#define SDVO_CMD_SET_FLICKER_FILTER 0x4F
-+#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FILTER 0x50
-+#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FILTER 0x51
-+#define SDVO_CMD_GET_MAX_2D_FLICKER_FILTER 0x52
-+#define SDVO_CMD_GET_2D_FLICKER_FILTER 0x53
-+#define SDVO_CMD_SET_2D_FLICKER_FILTER 0x54
-+#define SDVO_CMD_GET_MAX_SATURATION 0x55
-+#define SDVO_CMD_GET_SATURATION 0x56
-+#define SDVO_CMD_SET_SATURATION 0x57
-+#define SDVO_CMD_GET_MAX_HUE 0x58
-+#define SDVO_CMD_GET_HUE 0x59
-+#define SDVO_CMD_SET_HUE 0x5A
-+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5B
-+#define SDVO_CMD_GET_BRIGHTNESS 0x5C
-+#define SDVO_CMD_SET_BRIGHTNESS 0x5D
-+#define SDVO_CMD_GET_MAX_CONTRAST 0x5E
-+#define SDVO_CMD_GET_CONTRAST 0x5F
-+#define SDVO_CMD_SET_CONTRAST 0x60
-+
-+#define SDVO_CMD_GET_MAX_HORIZONTAL_OVERSCAN 0x61
-+#define SDVO_CMD_GET_HORIZONTAL_OVERSCAN 0x62
-+#define SDVO_CMD_SET_HORIZONTAL_OVERSCAN 0x63
-+#define SDVO_CMD_GET_MAX_VERTICAL_OVERSCAN 0x64
-+#define SDVO_CMD_GET_VERTICAL_OVERSCAN 0x65
-+#define SDVO_CMD_SET_VERTICAL_OVERSCAN 0x66
-+#define SDVO_CMD_GET_MAX_HORIZONTAL_POSITION 0x67
-+#define SDVO_CMD_GET_HORIZONTAL_POSITION 0x68
-+#define SDVO_CMD_SET_HORIZONTAL_POSITION 0x69
-+#define SDVO_CMD_GET_MAX_VERTICAL_POSITION 0x6A
-+#define SDVO_CMD_GET_VERTICAL_POSITION 0x6B
-+#define SDVO_CMD_SET_VERTICAL_POSITION 0x6C
-+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6D
-+#define SDVO_CMD_GET_SHARPNESS 0x6E
-+#define SDVO_CMD_SET_SHARPNESS 0x6F
-+#define SDVO_CMD_GET_DOT_CRAWL 0x70
-+#define SDVO_CMD_SET_DOT_CRAWL 0x71
-+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
-+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
-+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
-+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
-+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
-+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
-+#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FILTER 0x7B
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_buffer.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_buffer.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,437 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_schedule.h"
-+
-+struct drm_psb_ttm_backend {
-+ struct drm_ttm_backend base;
-+ struct page **pages;
-+ unsigned int desired_tile_stride;
-+ unsigned int hw_tile_stride;
-+ int mem_type;
-+ unsigned long offset;
-+ unsigned long num_pages;
-+};
-+
-+int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
-+ uint32_t * type)
-+{
-+ switch (*class) {
-+ case PSB_ENGINE_TA:
-+ *type = DRM_FENCE_TYPE_EXE |
-+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
-+ if (bo->mem.mask & PSB_BO_FLAG_TA)
-+ *type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
-+ if (bo->mem.mask & PSB_BO_FLAG_SCENE)
-+ *type |= _PSB_FENCE_TYPE_SCENE_DONE;
-+ if (bo->mem.mask & PSB_BO_FLAG_FEEDBACK)
-+ *type |= _PSB_FENCE_TYPE_FEEDBACK;
-+ break;
-+ default:
-+ *type = DRM_FENCE_TYPE_EXE;
-+ }
-+ return 0;
-+}
-+
-+static inline size_t drm_size_align(size_t size)
-+{
-+ size_t tmpSize = 4;
-+ if (size > PAGE_SIZE)
-+ return PAGE_ALIGN(size);
-+ while (tmpSize < size)
-+ tmpSize <<= 1;
-+
-+ return (size_t) tmpSize;
-+}
-+
-+/*
-+ * Poulsbo GPU virtual space looks like this
-+ * (We currently use only one MMU context).
-+ *
-+ * gatt_start = Start of GATT aperture in bus space.
-+ * stolen_end = End of GATT populated by stolen memory in bus space.
-+ * gatt_end = End of GATT
-+ * twod_end = MIN(gatt_start + 256_MEM, gatt_end)
-+ *
-+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling- and copy operations.
-+ * This space is not managed and is protected by the
-+ * temp_mem mutex.
-+ *
-+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
-+ *
-+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
-+ *
-+ * gatt_start -> stolen_end DRM_BO_MEM_VRAM Pre-populated GATT pages.
-+ *
-+ * stolen_end -> twod_end DRM_BO_MEM_TT GATT memory usable by 2D engine.
-+ *
-+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not usable by 2D engine.
-+ *
-+ * gatt_end -> 0xffffffff Currently unused.
-+ */
-+
-+int psb_init_mem_type(struct drm_device *dev, uint32_t type,
-+ struct drm_mem_type_manager *man)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_gtt *pg = dev_priv->pg;
-+
-+ switch (type) {
-+ case DRM_BO_MEM_LOCAL:
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CACHED;
-+ man->drm_bus_maptype = 0;
-+ break;
-+ case DRM_PSB_MEM_KERNEL:
-+ man->io_offset = 0x00000000;
-+ man->io_size = 0x00000000;
-+ man->io_addr = NULL;
-+ man->drm_bus_maptype = _DRM_TTM;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->gpu_offset = PSB_MEM_KERNEL_START;
-+ break;
-+ case DRM_PSB_MEM_MMU:
-+ man->io_offset = 0x00000000;
-+ man->io_size = 0x00000000;
-+ man->io_addr = NULL;
-+ man->drm_bus_maptype = _DRM_TTM;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->gpu_offset = PSB_MEM_MMU_START;
-+ break;
-+ case DRM_PSB_MEM_PDS:
-+ man->io_offset = 0x00000000;
-+ man->io_size = 0x00000000;
-+ man->io_addr = NULL;
-+ man->drm_bus_maptype = _DRM_TTM;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->gpu_offset = PSB_MEM_PDS_START;
-+ break;
-+ case DRM_PSB_MEM_RASTGEOM:
-+ man->io_offset = 0x00000000;
-+ man->io_size = 0x00000000;
-+ man->io_addr = NULL;
-+ man->drm_bus_maptype = _DRM_TTM;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
-+ break;
-+ case DRM_BO_MEM_VRAM:
-+ man->io_addr = NULL;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
-+#ifdef PSB_WORKING_HOST_MMU_ACCESS
-+ man->drm_bus_maptype = _DRM_AGP;
-+ man->io_offset = pg->gatt_start;
-+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
-+#else
-+ man->drm_bus_maptype = _DRM_TTM; /* Forces uncached */
-+ man->io_offset = pg->stolen_base;
-+ man->io_size = pg->stolen_size;
-+#endif
-+ man->gpu_offset = pg->gatt_start;
-+ break;
-+ case DRM_BO_MEM_TT: /* Mappable GATT memory */
-+ man->io_offset = pg->gatt_start;
-+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
-+ man->io_addr = NULL;
-+#ifdef PSB_WORKING_HOST_MMU_ACCESS
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
-+ man->drm_bus_maptype = _DRM_AGP;
-+#else
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->drm_bus_maptype = _DRM_TTM;
-+#endif
-+ man->gpu_offset = pg->gatt_start;
-+ break;
-+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
-+ man->io_offset = pg->gatt_start;
-+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
-+ man->io_addr = NULL;
-+#ifdef PSB_WORKING_HOST_MMU_ACCESS
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
-+ man->drm_bus_maptype = _DRM_AGP;
-+#else
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->drm_bus_maptype = _DRM_TTM;
-+#endif
-+ man->gpu_offset = pg->gatt_start;
-+ break;
-+ default:
-+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
-+uint32_t psb_evict_mask(struct drm_buffer_object * bo)
-+{
-+ switch (bo->mem.mem_type) {
-+ case DRM_BO_MEM_VRAM:
-+ return DRM_BO_FLAG_MEM_TT;
-+ default:
-+ return DRM_BO_FLAG_MEM_LOCAL;
-+ }
-+}
-+
-+int psb_invalidate_caches(struct drm_device *dev, uint64_t flags)
-+{
-+ return 0;
-+}
-+
-+static int psb_move_blit(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ int dir = 0;
-+
-+ if ((old_mem->mem_type == new_mem->mem_type) &&
-+ (new_mem->mm_node->start <
-+ old_mem->mm_node->start + old_mem->mm_node->size)) {
-+ dir = 1;
-+ }
-+
-+ psb_emit_2d_copy_blit(bo->dev,
-+ old_mem->mm_node->start << PAGE_SHIFT,
-+ new_mem->mm_node->start << PAGE_SHIFT,
-+ new_mem->num_pages, dir);
-+
-+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
-+ DRM_FENCE_TYPE_EXE, 0, new_mem);
-+}
-+
-+/*
-+ * Flip destination ttm into cached-coherent GATT,
-+ * then blit and subsequently move out again.
-+ */
-+
-+static int psb_move_flip(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_bo_mem_reg tmp_mem;
-+ int ret;
-+
-+ tmp_mem = *new_mem;
-+ tmp_mem.mm_node = NULL;
-+ tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
-+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
-+
-+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
-+ if (ret)
-+ return ret;
-+ ret = drm_bind_ttm(bo->ttm, &tmp_mem);
-+ if (ret)
-+ goto out_cleanup;
-+ ret = psb_move_blit(bo, 1, no_wait, &tmp_mem);
-+ if (ret)
-+ goto out_cleanup;
-+
-+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
-+ out_cleanup:
-+ if (tmp_mem.mm_node) {
-+ mutex_lock(&dev->struct_mutex);
-+ if (tmp_mem.mm_node != bo->pinned_node)
-+ drm_mm_put_block(tmp_mem.mm_node);
-+ tmp_mem.mm_node = NULL;
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+ return ret;
-+}
-+
-+int psb_move(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+
-+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
-+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-+ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
-+ if (psb_move_flip(bo, evict, no_wait, new_mem))
-+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-+ } else {
-+ if (psb_move_blit(bo, evict, no_wait, new_mem))
-+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-+ }
-+ return 0;
-+}
-+
-+static int drm_psb_tbe_nca(struct drm_ttm_backend *backend)
-+{
-+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
-+}
-+
-+static int drm_psb_tbe_populate(struct drm_ttm_backend *backend,
-+ unsigned long num_pages, struct page **pages)
-+{
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+
-+ psb_be->pages = pages;
-+ return 0;
-+}
-+
-+static int drm_psb_tbe_unbind(struct drm_ttm_backend *backend)
-+{
-+ struct drm_device *dev = backend->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
-+ struct drm_mem_type_manager *man = &dev->bm.man[psb_be->mem_type];
-+
-+ PSB_DEBUG_RENDER("MMU unbind.\n");
-+
-+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
-+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
-+ PAGE_SHIFT;
-+
-+ (void)psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
-+ psb_be->num_pages,
-+ psb_be->desired_tile_stride,
-+ psb_be->hw_tile_stride);
-+ }
-+
-+ psb_mmu_remove_pages(pd, psb_be->offset,
-+ psb_be->num_pages,
-+ psb_be->desired_tile_stride,
-+ psb_be->hw_tile_stride);
-+
-+ return 0;
-+}
-+
-+static int drm_psb_tbe_bind(struct drm_ttm_backend *backend,
-+ struct drm_bo_mem_reg *bo_mem)
-+{
-+ struct drm_device *dev = backend->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
-+ struct drm_mem_type_manager *man = &dev->bm.man[bo_mem->mem_type];
-+ int type;
-+ int ret = 0;
-+
-+ psb_be->mem_type = bo_mem->mem_type;
-+ psb_be->num_pages = bo_mem->num_pages;
-+ psb_be->desired_tile_stride = bo_mem->desired_tile_stride;
-+ psb_be->hw_tile_stride = bo_mem->hw_tile_stride;
-+ psb_be->desired_tile_stride = 0;
-+ psb_be->hw_tile_stride = 0;
-+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
-+ man->gpu_offset;
-+
-+ type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
-+
-+ PSB_DEBUG_RENDER("MMU bind.\n");
-+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
-+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
-+ PAGE_SHIFT;
-+
-+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
-+ gatt_p_offset,
-+ psb_be->num_pages,
-+ psb_be->desired_tile_stride,
-+ psb_be->hw_tile_stride, type);
-+ }
-+
-+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
-+ psb_be->offset, psb_be->num_pages,
-+ psb_be->desired_tile_stride,
-+ psb_be->hw_tile_stride, type);
-+ if (ret)
-+ goto out_err;
-+
-+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
-+ DRM_BE_FLAG_BOUND_CACHED : 0, DRM_BE_FLAG_BOUND_CACHED);
-+
-+ return 0;
-+ out_err:
-+ drm_psb_tbe_unbind(backend);
-+ return ret;
-+
-+}
-+
-+static void drm_psb_tbe_clear(struct drm_ttm_backend *backend)
-+{
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+
-+ psb_be->pages = NULL;
-+ return;
-+}
-+
-+static void drm_psb_tbe_destroy(struct drm_ttm_backend *backend)
-+{
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+
-+ if (backend)
-+ drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
-+}
-+
-+static struct drm_ttm_backend_func psb_ttm_backend = {
-+ .needs_ub_cache_adjust = drm_psb_tbe_nca,
-+ .populate = drm_psb_tbe_populate,
-+ .clear = drm_psb_tbe_clear,
-+ .bind = drm_psb_tbe_bind,
-+ .unbind = drm_psb_tbe_unbind,
-+ .destroy = drm_psb_tbe_destroy,
-+};
-+
-+struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev)
-+{
-+ struct drm_psb_ttm_backend *psb_be;
-+
-+ psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
-+ if (!psb_be)
-+ return NULL;
-+ psb_be->pages = NULL;
-+ psb_be->base.func = &psb_ttm_backend;
-+ psb_be->base.dev = dev;
-+
-+ return &psb_be->base;
-+}
-+
-+int psb_tbe_size(struct drm_device *dev, unsigned long num_pages)
-+{
-+ /*
-+ * Return the size of the structures themselves and the
-+ * estimated size of the pagedir and pagetable entries.
-+ */
-+
-+ return drm_size_align(sizeof(struct drm_psb_ttm_backend)) +
-+ 8*num_pages;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drm.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drm.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,370 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#ifndef _PSB_DRM_H_
-+#define _PSB_DRM_H_
-+
-+#if defined(__linux__) && !defined(__KERNEL__)
-+#include<stdint.h>
-+#endif
-+
-+/*
-+ * Intel Poulsbo driver package version.
-+ *
-+ */
-+/* #define PSB_PACKAGE_VERSION "ED"__DATE__*/
-+#define PSB_PACKAGE_VERSION "2.1.0.32L.0019"
-+
-+#define DRM_PSB_SAREA_MAJOR 0
-+#define DRM_PSB_SAREA_MINOR 1
-+#define PSB_FIXED_SHIFT 16
-+
-+/*
-+ * Public memory types.
-+ */
-+
-+#define DRM_PSB_MEM_MMU DRM_BO_MEM_PRIV1
-+#define DRM_PSB_FLAG_MEM_MMU DRM_BO_FLAG_MEM_PRIV1
-+#define DRM_PSB_MEM_PDS DRM_BO_MEM_PRIV2
-+#define DRM_PSB_FLAG_MEM_PDS DRM_BO_FLAG_MEM_PRIV2
-+#define DRM_PSB_MEM_APER DRM_BO_MEM_PRIV3
-+#define DRM_PSB_FLAG_MEM_APER DRM_BO_FLAG_MEM_PRIV3
-+#define DRM_PSB_MEM_RASTGEOM DRM_BO_MEM_PRIV4
-+#define DRM_PSB_FLAG_MEM_RASTGEOM DRM_BO_FLAG_MEM_PRIV4
-+#define PSB_MEM_RASTGEOM_START 0x30000000
-+
-+typedef int32_t psb_fixed;
-+typedef uint32_t psb_ufixed;
-+
-+static inline psb_fixed psb_int_to_fixed(int a)
-+{
-+ return a * (1 << PSB_FIXED_SHIFT);
-+}
-+
-+static inline psb_ufixed psb_unsigned_to_ufixed(unsigned int a)
-+{
-+ return a << PSB_FIXED_SHIFT;
-+}
-+
-+/*Status of the command sent to the gfx device.*/
-+typedef enum {
-+ DRM_CMD_SUCCESS,
-+ DRM_CMD_FAILED,
-+ DRM_CMD_HANG
-+} drm_cmd_status_t;
-+
-+struct drm_psb_scanout {
-+ uint32_t buffer_id; /* DRM buffer object ID */
-+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
-+ uint32_t stride; /* Buffer stride in bytes */
-+ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
-+ uint32_t width; /* Buffer width in pixels */
-+ uint32_t height; /* Buffer height in lines */
-+ psb_fixed transform[3][3]; /* Buffer composite transform */
-+ /* (scaling, rot, reflect) */
-+};
-+
-+#define DRM_PSB_SAREA_OWNERS 16
-+#define DRM_PSB_SAREA_OWNER_2D 0
-+#define DRM_PSB_SAREA_OWNER_3D 1
-+
-+#define DRM_PSB_SAREA_SCANOUTS 3
-+
-+struct drm_psb_sarea {
-+ /* Track changes of this data structure */
-+
-+ uint32_t major;
-+ uint32_t minor;
-+
-+ /* Last context to touch part of hw */
-+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
-+
-+ /* Definition of front- and rotated buffers */
-+ uint32_t num_scanouts;
-+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
-+
-+ int planeA_x;
-+ int planeA_y;
-+ int planeA_w;
-+ int planeA_h;
-+ int planeB_x;
-+ int planeB_y;
-+ int planeB_w;
-+ int planeB_h;
-+ uint32_t msvdx_state;
-+ uint32_t msvdx_context;
-+};
-+
-+#define PSB_RELOC_MAGIC 0x67676767
-+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
-+#define PSB_RELOC_SHIFT_SHIFT 0
-+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
-+#define PSB_RELOC_ALSHIFT_SHIFT 16
-+
-+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
-+ * buffer
-+ */
-+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
-+ * buffer, relative to 2D
-+ * base address
-+ */
-+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
-+ * relative to PDS base address
-+ */
-+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
-+ * buffer (for tiling)
-+ */
-+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
-+ * relative to base reg
-+ */
-+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
-+
-+struct drm_psb_reloc {
-+ uint32_t reloc_op;
-+ uint32_t where; /* offset in destination buffer */
-+ uint32_t buffer; /* Buffer reloc applies to */
-+ uint32_t mask; /* Destination format: */
-+ uint32_t shift; /* Destination format: */
-+ uint32_t pre_add; /* Destination format: */
-+ uint32_t background; /* Destination add */
-+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
-+ uint32_t arg0; /* Reloc-op dependant */
-+ uint32_t arg1;
-+};
-+
-+#define PSB_BO_FLAG_TA (1ULL << 48)
-+#define PSB_BO_FLAG_SCENE (1ULL << 49)
-+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
-+#define PSB_BO_FLAG_USSE (1ULL << 51)
-+
-+#define PSB_ENGINE_2D 0
-+#define PSB_ENGINE_VIDEO 1
-+#define PSB_ENGINE_RASTERIZER 2
-+#define PSB_ENGINE_TA 3
-+#define PSB_ENGINE_HPRAST 4
-+
-+/*
-+ * For this fence class we have a couple of
-+ * fence types.
-+ */
-+
-+#define _PSB_FENCE_EXE_SHIFT 0
-+#define _PSB_FENCE_TA_DONE_SHIFT 1
-+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
-+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
-+#define _PSB_FENCE_FEEDBACK_SHIFT 4
-+
-+#define _PSB_ENGINE_TA_FENCE_TYPES 5
-+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
-+
-+#define PSB_ENGINE_HPRAST 4
-+#define PSB_NUM_ENGINES 5
-+
-+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
-+#define PSB_TA_FLAG_LASTPASS (1 << 1)
-+
-+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
-+
-+struct drm_psb_scene {
-+ int handle_valid;
-+ uint32_t handle;
-+ uint32_t w;
-+ uint32_t h;
-+ uint32_t num_buffers;
-+};
-+
-+struct drm_psb_hw_info
-+{
-+ uint32_t rev_id;
-+ uint32_t caps;
-+};
-+
-+typedef struct drm_psb_cmdbuf_arg {
-+ uint64_t buffer_list; /* List of buffers to validate */
-+ uint64_t clip_rects; /* See i915 counterpart */
-+ uint64_t scene_arg;
-+ uint64_t fence_arg;
-+
-+ uint32_t ta_flags;
-+
-+ uint32_t ta_handle; /* TA reg-value pairs */
-+ uint32_t ta_offset;
-+ uint32_t ta_size;
-+
-+ uint32_t oom_handle;
-+ uint32_t oom_offset;
-+ uint32_t oom_size;
-+
-+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
-+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
-+ uint32_t cmdbuf_size;
-+
-+ uint32_t reloc_handle; /* Reloc buffer object */
-+ uint32_t reloc_offset;
-+ uint32_t num_relocs;
-+
-+ int32_t damage; /* Damage front buffer with cliprects */
-+ /* Not implemented yet */
-+ uint32_t fence_flags;
-+ uint32_t engine;
-+
-+ /*
-+ * Feedback;
-+ */
-+
-+ uint32_t feedback_ops;
-+ uint32_t feedback_handle;
-+ uint32_t feedback_offset;
-+ uint32_t feedback_breakpoints;
-+ uint32_t feedback_size;
-+} drm_psb_cmdbuf_arg_t;
-+
-+struct drm_psb_xhw_init_arg {
-+ uint32_t operation;
-+ uint32_t buffer_handle;
-+};
-+
-+/*
-+ * Feedback components:
-+ */
-+
-+/*
-+ * Vistest component. The number of these in the feedback buffer
-+ * equals the number of vistest breakpoints + 1.
-+ * This is currently the only feedback component.
-+ */
-+
-+struct drm_psb_vistest {
-+ uint32_t vt[8];
-+};
-+
-+#define PSB_HW_COOKIE_SIZE 16
-+#define PSB_HW_FEEDBACK_SIZE 8
-+#define PSB_HW_OOM_CMD_SIZE 6
-+
-+struct drm_psb_xhw_arg {
-+ uint32_t op;
-+ int ret;
-+ uint32_t irq_op;
-+ uint32_t issue_irq;
-+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
-+ union {
-+ struct {
-+ uint32_t w;
-+ uint32_t h;
-+ uint32_t size;
-+ uint32_t clear_p_start;
-+ uint32_t clear_num_pages;
-+ } si;
-+ struct {
-+ uint32_t fire_flags;
-+ uint32_t hw_context;
-+ uint32_t offset;
-+ uint32_t engine;
-+ uint32_t flags;
-+ uint32_t rca;
-+ uint32_t num_oom_cmds;
-+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
-+ } sb;
-+ struct {
-+ uint32_t pages;
-+ uint32_t size;
-+ } bi;
-+ struct {
-+ uint32_t bca;
-+ uint32_t rca;
-+ uint32_t flags;
-+ } oom;
-+ struct {
-+ uint32_t pt_offset;
-+ uint32_t param_offset;
-+ uint32_t flags;
-+ } bl;
-+ struct {
-+ uint32_t value;
-+ } cl;
-+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
-+ } arg;
-+};
-+
-+#define DRM_PSB_CMDBUF 0x00
-+#define DRM_PSB_XHW_INIT 0x01
-+#define DRM_PSB_XHW 0x02
-+#define DRM_PSB_SCENE_UNREF 0x03
-+/* Controlling the kernel modesetting buffers */
-+#define DRM_PSB_KMS_OFF 0x04
-+#define DRM_PSB_KMS_ON 0x05
-+#define DRM_PSB_HW_INFO 0x06
-+
-+#define PSB_XHW_INIT 0x00
-+#define PSB_XHW_TAKEDOWN 0x01
-+
-+#define PSB_XHW_FIRE_RASTER 0x00
-+#define PSB_XHW_SCENE_INFO 0x01
-+#define PSB_XHW_SCENE_BIND_FIRE 0x02
-+#define PSB_XHW_TA_MEM_INFO 0x03
-+#define PSB_XHW_RESET_DPM 0x04
-+#define PSB_XHW_OOM 0x05
-+#define PSB_XHW_TERMINATE 0x06
-+#define PSB_XHW_VISTEST 0x07
-+#define PSB_XHW_RESUME 0x08
-+#define PSB_XHW_TA_MEM_LOAD 0x09
-+#define PSB_XHW_CHECK_LOCKUP 0x0a
-+
-+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
-+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
-+#define PSB_SCENE_FLAG_SETUP (1 << 2)
-+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
-+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
-+
-+#define PSB_TA_MEM_FLAG_TA (1 << 0)
-+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
-+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
-+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
-+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
-+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
-+
-+/*Raster fire will deallocate memory */
-+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
-+/*Isp reset needed due to change in ZLS format */
-+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
-+/*These are set by Xpsb. */
-+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
-+/*The task has had at least one OOM and Xpsb will
-+ send back messages on each fire. */
-+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
-+
-+#define PSB_SCENE_ENGINE_TA 0
-+#define PSB_SCENE_ENGINE_RASTER 1
-+#define PSB_SCENE_NUM_ENGINES 2
-+
-+struct drm_psb_dev_info_arg {
-+ uint32_t num_use_attribute_registers;
-+};
-+#define DRM_PSB_DEVINFO 0x01
-+
-+#endif
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drv.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drv.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,1006 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "psb_drm.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "i915_reg.h"
-+#include "psb_msvdx.h"
-+#include "drm_pciids.h"
-+#include "psb_scene.h"
-+#include <linux/cpu.h>
-+#include <linux/notifier.h>
-+#include <linux/fb.h>
-+
-+int drm_psb_debug = 0;
-+EXPORT_SYMBOL(drm_psb_debug);
-+static int drm_psb_trap_pagefaults = 0;
-+static int drm_psb_clock_gating = 0;
-+static int drm_psb_ta_mem_size = 32 * 1024;
-+int drm_psb_disable_vsync = 1;
-+int drm_psb_no_fb = 0;
-+int drm_psb_force_pipeb = 0;
-+char* psb_init_mode;
-+int psb_init_xres;
-+int psb_init_yres;
-+/*
-+ *
-+ */
-+#define SII_1392_WA
-+#ifdef SII_1392_WA
-+extern int SII_1392;
-+#endif
-+
-+MODULE_PARM_DESC(debug, "Enable debug output");
-+MODULE_PARM_DESC(clock_gating, "clock gating");
-+MODULE_PARM_DESC(no_fb, "Disable FBdev");
-+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
-+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
-+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
-+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
-+MODULE_PARM_DESC(mode, "initial mode name");
-+MODULE_PARM_DESC(xres, "initial mode width");
-+MODULE_PARM_DESC(yres, "initial mode height");
-+
-+module_param_named(debug, drm_psb_debug, int, 0600);
-+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
-+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
-+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
-+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
-+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
-+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
-+module_param_named(mode, psb_init_mode, charp, 0600);
-+module_param_named(xres, psb_init_xres, int, 0600);
-+module_param_named(yres, psb_init_yres, int, 0600);
-+
-+static struct pci_device_id pciidlist[] = {
-+ psb_PCI_IDS
-+};
-+
-+#define DRM_PSB_CMDBUF_IOCTL DRM_IOW(DRM_PSB_CMDBUF, \
-+ struct drm_psb_cmdbuf_arg)
-+#define DRM_PSB_XHW_INIT_IOCTL DRM_IOR(DRM_PSB_XHW_INIT, \
-+ struct drm_psb_xhw_init_arg)
-+#define DRM_PSB_XHW_IOCTL DRM_IO(DRM_PSB_XHW)
-+
-+#define DRM_PSB_SCENE_UNREF_IOCTL DRM_IOWR(DRM_PSB_SCENE_UNREF, \
-+ struct drm_psb_scene)
-+#define DRM_PSB_HW_INFO_IOCTL DRM_IOR(DRM_PSB_HW_INFO, \
-+ struct drm_psb_hw_info)
-+
-+#define DRM_PSB_KMS_OFF_IOCTL DRM_IO(DRM_PSB_KMS_OFF)
-+#define DRM_PSB_KMS_ON_IOCTL DRM_IO(DRM_PSB_KMS_ON)
-+
-+static struct drm_ioctl_desc psb_ioctls[] = {
-+ DRM_IOCTL_DEF(DRM_PSB_CMDBUF_IOCTL, psb_cmdbuf_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_PSB_XHW_INIT_IOCTL, psb_xhw_init_ioctl,
-+ DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_PSB_XHW_IOCTL, psb_xhw_ioctl, DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_PSB_SCENE_UNREF_IOCTL, drm_psb_scene_unref_ioctl,
-+ DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_PSB_KMS_OFF_IOCTL, psbfb_kms_off_ioctl,
-+ DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_PSB_KMS_ON_IOCTL, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_PSB_HW_INFO_IOCTL, psb_hw_info_ioctl, DRM_AUTH),
-+};
-+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
-+
-+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-+
-+#ifdef USE_PAT_WC
-+#warning Init pat
-+static int __cpuinit psb_cpu_callback(struct notifier_block *nfb,
-+ unsigned long action,
-+ void *hcpu)
-+{
-+ if (action == CPU_ONLINE)
-+ drm_init_pat();
-+
-+ return 0;
-+}
-+
-+static struct notifier_block __cpuinitdata psb_nb = {
-+ .notifier_call = psb_cpu_callback,
-+ .priority = 1
-+};
-+#endif
-+
-+static int dri_library_name(struct drm_device *dev, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "psb\n");
-+}
-+
-+static void psb_set_uopt(struct drm_psb_uopt *uopt)
-+{
-+ uopt->clock_gating = drm_psb_clock_gating;
-+}
-+
-+static void psb_lastclose(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if (!dev->dev_private)
-+ return;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (dev_priv->ta_mem)
-+ psb_ta_mem_unref_devlocked(&dev_priv->ta_mem);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_lock(&dev_priv->cmdbuf_mutex);
-+ if (dev_priv->buffers) {
-+ vfree(dev_priv->buffers);
-+ dev_priv->buffers = NULL;
-+ }
-+ mutex_unlock(&dev_priv->cmdbuf_mutex);
-+}
-+
-+static void psb_do_takedown(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (dev->bm.initialized) {
-+ if (dev_priv->have_mem_rastgeom) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_RASTGEOM);
-+ dev_priv->have_mem_rastgeom = 0;
-+ }
-+ if (dev_priv->have_mem_mmu) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_MMU);
-+ dev_priv->have_mem_mmu = 0;
-+ }
-+ if (dev_priv->have_mem_aper) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_APER);
-+ dev_priv->have_mem_aper = 0;
-+ }
-+ if (dev_priv->have_tt) {
-+ drm_bo_clean_mm(dev, DRM_BO_MEM_TT);
-+ dev_priv->have_tt = 0;
-+ }
-+ if (dev_priv->have_vram) {
-+ drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM);
-+ dev_priv->have_vram = 0;
-+ }
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (dev_priv->has_msvdx)
-+ psb_msvdx_uninit(dev);
-+
-+ if (dev_priv->comm) {
-+ kunmap(dev_priv->comm_page);
-+ dev_priv->comm = NULL;
-+ }
-+ if (dev_priv->comm_page) {
-+ __free_page(dev_priv->comm_page);
-+ dev_priv->comm_page = NULL;
-+ }
-+}
-+
-+void psb_clockgating(struct drm_psb_private *dev_priv)
-+{
-+ uint32_t clock_gating;
-+
-+ if (dev_priv->uopt.clock_gating == 1) {
-+ PSB_DEBUG_INIT("Disabling clock gating.\n");
-+
-+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
-+
-+ } else if (dev_priv->uopt.clock_gating == 2) {
-+ PSB_DEBUG_INIT("Enabling clock gating.\n");
-+
-+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
-+ } else
-+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
-+
-+#ifdef FIX_TG_2D_CLOCKGATE
-+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
-+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
-+#endif
-+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
-+ (void)PSB_RSGX32(PSB_CR_CLKGATECTL);
-+}
-+
-+static int psb_do_init(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_gtt *pg = dev_priv->pg;
-+
-+ uint32_t stolen_gtt;
-+ uint32_t tt_start;
-+ uint32_t tt_pages;
-+
-+ int ret = -ENOMEM;
-+
-+ DRM_ERROR("Debug is 0x%08x\n", drm_psb_debug);
-+
-+ dev_priv->ta_mem_pages =
-+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, PAGE_SIZE) >> PAGE_SHIFT;
-+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
-+ if (!dev_priv->comm_page)
-+ goto out_err;
-+
-+ dev_priv->comm = kmap(dev_priv->comm_page);
-+ memset((void *)dev_priv->comm, 0, PAGE_SIZE);
-+
-+ dev_priv->has_msvdx = 1;
-+ if (psb_msvdx_init(dev))
-+ dev_priv->has_msvdx = 0;
-+
-+ /*
-+ * Initialize sequence numbers for the different command
-+ * submission mechanisms.
-+ */
-+
-+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
-+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
-+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
-+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
-+
-+ if (pg->gatt_start & 0x0FFFFFFF) {
-+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
-+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+ stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
-+
-+ dev_priv->gatt_free_offset = pg->gatt_start +
-+ (stolen_gtt << PAGE_SHIFT) * 1024;
-+
-+ /*
-+ * Insert a cache-coherent communications page in mmu space
-+ * just after the stolen area. Will be used for fencing etc.
-+ */
-+
-+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
-+ dev_priv->gatt_free_offset += PAGE_SIZE;
-+
-+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
-+ &dev_priv->comm_page,
-+ dev_priv->comm_mmu_offset, 1, 0, 0,
-+ PSB_MMU_CACHED_MEMORY);
-+
-+ if (ret)
-+ goto out_err;
-+
-+ if (1 || drm_debug) {
-+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
-+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
-+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
-+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
-+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
-+ _PSB_CC_REVISION_MAJOR_SHIFT,
-+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
-+ _PSB_CC_REVISION_MINOR_SHIFT);
-+ DRM_INFO
-+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
-+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
-+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
-+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
-+ _PSB_CC_REVISION_DESIGNER_SHIFT);
-+ }
-+
-+ dev_priv->irqmask_lock = SPIN_LOCK_UNLOCKED;
-+ dev_priv->fence0_irq_on = 0;
-+
-+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
-+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
-+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
-+ tt_pages -= tt_start >> PAGE_SHIFT;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
-+ pg->stolen_size >> PAGE_SHIFT)) {
-+ dev_priv->have_vram = 1;
-+ }
-+
-+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_TT, tt_start >> PAGE_SHIFT,
-+ tt_pages)) {
-+ dev_priv->have_tt = 1;
-+ }
-+
-+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_MMU, 0x00000000,
-+ (pg->gatt_start -
-+ PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
-+ dev_priv->have_mem_mmu = 1;
-+ }
-+
-+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
-+ (PSB_MEM_MMU_START -
-+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
-+ dev_priv->have_mem_rastgeom = 1;
-+ }
-+#if 0
-+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
-+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
-+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT)) {
-+ dev_priv->have_mem_aper = 1;
-+ }
-+ }
-+#endif
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+ out_err:
-+ psb_do_takedown(dev);
-+ return ret;
-+}
-+
-+static int psb_driver_unload(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ intel_modeset_cleanup(dev);
-+
-+ if (dev_priv) {
-+ psb_watchdog_takedown(dev_priv);
-+ psb_do_takedown(dev);
-+ psb_xhw_takedown(dev_priv);
-+ psb_scheduler_takedown(&dev_priv->scheduler);
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (dev_priv->have_mem_pds) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_PDS);
-+ dev_priv->have_mem_pds = 0;
-+ }
-+ if (dev_priv->have_mem_kernel) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_KERNEL);
-+ dev_priv->have_mem_kernel = 0;
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ (void)drm_bo_driver_finish(dev);
-+
-+ if (dev_priv->pf_pd) {
-+ psb_mmu_free_pagedir(dev_priv->pf_pd);
-+ dev_priv->pf_pd = NULL;
-+ }
-+ if (dev_priv->mmu) {
-+ struct psb_gtt *pg = dev_priv->pg;
-+
-+ down_read(&pg->sem);
-+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
-+ (dev_priv->mmu),
-+ pg->gatt_start,
-+ pg->
-+ stolen_size >> PAGE_SHIFT);
-+ up_read(&pg->sem);
-+ psb_mmu_driver_takedown(dev_priv->mmu);
-+ dev_priv->mmu = NULL;
-+ }
-+ psb_gtt_takedown(dev_priv->pg, 1);
-+ if (dev_priv->scratch_page) {
-+ __free_page(dev_priv->scratch_page);
-+ dev_priv->scratch_page = NULL;
-+ }
-+ psb_takedown_use_base(dev_priv);
-+ if (dev_priv->vdc_reg) {
-+ iounmap(dev_priv->vdc_reg);
-+ dev_priv->vdc_reg = NULL;
-+ }
-+ if (dev_priv->sgx_reg) {
-+ iounmap(dev_priv->sgx_reg);
-+ dev_priv->sgx_reg = NULL;
-+ }
-+ if (dev_priv->msvdx_reg) {
-+ iounmap(dev_priv->msvdx_reg);
-+ dev_priv->msvdx_reg = NULL;
-+ }
-+
-+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
-+ dev->dev_private = NULL;
-+ }
-+ return 0;
-+}
-+
-+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
-+extern int drm_pick_crtcs(struct drm_device *dev);
-+extern char drm_init_mode[32];
-+extern int drm_init_xres;
-+extern int drm_init_yres;
-+
-+static int psb_initial_config(struct drm_device *dev, bool can_grow)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct drm_output *output;
-+ struct drm_crtc *crtc;
-+ int ret = false;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+
-+ drm_crtc_probe_output_modes(dev, 2048, 2048);
-+
-+ /* strncpy(drm_init_mode, psb_init_mode, strlen(psb_init_mode)); */
-+ drm_init_xres = psb_init_xres;
-+ drm_init_yres = psb_init_yres;
-+
-+ drm_pick_crtcs(dev);
-+
-+ if ((I915_READ(PIPEACONF) & PIPEACONF_ENABLE) && !drm_psb_force_pipeb)
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ if (!crtc->desired_mode)
-+ continue;
-+
-+ dev->driver->fb_probe(dev, crtc);
-+ } else
-+ list_for_each_entry_reverse(crtc, &dev->mode_config.crtc_list,
-+ head) {
-+ if (!crtc->desired_mode)
-+ continue;
-+
-+ dev->driver->fb_probe(dev, crtc);
-+ }
-+
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+
-+ if (!output->crtc || !output->crtc->desired_mode)
-+ continue;
-+
-+ if (output->crtc->fb)
-+ drm_crtc_set_mode(output->crtc,
-+ output->crtc->desired_mode, 0, 0);
-+ }
-+
-+#ifdef SII_1392_WA
-+ if((SII_1392 != 1) || (drm_psb_no_fb==0))
-+ drm_disable_unused_functions(dev);
-+#else
-+ drm_disable_unused_functions(dev);
-+#endif
-+
-+
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ return ret;
-+
-+}
-+
-+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
-+{
-+ struct drm_psb_private *dev_priv;
-+ unsigned long resource_start;
-+ struct psb_gtt *pg;
-+ int ret = -ENOMEM;
-+
-+ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
-+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
-+ if (dev_priv == NULL)
-+ return -ENOMEM;
-+
-+ mutex_init(&dev_priv->temp_mem);
-+ mutex_init(&dev_priv->cmdbuf_mutex);
-+ mutex_init(&dev_priv->reset_mutex);
-+ psb_init_disallowed();
-+
-+ atomic_set(&dev_priv->msvdx_mmu_invaldc, 0);
-+
-+#ifdef FIX_TG_16
-+ atomic_set(&dev_priv->lock_2d, 0);
-+ atomic_set(&dev_priv->ta_wait_2d, 0);
-+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
-+ atomic_set(&dev_priv->waiters_2d, 0);;
-+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
-+#else
-+ mutex_init(&dev_priv->mutex_2d);
-+#endif
-+
-+ spin_lock_init(&dev_priv->reloc_lock);
-+
-+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
-+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
-+
-+ dev->dev_private = (void *)dev_priv;
-+ dev_priv->chipset = chipset;
-+ psb_set_uopt(&dev_priv->uopt);
-+
-+ psb_watchdog_init(dev_priv);
-+ psb_scheduler_init(dev, &dev_priv->scheduler);
-+
-+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
-+
-+ dev_priv->msvdx_reg =
-+ ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
-+ if (!dev_priv->msvdx_reg)
-+ goto out_err;
-+
-+ dev_priv->vdc_reg =
-+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
-+ if (!dev_priv->vdc_reg)
-+ goto out_err;
-+
-+ dev_priv->sgx_reg =
-+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
-+ if (!dev_priv->sgx_reg)
-+ goto out_err;
-+
-+ psb_clockgating(dev_priv);
-+ if (psb_init_use_base(dev_priv, 3, 13))
-+ goto out_err;
-+
-+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
-+ if (!dev_priv->scratch_page)
-+ goto out_err;
-+
-+ dev_priv->pg = psb_gtt_alloc(dev);
-+ if (!dev_priv->pg)
-+ goto out_err;
-+
-+ ret = psb_gtt_init(dev_priv->pg, 0);
-+ if (ret)
-+ goto out_err;
-+
-+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
-+ drm_psb_trap_pagefaults, 0,
-+ &dev_priv->msvdx_mmu_invaldc);
-+ if (!dev_priv->mmu)
-+ goto out_err;
-+
-+ pg = dev_priv->pg;
-+
-+ /*
-+ * Make sgx MMU aware of the stolen memory area we call VRAM.
-+ */
-+
-+ down_read(&pg->sem);
-+ ret =
-+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
-+ pg->stolen_base >> PAGE_SHIFT,
-+ pg->gatt_start,
-+ pg->stolen_size >> PAGE_SHIFT, 0);
-+ up_read(&pg->sem);
-+ if (ret)
-+ goto out_err;
-+
-+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
-+ if (!dev_priv->pf_pd)
-+ goto out_err;
-+
-+ /*
-+ * Make all presumably unused requestors page-fault by making them
-+ * use context 1 which does not have any valid mappings.
-+ */
-+
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
-+ PSB_RSGX32(PSB_CR_BIF_BANK1);
-+
-+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
-+
-+ psb_init_2d(dev_priv);
-+
-+ ret = drm_bo_driver_init(dev);
-+ if (ret)
-+ goto out_err;
-+
-+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_KERNEL, 0x00000000,
-+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
-+ >> PAGE_SHIFT);
-+ if (ret)
-+ goto out_err;
-+ dev_priv->have_mem_kernel = 1;
-+
-+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_PDS, 0x00000000,
-+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
-+ >> PAGE_SHIFT);
-+ if (ret)
-+ goto out_err;
-+ dev_priv->have_mem_pds = 1;
-+
-+ ret = psb_do_init(dev);
-+ if (ret)
-+ return ret;
-+
-+ ret = psb_xhw_init(dev);
-+ if (ret)
-+ return ret;
-+
-+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
-+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
-+
-+ intel_modeset_init(dev);
-+ psb_initial_config(dev, false);
-+
-+#ifdef USE_PAT_WC
-+#warning Init pat
-+ register_cpu_notifier(&psb_nb);
-+#endif
-+
-+ return 0;
-+ out_err:
-+ psb_driver_unload(dev);
-+ return ret;
-+}
-+
-+int psb_driver_device_is_agp(struct drm_device *dev)
-+{
-+ return 0;
-+}
-+
-+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[PSB_ENGINE_VIDEO];
-+ struct drm_fence_object *fence;
-+ int ret = 0;
-+ int signaled = 0;
-+ int count = 0;
-+ unsigned long _end = jiffies + 3 * DRM_HZ;
-+
-+ PSB_DEBUG_GENERAL("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
-+
-+ /*set the msvdx-reset flag here.. */
-+ dev_priv->msvdx_needs_reset = 1;
-+
-+ /*Ensure that all pending IRQs are serviced, */
-+ list_for_each_entry(fence, &fc->ring, ring) {
-+ count++;
-+ do {
-+ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
-+ (signaled =
-+ drm_fence_object_signaled(fence,
-+ DRM_FENCE_TYPE_EXE)));
-+ if (signaled)
-+ break;
-+ if (time_after_eq(jiffies, _end))
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXACPI: fence 0x%x didn't get signaled for 3 secs; we will suspend anyways\n",
-+ (unsigned int)fence);
-+ } while (ret == -EINTR);
-+
-+ }
-+
-+ /* Issue software reset */
-+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
-+
-+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0,
-+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
-+
-+ PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
-+ count);
-+ return 0;
-+}
-+
-+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
-+{
-+ struct drm_device *dev = pci_get_drvdata(pdev);
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_output *output;
-+
-+ if (drm_psb_no_fb == 0)
-+ psbfb_suspend(dev);
-+#ifdef WA_NO_FB_GARBAGE_DISPLAY
-+ else {
-+ if(num_registered_fb)
-+ {
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ if(output->crtc != NULL)
-+ intel_crtc_mode_save(output->crtc);
-+ //if(output->funcs->save)
-+ // output->funcs->save(output);
-+ }
-+ }
-+ }
-+#endif
-+
-+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
-+ (void)psb_idle_3d(dev);
-+ (void)psb_idle_2d(dev);
-+ flush_scheduled_work();
-+
-+ psb_takedown_use_base(dev_priv);
-+
-+ if (dev_priv->has_msvdx)
-+ psb_prepare_msvdx_suspend(dev);
-+
-+ pci_save_state(pdev);
-+ pci_disable_device(pdev);
-+ pci_set_power_state(pdev, PCI_D3hot);
-+
-+ return 0;
-+}
-+
-+static int psb_resume(struct pci_dev *pdev)
-+{
-+ struct drm_device *dev = pci_get_drvdata(pdev);
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_gtt *pg = dev_priv->pg;
-+ struct drm_output *output;
-+ int ret;
-+
-+ pci_set_power_state(pdev, PCI_D0);
-+ pci_restore_state(pdev);
-+ ret = pci_enable_device(pdev);
-+ if (ret)
-+ return ret;
-+
-+#ifdef USE_PAT_WC
-+#warning Init pat
-+ /* for single CPU's we do it here, then for more than one CPU we
-+ * use the CPU notifier to reinit PAT on those CPU's.
-+ */
-+ drm_init_pat();
-+#endif
-+
-+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
-+ dev_priv->msvdx_needs_reset = 1;
-+
-+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
-+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
-+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
-+
-+ /*
-+ * The GTT page tables are probably not saved.
-+ * However, TT and VRAM is empty at this point.
-+ */
-+
-+ psb_gtt_init(dev_priv->pg, 1);
-+
-+ /*
-+ * The SGX loses it's register contents.
-+ * Restore BIF registers. The MMU page tables are
-+ * "normal" pages, so their contents should be kept.
-+ */
-+
-+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
-+ PSB_RSGX32(PSB_CR_BIF_BANK1);
-+
-+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
-+
-+ /*
-+ * 2D Base registers..
-+ */
-+ psb_init_2d(dev_priv);
-+
-+ if (drm_psb_no_fb == 0) {
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ if(output->crtc != NULL)
-+ drm_crtc_set_mode(output->crtc, &output->crtc->mode,
-+ output->crtc->x, output->crtc->y);
-+ }
-+ }
-+
-+ /*
-+ * Persistant 3D base registers and USSE base registers..
-+ */
-+
-+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
-+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
-+ psb_init_use_base(dev_priv, 3, 13);
-+
-+ /*
-+ * Now, re-initialize the 3D engine.
-+ */
-+
-+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
-+
-+ psb_scheduler_ta_mem_check(dev_priv);
-+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
-+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
-+ PSB_TA_MEM_FLAG_TA |
-+ PSB_TA_MEM_FLAG_RASTER |
-+ PSB_TA_MEM_FLAG_HOSTA |
-+ PSB_TA_MEM_FLAG_HOSTD |
-+ PSB_TA_MEM_FLAG_INIT,
-+ dev_priv->ta_mem->ta_memory->offset,
-+ dev_priv->ta_mem->hw_data->offset,
-+ dev_priv->ta_mem->hw_cookie);
-+ }
-+
-+ if (drm_psb_no_fb == 0)
-+ psbfb_resume(dev);
-+#ifdef WA_NO_FB_GARBAGE_DISPLAY
-+ else {
-+ if(num_registered_fb)
-+ {
-+ struct fb_info *fb_info=registered_fb[0];
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ if(output->crtc != NULL)
-+ intel_crtc_mode_restore(output->crtc);
-+ }
-+ if(fb_info)
-+ {
-+ fb_set_suspend(fb_info, 0);
-+ printk("set the fb_set_suspend resume end\n");
-+ }
-+ }
-+ }
-+#endif
-+
-+ return 0;
-+}
-+
-+/* always available as we are SIGIO'd */
-+static unsigned int psb_poll(struct file *filp, struct poll_table_struct *wait)
-+{
-+ return (POLLIN | POLLRDNORM);
-+}
-+
-+static int psb_release(struct inode *inode, struct file *filp)
-+{
-+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if (dev_priv && dev_priv->xhw_file) {
-+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
-+ }
-+ return drm_release(inode, filp);
-+}
-+
-+extern struct drm_fence_driver psb_fence_driver;
-+
-+/*
-+ * Use this memory type priority if no eviction is needed.
-+ */
-+static uint32_t psb_mem_prios[] = { DRM_BO_MEM_VRAM,
-+ DRM_BO_MEM_TT,
-+ DRM_PSB_MEM_KERNEL,
-+ DRM_PSB_MEM_MMU,
-+ DRM_PSB_MEM_RASTGEOM,
-+ DRM_PSB_MEM_PDS,
-+ DRM_PSB_MEM_APER,
-+ DRM_BO_MEM_LOCAL
-+};
-+
-+/*
-+ * Use this memory type priority if need to evict.
-+ */
-+static uint32_t psb_busy_prios[] = { DRM_BO_MEM_TT,
-+ DRM_BO_MEM_VRAM,
-+ DRM_PSB_MEM_KERNEL,
-+ DRM_PSB_MEM_MMU,
-+ DRM_PSB_MEM_RASTGEOM,
-+ DRM_PSB_MEM_PDS,
-+ DRM_PSB_MEM_APER,
-+ DRM_BO_MEM_LOCAL
-+};
-+
-+static struct drm_bo_driver psb_bo_driver = {
-+ .mem_type_prio = psb_mem_prios,
-+ .mem_busy_prio = psb_busy_prios,
-+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
-+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
-+ .create_ttm_backend_entry = drm_psb_tbe_init,
-+ .fence_type = psb_fence_types,
-+ .invalidate_caches = psb_invalidate_caches,
-+ .init_mem_type = psb_init_mem_type,
-+ .evict_mask = psb_evict_mask,
-+ .move = psb_move,
-+ .backend_size = psb_tbe_size,
-+ .command_stream_barrier = NULL,
-+};
-+
-+static struct drm_driver driver = {
-+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-+ DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
-+ .load = psb_driver_load,
-+ .unload = psb_driver_unload,
-+ .dri_library_name = dri_library_name,
-+ .get_reg_ofs = drm_core_get_reg_ofs,
-+ .ioctls = psb_ioctls,
-+ .device_is_agp = psb_driver_device_is_agp,
-+ .vblank_wait = psb_vblank_wait,
-+ .vblank_wait2 = psb_vblank_wait2,
-+ .irq_preinstall = psb_irq_preinstall,
-+ .irq_postinstall = psb_irq_postinstall,
-+ .irq_uninstall = psb_irq_uninstall,
-+ .irq_handler = psb_irq_handler,
-+ .fb_probe = psbfb_probe,
-+ .fb_remove = psbfb_remove,
-+ .firstopen = NULL,
-+ .lastclose = psb_lastclose,
-+ .fops = {
-+ .owner = THIS_MODULE,
-+ .open = drm_open,
-+ .release = psb_release,
-+ .ioctl = drm_ioctl,
-+ .mmap = drm_mmap,
-+ .poll = psb_poll,
-+ .fasync = drm_fasync,
-+ },
-+ .pci_driver = {
-+ .name = DRIVER_NAME,
-+ .id_table = pciidlist,
-+ .probe = probe,
-+ .remove = __devexit_p(drm_cleanup_pci),
-+ .resume = psb_resume,
-+ .suspend = psb_suspend,
-+ },
-+ .fence_driver = &psb_fence_driver,
-+ .bo_driver = &psb_bo_driver,
-+ .name = DRIVER_NAME,
-+ .desc = DRIVER_DESC,
-+ .date = PSB_DRM_DRIVER_DATE,
-+ .major = PSB_DRM_DRIVER_MAJOR,
-+ .minor = PSB_DRM_DRIVER_MINOR,
-+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
-+};
-+
-+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-+{
-+ return drm_get_dev(pdev, ent, &driver);
-+}
-+
-+static int __init psb_init(void)
-+{
-+ driver.num_ioctls = psb_max_ioctl;
-+
-+ return drm_init(&driver, pciidlist);
-+}
-+
-+static void __exit psb_exit(void)
-+{
-+ drm_exit(&driver);
-+}
-+
-+module_init(psb_init);
-+module_exit(psb_exit);
-+
-+MODULE_AUTHOR(DRIVER_AUTHOR);
-+MODULE_DESCRIPTION(DRIVER_DESC);
-+MODULE_LICENSE("GPL");
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_drv.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_drv.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,775 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+#ifndef _PSB_DRV_H_
-+#define _PSB_DRV_H_
-+
-+#include "drmP.h"
-+#include "psb_drm.h"
-+#include "psb_reg.h"
-+#include "psb_schedule.h"
-+#include "intel_drv.h"
-+
-+enum {
-+ CHIP_PSB_8108 = 0,
-+ CHIP_PSB_8109 = 1
-+};
-+
-+/*
-+ * Hardware bugfixes
-+ */
-+
-+#define FIX_TG_16
-+#define FIX_TG_2D_CLOCKGATE
-+
-+#define DRIVER_NAME "psb"
-+#define DRIVER_DESC "drm driver for the Intel GMA500"
-+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
-+
-+#define PSB_DRM_DRIVER_DATE "20080613"
-+#define PSB_DRM_DRIVER_MAJOR 4
-+#define PSB_DRM_DRIVER_MINOR 12
-+#define PSB_DRM_DRIVER_PATCHLEVEL 0
-+
-+#define PSB_VDC_OFFSET 0x00000000
-+#define PSB_VDC_SIZE 0x000080000
-+#define PSB_SGX_SIZE 0x8000
-+#define PSB_SGX_OFFSET 0x00040000
-+#define PSB_MMIO_RESOURCE 0
-+#define PSB_GATT_RESOURCE 2
-+#define PSB_GTT_RESOURCE 3
-+#define PSB_GMCH_CTRL 0x52
-+#define PSB_BSM 0x5C
-+#define _PSB_GMCH_ENABLED 0x4
-+#define PSB_PGETBL_CTL 0x2020
-+#define _PSB_PGETBL_ENABLED 0x00000001
-+#define PSB_SGX_2D_SLAVE_PORT 0x4000
-+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
-+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
-+#define PSB_NUM_VALIDATE_BUFFERS 1024
-+#define PSB_MEM_KERNEL_START 0x10000000
-+#define PSB_MEM_PDS_START 0x20000000
-+#define PSB_MEM_MMU_START 0x40000000
-+
-+#define DRM_PSB_MEM_KERNEL DRM_BO_MEM_PRIV0
-+#define DRM_PSB_FLAG_MEM_KERNEL DRM_BO_FLAG_MEM_PRIV0
-+
-+/*
-+ * Flags for external memory type field.
-+ */
-+
-+#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
-+#define PSB_MSVDX_SIZE 0x8000 /*MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
-+
-+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
-+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
-+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
-+
-+/*
-+ * PTE's and PDE's
-+ */
-+
-+#define PSB_PDE_MASK 0x003FFFFF
-+#define PSB_PDE_SHIFT 22
-+#define PSB_PTE_SHIFT 12
-+
-+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
-+#define PSB_PTE_WO 0x0002 /* Write only */
-+#define PSB_PTE_RO 0x0004 /* Read only */
-+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
-+
-+/*
-+ * VDC registers and bits
-+ */
-+#define PSB_HWSTAM 0x2098
-+#define PSB_INSTPM 0x20C0
-+#define PSB_INT_IDENTITY_R 0x20A4
-+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
-+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
-+#define _PSB_IRQ_SGX_FLAG (1<<18)
-+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
-+#define PSB_INT_MASK_R 0x20A8
-+#define PSB_INT_ENABLE_R 0x20A0
-+#define PSB_PIPEASTAT 0x70024
-+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
-+#define _PSB_VBLANK_CLEAR (1 << 1)
-+#define PSB_PIPEBSTAT 0x71024
-+
-+#define _PSB_MMU_ER_MASK 0x0001FF00
-+#define _PSB_MMU_ER_HOST (1 << 16)
-+#define GPIOA 0x5010
-+#define GPIOB 0x5014
-+#define GPIOC 0x5018
-+#define GPIOD 0x501c
-+#define GPIOE 0x5020
-+#define GPIOF 0x5024
-+#define GPIOG 0x5028
-+#define GPIOH 0x502c
-+#define GPIO_CLOCK_DIR_MASK (1 << 0)
-+#define GPIO_CLOCK_DIR_IN (0 << 1)
-+#define GPIO_CLOCK_DIR_OUT (1 << 1)
-+#define GPIO_CLOCK_VAL_MASK (1 << 2)
-+#define GPIO_CLOCK_VAL_OUT (1 << 3)
-+#define GPIO_CLOCK_VAL_IN (1 << 4)
-+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
-+#define GPIO_DATA_DIR_MASK (1 << 8)
-+#define GPIO_DATA_DIR_IN (0 << 9)
-+#define GPIO_DATA_DIR_OUT (1 << 9)
-+#define GPIO_DATA_VAL_MASK (1 << 10)
-+#define GPIO_DATA_VAL_OUT (1 << 11)
-+#define GPIO_DATA_VAL_IN (1 << 12)
-+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-+
-+#define VCLK_DIVISOR_VGA0 0x6000
-+#define VCLK_DIVISOR_VGA1 0x6004
-+#define VCLK_POST_DIV 0x6010
-+
-+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
-+#define I915_WRITE(_offs, _val) \
-+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
-+#define I915_READ(_offs) \
-+ ioread32(dev_priv->vdc_reg + (_offs))
-+
-+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
-+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
-+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
-+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
-+#define PSB_COMM_USER_IRQ (1024 >> 2)
-+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
-+#define PSB_COMM_FW (2048 >> 2)
-+
-+#define PSB_UIRQ_VISTEST 1
-+#define PSB_UIRQ_OOM_REPLY 2
-+#define PSB_UIRQ_FIRE_TA_REPLY 3
-+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
-+
-+#define PSB_2D_SIZE (256*1024*1024)
-+#define PSB_MAX_RELOC_PAGES 1024
-+
-+#define PSB_LOW_REG_OFFS 0x0204
-+#define PSB_HIGH_REG_OFFS 0x0600
-+
-+#define PSB_NUM_VBLANKS 2
-+
-+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
-+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
-+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
-+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
-+#define PSB_COMM_FW (2048 >> 2)
-+
-+#define PSB_2D_SIZE (256*1024*1024)
-+#define PSB_MAX_RELOC_PAGES 1024
-+
-+#define PSB_LOW_REG_OFFS 0x0204
-+#define PSB_HIGH_REG_OFFS 0x0600
-+
-+#define PSB_NUM_VBLANKS 2
-+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
-+
-+/*
-+ * User options.
-+ */
-+
-+struct drm_psb_uopt {
-+ int clock_gating;
-+};
-+
-+struct psb_gtt {
-+ struct drm_device *dev;
-+ int initialized;
-+ uint32_t gatt_start;
-+ uint32_t gtt_start;
-+ uint32_t gtt_phys_start;
-+ unsigned gtt_pages;
-+ unsigned gatt_pages;
-+ uint32_t stolen_base;
-+ uint32_t pge_ctl;
-+ u16 gmch_ctrl;
-+ unsigned long stolen_size;
-+ uint32_t *gtt_map;
-+ struct rw_semaphore sem;
-+};
-+
-+struct psb_use_base {
-+ struct list_head head;
-+ struct drm_fence_object *fence;
-+ unsigned int reg;
-+ unsigned long offset;
-+ unsigned int dm;
-+};
-+
-+struct psb_buflist_item;
-+
-+struct psb_msvdx_cmd_queue {
-+ struct list_head head;
-+ void *cmd;
-+ unsigned long cmd_size;
-+ uint32_t sequence;
-+};
-+
-+struct drm_psb_private {
-+ unsigned long chipset;
-+ uint8_t psb_rev_id;
-+
-+ struct psb_xhw_buf resume_buf;
-+ struct drm_psb_dev_info_arg dev_info;
-+ struct drm_psb_uopt uopt;
-+
-+ struct psb_gtt *pg;
-+
-+ struct page *scratch_page;
-+ struct page *comm_page;
-+
-+ volatile uint32_t *comm;
-+ uint32_t comm_mmu_offset;
-+ uint32_t mmu_2d_offset;
-+ uint32_t sequence[PSB_NUM_ENGINES];
-+ uint32_t last_sequence[PSB_NUM_ENGINES];
-+ int idle[PSB_NUM_ENGINES];
-+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
-+ int engine_lockup_2d;
-+
-+ struct psb_mmu_driver *mmu;
-+ struct psb_mmu_pd *pf_pd;
-+
-+ uint8_t *sgx_reg;
-+ uint8_t *vdc_reg;
-+ uint8_t *msvdx_reg;
-+
-+ /*
-+ * MSVDX
-+ */
-+ int msvdx_needs_reset;
-+ int has_msvdx;
-+ uint32_t gatt_free_offset;
-+ atomic_t msvdx_mmu_invaldc;
-+
-+ /*
-+ * Fencing / irq.
-+ */
-+
-+ uint32_t sgx_irq_mask;
-+ uint32_t sgx2_irq_mask;
-+ uint32_t vdc_irq_mask;
-+
-+ spinlock_t irqmask_lock;
-+ spinlock_t sequence_lock;
-+ int fence0_irq_on;
-+ int irq_enabled;
-+ unsigned int irqen_count_2d;
-+ wait_queue_head_t event_2d_queue;
-+
-+#ifdef FIX_TG_16
-+ wait_queue_head_t queue_2d;
-+ atomic_t lock_2d;
-+ atomic_t ta_wait_2d;
-+ atomic_t ta_wait_2d_irq;
-+ atomic_t waiters_2d;
-+#else
-+ struct mutex mutex_2d;
-+#endif
-+ uint32_t msvdx_current_sequence;
-+ uint32_t msvdx_last_sequence;
-+#define MSVDX_MAX_IDELTIME HZ*30
-+ uint32_t msvdx_finished_sequence;
-+ uint32_t msvdx_start_idle;
-+ unsigned long msvdx_idle_start_jiffies;
-+
-+ int fence2_irq_on;
-+
-+ /*
-+ * MSVDX Rendec Memory
-+ */
-+ struct drm_buffer_object *ccb0;
-+ uint32_t base_addr0;
-+ struct drm_buffer_object *ccb1;
-+ uint32_t base_addr1;
-+
-+ /*
-+ * Memory managers
-+ */
-+
-+ int have_vram;
-+ int have_tt;
-+ int have_mem_mmu;
-+ int have_mem_aper;
-+ int have_mem_kernel;
-+ int have_mem_pds;
-+ int have_mem_rastgeom;
-+ struct mutex temp_mem;
-+
-+ /*
-+ * Relocation buffer mapping.
-+ */
-+
-+ spinlock_t reloc_lock;
-+ unsigned int rel_mapped_pages;
-+ wait_queue_head_t rel_mapped_queue;
-+
-+ /*
-+ * SAREA
-+ */
-+ struct drm_psb_sarea *sarea_priv;
-+
-+ /*
-+ * LVDS info
-+ */
-+ int backlight_duty_cycle; /* restore backlight to this value */
-+ bool panel_wants_dither;
-+ struct drm_display_mode *panel_fixed_mode;
-+
-+ /*
-+ * Register state
-+ */
-+ uint32_t saveDSPACNTR;
-+ uint32_t saveDSPBCNTR;
-+ uint32_t savePIPEACONF;
-+ uint32_t savePIPEBCONF;
-+ uint32_t savePIPEASRC;
-+ uint32_t savePIPEBSRC;
-+ uint32_t saveFPA0;
-+ uint32_t saveFPA1;
-+ uint32_t saveDPLL_A;
-+ uint32_t saveDPLL_A_MD;
-+ uint32_t saveHTOTAL_A;
-+ uint32_t saveHBLANK_A;
-+ uint32_t saveHSYNC_A;
-+ uint32_t saveVTOTAL_A;
-+ uint32_t saveVBLANK_A;
-+ uint32_t saveVSYNC_A;
-+ uint32_t saveDSPASTRIDE;
-+ uint32_t saveDSPASIZE;
-+ uint32_t saveDSPAPOS;
-+ uint32_t saveDSPABASE;
-+ uint32_t saveDSPASURF;
-+ uint32_t saveFPB0;
-+ uint32_t saveFPB1;
-+ uint32_t saveDPLL_B;
-+ uint32_t saveDPLL_B_MD;
-+ uint32_t saveHTOTAL_B;
-+ uint32_t saveHBLANK_B;
-+ uint32_t saveHSYNC_B;
-+ uint32_t saveVTOTAL_B;
-+ uint32_t saveVBLANK_B;
-+ uint32_t saveVSYNC_B;
-+ uint32_t saveDSPBSTRIDE;
-+ uint32_t saveDSPBSIZE;
-+ uint32_t saveDSPBPOS;
-+ uint32_t saveDSPBBASE;
-+ uint32_t saveDSPBSURF;
-+ uint32_t saveVCLK_DIVISOR_VGA0;
-+ uint32_t saveVCLK_DIVISOR_VGA1;
-+ uint32_t saveVCLK_POST_DIV;
-+ uint32_t saveVGACNTRL;
-+ uint32_t saveADPA;
-+ uint32_t saveLVDS;
-+ uint32_t saveDVOA;
-+ uint32_t saveDVOB;
-+ uint32_t saveDVOC;
-+ uint32_t savePP_ON;
-+ uint32_t savePP_OFF;
-+ uint32_t savePP_CONTROL;
-+ uint32_t savePP_CYCLE;
-+ uint32_t savePFIT_CONTROL;
-+ uint32_t savePaletteA[256];
-+ uint32_t savePaletteB[256];
-+ uint32_t saveBLC_PWM_CTL;
-+ uint32_t saveCLOCKGATING;
-+
-+ /*
-+ * USE code base register management.
-+ */
-+
-+ struct drm_reg_manager use_manager;
-+
-+ /*
-+ * Xhw
-+ */
-+
-+ uint32_t *xhw;
-+ struct drm_buffer_object *xhw_bo;
-+ struct drm_bo_kmap_obj xhw_kmap;
-+ struct list_head xhw_in;
-+ spinlock_t xhw_lock;
-+ atomic_t xhw_client;
-+ struct drm_file *xhw_file;
-+ wait_queue_head_t xhw_queue;
-+ wait_queue_head_t xhw_caller_queue;
-+ struct mutex xhw_mutex;
-+ struct psb_xhw_buf *xhw_cur_buf;
-+ int xhw_submit_ok;
-+ int xhw_on;
-+
-+ /*
-+ * Scheduling.
-+ */
-+
-+ struct mutex reset_mutex;
-+ struct mutex cmdbuf_mutex;
-+ struct psb_scheduler scheduler;
-+ struct psb_buflist_item *buffers;
-+ uint32_t ta_mem_pages;
-+ struct psb_ta_mem *ta_mem;
-+ int force_ta_mem_load;
-+
-+ /*
-+ * Watchdog
-+ */
-+
-+ spinlock_t watchdog_lock;
-+ struct timer_list watchdog_timer;
-+ struct work_struct watchdog_wq;
-+ struct work_struct msvdx_watchdog_wq;
-+ int timer_available;
-+
-+ /*
-+ * msvdx command queue
-+ */
-+ spinlock_t msvdx_lock;
-+ struct mutex msvdx_mutex;
-+ struct list_head msvdx_queue;
-+ int msvdx_busy;
-+
-+};
-+
-+struct psb_mmu_driver;
-+
-+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
-+ int trap_pagefaults,
-+ int invalid_type,
-+ atomic_t *msvdx_mmu_invaldc);
-+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
-+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver);
-+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
-+ uint32_t gtt_start, uint32_t gtt_pages);
-+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
-+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
-+ int trap_pagefaults,
-+ int invalid_type);
-+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
-+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
-+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
-+ unsigned long address,
-+ uint32_t num_pages);
-+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
-+ uint32_t start_pfn,
-+ unsigned long address,
-+ uint32_t num_pages, int type);
-+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
-+ unsigned long *pfn);
-+
-+/*
-+ * Enable / disable MMU for different requestors.
-+ */
-+
-+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
-+ uint32_t mask);
-+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
-+ uint32_t mask);
-+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
-+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
-+ unsigned long address, uint32_t num_pages,
-+ uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride, int type);
-+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages,
-+ uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride);
-+/*
-+ * psb_sgx.c
-+ */
-+
-+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
-+ uint32_t sequence);
-+extern void psb_init_2d(struct drm_psb_private *dev_priv);
-+extern int psb_idle_2d(struct drm_device *dev);
-+extern int psb_idle_3d(struct drm_device *dev);
-+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
-+ uint32_t src_offset,
-+ uint32_t dst_offset, uint32_t pages,
-+ int direction);
-+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
-+ unsigned int cmds);
-+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset,
-+ unsigned long cmd_size, int engine,
-+ uint32_t * copy_buffer);
-+extern void psb_fence_or_sync(struct drm_file *priv,
-+ int engine,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_fence_arg *fence_arg,
-+ struct drm_fence_object **fence_p);
-+extern void psb_init_disallowed(void);
-+
-+/*
-+ * psb_irq.c
-+ */
-+
-+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
-+extern void psb_irq_preinstall(struct drm_device *dev);
-+extern int psb_irq_postinstall(struct drm_device *dev);
-+extern void psb_irq_uninstall(struct drm_device *dev);
-+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
-+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-+
-+/*
-+ * psb_fence.c
-+ */
-+
-+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
-+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
-+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
-+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
-+ uint32_t class);
-+extern int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t flags, uint32_t * sequence,
-+ uint32_t * native_type);
-+extern void psb_fence_error(struct drm_device *dev,
-+ uint32_t class,
-+ uint32_t sequence, uint32_t type, int error);
-+
-+/*MSVDX stuff*/
-+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
-+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
-+extern int psb_hw_info_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+
-+/*
-+ * psb_buffer.c
-+ */
-+extern struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev);
-+extern int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
-+ uint32_t * type);
-+extern uint32_t psb_evict_mask(struct drm_buffer_object *bo);
-+extern int psb_invalidate_caches(struct drm_device *dev, uint64_t flags);
-+extern int psb_init_mem_type(struct drm_device *dev, uint32_t type,
-+ struct drm_mem_type_manager *man);
-+extern int psb_move(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
-+extern int psb_tbe_size(struct drm_device *dev, unsigned long num_pages);
-+
-+/*
-+ * psb_gtt.c
-+ */
-+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
-+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
-+ unsigned offset_pages, unsigned num_pages,
-+ unsigned desired_tile_stride,
-+ unsigned hw_tile_stride, int type);
-+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
-+ unsigned num_pages,
-+ unsigned desired_tile_stride,
-+ unsigned hw_tile_stride);
-+
-+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
-+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
-+
-+/*
-+ * psb_fb.c
-+ */
-+extern int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
-+extern int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
-+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern void psbfb_suspend(struct drm_device *dev);
-+extern void psbfb_resume(struct drm_device *dev);
-+
-+/*
-+ * psb_reset.c
-+ */
-+
-+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
-+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
-+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
-+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
-+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
-+
-+/*
-+ * psb_regman.c
-+ */
-+
-+extern void psb_takedown_use_base(struct drm_psb_private *dev_priv);
-+extern int psb_grab_use_base(struct drm_psb_private *dev_priv,
-+ unsigned long dev_virtual,
-+ unsigned long size,
-+ unsigned int data_master,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int no_wait,
-+ int ignore_signals,
-+ int *r_reg, uint32_t * r_offset);
-+extern int psb_init_use_base(struct drm_psb_private *dev_priv,
-+ unsigned int reg_start, unsigned int reg_num);
-+
-+/*
-+ * psb_xhw.c
-+ */
-+
-+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_xhw_init(struct drm_device *dev);
-+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
-+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
-+ struct drm_file *file_priv, int closing);
-+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t fire_flags,
-+ uint32_t hw_context,
-+ uint32_t * cookie,
-+ uint32_t * oom_cmds,
-+ uint32_t num_oom_cmds,
-+ uint32_t offset,
-+ uint32_t engine, uint32_t flags);
-+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t fire_flags);
-+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t w,
-+ uint32_t h,
-+ uint32_t * hw_cookie,
-+ uint32_t * bo_size,
-+ uint32_t * clear_p_start,
-+ uint32_t * clear_num_pages);
-+
-+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * value);
-+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t pages,
-+ uint32_t * hw_cookie, uint32_t * size);
-+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie);
-+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t * cookie,
-+ uint32_t * bca,
-+ uint32_t * rca, uint32_t * flags);
-+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
-+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie);
-+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t flags,
-+ uint32_t param_offset,
-+ uint32_t pt_offset, uint32_t * hw_cookie);
-+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+
-+/*
-+ * psb_schedule.c: HW bug fixing.
-+ */
-+
-+#ifdef FIX_TG_16
-+
-+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
-+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
-+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
-+
-+#else
-+
-+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
-+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
-+
-+#endif
-+
-+/*
-+ * Utilities
-+ */
-+
-+#define PSB_ALIGN_TO(_val, _align) \
-+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
-+#define PSB_WVDC32(_val, _offs) \
-+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
-+#define PSB_RVDC32(_offs) \
-+ ioread32(dev_priv->vdc_reg + (_offs))
-+#define PSB_WSGX32(_val, _offs) \
-+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
-+#define PSB_RSGX32(_offs) \
-+ ioread32(dev_priv->sgx_reg + (_offs))
-+#define PSB_WMSVDX32(_val, _offs) \
-+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
-+#define PSB_RMSVDX32(_offs) \
-+ ioread32(dev_priv->msvdx_reg + (_offs))
-+
-+#define PSB_ALPL(_val, _base) \
-+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
-+#define PSB_ALPLM(_val, _base) \
-+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
-+
-+#define PSB_D_RENDER (1 << 16)
-+
-+#define PSB_D_GENERAL (1 << 0)
-+#define PSB_D_INIT (1 << 1)
-+#define PSB_D_IRQ (1 << 2)
-+#define PSB_D_FW (1 << 3)
-+#define PSB_D_PERF (1 << 4)
-+#define PSB_D_TMP (1 << 5)
-+#define PSB_D_RELOC (1 << 6)
-+
-+extern int drm_psb_debug;
-+extern int drm_psb_no_fb;
-+extern int drm_psb_disable_vsync;
-+
-+#define PSB_DEBUG_FW(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
-+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
-+#define PSB_DEBUG_INIT(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
-+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
-+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
-+#define PSB_DEBUG_PERF(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
-+#define PSB_DEBUG_TMP(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
-+#define PSB_DEBUG_RELOC(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_RELOC, _fmt, ##_arg)
-+
-+#if DRM_DEBUG_CODE
-+#define PSB_DEBUG(_flag, _fmt, _arg...) \
-+ do { \
-+ if (unlikely((_flag) & drm_psb_debug)) \
-+ printk(KERN_DEBUG \
-+ "[psb:0x%02x:%s] " _fmt , _flag, \
-+ __FUNCTION__ , ##_arg); \
-+ } while (0)
-+#else
-+#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
-+#endif
-+
-+#endif
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_fb.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_fb.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,1330 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/tty.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/fb.h>
-+#include <linux/init.h>
-+#include <linux/console.h>
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "drm_crtc.h"
-+#include "psb_drv.h"
-+
-+#define SII_1392_WA
-+#ifdef SII_1392_WA
-+extern int SII_1392;
-+#endif
-+
-+struct psbfb_vm_info {
-+ struct drm_buffer_object *bo;
-+ struct address_space *f_mapping;
-+ struct mutex vm_mutex;
-+ atomic_t refcount;
-+};
-+
-+struct psbfb_par {
-+ struct drm_device *dev;
-+ struct drm_crtc *crtc;
-+ struct drm_output *output;
-+ struct psbfb_vm_info *vi;
-+ int dpms_state;
-+};
-+
-+static void psbfb_vm_info_deref(struct psbfb_vm_info **vi)
-+{
-+ struct psbfb_vm_info *tmp = *vi;
-+ *vi = NULL;
-+ if (atomic_dec_and_test(&tmp->refcount)) {
-+ drm_bo_usage_deref_unlocked(&tmp->bo);
-+ drm_free(tmp, sizeof(*tmp), DRM_MEM_MAPS);
-+ }
-+}
-+
-+static struct psbfb_vm_info *psbfb_vm_info_ref(struct psbfb_vm_info *vi)
-+{
-+ atomic_inc(&vi->refcount);
-+ return vi;
-+}
-+
-+static struct psbfb_vm_info *psbfb_vm_info_create(void)
-+{
-+ struct psbfb_vm_info *vi;
-+
-+ vi = drm_calloc(1, sizeof(*vi), DRM_MEM_MAPS);
-+ if (!vi)
-+ return NULL;
-+
-+ mutex_init(&vi->vm_mutex);
-+ atomic_set(&vi->refcount, 1);
-+ return vi;
-+}
-+
-+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-+
-+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
-+ unsigned blue, unsigned transp, struct fb_info *info)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_crtc *crtc = par->crtc;
-+ uint32_t v;
-+
-+ if (!crtc->fb)
-+ return -ENOMEM;
-+
-+ if (regno > 255)
-+ return 1;
-+
-+ if (crtc->funcs->gamma_set)
-+ crtc->funcs->gamma_set(crtc, red, green, blue, regno);
-+
-+ red = CMAP_TOHW(red, info->var.red.length);
-+ blue = CMAP_TOHW(blue, info->var.blue.length);
-+ green = CMAP_TOHW(green, info->var.green.length);
-+ transp = CMAP_TOHW(transp, info->var.transp.length);
-+
-+ v = (red << info->var.red.offset) |
-+ (green << info->var.green.offset) |
-+ (blue << info->var.blue.offset) |
-+ (transp << info->var.transp.offset);
-+
-+ switch (crtc->fb->bits_per_pixel) {
-+ case 16:
-+ ((uint32_t *) info->pseudo_palette)[regno] = v;
-+ break;
-+ case 24:
-+ case 32:
-+ ((uint32_t *) info->pseudo_palette)[regno] = v;
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int psbfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_device *dev = par->dev;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_display_mode *drm_mode;
-+ struct drm_output *output;
-+ int depth;
-+ int pitch;
-+ int bpp = var->bits_per_pixel;
-+
-+ if (!fb)
-+ return -ENOMEM;
-+
-+ if (!var->pixclock)
-+ return -EINVAL;
-+
-+ /* don't support virtuals for now */
-+ if (var->xres_virtual > var->xres)
-+ return -EINVAL;
-+
-+ if (var->yres_virtual > var->yres)
-+ return -EINVAL;
-+
-+ switch (bpp) {
-+ case 8:
-+ depth = 8;
-+ break;
-+ case 16:
-+ depth = (var->green.length == 6) ? 16 : 15;
-+ break;
-+ case 24: /* assume this is 32bpp / depth 24 */
-+ bpp = 32;
-+ /* fallthrough */
-+ case 32:
-+ depth = (var->transp.length > 0) ? 32 : 24;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
-+
-+ /* Check that we can resize */
-+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
-+#if 1
-+ /* Need to resize the fb object.
-+ * But the generic fbdev code doesn't really understand
-+ * that we can do this. So disable for now.
-+ */
-+ DRM_INFO("Can't support requested size, too big!\n");
-+ return -EINVAL;
-+#else
-+ int ret;
-+ struct drm_buffer_object *fbo = NULL;
-+ struct drm_bo_kmap_obj tmp_kmap;
-+
-+ /* a temporary BO to check if we could resize in setpar.
-+ * Therefore no need to set NO_EVICT.
-+ */
-+ ret = drm_buffer_object_create(dev,
-+ pitch * var->yres,
-+ drm_bo_type_kernel,
-+ DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_WRITE |
-+ DRM_BO_FLAG_MEM_TT |
-+ DRM_BO_FLAG_MEM_VRAM,
-+ DRM_BO_HINT_DONT_FENCE,
-+ 0, 0, &fbo);
-+ if (ret || !fbo)
-+ return -ENOMEM;
-+
-+ ret = drm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
-+ if (ret) {
-+ drm_bo_usage_deref_unlocked(&fbo);
-+ return -EINVAL;
-+ }
-+
-+ drm_bo_kunmap(&tmp_kmap);
-+ /* destroy our current fbo! */
-+ drm_bo_usage_deref_unlocked(&fbo);
-+#endif
-+ }
-+
-+ switch (depth) {
-+ case 8:
-+ var->red.offset = 0;
-+ var->green.offset = 0;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 15:
-+ var->red.offset = 10;
-+ var->green.offset = 5;
-+ var->blue.offset = 0;
-+ var->red.length = 5;
-+ var->green.length = 5;
-+ var->blue.length = 5;
-+ var->transp.length = 1;
-+ var->transp.offset = 15;
-+ break;
-+ case 16:
-+ var->red.offset = 11;
-+ var->green.offset = 5;
-+ var->blue.offset = 0;
-+ var->red.length = 5;
-+ var->green.length = 6;
-+ var->blue.length = 5;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 24:
-+ var->red.offset = 16;
-+ var->green.offset = 8;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 32:
-+ var->red.offset = 16;
-+ var->green.offset = 8;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 8;
-+ var->transp.offset = 24;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+#if 0
-+ /* Here we walk the output mode list and look for modes. If we haven't
-+ * got it, then bail. Not very nice, so this is disabled.
-+ * In the set_par code, we create our mode based on the incoming
-+ * parameters. Nicer, but may not be desired by some.
-+ */
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ if (output->crtc == par->crtc)
-+ break;
-+ }
-+
-+ list_for_each_entry(drm_mode, &output->modes, head) {
-+ if (drm_mode->hdisplay == var->xres &&
-+ drm_mode->vdisplay == var->yres && drm_mode->clock != 0)
-+ break;
-+ }
-+
-+ if (!drm_mode)
-+ return -EINVAL;
-+#else
-+ (void)dev; /* silence warnings */
-+ (void)output;
-+ (void)drm_mode;
-+#endif
-+
-+ return 0;
-+}
-+
-+static int psbfb_move_fb_bo(struct fb_info *info, struct drm_buffer_object *bo,
-+ uint64_t mem_type_flags)
-+{
-+ struct psbfb_par *par;
-+ loff_t holelen;
-+ int ret;
-+
-+ /*
-+ * Kill all user-space mappings of this device. They will be
-+ * faulted back using nopfn when accessed.
-+ */
-+
-+ par = info->par;
-+ holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
-+ mutex_lock(&par->vi->vm_mutex);
-+ if (par->vi->f_mapping) {
-+ unmap_mapping_range(par->vi->f_mapping, 0, holelen, 1);
-+ }
-+
-+ ret = drm_bo_do_validate(bo,
-+ mem_type_flags,
-+ DRM_BO_MASK_MEM |
-+ DRM_BO_FLAG_NO_EVICT,
-+ DRM_BO_HINT_DONT_FENCE, 0, 1, NULL);
-+
-+ mutex_unlock(&par->vi->vm_mutex);
-+ return ret;
-+}
-+
-+/* this will let fbcon do the mode init */
-+static int psbfb_set_par(struct fb_info *info)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_device *dev = par->dev;
-+ struct drm_display_mode *drm_mode;
-+ struct fb_var_screeninfo *var = &info->var;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct drm_output *output;
-+ int pitch;
-+ int depth;
-+ int bpp = var->bits_per_pixel;
-+
-+ if (!fb)
-+ return -ENOMEM;
-+
-+ switch (bpp) {
-+ case 8:
-+ depth = 8;
-+ break;
-+ case 16:
-+ depth = (var->green.length == 6) ? 16 : 15;
-+ break;
-+ case 24: /* assume this is 32bpp / depth 24 */
-+ bpp = 32;
-+ /* fallthrough */
-+ case 32:
-+ depth = (var->transp.length > 0) ? 32 : 24;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
-+
-+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
-+#if 1
-+ /* Need to resize the fb object.
-+ * But the generic fbdev code doesn't really understand
-+ * that we can do this. So disable for now.
-+ */
-+ DRM_INFO("Can't support requested size, too big!\n");
-+ return -EINVAL;
-+#else
-+ int ret;
-+ struct drm_buffer_object *fbo = NULL, *tfbo;
-+ struct drm_bo_kmap_obj tmp_kmap, tkmap;
-+
-+ ret = drm_buffer_object_create(dev,
-+ pitch * var->yres,
-+ drm_bo_type_kernel,
-+ DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_WRITE |
-+ DRM_BO_FLAG_MEM_TT |
-+ DRM_BO_FLAG_MEM_VRAM |
-+ DRM_BO_FLAG_NO_EVICT,
-+ DRM_BO_HINT_DONT_FENCE,
-+ 0, 0, &fbo);
-+ if (ret || !fbo) {
-+ DRM_ERROR
-+ ("failed to allocate new resized framebuffer\n");
-+ return -ENOMEM;
-+ }
-+
-+ ret = drm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
-+ if (ret) {
-+ DRM_ERROR("failed to kmap framebuffer.\n");
-+ drm_bo_usage_deref_unlocked(&fbo);
-+ return -EINVAL;
-+ }
-+
-+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
-+ fb->height, fb->offset, fbo);
-+
-+ /* set new screen base */
-+ info->screen_base = tmp_kmap.virtual;
-+
-+ tkmap = fb->kmap;
-+ fb->kmap = tmp_kmap;
-+ drm_bo_kunmap(&tkmap);
-+
-+ tfbo = fb->bo;
-+ fb->bo = fbo;
-+ drm_bo_usage_deref_unlocked(&tfbo);
-+#endif
-+ }
-+
-+ fb->offset = fb->bo->offset - dev_priv->pg->gatt_start;
-+ fb->width = var->xres;
-+ fb->height = var->yres;
-+ fb->bits_per_pixel = bpp;
-+ fb->pitch = pitch;
-+ fb->depth = depth;
-+
-+ info->fix.line_length = fb->pitch;
-+ info->fix.visual =
-+ (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-+
-+ /* some fbdev's apps don't want these to change */
-+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
-+
-+ /* we have to align the output base address because the fb->bo
-+ may be moved in the previous drm_bo_do_validate().
-+ Otherwise the output screens may go black when exit the X
-+ window and re-enter the console */
-+ info->screen_base = fb->kmap.virtual;
-+
-+#if 0
-+ /* relates to resize - disable */
-+ info->fix.smem_len = info->fix.line_length * var->yres;
-+ info->screen_size = info->fix.smem_len; /* ??? */
-+#endif
-+
-+ /* Should we walk the output's modelist or just create our own ???
-+ * For now, we create and destroy a mode based on the incoming
-+ * parameters. But there's commented out code below which scans
-+ * the output list too.
-+ */
-+#if 0
-+ list_for_each_entry(output, &dev->mode_config.output_list, head) {
-+ if (output->crtc == par->crtc)
-+ break;
-+ }
-+
-+ list_for_each_entry(drm_mode, &output->modes, head) {
-+ if (drm_mode->hdisplay == var->xres &&
-+ drm_mode->vdisplay == var->yres && drm_mode->clock != 0)
-+ break;
-+ }
-+#else
-+ (void)output; /* silence warning */
-+
-+ drm_mode = drm_mode_create(dev);
-+ drm_mode->hdisplay = var->xres;
-+ drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
-+ drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
-+ drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
-+ drm_mode->vdisplay = var->yres;
-+ drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
-+ drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
-+ drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
-+ drm_mode->clock = PICOS2KHZ(var->pixclock);
-+ drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
-+ drm_mode_set_name(drm_mode);
-+ drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
-+#endif
-+
-+ if (!drm_crtc_set_mode(par->crtc, drm_mode, 0, 0))
-+ return -EINVAL;
-+
-+ /* Have to destroy our created mode if we're not searching the mode
-+ * list for it.
-+ */
-+#if 1
-+ drm_mode_destroy(dev, drm_mode);
-+#endif
-+
-+ return 0;
-+}
-+
-+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);;
-+
-+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
-+ uint32_t dst_offset, uint32_t dst_stride,
-+ uint32_t dst_format, uint16_t dst_x,
-+ uint16_t dst_y, uint16_t size_x,
-+ uint16_t size_y, uint32_t fill)
-+{
-+ uint32_t buffer[10];
-+ uint32_t *buf;
-+ int ret;
-+
-+ buf = buffer;
-+
-+ *buf++ = PSB_2D_FENCE_BH;
-+
-+ *buf++ =
-+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
-+ PSB_2D_DST_STRIDE_SHIFT);
-+ *buf++ = dst_offset;
-+
-+ *buf++ =
-+ PSB_2D_BLIT_BH |
-+ PSB_2D_ROT_NONE |
-+ PSB_2D_COPYORDER_TL2BR |
-+ PSB_2D_DSTCK_DISABLE |
-+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
-+
-+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
-+ *buf++ =
-+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
-+ PSB_2D_DST_YSTART_SHIFT);
-+ *buf++ =
-+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
-+ PSB_2D_DST_YSIZE_SHIFT);
-+ *buf++ = PSB_2D_FLUSH_BH;
-+
-+ psb_2d_lock(dev_priv);
-+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
-+ psb_2d_unlock(dev_priv);
-+
-+ return ret;
-+}
-+
-+static void psbfb_fillrect_accel(struct fb_info *info,
-+ const struct fb_fillrect *r)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_psb_private *dev_priv = par->dev->dev_private;
-+ uint32_t offset;
-+ uint32_t stride;
-+ uint32_t format;
-+
-+ if (!fb)
-+ return;
-+
-+ offset = fb->offset;
-+ stride = fb->pitch;
-+
-+ switch (fb->depth) {
-+ case 8:
-+ format = PSB_2D_DST_332RGB;
-+ break;
-+ case 15:
-+ format = PSB_2D_DST_555RGB;
-+ break;
-+ case 16:
-+ format = PSB_2D_DST_565RGB;
-+ break;
-+ case 24:
-+ case 32:
-+ /* this is wrong but since we don't do blending its okay */
-+ format = PSB_2D_DST_8888ARGB;
-+ break;
-+ default:
-+ /* software fallback */
-+ cfb_fillrect(info, r);
-+ return;
-+ }
-+
-+ psb_accel_2d_fillrect(dev_priv,
-+ offset, stride, format,
-+ r->dx, r->dy, r->width, r->height, r->color);
-+}
-+
-+static void psbfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-+{
-+ if (info->state != FBINFO_STATE_RUNNING)
-+ return;
-+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
-+ cfb_fillrect(info, rect);
-+ return;
-+ }
-+ if (in_interrupt() || in_atomic()) {
-+ /*
-+ * Catch case when we're shutting down.
-+ */
-+ cfb_fillrect(info, rect);
-+ return;
-+ }
-+ psbfb_fillrect_accel(info, rect);
-+}
-+
-+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
-+{
-+ if (xdir < 0)
-+ return ((ydir <
-+ 0) ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TR2BL);
-+ else
-+ return ((ydir <
-+ 0) ? PSB_2D_COPYORDER_BL2TR : PSB_2D_COPYORDER_TL2BR);
-+}
-+
-+/*
-+ * @srcOffset in bytes
-+ * @srcStride in bytes
-+ * @srcFormat psb 2D format defines
-+ * @dstOffset in bytes
-+ * @dstStride in bytes
-+ * @dstFormat psb 2D format defines
-+ * @srcX offset in pixels
-+ * @srcY offset in pixels
-+ * @dstX offset in pixels
-+ * @dstY offset in pixels
-+ * @sizeX of the copied area
-+ * @sizeY of the copied area
-+ */
-+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
-+ uint32_t src_offset, uint32_t src_stride,
-+ uint32_t src_format, uint32_t dst_offset,
-+ uint32_t dst_stride, uint32_t dst_format,
-+ uint16_t src_x, uint16_t src_y, uint16_t dst_x,
-+ uint16_t dst_y, uint16_t size_x, uint16_t size_y)
-+{
-+ uint32_t blit_cmd;
-+ uint32_t buffer[10];
-+ uint32_t *buf;
-+ uint32_t direction;
-+ int ret;
-+
-+ buf = buffer;
-+
-+ direction = psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
-+
-+ if (direction == PSB_2D_COPYORDER_BR2TL ||
-+ direction == PSB_2D_COPYORDER_TR2BL) {
-+ src_x += size_x - 1;
-+ dst_x += size_x - 1;
-+ }
-+ if (direction == PSB_2D_COPYORDER_BR2TL ||
-+ direction == PSB_2D_COPYORDER_BL2TR) {
-+ src_y += size_y - 1;
-+ dst_y += size_y - 1;
-+ }
-+
-+ blit_cmd =
-+ PSB_2D_BLIT_BH |
-+ PSB_2D_ROT_NONE |
-+ PSB_2D_DSTCK_DISABLE |
-+ PSB_2D_SRCCK_DISABLE |
-+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
-+
-+ *buf++ = PSB_2D_FENCE_BH;
-+ *buf++ =
-+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
-+ PSB_2D_DST_STRIDE_SHIFT);
-+ *buf++ = dst_offset;
-+ *buf++ =
-+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
-+ PSB_2D_SRC_STRIDE_SHIFT);
-+ *buf++ = src_offset;
-+ *buf++ =
-+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) | (src_y
-+ <<
-+ PSB_2D_SRCOFF_YSTART_SHIFT);
-+ *buf++ = blit_cmd;
-+ *buf++ =
-+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
-+ PSB_2D_DST_YSTART_SHIFT);
-+ *buf++ =
-+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
-+ PSB_2D_DST_YSIZE_SHIFT);
-+ *buf++ = PSB_2D_FLUSH_BH;
-+
-+ psb_2d_lock(dev_priv);
-+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
-+ psb_2d_unlock(dev_priv);
-+ return ret;
-+}
-+
-+static void psbfb_copyarea_accel(struct fb_info *info,
-+ const struct fb_copyarea *a)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_psb_private *dev_priv = par->dev->dev_private;
-+ uint32_t offset;
-+ uint32_t stride;
-+ uint32_t src_format;
-+ uint32_t dst_format;
-+
-+ if (!fb)
-+ return;
-+
-+ offset = fb->offset;
-+ stride = fb->pitch;
-+
-+ if (a->width == 8 || a->height == 8) {
-+ psb_2d_lock(dev_priv);
-+ psb_idle_2d(par->dev);
-+ psb_2d_unlock(dev_priv);
-+ cfb_copyarea(info, a);
-+ return;
-+ }
-+
-+ switch (fb->depth) {
-+ case 8:
-+ src_format = PSB_2D_SRC_332RGB;
-+ dst_format = PSB_2D_DST_332RGB;
-+ break;
-+ case 15:
-+ src_format = PSB_2D_SRC_555RGB;
-+ dst_format = PSB_2D_DST_555RGB;
-+ break;
-+ case 16:
-+ src_format = PSB_2D_SRC_565RGB;
-+ dst_format = PSB_2D_DST_565RGB;
-+ break;
-+ case 24:
-+ case 32:
-+ /* this is wrong but since we don't do blending its okay */
-+ src_format = PSB_2D_SRC_8888ARGB;
-+ dst_format = PSB_2D_DST_8888ARGB;
-+ break;
-+ default:
-+ /* software fallback */
-+ cfb_copyarea(info, a);
-+ return;
-+ }
-+
-+ psb_accel_2d_copy(dev_priv,
-+ offset, stride, src_format,
-+ offset, stride, dst_format,
-+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
-+}
-+
-+static void psbfb_copyarea(struct fb_info *info,
-+ const struct fb_copyarea *region)
-+{
-+ if (info->state != FBINFO_STATE_RUNNING)
-+ return;
-+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
-+ cfb_copyarea(info, region);
-+ return;
-+ }
-+ if (in_interrupt() || in_atomic()) {
-+ /*
-+ * Catch case when we're shutting down.
-+ */
-+ cfb_copyarea(info, region);
-+ return;
-+ }
-+
-+ psbfb_copyarea_accel(info, region);
-+}
-+
-+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
-+{
-+ if (info->state != FBINFO_STATE_RUNNING)
-+ return;
-+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
-+ cfb_imageblit(info, image);
-+ return;
-+ }
-+ if (in_interrupt() || in_atomic()) {
-+ cfb_imageblit(info, image);
-+ return;
-+ }
-+
-+ cfb_imageblit(info, image);
-+}
-+
-+static int psbfb_blank(int blank_mode, struct fb_info *info)
-+{
-+ int dpms_mode;
-+ struct psbfb_par *par = info->par;
-+ struct drm_output *output;
-+
-+ par->dpms_state = blank_mode;
-+
-+ switch(blank_mode) {
-+ case FB_BLANK_UNBLANK:
-+ dpms_mode = DPMSModeOn;
-+ break;
-+ case FB_BLANK_NORMAL:
-+ if (!par->crtc)
-+ return 0;
-+ (*par->crtc->funcs->dpms)(par->crtc, DPMSModeStandby);
-+ return 0;
-+ case FB_BLANK_HSYNC_SUSPEND:
-+ default:
-+ dpms_mode = DPMSModeStandby;
-+ break;
-+ case FB_BLANK_VSYNC_SUSPEND:
-+ dpms_mode = DPMSModeSuspend;
-+ break;
-+ case FB_BLANK_POWERDOWN:
-+ dpms_mode = DPMSModeOff;
-+ break;
-+ }
-+
-+ if (!par->crtc)
-+ return 0;
-+
-+ list_for_each_entry(output, &par->dev->mode_config.output_list, head) {
-+ if (output->crtc == par->crtc)
-+ (*output->funcs->dpms)(output, dpms_mode);
-+ }
-+
-+ (*par->crtc->funcs->dpms)(par->crtc, dpms_mode);
-+ return 0;
-+}
-+
-+
-+static int psbfb_kms_off(struct drm_device *dev, int suspend)
-+{
-+ struct drm_framebuffer *fb = 0;
-+ struct drm_buffer_object *bo = 0;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+
-+ DRM_DEBUG("psbfb_kms_off_ioctl\n");
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
-+ struct fb_info *info = fb->fbdev;
-+ struct psbfb_par *par = info->par;
-+ int save_dpms_state;
-+
-+ if (suspend)
-+ fb_set_suspend(info, 1);
-+ else
-+ info->state &= ~FBINFO_STATE_RUNNING;
-+
-+ info->screen_base = NULL;
-+
-+ bo = fb->bo;
-+
-+ if (!bo)
-+ continue;
-+
-+ drm_bo_kunmap(&fb->kmap);
-+
-+ /*
-+ * We don't take the 2D lock here as we assume that the
-+ * 2D engine will eventually idle anyway.
-+ */
-+
-+ if (!suspend) {
-+ uint32_t dummy2 = 0;
-+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
-+ &dummy2, &dummy2);
-+ psb_2d_lock(dev_priv);
-+ (void)psb_idle_2d(dev);
-+ psb_2d_unlock(dev_priv);
-+ } else
-+ psb_idle_2d(dev);
-+
-+ save_dpms_state = par->dpms_state;
-+ psbfb_blank(FB_BLANK_NORMAL, info);
-+ par->dpms_state = save_dpms_state;
-+
-+ ret = psbfb_move_fb_bo(info, bo, DRM_BO_FLAG_MEM_LOCAL);
-+
-+ if (ret)
-+ goto out_err;
-+ }
-+ out_err:
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ return ret;
-+}
-+
-+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ int ret;
-+
-+ acquire_console_sem();
-+ ret = psbfb_kms_off(dev, 0);
-+ release_console_sem();
-+
-+ return ret;
-+}
-+
-+static int psbfb_kms_on(struct drm_device *dev, int resume)
-+{
-+ struct drm_framebuffer *fb = 0;
-+ struct drm_buffer_object *bo = 0;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+ int dummy;
-+
-+ DRM_DEBUG("psbfb_kms_on_ioctl\n");
-+
-+ if (!resume) {
-+ uint32_t dummy2 = 0;
-+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
-+ &dummy2, &dummy2);
-+ psb_2d_lock(dev_priv);
-+ (void)psb_idle_2d(dev);
-+ psb_2d_unlock(dev_priv);
-+ } else
-+ psb_idle_2d(dev);
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
-+ struct fb_info *info = fb->fbdev;
-+ struct psbfb_par *par = info->par;
-+
-+ bo = fb->bo;
-+ if (!bo)
-+ continue;
-+
-+ ret = psbfb_move_fb_bo(info, bo,
-+ DRM_BO_FLAG_MEM_TT |
-+ DRM_BO_FLAG_MEM_VRAM |
-+ DRM_BO_FLAG_NO_EVICT);
-+ if (ret)
-+ goto out_err;
-+
-+ ret = drm_bo_kmap(bo, 0, bo->num_pages, &fb->kmap);
-+ if (ret)
-+ goto out_err;
-+
-+ info->screen_base = drm_bmo_virtual(&fb->kmap, &dummy);
-+ fb->offset = bo->offset - dev_priv->pg->gatt_start;
-+
-+ if (ret)
-+ goto out_err;
-+
-+ if (resume)
-+ fb_set_suspend(info, 0);
-+ else
-+ info->state |= FBINFO_STATE_RUNNING;
-+
-+ /*
-+ * Re-run modesetting here, since the VDS scanout offset may
-+ * have changed.
-+ */
-+
-+ if (par->crtc->enabled) {
-+ psbfb_set_par(info);
-+ psbfb_blank(par->dpms_state, info);
-+ }
-+ }
-+ out_err:
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ return ret;
-+}
-+
-+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ int ret;
-+
-+ acquire_console_sem();
-+ ret = psbfb_kms_on(dev, 0);
-+ release_console_sem();
-+#ifdef SII_1392_WA
-+ if((SII_1392 != 1) || (drm_psb_no_fb==0))
-+ drm_disable_unused_functions(dev);
-+#else
-+ drm_disable_unused_functions(dev);
-+#endif
-+ return ret;
-+}
-+
-+void psbfb_suspend(struct drm_device *dev)
-+{
-+ acquire_console_sem();
-+ psbfb_kms_off(dev, 1);
-+ release_console_sem();
-+}
-+
-+void psbfb_resume(struct drm_device *dev)
-+{
-+ acquire_console_sem();
-+ psbfb_kms_on(dev, 1);
-+ release_console_sem();
-+#ifdef SII_1392_WA
-+ if((SII_1392 != 1) || (drm_psb_no_fb==0))
-+ drm_disable_unused_functions(dev);
-+#else
-+ drm_disable_unused_functions(dev);
-+#endif
-+}
-+
-+/*
-+ * FIXME: Before kernel inclusion, migrate nopfn to fault.
-+ * Also, these should be the default vm ops for buffer object type fbs.
-+ */
-+
-+extern unsigned long drm_bo_vm_fault(struct vm_area_struct *vma,
-+ struct vm_fault *vmf);
-+
-+/*
-+ * This wrapper is a bit ugly and is here because we need access to a mutex
-+ * that we can lock both around nopfn and around unmap_mapping_range + move.
-+ * Normally, this would've been done using the bo mutex, but unfortunately
-+ * we cannot lock it around drm_bo_do_validate(), since that would imply
-+ * recursive locking.
-+ */
-+
-+static int psbfb_fault(struct vm_area_struct *vma,
-+ struct vm_fault *vmf)
-+{
-+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
-+ struct vm_area_struct tmp_vma;
-+ int ret;
-+
-+ mutex_lock(&vi->vm_mutex);
-+ tmp_vma = *vma;
-+ tmp_vma.vm_private_data = vi->bo;
-+ ret = drm_bo_vm_fault(&tmp_vma, vmf);
-+ mutex_unlock(&vi->vm_mutex);
-+ return ret;
-+}
-+
-+static void psbfb_vm_open(struct vm_area_struct *vma)
-+{
-+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
-+
-+ atomic_inc(&vi->refcount);
-+}
-+
-+static void psbfb_vm_close(struct vm_area_struct *vma)
-+{
-+ psbfb_vm_info_deref((struct psbfb_vm_info **)&vma->vm_private_data);
-+}
-+
-+static struct vm_operations_struct psbfb_vm_ops = {
-+ .fault = psbfb_fault,
-+ .open = psbfb_vm_open,
-+ .close = psbfb_vm_close,
-+};
-+
-+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_buffer_object *bo = fb->bo;
-+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-+ unsigned long offset = vma->vm_pgoff;
-+
-+ if (vma->vm_pgoff != 0)
-+ return -EINVAL;
-+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
-+ return -EINVAL;
-+ if (offset + size > bo->num_pages)
-+ return -EINVAL;
-+
-+ mutex_lock(&par->vi->vm_mutex);
-+ if (!par->vi->f_mapping)
-+ par->vi->f_mapping = vma->vm_file->f_mapping;
-+ mutex_unlock(&par->vi->vm_mutex);
-+
-+ vma->vm_private_data = psbfb_vm_info_ref(par->vi);
-+
-+ vma->vm_ops = &psbfb_vm_ops;
-+ vma->vm_flags |= VM_PFNMAP;
-+
-+ return 0;
-+}
-+
-+int psbfb_sync(struct fb_info *info)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_psb_private *dev_priv = par->dev->dev_private;
-+
-+ psb_2d_lock(dev_priv);
-+ psb_idle_2d(par->dev);
-+ psb_2d_unlock(dev_priv);
-+
-+ return 0;
-+}
-+
-+static struct fb_ops psbfb_ops = {
-+ .owner = THIS_MODULE,
-+ .fb_check_var = psbfb_check_var,
-+ .fb_set_par = psbfb_set_par,
-+ .fb_setcolreg = psbfb_setcolreg,
-+ .fb_fillrect = psbfb_fillrect,
-+ .fb_copyarea = psbfb_copyarea,
-+ .fb_imageblit = psbfb_imageblit,
-+ .fb_mmap = psbfb_mmap,
-+ .fb_sync = psbfb_sync,
-+ .fb_blank = psbfb_blank,
-+};
-+
-+int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
-+{
-+ struct fb_info *info;
-+ struct psbfb_par *par;
-+ struct device *device = &dev->pdev->dev;
-+ struct drm_framebuffer *fb;
-+ struct drm_display_mode *mode = crtc->desired_mode;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_buffer_object *fbo = NULL;
-+ int ret;
-+ int is_iomem;
-+
-+ if (drm_psb_no_fb) {
-+ /* need to do this as the DRM will disable the output */
-+ crtc->enabled = 1;
-+ return 0;
-+ }
-+
-+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
-+ if (!info) {
-+ return -ENOMEM;
-+ }
-+
-+ fb = drm_framebuffer_create(dev);
-+ if (!fb) {
-+ framebuffer_release(info);
-+ DRM_ERROR("failed to allocate fb.\n");
-+ return -ENOMEM;
-+ }
-+ crtc->fb = fb;
-+
-+ fb->width = mode->hdisplay;
-+ fb->height = mode->vdisplay;
-+
-+ fb->bits_per_pixel = 32;
-+ fb->depth = 24;
-+ fb->pitch =
-+ ((fb->width * ((fb->bits_per_pixel + 1) / 8)) + 0x3f) & ~0x3f;
-+
-+ ret = drm_buffer_object_create(dev,
-+ fb->pitch * fb->height,
-+ drm_bo_type_kernel,
-+ DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_WRITE |
-+ DRM_BO_FLAG_MEM_TT |
-+ DRM_BO_FLAG_MEM_VRAM |
-+ DRM_BO_FLAG_NO_EVICT,
-+ DRM_BO_HINT_DONT_FENCE, 0, 0, &fbo);
-+ if (ret || !fbo) {
-+ DRM_ERROR("failed to allocate framebuffer\n");
-+ goto out_err0;
-+ }
-+
-+ fb->offset = fbo->offset - dev_priv->pg->gatt_start;
-+ fb->bo = fbo;
-+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
-+ fb->height, fb->offset, fbo);
-+
-+ fb->fbdev = info;
-+
-+ par = info->par;
-+
-+ par->dev = dev;
-+ par->crtc = crtc;
-+ par->vi = psbfb_vm_info_create();
-+ if (!par->vi)
-+ goto out_err1;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ par->vi->bo = fbo;
-+ atomic_inc(&fbo->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ par->vi->f_mapping = NULL;
-+ info->fbops = &psbfb_ops;
-+
-+ strcpy(info->fix.id, "psbfb");
-+ info->fix.type = FB_TYPE_PACKED_PIXELS;
-+ info->fix.visual = FB_VISUAL_DIRECTCOLOR;
-+ info->fix.type_aux = 0;
-+ info->fix.xpanstep = 1;
-+ info->fix.ypanstep = 1;
-+ info->fix.ywrapstep = 0;
-+ info->fix.accel = FB_ACCEL_NONE; /* ??? */
-+ info->fix.type_aux = 0;
-+ info->fix.mmio_start = 0;
-+ info->fix.mmio_len = 0;
-+ info->fix.line_length = fb->pitch;
-+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
-+ info->fix.smem_len = info->fix.line_length * fb->height;
-+
-+ info->flags = FBINFO_DEFAULT |
-+ FBINFO_PARTIAL_PAN_OK /*| FBINFO_MISC_ALWAYS_SETPAR */ ;
-+
-+ ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
-+ if (ret) {
-+ DRM_ERROR("error mapping fb: %d\n", ret);
-+ goto out_err2;
-+ }
-+
-+ info->screen_base = drm_bmo_virtual(&fb->kmap, &is_iomem);
-+ memset(info->screen_base, 0x00, fb->pitch*fb->height);
-+ info->screen_size = info->fix.smem_len; /* FIXME */
-+ info->pseudo_palette = fb->pseudo_palette;
-+ info->var.xres_virtual = fb->width;
-+ info->var.yres_virtual = fb->height;
-+ info->var.bits_per_pixel = fb->bits_per_pixel;
-+ info->var.xoffset = 0;
-+ info->var.yoffset = 0;
-+ info->var.activate = FB_ACTIVATE_NOW;
-+ info->var.height = -1;
-+ info->var.width = -1;
-+ info->var.vmode = FB_VMODE_NONINTERLACED;
-+
-+ info->var.xres = mode->hdisplay;
-+ info->var.right_margin = mode->hsync_start - mode->hdisplay;
-+ info->var.hsync_len = mode->hsync_end - mode->hsync_start;
-+ info->var.left_margin = mode->htotal - mode->hsync_end;
-+ info->var.yres = mode->vdisplay;
-+ info->var.lower_margin = mode->vsync_start - mode->vdisplay;
-+ info->var.vsync_len = mode->vsync_end - mode->vsync_start;
-+ info->var.upper_margin = mode->vtotal - mode->vsync_end;
-+ info->var.pixclock = 10000000 / mode->htotal * 1000 /
-+ mode->vtotal * 100;
-+ /* avoid overflow */
-+ info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-+
-+ info->pixmap.size = 64 * 1024;
-+ info->pixmap.buf_align = 8;
-+ info->pixmap.access_align = 32;
-+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
-+ info->pixmap.scan_align = 1;
-+
-+ DRM_DEBUG("fb depth is %d\n", fb->depth);
-+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
-+ switch (fb->depth) {
-+ case 8:
-+ info->var.red.offset = 0;
-+ info->var.green.offset = 0;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 8; /* 8bit DAC */
-+ info->var.green.length = 8;
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 0;
-+ info->var.transp.length = 0;
-+ break;
-+ case 15:
-+ info->var.red.offset = 10;
-+ info->var.green.offset = 5;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = info->var.green.length =
-+ info->var.blue.length = 5;
-+ info->var.transp.offset = 15;
-+ info->var.transp.length = 1;
-+ break;
-+ case 16:
-+ info->var.red.offset = 11;
-+ info->var.green.offset = 5;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 5;
-+ info->var.green.length = 6;
-+ info->var.blue.length = 5;
-+ info->var.transp.offset = 0;
-+ break;
-+ case 24:
-+ info->var.red.offset = 16;
-+ info->var.green.offset = 8;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = info->var.green.length =
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 0;
-+ info->var.transp.length = 0;
-+ break;
-+ case 32:
-+ info->var.red.offset = 16;
-+ info->var.green.offset = 8;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = info->var.green.length =
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 24;
-+ info->var.transp.length = 8;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ if (register_framebuffer(info) < 0)
-+ goto out_err3;
-+
-+ if (psbfb_check_var(&info->var, info) < 0)
-+ goto out_err4;
-+
-+ psbfb_set_par(info);
-+
-+ DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id);
-+
-+ return 0;
-+ out_err4:
-+ unregister_framebuffer(info);
-+ out_err3:
-+ drm_bo_kunmap(&fb->kmap);
-+ out_err2:
-+ psbfb_vm_info_deref(&par->vi);
-+ out_err1:
-+ drm_bo_usage_deref_unlocked(&fb->bo);
-+ out_err0:
-+ drm_framebuffer_destroy(fb);
-+ framebuffer_release(info);
-+ crtc->fb = NULL;
-+ return -EINVAL;
-+}
-+
-+EXPORT_SYMBOL(psbfb_probe);
-+
-+int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
-+{
-+ struct drm_framebuffer *fb;
-+ struct fb_info *info;
-+ struct psbfb_par *par;
-+
-+ if (drm_psb_no_fb)
-+ return 0;
-+
-+ fb = crtc->fb;
-+ info = fb->fbdev;
-+
-+ if (info) {
-+ unregister_framebuffer(info);
-+ drm_bo_kunmap(&fb->kmap);
-+ par = info->par;
-+ if (par)
-+ psbfb_vm_info_deref(&par->vi);
-+ drm_bo_usage_deref_unlocked(&fb->bo);
-+ drm_framebuffer_destroy(fb);
-+ framebuffer_release(info);
-+ }
-+ return 0;
-+}
-+
-+EXPORT_SYMBOL(psbfb_remove);
-+
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_fence.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_fence.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,285 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+
-+static void psb_poll_ta(struct drm_device *dev, uint32_t waiting_types)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ uint32_t cur_flag = 1;
-+ uint32_t flags = 0;
-+ uint32_t sequence = 0;
-+ uint32_t remaining = 0xFFFFFFFF;
-+ uint32_t diff;
-+
-+ struct psb_scheduler *scheduler;
-+ struct psb_scheduler_seq *seq;
-+ struct drm_fence_class_manager *fc =
-+ &dev->fm.fence_class[PSB_ENGINE_TA];
-+
-+ if (unlikely(!dev_priv))
-+ return;
-+
-+ scheduler = &dev_priv->scheduler;
-+ seq = scheduler->seq;
-+
-+ while (likely(waiting_types & remaining)) {
-+ if (!(waiting_types & cur_flag))
-+ goto skip;
-+ if (seq->reported)
-+ goto skip;
-+ if (flags == 0)
-+ sequence = seq->sequence;
-+ else if (sequence != seq->sequence) {
-+ drm_fence_handler(dev, PSB_ENGINE_TA,
-+ sequence, flags, 0);
-+ sequence = seq->sequence;
-+ flags = 0;
-+ }
-+ flags |= cur_flag;
-+
-+ /*
-+ * Sequence may not have ended up on the ring yet.
-+ * In that case, report it but don't mark it as
-+ * reported. A subsequent poll will report it again.
-+ */
-+
-+ diff = (fc->latest_queued_sequence - sequence) &
-+ driver->sequence_mask;
-+ if (diff < driver->wrap_diff)
-+ seq->reported = 1;
-+
-+ skip:
-+ cur_flag <<= 1;
-+ remaining <<= 1;
-+ seq++;
-+ }
-+
-+ if (flags) {
-+ drm_fence_handler(dev, PSB_ENGINE_TA, sequence, flags, 0);
-+ }
-+}
-+
-+static void psb_poll_other(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t waiting_types)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-+ uint32_t sequence;
-+
-+ if (unlikely(!dev_priv))
-+ return;
-+
-+ if (waiting_types) {
-+ if (fence_class == PSB_ENGINE_VIDEO)
-+ sequence = dev_priv->msvdx_current_sequence;
-+ else
-+ sequence = dev_priv->comm[fence_class << 4];
-+
-+ drm_fence_handler(dev, fence_class, sequence,
-+ DRM_FENCE_TYPE_EXE, 0);
-+
-+ switch (fence_class) {
-+ case PSB_ENGINE_2D:
-+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
-+ psb_2D_irq_off(dev_priv);
-+ dev_priv->fence0_irq_on = 0;
-+ } else if (!dev_priv->fence0_irq_on
-+ && fc->waiting_types) {
-+ psb_2D_irq_on(dev_priv);
-+ dev_priv->fence0_irq_on = 1;
-+ }
-+ break;
-+#if 0
-+ /*
-+ * FIXME: MSVDX irq switching
-+ */
-+
-+ case PSB_ENGINE_VIDEO:
-+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
-+ psb_msvdx_irq_off(dev_priv);
-+ dev_priv->fence2_irq_on = 0;
-+ } else if (!dev_priv->fence2_irq_on
-+ && fc->pending_exe_flush) {
-+ psb_msvdx_irq_on(dev_priv);
-+ dev_priv->fence2_irq_on = 1;
-+ }
-+ break;
-+#endif
-+ default:
-+ return;
-+ }
-+ }
-+}
-+
-+static void psb_fence_poll(struct drm_device *dev,
-+ uint32_t fence_class, uint32_t waiting_types)
-+{
-+ switch (fence_class) {
-+ case PSB_ENGINE_TA:
-+ psb_poll_ta(dev, waiting_types);
-+ break;
-+ default:
-+ psb_poll_other(dev, fence_class, waiting_types);
-+ break;
-+ }
-+}
-+
-+void psb_fence_error(struct drm_device *dev,
-+ uint32_t fence_class,
-+ uint32_t sequence, uint32_t type, int error)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ unsigned long irq_flags;
-+
-+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
-+ write_lock_irqsave(&fm->lock, irq_flags);
-+ drm_fence_handler(dev, fence_class, sequence, type, error);
-+ write_unlock_irqrestore(&fm->lock, irq_flags);
-+}
-+
-+int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t flags, uint32_t * sequence,
-+ uint32_t * native_type)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ uint32_t seq = 0;
-+ int ret;
-+
-+ if (!dev_priv)
-+ return -EINVAL;
-+
-+ if (fence_class >= PSB_NUM_ENGINES)
-+ return -EINVAL;
-+
-+ switch (fence_class) {
-+ case PSB_ENGINE_2D:
-+ spin_lock(&dev_priv->sequence_lock);
-+ seq = ++dev_priv->sequence[fence_class];
-+ spin_unlock(&dev_priv->sequence_lock);
-+ ret = psb_blit_sequence(dev_priv, seq);
-+ if (ret)
-+ return ret;
-+ break;
-+ case PSB_ENGINE_VIDEO:
-+ spin_lock(&dev_priv->sequence_lock);
-+ seq = ++dev_priv->sequence[fence_class];
-+ spin_unlock(&dev_priv->sequence_lock);
-+ break;
-+ default:
-+ spin_lock(&dev_priv->sequence_lock);
-+ seq = dev_priv->sequence[fence_class];
-+ spin_unlock(&dev_priv->sequence_lock);
-+ }
-+
-+ *sequence = seq;
-+ *native_type = DRM_FENCE_TYPE_EXE;
-+
-+ return 0;
-+}
-+
-+uint32_t psb_fence_advance_sequence(struct drm_device * dev,
-+ uint32_t fence_class)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ uint32_t sequence;
-+
-+ spin_lock(&dev_priv->sequence_lock);
-+ sequence = ++dev_priv->sequence[fence_class];
-+ spin_unlock(&dev_priv->sequence_lock);
-+
-+ return sequence;
-+}
-+
-+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-+
-+#ifdef FIX_TG_16
-+ if (fence_class == 0) {
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
-+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
-+ _PSB_C2B_STATUS_BUSY) == 0))
-+ psb_resume_ta_2d_idle(dev_priv);
-+ }
-+#endif
-+ write_lock(&fm->lock);
-+ psb_fence_poll(dev, fence_class, fc->waiting_types);
-+ write_unlock(&fm->lock);
-+}
-+
-+static int psb_fence_wait(struct drm_fence_object *fence,
-+ int lazy, int interruptible, uint32_t mask)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_class_manager *fc =
-+ &dev->fm.fence_class[fence->fence_class];
-+ int ret = 0;
-+ unsigned long timeout = DRM_HZ *
-+ ((fence->fence_class == PSB_ENGINE_TA) ? 30 : 3);
-+
-+ drm_fence_object_flush(fence, mask);
-+ if (interruptible)
-+ ret = wait_event_interruptible_timeout
-+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
-+ timeout);
-+ else
-+ ret = wait_event_timeout
-+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
-+ timeout);
-+
-+ if (unlikely(ret == -ERESTARTSYS))
-+ return -EAGAIN;
-+
-+ if (unlikely(ret == 0))
-+ return -EBUSY;
-+
-+ return 0;
-+}
-+
-+struct drm_fence_driver psb_fence_driver = {
-+ .num_classes = PSB_NUM_ENGINES,
-+ .wrap_diff = (1 << 30),
-+ .flush_diff = (1 << 29),
-+ .sequence_mask = 0xFFFFFFFFU,
-+ .has_irq = NULL,
-+ .emit = psb_fence_emit_sequence,
-+ .flush = NULL,
-+ .poll = psb_fence_poll,
-+ .needed_flush = NULL,
-+ .wait = psb_fence_wait
-+};
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_gtt.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_gtt.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,233 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+#include "drmP.h"
-+#include "psb_drv.h"
-+
-+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
-+{
-+ uint32_t mask = PSB_PTE_VALID;
-+
-+ if (type & PSB_MMU_CACHED_MEMORY)
-+ mask |= PSB_PTE_CACHED;
-+ if (type & PSB_MMU_RO_MEMORY)
-+ mask |= PSB_PTE_RO;
-+ if (type & PSB_MMU_WO_MEMORY)
-+ mask |= PSB_PTE_WO;
-+
-+ return (pfn << PAGE_SHIFT) | mask;
-+}
-+
-+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
-+{
-+ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
-+
-+ if (!tmp)
-+ return NULL;
-+
-+ init_rwsem(&tmp->sem);
-+ tmp->dev = dev;
-+
-+ return tmp;
-+}
-+
-+void psb_gtt_takedown(struct psb_gtt *pg, int free)
-+{
-+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
-+
-+ if (!pg)
-+ return;
-+
-+ if (pg->gtt_map) {
-+ iounmap(pg->gtt_map);
-+ pg->gtt_map = NULL;
-+ }
-+ if (pg->initialized) {
-+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
-+ pg->gmch_ctrl);
-+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
-+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
-+ }
-+ if (free)
-+ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
-+}
-+
-+int psb_gtt_init(struct psb_gtt *pg, int resume)
-+{
-+ struct drm_device *dev = pg->dev;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned gtt_pages;
-+ unsigned long stolen_size;
-+ unsigned i, num_pages;
-+ unsigned pfn_base;
-+
-+ int ret = 0;
-+ uint32_t pte;
-+
-+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
-+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
-+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
-+
-+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
-+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
-+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
-+
-+ pg->initialized = 1;
-+
-+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
-+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
-+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
-+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
-+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
-+ >> PAGE_SHIFT;
-+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
-+ stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
-+
-+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
-+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
-+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
-+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
-+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
-+
-+ if (resume && (gtt_pages != pg->gtt_pages) &&
-+ (stolen_size != pg->stolen_size)) {
-+ DRM_ERROR("GTT resume error.\n");
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ pg->gtt_pages = gtt_pages;
-+ pg->stolen_size = stolen_size;
-+ pg->gtt_map =
-+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
-+ if (!pg->gtt_map) {
-+ DRM_ERROR("Failure to map gtt.\n");
-+ ret = -ENOMEM;
-+ goto out_err;
-+ }
-+
-+ /*
-+ * insert stolen pages.
-+ */
-+
-+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
-+ num_pages = stolen_size >> PAGE_SHIFT;
-+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
-+ num_pages, pfn_base);
-+ for (i = 0; i < num_pages; ++i) {
-+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
-+ iowrite32(pte, pg->gtt_map + i);
-+ }
-+
-+ /*
-+ * Init rest of gtt.
-+ */
-+
-+ pfn_base = page_to_pfn(dev_priv->scratch_page);
-+ pte = psb_gtt_mask_pte(pfn_base, 0);
-+ PSB_DEBUG_INIT("Initializing the rest of a total "
-+ "of %d gtt pages.\n", pg->gatt_pages);
-+
-+ for (; i < pg->gatt_pages; ++i)
-+ iowrite32(pte, pg->gtt_map + i);
-+ (void)ioread32(pg->gtt_map + i - 1);
-+
-+ return 0;
-+
-+ out_err:
-+ psb_gtt_takedown(pg, 0);
-+ return ret;
-+}
-+
-+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
-+ unsigned offset_pages, unsigned num_pages,
-+ unsigned desired_tile_stride, unsigned hw_tile_stride,
-+ int type)
-+{
-+ unsigned rows = 1;
-+ unsigned add;
-+ unsigned row_add;
-+ unsigned i;
-+ unsigned j;
-+ uint32_t *cur_page = NULL;
-+ uint32_t pte;
-+
-+ if (hw_tile_stride)
-+ rows = num_pages / desired_tile_stride;
-+ else
-+ desired_tile_stride = num_pages;
-+
-+ add = desired_tile_stride;
-+ row_add = hw_tile_stride;
-+
-+ down_read(&pg->sem);
-+ for (i = 0; i < rows; ++i) {
-+ cur_page = pg->gtt_map + offset_pages;
-+ for (j = 0; j < desired_tile_stride; ++j) {
-+ pte = psb_gtt_mask_pte(page_to_pfn(*pages++), type);
-+ iowrite32(pte, cur_page++);
-+ }
-+ offset_pages += add;
-+ }
-+ (void)ioread32(cur_page - 1);
-+ up_read(&pg->sem);
-+
-+ return 0;
-+}
-+
-+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
-+ unsigned num_pages, unsigned desired_tile_stride,
-+ unsigned hw_tile_stride)
-+{
-+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
-+ unsigned rows = 1;
-+ unsigned add;
-+ unsigned row_add;
-+ unsigned i;
-+ unsigned j;
-+ uint32_t *cur_page = NULL;
-+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
-+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
-+
-+ if (hw_tile_stride)
-+ rows = num_pages / desired_tile_stride;
-+ else
-+ desired_tile_stride = num_pages;
-+
-+ add = desired_tile_stride;
-+ row_add = hw_tile_stride;
-+
-+ down_read(&pg->sem);
-+ for (i = 0; i < rows; ++i) {
-+ cur_page = pg->gtt_map + offset_pages;
-+ for (j = 0; j < desired_tile_stride; ++j) {
-+ iowrite32(pte, cur_page++);
-+ }
-+ offset_pages += add;
-+ }
-+ (void)ioread32(cur_page - 1);
-+ up_read(&pg->sem);
-+
-+ return 0;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_i2c.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_i2c.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,179 @@
-+/*
-+ * Copyright © 2006-2007 Intel Corporation
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the next
-+ * paragraph) shall be included in all copies or substantial portions of the
-+ * Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors:
-+ * Eric Anholt <eric@anholt.net>
-+ */
-+/*
-+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+
-+#include <linux/i2c.h>
-+#include <linux/i2c-id.h>
-+#include <linux/i2c-algo-bit.h>
-+#include "drmP.h"
-+#include "drm.h"
-+#include "intel_drv.h"
-+#include "psb_drv.h"
-+
-+/*
-+ * Intel GPIO access functions
-+ */
-+
-+#define I2C_RISEFALL_TIME 20
-+
-+static int get_clock(void *data)
-+{
-+ struct intel_i2c_chan *chan = data;
-+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
-+ uint32_t val;
-+
-+ val = PSB_RVDC32(chan->reg);
-+ return ((val & GPIO_CLOCK_VAL_IN) != 0);
-+}
-+
-+static int get_data(void *data)
-+{
-+ struct intel_i2c_chan *chan = data;
-+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
-+ uint32_t val;
-+
-+ val = PSB_RVDC32(chan->reg);
-+ return ((val & GPIO_DATA_VAL_IN) != 0);
-+}
-+
-+static void set_clock(void *data, int state_high)
-+{
-+ struct intel_i2c_chan *chan = data;
-+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
-+ uint32_t reserved = 0, clock_bits;
-+
-+ /* On most chips, these bits must be preserved in software. */
-+ reserved = PSB_RVDC32(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
-+ GPIO_CLOCK_PULLUP_DISABLE);
-+
-+ if (state_high)
-+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
-+ else
-+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
-+ GPIO_CLOCK_VAL_MASK;
-+ PSB_WVDC32(reserved | clock_bits, chan->reg);
-+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
-+}
-+
-+static void set_data(void *data, int state_high)
-+{
-+ struct intel_i2c_chan *chan = data;
-+ struct drm_psb_private *dev_priv = chan->drm_dev->dev_private;
-+ uint32_t reserved = 0, data_bits;
-+
-+ /* On most chips, these bits must be preserved in software. */
-+ reserved = PSB_RVDC32(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
-+ GPIO_CLOCK_PULLUP_DISABLE);
-+
-+ if (state_high)
-+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
-+ else
-+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
-+ GPIO_DATA_VAL_MASK;
-+
-+ PSB_WVDC32(data_bits, chan->reg);
-+ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
-+}
-+
-+/**
-+ * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
-+ * @dev: DRM device
-+ * @output: driver specific output device
-+ * @reg: GPIO reg to use
-+ * @name: name for this bus
-+ *
-+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
-+ * in output probing and control (e.g. DDC or SDVO control functions).
-+ *
-+ * Possible values for @reg include:
-+ * %GPIOA
-+ * %GPIOB
-+ * %GPIOC
-+ * %GPIOD
-+ * %GPIOE
-+ * %GPIOF
-+ * %GPIOG
-+ * %GPIOH
-+ * see PRM for details on how these different busses are used.
-+ */
-+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev,
-+ const uint32_t reg, const char *name)
-+{
-+ struct intel_i2c_chan *chan;
-+
-+ chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
-+ if (!chan)
-+ goto out_free;
-+
-+ chan->drm_dev = dev;
-+ chan->reg = reg;
-+ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
-+ chan->adapter.owner = THIS_MODULE;
-+ chan->adapter.id = I2C_HW_B_INTELFB;
-+ chan->adapter.algo_data = &chan->algo;
-+ chan->adapter.dev.parent = &dev->pdev->dev;
-+ chan->algo.setsda = set_data;
-+ chan->algo.setscl = set_clock;
-+ chan->algo.getsda = get_data;
-+ chan->algo.getscl = get_clock;
-+ chan->algo.udelay = 20;
-+ chan->algo.timeout = usecs_to_jiffies(2200);
-+ chan->algo.data = chan;
-+
-+ i2c_set_adapdata(&chan->adapter, chan);
-+
-+ if (i2c_bit_add_bus(&chan->adapter))
-+ goto out_free;
-+
-+ /* JJJ: raise SCL and SDA? */
-+ set_data(chan, 1);
-+ set_clock(chan, 1);
-+ udelay(20);
-+
-+ return chan;
-+
-+ out_free:
-+ kfree(chan);
-+ return NULL;
-+}
-+
-+/**
-+ * intel_i2c_destroy - unregister and free i2c bus resources
-+ * @output: channel to free
-+ *
-+ * Unregister the adapter from the i2c layer, then free the structure.
-+ */
-+void intel_i2c_destroy(struct intel_i2c_chan *chan)
-+{
-+ if (!chan)
-+ return;
-+
-+ i2c_del_adapter(&chan->adapter);
-+ kfree(chan);
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_irq.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_irq.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,382 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "psb_msvdx.h"
-+
-+/*
-+ * Video display controller interrupt.
-+ */
-+
-+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ uint32_t pipe_stats;
-+ int wake = 0;
-+
-+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
-+ pipe_stats = PSB_RVDC32(PSB_PIPEASTAT);
-+ atomic_inc(&dev->vbl_received);
-+ wake = 1;
-+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
-+ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
-+ }
-+
-+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
-+ pipe_stats = PSB_RVDC32(PSB_PIPEBSTAT);
-+ atomic_inc(&dev->vbl_received2);
-+ wake = 1;
-+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
-+ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
-+ }
-+
-+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
-+ (void)PSB_RVDC32(PSB_INT_IDENTITY_R);
-+ DRM_READMEMORYBARRIER();
-+
-+ if (wake) {
-+ DRM_WAKEUP(&dev->vbl_queue);
-+ drm_vbl_send_signals(dev);
-+ }
-+}
-+
-+/*
-+ * SGX interrupt source 1.
-+ */
-+
-+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
-+ uint32_t sgx_stat2)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
-+ DRM_WAKEUP(&dev_priv->event_2d_queue);
-+ psb_fence_handler(dev, 0);
-+ }
-+
-+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
-+ psb_print_pagefault(dev_priv);
-+
-+ psb_scheduler_handler(dev_priv, sgx_stat);
-+}
-+
-+/*
-+ * MSVDX interrupt.
-+ */
-+static void psb_msvdx_interrupt(struct drm_device *dev, uint32_t msvdx_stat)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
-+ /*Ideally we should we should never get to this */
-+ PSB_DEBUG_GENERAL
-+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MMU FAULT)\n",
-+ msvdx_stat, dev_priv->fence2_irq_on);
-+
-+ /* Pause MMU */
-+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
-+ MSVDX_MMU_CONTROL0);
-+ DRM_WRITEMEMORYBARRIER();
-+
-+ /* Clear this interupt bit only */
-+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
-+ MSVDX_INTERRUPT_CLEAR);
-+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
-+ DRM_READMEMORYBARRIER();
-+
-+ dev_priv->msvdx_needs_reset = 1;
-+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
-+ PSB_DEBUG_GENERAL
-+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MTX)\n",
-+ msvdx_stat, dev_priv->fence2_irq_on);
-+
-+ /* Clear all interupt bits */
-+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
-+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
-+ DRM_READMEMORYBARRIER();
-+
-+ psb_msvdx_mtx_interrupt(dev);
-+ }
-+}
-+
-+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
-+{
-+ struct drm_device *dev = (struct drm_device *)arg;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ uint32_t vdc_stat;
-+ uint32_t sgx_stat;
-+ uint32_t sgx_stat2;
-+ uint32_t msvdx_stat;
-+ int handled = 0;
-+
-+ spin_lock(&dev_priv->irqmask_lock);
-+
-+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
-+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
-+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
-+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
-+
-+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
-+ sgx_stat &= dev_priv->sgx_irq_mask;
-+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
-+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
-+
-+ vdc_stat &= dev_priv->vdc_irq_mask;
-+ spin_unlock(&dev_priv->irqmask_lock);
-+
-+ if (msvdx_stat) {
-+ psb_msvdx_interrupt(dev, msvdx_stat);
-+ handled = 1;
-+ }
-+
-+ if (vdc_stat) {
-+ /* MSVDX IRQ status is part of vdc_irq_mask */
-+ psb_vdc_interrupt(dev, vdc_stat);
-+ handled = 1;
-+ }
-+
-+ if (sgx_stat || sgx_stat2) {
-+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
-+ handled = 1;
-+ }
-+
-+ if (!handled) {
-+ return IRQ_NONE;
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long mtx_int = 0;
-+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
-+
-+ /*Clear MTX interrupt */
-+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
-+}
-+
-+void psb_irq_preinstall(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ spin_lock(&dev_priv->irqmask_lock);
-+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
-+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
-+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+
-+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
-+ _PSB_CE_DPM_3D_MEM_FREE |
-+ _PSB_CE_TA_FINISHED |
-+ _PSB_CE_DPM_REACHED_MEM_THRESH |
-+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
-+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
-+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
-+
-+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
-+
-+ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG;
-+
-+ if (!drm_psb_disable_vsync)
-+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
-+ _PSB_VSYNC_PIPEB_FLAG;
-+
-+ /*Clear MTX interrupt */
-+ {
-+ unsigned long mtx_int = 0;
-+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
-+ CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
-+ }
-+ spin_unlock(&dev_priv->irqmask_lock);
-+}
-+
-+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
-+{
-+ /* Enable Mtx Interupt to host */
-+ unsigned long enables = 0;
-+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
-+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
-+}
-+
-+int psb_irq_postinstall(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+ /****MSVDX IRQ Setup...*****/
-+ /* Enable Mtx Interupt to host */
-+ {
-+ unsigned long enables = 0;
-+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
-+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
-+ CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
-+ }
-+ dev_priv->irq_enabled = 1;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+ return 0;
-+}
-+
-+void psb_irq_uninstall(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+
-+ dev_priv->sgx_irq_mask = 0x00000000;
-+ dev_priv->sgx2_irq_mask = 0x00000000;
-+ dev_priv->vdc_irq_mask = 0x00000000;
-+
-+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
-+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
-+ wmb();
-+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), PSB_CR_EVENT_HOST_CLEAR);
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), PSB_CR_EVENT_HOST_CLEAR2);
-+
-+ /****MSVDX IRQ Setup...*****/
-+ /* Clear interrupt enabled flag */
-+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
-+
-+ dev_priv->irq_enabled = 0;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+
-+}
-+
-+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+ uint32_t old_mask;
-+ uint32_t cleared_mask;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ --dev_priv->irqen_count_2d;
-+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
-+
-+ old_mask = dev_priv->sgx_irq_mask;
-+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+
-+ cleared_mask = (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
-+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+
-+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
-+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+ }
-+ ++dev_priv->irqen_count_2d;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+
-+static int psb_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
-+ atomic_t * counter)
-+{
-+ unsigned int cur_vblank;
-+ int ret = 0;
-+
-+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-+ (((cur_vblank = atomic_read(counter))
-+ - *sequence) <= (1 << 23)));
-+
-+ *sequence = cur_vblank;
-+
-+ return ret;
-+}
-+
-+int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-+{
-+ int ret;
-+
-+ ret = psb_vblank_do_wait(dev, sequence, &dev->vbl_received);
-+ return ret;
-+}
-+
-+int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-+{
-+ int ret;
-+
-+ ret = psb_vblank_do_wait(dev, sequence, &dev->vbl_received2);
-+ return ret;
-+}
-+
-+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled) {
-+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
-+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ (void)PSB_RSGX32(PSB_INT_ENABLE_R);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+
-+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled) {
-+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
-+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ (void)PSB_RSGX32(PSB_INT_ENABLE_R);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_mmu.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_mmu.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,1037 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+
-+/*
-+ * Code for the SGX MMU:
-+ */
-+
-+/*
-+ * clflush on one processor only:
-+ * clflush should apparently flush the cache line on all processors in an
-+ * SMP system.
-+ */
-+
-+/*
-+ * kmap atomic:
-+ * The usage of the slots must be completely encapsulated within a spinlock, and
-+ * no other functions that may be using the locks for other purposed may be
-+ * called from within the locked region.
-+ * Since the slots are per processor, this will guarantee that we are the only
-+ * user.
-+ */
-+
-+/*
-+ * TODO: Inserting ptes from an interrupt handler:
-+ * This may be desirable for some SGX functionality where the GPU can fault in
-+ * needed pages. For that, we need to make an atomic insert_pages function, that
-+ * may fail.
-+ * If it fails, the caller need to insert the page using a workqueue function,
-+ * but on average it should be fast.
-+ */
-+
-+struct psb_mmu_driver {
-+ /* protects driver- and pd structures. Always take in read mode
-+ * before taking the page table spinlock.
-+ */
-+ struct rw_semaphore sem;
-+
-+ /* protects page tables, directory tables and pt tables.
-+ * and pt structures.
-+ */
-+ spinlock_t lock;
-+
-+ atomic_t needs_tlbflush;
-+ atomic_t *msvdx_mmu_invaldc;
-+ uint8_t __iomem *register_map;
-+ struct psb_mmu_pd *default_pd;
-+ uint32_t bif_ctrl;
-+ int has_clflush;
-+ int clflush_add;
-+ unsigned long clflush_mask;
-+};
-+
-+struct psb_mmu_pd;
-+
-+struct psb_mmu_pt {
-+ struct psb_mmu_pd *pd;
-+ uint32_t index;
-+ uint32_t count;
-+ struct page *p;
-+ uint32_t *v;
-+};
-+
-+struct psb_mmu_pd {
-+ struct psb_mmu_driver *driver;
-+ int hw_context;
-+ struct psb_mmu_pt **tables;
-+ struct page *p;
-+ struct page *dummy_pt;
-+ struct page *dummy_page;
-+ uint32_t pd_mask;
-+ uint32_t invalid_pde;
-+ uint32_t invalid_pte;
-+};
-+
-+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
-+{
-+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
-+}
-+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
-+{
-+ return (offset >> PSB_PDE_SHIFT);
-+}
-+
-+#if defined(CONFIG_X86)
-+static inline void psb_clflush(void *addr)
-+{
-+ __asm__ __volatile__("clflush (%0)\n"::"r"(addr):"memory");
-+}
-+
-+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
-+{
-+ if (!driver->has_clflush)
-+ return;
-+
-+ mb();
-+ psb_clflush(addr);
-+ mb();
-+}
-+#else
-+
-+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
-+{;
-+}
-+
-+#endif
-+
-+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
-+ uint32_t val, uint32_t offset)
-+{
-+ iowrite32(val, d->register_map + offset);
-+}
-+
-+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
-+ uint32_t offset)
-+{
-+ return ioread32(d->register_map + offset);
-+}
-+
-+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
-+{
-+ if (atomic_read(&driver->needs_tlbflush) || force) {
-+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
-+ PSB_CR_BIF_CTRL);
-+ wmb();
-+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
-+ PSB_CR_BIF_CTRL);
-+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ if (driver->msvdx_mmu_invaldc)
-+ atomic_set(driver->msvdx_mmu_invaldc, 1);
-+ }
-+ atomic_set(&driver->needs_tlbflush, 0);
-+}
-+
-+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
-+{
-+ down_write(&driver->sem);
-+ psb_mmu_flush_pd_locked(driver, force);
-+ up_write(&driver->sem);
-+}
-+
-+void psb_mmu_flush(struct psb_mmu_driver *driver)
-+{
-+ uint32_t val;
-+
-+ down_write(&driver->sem);
-+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ if (atomic_read(&driver->needs_tlbflush))
-+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
-+ PSB_CR_BIF_CTRL);
-+ else
-+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
-+ PSB_CR_BIF_CTRL);
-+ wmb();
-+ psb_iowrite32(driver,
-+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
-+ PSB_CR_BIF_CTRL);
-+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ atomic_set(&driver->needs_tlbflush, 0);
-+ if (driver->msvdx_mmu_invaldc)
-+ atomic_set(driver->msvdx_mmu_invaldc, 1);
-+ up_write(&driver->sem);
-+}
-+
-+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
-+{
-+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
-+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
-+
-+ drm_ttm_cache_flush();
-+ down_write(&pd->driver->sem);
-+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), offset);
-+ wmb();
-+ psb_mmu_flush_pd_locked(pd->driver, 1);
-+ pd->hw_context = hw_context;
-+ up_write(&pd->driver->sem);
-+
-+}
-+
-+static inline unsigned long psb_pd_addr_end(unsigned long addr,
-+ unsigned long end)
-+{
-+
-+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
-+ return (addr < end) ? addr : end;
-+}
-+
-+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
-+{
-+ uint32_t mask = PSB_PTE_VALID;
-+
-+ if (type & PSB_MMU_CACHED_MEMORY)
-+ mask |= PSB_PTE_CACHED;
-+ if (type & PSB_MMU_RO_MEMORY)
-+ mask |= PSB_PTE_RO;
-+ if (type & PSB_MMU_WO_MEMORY)
-+ mask |= PSB_PTE_WO;
-+
-+ return (pfn << PAGE_SHIFT) | mask;
-+}
-+
-+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
-+ int trap_pagefaults, int invalid_type)
-+{
-+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
-+ uint32_t *v;
-+ int i;
-+
-+ if (!pd)
-+ return NULL;
-+
-+ pd->p = alloc_page(GFP_DMA32);
-+ if (!pd->p)
-+ goto out_err1;
-+ pd->dummy_pt = alloc_page(GFP_DMA32);
-+ if (!pd->dummy_pt)
-+ goto out_err2;
-+ pd->dummy_page = alloc_page(GFP_DMA32);
-+ if (!pd->dummy_page)
-+ goto out_err3;
-+
-+ if (!trap_pagefaults) {
-+ pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
-+ invalid_type |
-+ PSB_MMU_CACHED_MEMORY);
-+ pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
-+ invalid_type |
-+ PSB_MMU_CACHED_MEMORY);
-+ } else {
-+ pd->invalid_pde = 0;
-+ pd->invalid_pte = 0;
-+ }
-+
-+ v = kmap(pd->dummy_pt);
-+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
-+ v[i] = pd->invalid_pte;
-+ }
-+ kunmap(pd->dummy_pt);
-+
-+ v = kmap(pd->p);
-+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
-+ v[i] = pd->invalid_pde;
-+ }
-+ kunmap(pd->p);
-+
-+ clear_page(kmap(pd->dummy_page));
-+ kunmap(pd->dummy_page);
-+
-+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
-+ if (!pd->tables)
-+ goto out_err4;
-+
-+ pd->hw_context = -1;
-+ pd->pd_mask = PSB_PTE_VALID;
-+ pd->driver = driver;
-+
-+ return pd;
-+
-+ out_err4:
-+ __free_page(pd->dummy_page);
-+ out_err3:
-+ __free_page(pd->dummy_pt);
-+ out_err2:
-+ __free_page(pd->p);
-+ out_err1:
-+ kfree(pd);
-+ return NULL;
-+}
-+
-+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
-+{
-+ __free_page(pt->p);
-+ kfree(pt);
-+}
-+
-+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
-+{
-+ struct psb_mmu_driver *driver = pd->driver;
-+ struct psb_mmu_pt *pt;
-+ int i;
-+
-+ down_write(&driver->sem);
-+ if (pd->hw_context != -1) {
-+ psb_iowrite32(driver, 0,
-+ PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
-+ psb_mmu_flush_pd_locked(driver, 1);
-+ }
-+
-+ /* Should take the spinlock here, but we don't need to do that
-+ since we have the semaphore in write mode. */
-+
-+ for (i = 0; i < 1024; ++i) {
-+ pt = pd->tables[i];
-+ if (pt)
-+ psb_mmu_free_pt(pt);
-+ }
-+
-+ vfree(pd->tables);
-+ __free_page(pd->dummy_page);
-+ __free_page(pd->dummy_pt);
-+ __free_page(pd->p);
-+ kfree(pd);
-+ up_write(&driver->sem);
-+}
-+
-+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
-+{
-+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
-+ void *v;
-+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
-+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
-+ spinlock_t *lock = &pd->driver->lock;
-+ uint8_t *clf;
-+ uint32_t *ptes;
-+ int i;
-+
-+ if (!pt)
-+ return NULL;
-+
-+ pt->p = alloc_page(GFP_DMA32);
-+ if (!pt->p) {
-+ kfree(pt);
-+ return NULL;
-+ }
-+
-+ spin_lock(lock);
-+
-+ v = kmap_atomic(pt->p, KM_USER0);
-+ clf = (uint8_t *) v;
-+ ptes = (uint32_t *) v;
-+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
-+ *ptes++ = pd->invalid_pte;
-+ }
-+
-+#if defined(CONFIG_X86)
-+ if (pd->driver->has_clflush && pd->hw_context != -1) {
-+ mb();
-+ for (i = 0; i < clflush_count; ++i) {
-+ psb_clflush(clf);
-+ clf += clflush_add;
-+ }
-+ mb();
-+ }
-+#endif
-+ kunmap_atomic(v, KM_USER0);
-+ spin_unlock(lock);
-+
-+ pt->count = 0;
-+ pt->pd = pd;
-+ pt->index = 0;
-+
-+ return pt;
-+}
-+
-+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
-+ unsigned long addr)
-+{
-+ uint32_t index = psb_mmu_pd_index(addr);
-+ struct psb_mmu_pt *pt;
-+ volatile uint32_t *v;
-+ spinlock_t *lock = &pd->driver->lock;
-+
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ while (!pt) {
-+ spin_unlock(lock);
-+ pt = psb_mmu_alloc_pt(pd);
-+ if (!pt)
-+ return NULL;
-+ spin_lock(lock);
-+
-+ if (pd->tables[index]) {
-+ spin_unlock(lock);
-+ psb_mmu_free_pt(pt);
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ continue;
-+ }
-+
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ pd->tables[index] = pt;
-+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
-+ pt->index = index;
-+ kunmap_atomic((void *)v, KM_USER0);
-+
-+ if (pd->hw_context != -1) {
-+ psb_mmu_clflush(pd->driver, (void *)&v[index]);
-+ atomic_set(&pd->driver->needs_tlbflush, 1);
-+ }
-+ }
-+ pt->v = kmap_atomic(pt->p, KM_USER0);
-+ return pt;
-+}
-+
-+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
-+ unsigned long addr)
-+{
-+ uint32_t index = psb_mmu_pd_index(addr);
-+ struct psb_mmu_pt *pt;
-+ spinlock_t *lock = &pd->driver->lock;
-+
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ if (!pt) {
-+ spin_unlock(lock);
-+ return NULL;
-+ }
-+ pt->v = kmap_atomic(pt->p, KM_USER0);
-+ return pt;
-+}
-+
-+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
-+{
-+ struct psb_mmu_pd *pd = pt->pd;
-+ volatile uint32_t *v;
-+
-+ kunmap_atomic(pt->v, KM_USER0);
-+ if (pt->count == 0) {
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ v[pt->index] = pd->invalid_pde;
-+ pd->tables[pt->index] = NULL;
-+
-+ if (pd->hw_context != -1) {
-+ psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
-+ atomic_set(&pd->driver->needs_tlbflush, 1);
-+ }
-+ kunmap_atomic(pt->v, KM_USER0);
-+ spin_unlock(&pd->driver->lock);
-+ psb_mmu_free_pt(pt);
-+ return;
-+ }
-+ spin_unlock(&pd->driver->lock);
-+}
-+
-+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
-+ uint32_t pte)
-+{
-+ pt->v[psb_mmu_pt_index(addr)] = pte;
-+}
-+
-+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
-+ unsigned long addr)
-+{
-+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
-+}
-+
-+#if 0
-+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
-+ uint32_t mmu_offset)
-+{
-+ uint32_t *v;
-+ uint32_t pfn;
-+
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ if (!v) {
-+ printk(KERN_INFO "Could not kmap pde page.\n");
-+ return 0;
-+ }
-+ pfn = v[psb_mmu_pd_index(mmu_offset)];
-+ // printk(KERN_INFO "pde is 0x%08x\n",pfn);
-+ kunmap_atomic(v, KM_USER0);
-+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
-+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
-+ mmu_offset, pfn);
-+ }
-+ v = ioremap(pfn & 0xFFFFF000, 4096);
-+ if (!v) {
-+ printk(KERN_INFO "Could not kmap pte page.\n");
-+ return 0;
-+ }
-+ pfn = v[psb_mmu_pt_index(mmu_offset)];
-+ // printk(KERN_INFO "pte is 0x%08x\n",pfn);
-+ iounmap(v);
-+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
-+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
-+ mmu_offset, pfn);
-+ }
-+ return pfn >> PAGE_SHIFT;
-+}
-+
-+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
-+ uint32_t mmu_offset, uint32_t gtt_pages)
-+{
-+ uint32_t start;
-+ uint32_t next;
-+
-+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
-+ mmu_offset, gtt_pages);
-+ down_read(&pd->driver->sem);
-+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
-+ mmu_offset += PAGE_SIZE;
-+ gtt_pages -= 1;
-+ while (gtt_pages--) {
-+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
-+ if (next != start + 1) {
-+ printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n",
-+ start, next);
-+ }
-+ start = next;
-+ mmu_offset += PAGE_SIZE;
-+ }
-+ up_read(&pd->driver->sem);
-+}
-+
-+#endif
-+
-+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
-+ uint32_t mmu_offset, uint32_t gtt_start,
-+ uint32_t gtt_pages)
-+{
-+ uint32_t *v;
-+ uint32_t start = psb_mmu_pd_index(mmu_offset);
-+ struct psb_mmu_driver *driver = pd->driver;
-+
-+ down_read(&driver->sem);
-+ spin_lock(&driver->lock);
-+
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ v += start;
-+
-+ while (gtt_pages--) {
-+ *v++ = gtt_start | pd->pd_mask;
-+ gtt_start += PAGE_SIZE;
-+ }
-+
-+ drm_ttm_cache_flush();
-+ kunmap_atomic(v, KM_USER0);
-+ spin_unlock(&driver->lock);
-+
-+ if (pd->hw_context != -1)
-+ atomic_set(&pd->driver->needs_tlbflush, 1);
-+
-+ up_read(&pd->driver->sem);
-+ psb_mmu_flush_pd(pd->driver, 0);
-+}
-+
-+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
-+{
-+ struct psb_mmu_pd *pd;
-+
-+ down_read(&driver->sem);
-+ pd = driver->default_pd;
-+ up_read(&driver->sem);
-+
-+ return pd;
-+}
-+
-+/* Returns the physical address of the PD shared by sgx/msvdx */
-+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver * driver)
-+{
-+ struct psb_mmu_pd *pd;
-+
-+ pd = psb_mmu_get_default_pd(driver);
-+ return ((page_to_pfn(pd->p) << PAGE_SHIFT));
-+}
-+
-+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
-+{
-+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
-+ psb_mmu_free_pagedir(driver->default_pd);
-+ kfree(driver);
-+}
-+
-+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
-+ int trap_pagefaults,
-+ int invalid_type,
-+ atomic_t *msvdx_mmu_invaldc)
-+{
-+ struct psb_mmu_driver *driver;
-+
-+ driver = (struct psb_mmu_driver *)kmalloc(sizeof(*driver), GFP_KERNEL);
-+
-+ if (!driver)
-+ return NULL;
-+
-+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
-+ invalid_type);
-+ if (!driver->default_pd)
-+ goto out_err1;
-+
-+ spin_lock_init(&driver->lock);
-+ init_rwsem(&driver->sem);
-+ down_write(&driver->sem);
-+ driver->register_map = registers;
-+ atomic_set(&driver->needs_tlbflush, 1);
-+ driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
-+
-+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
-+ PSB_CR_BIF_CTRL);
-+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
-+ PSB_CR_BIF_CTRL);
-+
-+ driver->has_clflush = 0;
-+
-+#if defined(CONFIG_X86)
-+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
-+ uint32_t tfms, misc, cap0, cap4, clflush_size;
-+
-+ /*
-+ * clflush size is determined at kernel setup for x86_64 but not for
-+ * i386. We have to do it here.
-+ */
-+
-+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
-+ clflush_size = ((misc >> 8) & 0xff) * 8;
-+ driver->has_clflush = 1;
-+ driver->clflush_add =
-+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
-+ driver->clflush_mask = driver->clflush_add - 1;
-+ driver->clflush_mask = ~driver->clflush_mask;
-+ }
-+#endif
-+
-+ up_write(&driver->sem);
-+ return driver;
-+
-+ out_err1:
-+ kfree(driver);
-+ return NULL;
-+}
-+
-+#if defined(CONFIG_X86)
-+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages, uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride)
-+{
-+ struct psb_mmu_pt *pt;
-+ uint32_t rows = 1;
-+ uint32_t i;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long add;
-+ unsigned long row_add;
-+ unsigned long clflush_add = pd->driver->clflush_add;
-+ unsigned long clflush_mask = pd->driver->clflush_mask;
-+
-+ if (!pd->driver->has_clflush) {
-+ drm_ttm_cache_flush();
-+ return;
-+ }
-+
-+ if (hw_tile_stride)
-+ rows = num_pages / desired_tile_stride;
-+ else
-+ desired_tile_stride = num_pages;
-+
-+ add = desired_tile_stride << PAGE_SHIFT;
-+ row_add = hw_tile_stride << PAGE_SHIFT;
-+ mb();
-+ for (i = 0; i < rows; ++i) {
-+
-+ addr = address;
-+ end = addr + add;
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_map_lock(pd, addr);
-+ if (!pt)
-+ continue;
-+ do {
-+ psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
-+ } while (addr += clflush_add,
-+ (addr & clflush_mask) < next);
-+
-+ psb_mmu_pt_unmap_unlock(pt);
-+ } while (addr = next, next != end);
-+ address += row_add;
-+ }
-+ mb();
-+}
-+#else
-+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages, uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride)
-+{
-+ drm_ttm_cache_flush();
-+}
-+#endif
-+
-+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
-+ unsigned long address, uint32_t num_pages)
-+{
-+ struct psb_mmu_pt *pt;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long f_address = address;
-+
-+ down_read(&pd->driver->sem);
-+
-+ addr = address;
-+ end = addr + (num_pages << PAGE_SHIFT);
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-+ if (!pt)
-+ goto out;
-+ do {
-+ psb_mmu_invalidate_pte(pt, addr);
-+ --pt->count;
-+ } while (addr += PAGE_SIZE, addr < next);
-+ psb_mmu_pt_unmap_unlock(pt);
-+
-+ } while (addr = next, next != end);
-+
-+ out:
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
-+
-+ up_read(&pd->driver->sem);
-+
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush(pd->driver);
-+
-+ return;
-+}
-+
-+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages, uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride)
-+{
-+ struct psb_mmu_pt *pt;
-+ uint32_t rows = 1;
-+ uint32_t i;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long add;
-+ unsigned long row_add;
-+ unsigned long f_address = address;
-+
-+ if (hw_tile_stride)
-+ rows = num_pages / desired_tile_stride;
-+ else
-+ desired_tile_stride = num_pages;
-+
-+ add = desired_tile_stride << PAGE_SHIFT;
-+ row_add = hw_tile_stride << PAGE_SHIFT;
-+
-+ down_read(&pd->driver->sem);
-+
-+ /* Make sure we only need to flush this processor's cache */
-+
-+ for (i = 0; i < rows; ++i) {
-+
-+ addr = address;
-+ end = addr + add;
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_map_lock(pd, addr);
-+ if (!pt)
-+ continue;
-+ do {
-+ psb_mmu_invalidate_pte(pt, addr);
-+ --pt->count;
-+
-+ } while (addr += PAGE_SIZE, addr < next);
-+ psb_mmu_pt_unmap_unlock(pt);
-+
-+ } while (addr = next, next != end);
-+ address += row_add;
-+ }
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush_ptes(pd, f_address, num_pages,
-+ desired_tile_stride, hw_tile_stride);
-+
-+ up_read(&pd->driver->sem);
-+
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush(pd->driver);
-+}
-+
-+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
-+ unsigned long address, uint32_t num_pages,
-+ int type)
-+{
-+ struct psb_mmu_pt *pt;
-+ uint32_t pte;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long f_address = address;
-+ int ret = -ENOMEM;
-+
-+ down_read(&pd->driver->sem);
-+
-+ addr = address;
-+ end = addr + (num_pages << PAGE_SHIFT);
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-+ if (!pt) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ do {
-+ pte = psb_mmu_mask_pte(start_pfn++, type);
-+ psb_mmu_set_pte(pt, addr, pte);
-+ pt->count++;
-+ } while (addr += PAGE_SIZE, addr < next);
-+ psb_mmu_pt_unmap_unlock(pt);
-+
-+ } while (addr = next, next != end);
-+ ret = 0;
-+
-+ out:
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
-+
-+ up_read(&pd->driver->sem);
-+
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush(pd->driver);
-+
-+ return 0;
-+}
-+
-+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
-+ unsigned long address, uint32_t num_pages,
-+ uint32_t desired_tile_stride, uint32_t hw_tile_stride,
-+ int type)
-+{
-+ struct psb_mmu_pt *pt;
-+ uint32_t rows = 1;
-+ uint32_t i;
-+ uint32_t pte;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long add;
-+ unsigned long row_add;
-+ unsigned long f_address = address;
-+ int ret = -ENOMEM;
-+
-+ if (hw_tile_stride) {
-+ if (num_pages % desired_tile_stride != 0)
-+ return -EINVAL;
-+ rows = num_pages / desired_tile_stride;
-+ } else {
-+ desired_tile_stride = num_pages;
-+ }
-+
-+ add = desired_tile_stride << PAGE_SHIFT;
-+ row_add = hw_tile_stride << PAGE_SHIFT;
-+
-+ down_read(&pd->driver->sem);
-+
-+ for (i = 0; i < rows; ++i) {
-+
-+ addr = address;
-+ end = addr + add;
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-+ if (!pt)
-+ goto out;
-+ do {
-+ pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
-+ type);
-+ psb_mmu_set_pte(pt, addr, pte);
-+ pt->count++;
-+ } while (addr += PAGE_SIZE, addr < next);
-+ psb_mmu_pt_unmap_unlock(pt);
-+
-+ } while (addr = next, next != end);
-+
-+ address += row_add;
-+ }
-+ ret = 0;
-+ out:
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush_ptes(pd, f_address, num_pages,
-+ desired_tile_stride, hw_tile_stride);
-+
-+ up_read(&pd->driver->sem);
-+
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush(pd->driver);
-+
-+ return 0;
-+}
-+
-+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
-+{
-+ mask &= _PSB_MMU_ER_MASK;
-+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
-+ PSB_CR_BIF_CTRL);
-+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+}
-+
-+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
-+{
-+ mask &= _PSB_MMU_ER_MASK;
-+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
-+ PSB_CR_BIF_CTRL);
-+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+}
-+
-+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
-+ unsigned long *pfn)
-+{
-+ int ret;
-+ struct psb_mmu_pt *pt;
-+ uint32_t tmp;
-+ spinlock_t *lock = &pd->driver->lock;
-+
-+ down_read(&pd->driver->sem);
-+ pt = psb_mmu_pt_map_lock(pd, virtual);
-+ if (!pt) {
-+ uint32_t *v;
-+
-+ spin_lock(lock);
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ tmp = v[psb_mmu_pd_index(virtual)];
-+ kunmap_atomic(v, KM_USER0);
-+ spin_unlock(lock);
-+
-+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
-+ !(pd->invalid_pte & PSB_PTE_VALID)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ ret = 0;
-+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
-+ goto out;
-+ }
-+ tmp = pt->v[psb_mmu_pt_index(virtual)];
-+ if (!(tmp & PSB_PTE_VALID)) {
-+ ret = -EINVAL;
-+ } else {
-+ ret = 0;
-+ *pfn = tmp >> PAGE_SHIFT;
-+ }
-+ psb_mmu_pt_unmap_unlock(pt);
-+ out:
-+ up_read(&pd->driver->sem);
-+ return ret;
-+}
-+
-+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
-+{
-+ struct page *p;
-+ unsigned long pfn;
-+ int ret = 0;
-+ struct psb_mmu_pd *pd;
-+ uint32_t *v;
-+ uint32_t *vmmu;
-+
-+ pd = driver->default_pd;
-+ if (!pd) {
-+ printk(KERN_WARNING "Could not get default pd\n");
-+ }
-+
-+ p = alloc_page(GFP_DMA32);
-+
-+ if (!p) {
-+ printk(KERN_WARNING "Failed allocating page\n");
-+ return;
-+ }
-+
-+ v = kmap(p);
-+ memset(v, 0x67, PAGE_SIZE);
-+
-+ pfn = (offset >> PAGE_SHIFT);
-+
-+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0,
-+ PSB_MMU_CACHED_MEMORY);
-+ if (ret) {
-+ printk(KERN_WARNING "Failed inserting mmu page\n");
-+ goto out_err1;
-+ }
-+
-+ /* Ioremap the page through the GART aperture */
-+
-+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-+ if (!vmmu) {
-+ printk(KERN_WARNING "Failed ioremapping page\n");
-+ goto out_err2;
-+ }
-+
-+ /* Read from the page with mmu disabled. */
-+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
-+
-+ /* Enable the mmu for host accesses and read again. */
-+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
-+
-+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
-+ ioread32(vmmu));
-+ *v = 0x15243705;
-+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
-+ ioread32(vmmu));
-+ iowrite32(0x16243355, vmmu);
-+ (void)ioread32(vmmu);
-+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
-+
-+ printk(KERN_INFO "Int stat is 0x%08x\n",
-+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
-+ printk(KERN_INFO "Fault is 0x%08x\n",
-+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
-+
-+ /* Disable MMU for host accesses and clear page fault register */
-+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
-+ iounmap(vmmu);
-+ out_err2:
-+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
-+ out_err1:
-+ kunmap(p);
-+ __free_page(p);
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,676 @@
-+/**
-+ * file psb_msvdx.c
-+ * MSVDX I/O operations and IRQ handling
-+ *
-+ */
-+
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
-+ * Copyright (c) Imagination Technologies Limited, UK
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+
-+#include "drmP.h"
-+#include "drm_os_linux.h"
-+#include "psb_drv.h"
-+#include "psb_drm.h"
-+#include "psb_msvdx.h"
-+
-+#include <asm/io.h>
-+#include <linux/delay.h>
-+
-+#ifndef list_first_entry
-+#define list_first_entry(ptr, type, member) \
-+ list_entry((ptr)->next, type, member)
-+#endif
-+
-+static int psb_msvdx_send (struct drm_device *dev, void *cmd,
-+ unsigned long cmd_size);
-+
-+int
-+psb_msvdx_dequeue_send (struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
-+ int ret = 0;
-+
-+ if (list_empty (&dev_priv->msvdx_queue))
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: msvdx list empty.\n");
-+ dev_priv->msvdx_busy = 0;
-+ return -EINVAL;
-+ }
-+ msvdx_cmd =
-+ list_first_entry (&dev_priv->msvdx_queue, struct psb_msvdx_cmd_queue,
-+ head);
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
-+ ret = psb_msvdx_send (dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
-+ ret = -EINVAL;
-+ }
-+ list_del (&msvdx_cmd->head);
-+ kfree (msvdx_cmd->cmd);
-+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
-+ return ret;
-+}
-+
-+int
-+psb_msvdx_map_command (struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset, unsigned long cmd_size,
-+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
-+ unsigned long cmd_size_remaining;
-+ struct drm_bo_kmap_obj cmd_kmap;
-+ void *cmd, *tmp, *cmd_start;
-+ int is_iomem;
-+
-+ /* command buffers may not exceed page boundary */
-+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
-+ return -EINVAL;
-+
-+ ret = drm_bo_kmap (cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
-+
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXQUE:ret:%d\n", ret);
-+ return ret;
-+ }
-+
-+ cmd_start =
-+ (void *) drm_bmo_virtual (&cmd_kmap, &is_iomem) + cmd_page_offset;
-+ cmd = cmd_start;
-+ cmd_size_remaining = cmd_size;
-+
-+ while (cmd_size_remaining > 0)
-+ {
-+ uint32_t mmu_ptd;
-+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
-+ uint32_t cur_cmd_id = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_ID);
-+ PSB_DEBUG_GENERAL
-+ ("cmd start at %08x cur_cmd_size = %d cur_cmd_id = %02x fence = %08x\n",
-+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
-+ if ((cur_cmd_size % sizeof (uint32_t))
-+ || (cur_cmd_size > cmd_size_remaining))
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ switch (cur_cmd_id)
-+ {
-+ case VA_MSGID_RENDER:
-+ /* Fence ID */
-+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_FENCE_VALUE, sequence);
-+
-+ mmu_ptd = psb_get_default_pd_addr (dev_priv->mmu);
-+ if (atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc, 1, 0) == 1)
-+ {
-+ mmu_ptd |= 1;
-+ PSB_DEBUG_GENERAL ("MSVDX: Setting MMU invalidate flag\n");
-+ }
-+ /* PTD */
-+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
-+ break;
-+
-+ default:
-+ /* Msg not supported */
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ cmd += cur_cmd_size;
-+ cmd_size_remaining -= cur_cmd_size;
-+ }
-+
-+ if (copy_cmd)
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXQUE: psb_msvdx_map_command copying command...\n");
-+ tmp = drm_calloc (1, cmd_size, DRM_MEM_DRIVER);
-+ if (tmp == NULL)
-+ {
-+ ret = -ENOMEM;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+ memcpy (tmp, cmd_start, cmd_size);
-+ *msvdx_cmd = tmp;
-+ }
-+ else
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXQUE: psb_msvdx_map_command did NOT copy command...\n");
-+ ret = psb_msvdx_send (dev, cmd_start, cmd_size);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
-+ ret = -EINVAL;
-+ }
-+ }
-+
-+out:
-+ drm_bo_kunmap (&cmd_kmap);
-+
-+ return ret;
-+}
-+
-+int
-+psb_submit_video_cmdbuf (struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset, unsigned long cmd_size,
-+ struct drm_fence_object *fence)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ uint32_t sequence = fence->sequence;
-+ unsigned long irq_flags;
-+ int ret = 0;
-+
-+ mutex_lock (&dev_priv->msvdx_mutex);
-+ psb_schedule_watchdog (dev_priv);
-+
-+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
-+ if (dev_priv->msvdx_needs_reset)
-+ {
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ PSB_DEBUG_GENERAL ("MSVDX: Needs reset\n");
-+ if (psb_msvdx_reset (dev_priv))
-+ {
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ ret = -EBUSY;
-+ PSB_DEBUG_GENERAL ("MSVDX: Reset failed\n");
-+ return ret;
-+ }
-+ PSB_DEBUG_GENERAL ("MSVDX: Reset ok\n");
-+ dev_priv->msvdx_needs_reset = 0;
-+ dev_priv->msvdx_busy = 0;
-+ dev_priv->msvdx_start_idle = 0;
-+
-+ psb_msvdx_init (dev);
-+ psb_msvdx_irq_preinstall (dev_priv);
-+ psb_msvdx_irq_postinstall (dev_priv);
-+ PSB_DEBUG_GENERAL ("MSVDX: Init ok\n");
-+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
-+ }
-+
-+ if (!dev_priv->msvdx_busy)
-+ {
-+ dev_priv->msvdx_busy = 1;
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXQUE: nothing in the queue sending sequence:%08x..\n",
-+ sequence);
-+ ret =
-+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
-+ NULL, sequence, 0);
-+ if (ret)
-+ {
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
-+ return ret;
-+ }
-+ }
-+ else
-+ {
-+ struct psb_msvdx_cmd_queue *msvdx_cmd;
-+ void *cmd = NULL;
-+
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ /*queue the command to be sent when the h/w is ready */
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: queueing sequence:%08x..\n", sequence);
-+ msvdx_cmd =
-+ drm_calloc (1, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
-+ if (msvdx_cmd == NULL)
-+ {
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Out of memory...\n");
-+ return -ENOMEM;
-+ }
-+
-+ ret =
-+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
-+ &cmd, sequence, 1);
-+ if (ret)
-+ {
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
-+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue),
-+ DRM_MEM_DRIVER);
-+ return ret;
-+ }
-+ msvdx_cmd->cmd = cmd;
-+ msvdx_cmd->cmd_size = cmd_size;
-+ msvdx_cmd->sequence = sequence;
-+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
-+ list_add_tail (&msvdx_cmd->head, &dev_priv->msvdx_queue);
-+ if (!dev_priv->msvdx_busy)
-+ {
-+ dev_priv->msvdx_busy = 1;
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Need immediate dequeue\n");
-+ psb_msvdx_dequeue_send (dev);
-+ }
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ }
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ return ret;
-+}
-+
-+int
-+psb_msvdx_send (struct drm_device *dev, void *cmd, unsigned long cmd_size)
-+{
-+ int ret = 0;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+
-+ while (cmd_size > 0)
-+ {
-+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
-+ if (cur_cmd_size > cmd_size)
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: cmd_size = %d cur_cmd_size = %d\n",
-+ (int) cmd_size, cur_cmd_size);
-+ goto out;
-+ }
-+ /* Send the message to h/w */
-+ ret = psb_mtx_send (dev_priv, cmd);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+ cmd += cur_cmd_size;
-+ cmd_size -= cur_cmd_size;
-+ }
-+
-+out:
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ return ret;
-+}
-+
-+/***********************************************************************************
-+ * Function Name : psb_mtx_send
-+ * Inputs :
-+ * Outputs :
-+ * Returns :
-+ * Description :
-+ ************************************************************************************/
-+int
-+psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg)
-+{
-+
-+ static uint32_t padMessage[FWRK_PADMSG_SIZE];
-+
-+ const uint32_t *pui32Msg = (uint32_t *) pvMsg;
-+ uint32_t msgNumWords, wordsFree, readIndex, writeIndex;
-+ int ret = 0;
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: psb_mtx_send\n");
-+
-+ /* we need clocks enabled before we touch VEC local ram */
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ msgNumWords = (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_SIZE) + 3) / 4;
-+
-+ if (msgNumWords > NUM_WORDS_MTX_BUF)
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_RD_INDEX);
-+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+
-+ if (writeIndex + msgNumWords > NUM_WORDS_MTX_BUF)
-+ { /* message would wrap, need to send a pad message */
-+ BUG_ON (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_ID) == FWRK_MSGID_PADDING); /* Shouldn't happen for a PAD message itself */
-+ /* if the read pointer is at zero then we must wait for it to change otherwise the write
-+ * pointer will equal the read pointer,which should only happen when the buffer is empty
-+ *
-+ * This will only happens if we try to overfill the queue, queue management should make
-+ * sure this never happens in the first place.
-+ */
-+ BUG_ON (0 == readIndex);
-+ if (0 == readIndex)
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+ /* Send a pad message */
-+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_SIZE,
-+ (NUM_WORDS_MTX_BUF - writeIndex) << 2);
-+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_ID, FWRK_MSGID_PADDING);
-+ psb_mtx_send (dev_priv, padMessage);
-+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+ }
-+
-+ wordsFree =
-+ (writeIndex >=
-+ readIndex) ? NUM_WORDS_MTX_BUF - (writeIndex -
-+ readIndex) : readIndex - writeIndex;
-+
-+ BUG_ON (msgNumWords > wordsFree);
-+ if (msgNumWords > wordsFree)
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ while (msgNumWords > 0)
-+ {
-+ PSB_WMSVDX32 (*pui32Msg++, MSVDX_COMMS_TO_MTX_BUF + (writeIndex << 2));
-+ msgNumWords--;
-+ writeIndex++;
-+ if (NUM_WORDS_MTX_BUF == writeIndex)
-+ {
-+ writeIndex = 0;
-+ }
-+ }
-+ PSB_WMSVDX32 (writeIndex, MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+
-+ /* Make sure clocks are enabled before we kick */
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ /* signal an interrupt to let the mtx know there is a new message */
-+ PSB_WMSVDX32 (1, MSVDX_MTX_KICKI);
-+
-+out:
-+ return ret;
-+}
-+
-+/*
-+ * MSVDX MTX interrupt
-+ */
-+void
-+psb_msvdx_mtx_interrupt (struct drm_device *dev)
-+{
-+ static uint32_t msgBuffer[128];
-+ uint32_t readIndex, writeIndex;
-+ uint32_t msgNumWords, msgWordOffset;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *) dev->dev_private;
-+
-+ /* Are clocks enabled - If not enable before attempting to read from VLR */
-+ if (PSB_RMSVDX32 (MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: Warning - Clocks disabled when Interupt set\n");
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+ }
-+
-+ for (;;)
-+ {
-+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_RD_INDEX);
-+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_WRT_INDEX);
-+
-+ if (readIndex != writeIndex)
-+ {
-+ msgWordOffset = 0;
-+
-+ msgBuffer[msgWordOffset] =
-+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
-+
-+ msgNumWords = (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_SIZE) + 3) / 4; /* round to nearest word */
-+
-+ /*ASSERT(msgNumWords <= sizeof(msgBuffer) / sizeof(uint32_t)); */
-+
-+ if (++readIndex >= NUM_WORDS_HOST_BUF)
-+ readIndex = 0;
-+
-+ for (msgWordOffset++; msgWordOffset < msgNumWords; msgWordOffset++)
-+ {
-+ msgBuffer[msgWordOffset] =
-+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
-+
-+ if (++readIndex >= NUM_WORDS_HOST_BUF)
-+ {
-+ readIndex = 0;
-+ }
-+ }
-+
-+ /* Update the Read index */
-+ PSB_WMSVDX32 (readIndex, MSVDX_COMMS_TO_HOST_RD_INDEX);
-+
-+ if (!dev_priv->msvdx_needs_reset)
-+ switch (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID))
-+ {
-+ case VA_MSGID_CMD_HW_PANIC:
-+ case VA_MSGID_CMD_FAILED:
-+ {
-+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
-+ FW_VA_CMD_FAILED_FENCE_VALUE);
-+ uint32_t ui32FaultStatus = MEMIO_READ_FIELD (msgBuffer,
-+ FW_VA_CMD_FAILED_IRQSTATUS);
-+
-+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC )
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: VA_MSGID_CMD_HW_PANIC: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
-+ ui32Fence, ui32FaultStatus);
-+ else
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: VA_MSGID_CMD_FAILED: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
-+ ui32Fence, ui32FaultStatus);
-+
-+ dev_priv->msvdx_needs_reset = 1;
-+
-+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC)
-+ {
-+ if (dev_priv->
-+ msvdx_current_sequence
-+ - dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
-+ dev_priv->msvdx_current_sequence++;
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: Fence ID missing, assuming %08x\n",
-+ dev_priv->msvdx_current_sequence);
-+ }
-+ else
-+ dev_priv->msvdx_current_sequence = ui32Fence;
-+
-+ psb_fence_error (dev,
-+ PSB_ENGINE_VIDEO,
-+ dev_priv->
-+ msvdx_current_sequence,
-+ DRM_FENCE_TYPE_EXE, DRM_CMD_FAILED);
-+
-+ /* Flush the command queue */
-+ psb_msvdx_flush_cmd_queue (dev);
-+
-+ goto isrExit;
-+ break;
-+ }
-+ case VA_MSGID_CMD_COMPLETED:
-+ {
-+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
-+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
-+ uint32_t ui32Flags =
-+ MEMIO_READ_FIELD (msgBuffer, FW_VA_CMD_COMPLETED_FLAGS);
-+
-+ PSB_DEBUG_GENERAL
-+ ("msvdx VA_MSGID_CMD_COMPLETED: FenceID: %08x, flags: 0x%x\n",
-+ ui32Fence, ui32Flags);
-+ dev_priv->msvdx_current_sequence = ui32Fence;
-+
-+ psb_fence_handler (dev, PSB_ENGINE_VIDEO);
-+
-+
-+ if (ui32Flags & FW_VA_RENDER_HOST_INT)
-+ {
-+ /*Now send the next command from the msvdx cmd queue */
-+ psb_msvdx_dequeue_send (dev);
-+ goto isrExit;
-+ }
-+ break;
-+ }
-+ case VA_MSGID_ACK:
-+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_ACK\n");
-+ break;
-+
-+ case VA_MSGID_TEST1:
-+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST1\n");
-+ break;
-+
-+ case VA_MSGID_TEST2:
-+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST2\n");
-+ break;
-+ /* Don't need to do anything with these messages */
-+
-+ case VA_MSGID_DEBLOCK_REQUIRED:
-+ {
-+ uint32_t ui32ContextId = MEMIO_READ_FIELD (msgBuffer,
-+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
-+
-+ /* The BE we now be locked. */
-+
-+ /* Unblock rendec by reading the mtx2mtx end of slice */
-+ (void) PSB_RMSVDX32 (MSVDX_RENDEC_READ_DATA);
-+
-+ PSB_DEBUG_GENERAL
-+ ("msvdx VA_MSGID_DEBLOCK_REQUIRED Context=%08x\n",
-+ ui32ContextId);
-+ goto isrExit;
-+ break;
-+ }
-+
-+ default:
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("ERROR: msvdx Unknown message from MTX \n");
-+ }
-+ break;
-+
-+ }
-+ }
-+ else
-+ {
-+ /* Get out of here if nothing */
-+ break;
-+ }
-+ }
-+isrExit:
-+
-+#if 1
-+ if (!dev_priv->msvdx_busy)
-+ {
-+ /* check that clocks are enabled before reading VLR */
-+ if( PSB_RMSVDX32( MSVDX_MAN_CLK_ENABLE ) != (clk_enable_all) )
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ /* If the firmware says the hardware is idle and the CCB is empty then we can power down */
-+ uint32_t ui32FWStatus = PSB_RMSVDX32( MSVDX_COMMS_FW_STATUS );
-+ uint32_t ui32CCBRoff = PSB_RMSVDX32 ( MSVDX_COMMS_TO_MTX_RD_INDEX );
-+ uint32_t ui32CCBWoff = PSB_RMSVDX32 ( MSVDX_COMMS_TO_MTX_WRT_INDEX );
-+
-+ if( (ui32FWStatus & MSVDX_FW_STATUS_HW_IDLE) && (ui32CCBRoff == ui32CCBWoff))
-+ {
-+ PSB_DEBUG_GENERAL("MSVDX_CLOCK: Setting clock to minimal...\n");
-+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
-+ }
-+ }
-+#endif
-+ DRM_MEMORYBARRIER ();
-+}
-+
-+void
-+psb_msvdx_lockup (struct drm_psb_private *dev_priv,
-+ int *msvdx_lockup, int *msvdx_idle)
-+{
-+ unsigned long irq_flags;
-+// struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+
-+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
-+ *msvdx_lockup = 0;
-+ *msvdx_idle = 1;
-+
-+ if (!dev_priv->has_msvdx)
-+ {
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ return;
-+ }
-+#if 0
-+ PSB_DEBUG_GENERAL ("MSVDXTimer: current_sequence:%d "
-+ "last_sequence:%d and last_submitted_sequence :%d\n",
-+ dev_priv->msvdx_current_sequence,
-+ dev_priv->msvdx_last_sequence,
-+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
-+#endif
-+ if (dev_priv->msvdx_current_sequence -
-+ dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
-+ {
-+
-+ if (dev_priv->msvdx_current_sequence == dev_priv->msvdx_last_sequence)
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXTimer: msvdx locked-up for sequence:%d\n",
-+ dev_priv->msvdx_current_sequence);
-+ *msvdx_lockup = 1;
-+ }
-+ else
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXTimer: msvdx responded fine so far...\n");
-+ dev_priv->msvdx_last_sequence = dev_priv->msvdx_current_sequence;
-+ *msvdx_idle = 0;
-+ }
-+ if (dev_priv->msvdx_start_idle)
-+ dev_priv->msvdx_start_idle = 0;
-+ }
-+ else
-+ {
-+ if (dev_priv->msvdx_needs_reset == 0)
-+ {
-+ if (dev_priv->msvdx_start_idle && (dev_priv->msvdx_finished_sequence == dev_priv->msvdx_current_sequence))
-+ {
-+ //if (dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME >= jiffies)
-+ if (time_after_eq(jiffies, dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME))
-+ {
-+ printk("set the msvdx clock to 0 in the %s\n", __FUNCTION__);
-+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
-+ dev_priv->msvdx_needs_reset = 1;
-+ }
-+ else
-+ {
-+ *msvdx_idle = 0;
-+ }
-+ }
-+ else
-+ {
-+ dev_priv->msvdx_start_idle = 1;
-+ dev_priv->msvdx_idle_start_jiffies = jiffies;
-+ dev_priv->msvdx_finished_sequence = dev_priv->msvdx_current_sequence;
-+ *msvdx_idle = 0;
-+ }
-+ }
-+ }
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdx.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,564 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
-+ * Copyright (c) Imagination Technologies Limited, UK
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+
-+#ifndef _PSB_MSVDX_H_
-+#define _PSB_MSVDX_H_
-+
-+#define assert(expr) \
-+ if(unlikely(!(expr))) { \
-+ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
-+ #expr,__FILE__,__FUNCTION__,__LINE__); \
-+ }
-+
-+#define PSB_ASSERT(x) assert (x)
-+#define IMG_ASSERT(x) assert (x)
-+
-+#include "psb_drv.h"
-+int
-+psb_wait_for_register (struct drm_psb_private *dev_priv,
-+ uint32_t ui32Offset,
-+ uint32_t ui32Value, uint32_t ui32Enable);
-+
-+void psb_msvdx_mtx_interrupt (struct drm_device *dev);
-+int psb_msvdx_init (struct drm_device *dev);
-+int psb_msvdx_uninit (struct drm_device *dev);
-+int psb_msvdx_reset (struct drm_psb_private *dev_priv);
-+uint32_t psb_get_default_pd_addr (struct psb_mmu_driver *driver);
-+int psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg);
-+void psb_msvdx_irq_preinstall (struct drm_psb_private *dev_priv);
-+void psb_msvdx_irq_postinstall (struct drm_psb_private *dev_priv);
-+void psb_msvdx_flush_cmd_queue (struct drm_device *dev);
-+extern void psb_msvdx_lockup (struct drm_psb_private *dev_priv,
-+ int *msvdx_lockup, int *msvdx_idle);
-+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2 /* Non-Optimal Invalidation is not default */
-+#define FW_VA_RENDER_HOST_INT 0x00004000
-+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
-+
-+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
-+
-+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
-+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
-+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
-+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
-+
-+
-+#define POULSBO_D0 0x5
-+#define POULSBO_D1 0x6
-+#define PSB_REVID_OFFSET 0x8
-+
-+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 /* There is no work currently underway on the hardware*/
-+
-+#define clk_enable_all MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
-+
-+#define clk_enable_minimal MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
-+
-+#define clk_enable_auto MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
-+
-+#define msvdx_sw_reset_all MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
-+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
-+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
-+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
-+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK
-+
-+
-+#define PCI_PORT5_REG80_FFUSE 0xD0058000
-+#define MTX_CODE_BASE (0x80900000)
-+#define MTX_DATA_BASE (0x82880000)
-+#define PC_START_ADDRESS (0x80900000)
-+
-+#define MTX_CORE_CODE_MEM (0x10 )
-+#define MTX_CORE_DATA_MEM (0x18 )
-+
-+#define MTX_INTERNAL_REG( R_SPECIFIER , U_SPECIFIER ) ( ((R_SPECIFIER)<<4) | (U_SPECIFIER) )
-+#define MTX_PC MTX_INTERNAL_REG( 0 , 5 )
-+
-+#define RENDEC_A_SIZE ( 2 * 1024* 1024 )
-+#define RENDEC_B_SIZE ( RENDEC_A_SIZE / 4 )
-+
-+#define MEMIO_READ_FIELD(vpMem, field) \
-+ ((uint32_t)(((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & field##_MASK) >> field##_SHIFT))
-+
-+#define MEMIO_WRITE_FIELD(vpMem, field, ui32Value) \
-+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
-+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & (field##_TYPE)~field##_MASK) | \
-+ (field##_TYPE)(( (uint32_t) (ui32Value) << field##_SHIFT) & field##_MASK);
-+
-+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, ui32Value) \
-+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
-+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) | \
-+ (field##_TYPE) (( (uint32_t) (ui32Value) << field##_SHIFT)) );
-+
-+#define REGIO_READ_FIELD(ui32RegValue, reg, field) \
-+ ((ui32RegValue & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
-+
-+#define REGIO_WRITE_FIELD(ui32RegValue, reg, field, ui32Value) \
-+ (ui32RegValue) = \
-+ ((ui32RegValue) & ~(reg##_##field##_MASK)) | \
-+ (((ui32Value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
-+
-+#define REGIO_WRITE_FIELD_LITE(ui32RegValue, reg, field, ui32Value) \
-+ (ui32RegValue) = \
-+ ( (ui32RegValue) | ( (ui32Value) << (reg##_##field##_SHIFT) ) );
-+
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK (0x00000001)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK (0x00000002)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK (0x00000004)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK (0x00000008)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK (0x00000010)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK (0x00000020)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK (0x00000040)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK (0x00040000)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK (0x00080000)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK (0x00100000)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK (0x00200000)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK (0x00010000)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK (0x00100000)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK (0x01000000)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK (0x10000000)
-+
-+/* MTX registers */
-+#define MSVDX_MTX_ENABLE (0x0000)
-+#define MSVDX_MTX_KICKI (0x0088)
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
-+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
-+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
-+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
-+#define MSVDX_MTX_SOFT_RESET (0x0200)
-+
-+/* MSVDX registers */
-+#define MSVDX_CONTROL (0x0600)
-+#define MSVDX_INTERRUPT_CLEAR (0x060C)
-+#define MSVDX_INTERRUPT_STATUS (0x0608)
-+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
-+#define MSVDX_MMU_CONTROL0 (0x0680)
-+#define MSVDX_MTX_RAM_BANK (0x06F0)
-+#define MSVDX_MAN_CLK_ENABLE (0x0620)
-+
-+/* RENDEC registers */
-+#define MSVDX_RENDEC_CONTROL0 (0x0868)
-+#define MSVDX_RENDEC_CONTROL1 (0x086C)
-+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
-+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
-+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
-+#define MSVDX_RENDEC_READ_DATA (0x0898)
-+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
-+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
-+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
-+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
-+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
-+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
-+
-+/*
-+ * This defines the MSVDX communication buffer
-+ */
-+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
-+#define NUM_WORDS_HOST_BUF (100) /*!< Host buffer size (in 32-bit words) */
-+#define NUM_WORDS_MTX_BUF (100) /*!< MTX buffer size (in 32-bit words) */
-+
-+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
-+
-+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
-+#define MSVDX_COMMS_SCRATCH (MSVDX_COMMS_AREA_ADDR - 0x08)
-+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
-+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
-+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
-+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
-+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
-+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
-+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
-+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
-+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
-+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
-+#define MSVDX_COMMS_TO_MTX_BUF (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
-+
-+#define MSVDX_COMMS_AREA_END (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
-+
-+#if (MSVDX_COMMS_AREA_END != 0x03000)
-+#error
-+#endif
-+
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
-+
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
-+
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
-+
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
-+
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
-+
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
-+
-+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
-+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
-+
-+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
-+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
-+
-+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
-+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
-+
-+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
-+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
-+
-+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
-+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
-+
-+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
-+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
-+
-+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
-+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
-+
-+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
-+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
-+
-+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
-+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
-+
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
-+
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
-+
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
-+
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
-+
-+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
-+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
-+
-+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80) /*!< Start of parser specific Host->MTX messages. */
-+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0) /*!< Start of parser specific MTX->Host messages. */
-+#define FWRK_MSGID_PADDING ( 0 )
-+
-+#define FWRK_GENMSG_SIZE_TYPE uint8_t
-+#define FWRK_GENMSG_SIZE_MASK (0xFF)
-+#define FWRK_GENMSG_SIZE_SHIFT (0)
-+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
-+#define FWRK_GENMSG_ID_TYPE uint8_t
-+#define FWRK_GENMSG_ID_MASK (0xFF)
-+#define FWRK_GENMSG_ID_SHIFT (0)
-+#define FWRK_GENMSG_ID_OFFSET (0x0001)
-+#define FWRK_PADMSG_SIZE (2)
-+
-+/*!
-+******************************************************************************
-+ This type defines the framework specified message ids
-+******************************************************************************/
-+enum
-+{
-+ /*! Sent by the DXVA driver on the host to the mtx firmware.
-+ */
-+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
-+ VA_MSGID_RENDER,
-+ VA_MSGID_DEBLOCK,
-+ VA_MSGID_OOLD,
-+
-+ /* Test Messages */
-+ VA_MSGID_TEST1,
-+ VA_MSGID_TEST2,
-+
-+ /*! Sent by the mtx firmware to itself.
-+ */
-+ VA_MSGID_RENDER_MC_INTERRUPT,
-+
-+ /*! Sent by the DXVA firmware on the MTX to the host.
-+ */
-+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
-+ VA_MSGID_CMD_COMPLETED_BATCH,
-+ VA_MSGID_DEBLOCK_REQUIRED,
-+ VA_MSGID_TEST_RESPONCE,
-+ VA_MSGID_ACK,
-+
-+ VA_MSGID_CMD_FAILED,
-+ VA_MSGID_CMD_UNSUPPORTED,
-+ VA_MSGID_CMD_HW_PANIC,
-+};
-+
-+/* MSVDX Firmware interface */
-+
-+#define FW_VA_RENDER_SIZE (32)
-+
-+// FW_VA_RENDER MSG_SIZE
-+#define FW_VA_RENDER_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_RENDER_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_RENDER_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_RENDER_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_RENDER_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_RENDER_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_RENDER ID
-+#define FW_VA_RENDER_ID_ALIGNMENT (1)
-+#define FW_VA_RENDER_ID_TYPE uint8_t
-+#define FW_VA_RENDER_ID_MASK (0xFF)
-+#define FW_VA_RENDER_ID_LSBMASK (0xFF)
-+#define FW_VA_RENDER_ID_OFFSET (0x0001)
-+#define FW_VA_RENDER_ID_SHIFT (0)
-+
-+// FW_VA_RENDER BUFFER_SIZE
-+#define FW_VA_RENDER_BUFFER_SIZE_ALIGNMENT (2)
-+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
-+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
-+#define FW_VA_RENDER_BUFFER_SIZE_LSBMASK (0x0FFF)
-+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
-+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
-+
-+// FW_VA_RENDER MMUPTD
-+#define FW_VA_RENDER_MMUPTD_ALIGNMENT (4)
-+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
-+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_MMUPTD_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
-+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
-+
-+// FW_VA_RENDER LLDMA_ADDRESS
-+#define FW_VA_RENDER_LLDMA_ADDRESS_ALIGNMENT (4)
-+#define FW_VA_RENDER_LLDMA_ADDRESS_TYPE uint32_t
-+#define FW_VA_RENDER_LLDMA_ADDRESS_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_LLDMA_ADDRESS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_LLDMA_ADDRESS_OFFSET (0x0008)
-+#define FW_VA_RENDER_LLDMA_ADDRESS_SHIFT (0)
-+
-+// FW_VA_RENDER CONTEXT
-+#define FW_VA_RENDER_CONTEXT_ALIGNMENT (4)
-+#define FW_VA_RENDER_CONTEXT_TYPE uint32_t
-+#define FW_VA_RENDER_CONTEXT_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_CONTEXT_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_CONTEXT_OFFSET (0x000C)
-+#define FW_VA_RENDER_CONTEXT_SHIFT (0)
-+
-+// FW_VA_RENDER FENCE_VALUE
-+#define FW_VA_RENDER_FENCE_VALUE_ALIGNMENT (4)
-+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
-+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
-+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
-+
-+// FW_VA_RENDER OPERATING_MODE
-+#define FW_VA_RENDER_OPERATING_MODE_ALIGNMENT (4)
-+#define FW_VA_RENDER_OPERATING_MODE_TYPE uint32_t
-+#define FW_VA_RENDER_OPERATING_MODE_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_OPERATING_MODE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_OPERATING_MODE_OFFSET (0x0014)
-+#define FW_VA_RENDER_OPERATING_MODE_SHIFT (0)
-+
-+// FW_VA_RENDER FIRST_MB_IN_SLICE
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_ALIGNMENT (2)
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_TYPE uint16_t
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_MASK (0xFFFF)
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_LSBMASK (0xFFFF)
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_OFFSET (0x0018)
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_SHIFT (0)
-+
-+// FW_VA_RENDER LAST_MB_IN_FRAME
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_ALIGNMENT (2)
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_TYPE uint16_t
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_MASK (0xFFFF)
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_LSBMASK (0xFFFF)
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_OFFSET (0x001A)
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_SHIFT (0)
-+
-+// FW_VA_RENDER FLAGS
-+#define FW_VA_RENDER_FLAGS_ALIGNMENT (4)
-+#define FW_VA_RENDER_FLAGS_TYPE uint32_t
-+#define FW_VA_RENDER_FLAGS_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_FLAGS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_FLAGS_OFFSET (0x001C)
-+#define FW_VA_RENDER_FLAGS_SHIFT (0)
-+
-+#define FW_VA_CMD_COMPLETED_SIZE (12)
-+
-+// FW_VA_CMD_COMPLETED MSG_SIZE
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_CMD_COMPLETED ID
-+#define FW_VA_CMD_COMPLETED_ID_ALIGNMENT (1)
-+#define FW_VA_CMD_COMPLETED_ID_TYPE uint8_t
-+#define FW_VA_CMD_COMPLETED_ID_MASK (0xFF)
-+#define FW_VA_CMD_COMPLETED_ID_LSBMASK (0xFF)
-+#define FW_VA_CMD_COMPLETED_ID_OFFSET (0x0001)
-+#define FW_VA_CMD_COMPLETED_ID_SHIFT (0)
-+
-+// FW_VA_CMD_COMPLETED FENCE_VALUE
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_ALIGNMENT (4)
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
-+
-+// FW_VA_CMD_COMPLETED FLAGS
-+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
-+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
-+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
-+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
-+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
-+
-+#define FW_VA_CMD_FAILED_SIZE (12)
-+
-+// FW_VA_CMD_FAILED MSG_SIZE
-+#define FW_VA_CMD_FAILED_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_CMD_FAILED_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_CMD_FAILED_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_CMD_FAILED_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_CMD_FAILED_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_CMD_FAILED_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_CMD_FAILED ID
-+#define FW_VA_CMD_FAILED_ID_ALIGNMENT (1)
-+#define FW_VA_CMD_FAILED_ID_TYPE uint8_t
-+#define FW_VA_CMD_FAILED_ID_MASK (0xFF)
-+#define FW_VA_CMD_FAILED_ID_LSBMASK (0xFF)
-+#define FW_VA_CMD_FAILED_ID_OFFSET (0x0001)
-+#define FW_VA_CMD_FAILED_ID_SHIFT (0)
-+
-+// FW_VA_CMD_FAILED FLAGS
-+#define FW_VA_CMD_FAILED_FLAGS_ALIGNMENT (2)
-+#define FW_VA_CMD_FAILED_FLAGS_TYPE uint16_t
-+#define FW_VA_CMD_FAILED_FLAGS_MASK (0xFFFF)
-+#define FW_VA_CMD_FAILED_FLAGS_LSBMASK (0xFFFF)
-+#define FW_VA_CMD_FAILED_FLAGS_OFFSET (0x0002)
-+#define FW_VA_CMD_FAILED_FLAGS_SHIFT (0)
-+
-+// FW_VA_CMD_FAILED FENCE_VALUE
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_ALIGNMENT (4)
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
-+
-+// FW_VA_CMD_FAILED IRQSTATUS
-+#define FW_VA_CMD_FAILED_IRQSTATUS_ALIGNMENT (4)
-+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
-+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
-+#define FW_VA_CMD_FAILED_IRQSTATUS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
-+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
-+
-+#define FW_VA_DEBLOCK_REQUIRED_SIZE (8)
-+
-+// FW_VA_DEBLOCK_REQUIRED MSG_SIZE
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_DEBLOCK_REQUIRED ID
-+#define FW_VA_DEBLOCK_REQUIRED_ID_ALIGNMENT (1)
-+#define FW_VA_DEBLOCK_REQUIRED_ID_TYPE uint8_t
-+#define FW_VA_DEBLOCK_REQUIRED_ID_MASK (0xFF)
-+#define FW_VA_DEBLOCK_REQUIRED_ID_LSBMASK (0xFF)
-+#define FW_VA_DEBLOCK_REQUIRED_ID_OFFSET (0x0001)
-+#define FW_VA_DEBLOCK_REQUIRED_ID_SHIFT (0)
-+
-+// FW_VA_DEBLOCK_REQUIRED CONTEXT
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_ALIGNMENT (4)
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
-+
-+#define FW_VA_HW_PANIC_SIZE (12)
-+
-+// FW_VA_HW_PANIC FLAGS
-+#define FW_VA_HW_PANIC_FLAGS_ALIGNMENT (2)
-+#define FW_VA_HW_PANIC_FLAGS_TYPE uint16_t
-+#define FW_VA_HW_PANIC_FLAGS_MASK (0xFFFF)
-+#define FW_VA_HW_PANIC_FLAGS_LSBMASK (0xFFFF)
-+#define FW_VA_HW_PANIC_FLAGS_OFFSET (0x0002)
-+#define FW_VA_HW_PANIC_FLAGS_SHIFT (0)
-+
-+// FW_VA_HW_PANIC MSG_SIZE
-+#define FW_VA_HW_PANIC_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_HW_PANIC_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_HW_PANIC_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_HW_PANIC_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_HW_PANIC_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_HW_PANIC_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_HW_PANIC ID
-+#define FW_VA_HW_PANIC_ID_ALIGNMENT (1)
-+#define FW_VA_HW_PANIC_ID_TYPE uint8_t
-+#define FW_VA_HW_PANIC_ID_MASK (0xFF)
-+#define FW_VA_HW_PANIC_ID_LSBMASK (0xFF)
-+#define FW_VA_HW_PANIC_ID_OFFSET (0x0001)
-+#define FW_VA_HW_PANIC_ID_SHIFT (0)
-+
-+// FW_VA_HW_PANIC FENCE_VALUE
-+#define FW_VA_HW_PANIC_FENCE_VALUE_ALIGNMENT (4)
-+#define FW_VA_HW_PANIC_FENCE_VALUE_TYPE uint32_t
-+#define FW_VA_HW_PANIC_FENCE_VALUE_MASK (0xFFFFFFFF)
-+#define FW_VA_HW_PANIC_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_HW_PANIC_FENCE_VALUE_OFFSET (0x0004)
-+#define FW_VA_HW_PANIC_FENCE_VALUE_SHIFT (0)
-+
-+// FW_VA_HW_PANIC IRQSTATUS
-+#define FW_VA_HW_PANIC_IRQSTATUS_ALIGNMENT (4)
-+#define FW_VA_HW_PANIC_IRQSTATUS_TYPE uint32_t
-+#define FW_VA_HW_PANIC_IRQSTATUS_MASK (0xFFFFFFFF)
-+#define FW_VA_HW_PANIC_IRQSTATUS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_HW_PANIC_IRQSTATUS_OFFSET (0x0008)
-+#define FW_VA_HW_PANIC_IRQSTATUS_SHIFT (0)
-+
-+#endif
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_msvdxinit.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,625 @@
-+/**
-+ * file psb_msvdxinit.c
-+ * MSVDX initialization and mtx-firmware upload
-+ *
-+ */
-+
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
-+ * Copyright (c) Imagination Technologies Limited, UK
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "psb_drv.h"
-+#include "psb_msvdx.h"
-+#include <linux/firmware.h>
-+
-+/*MSVDX FW header*/
-+struct msvdx_fw
-+{
-+ uint32_t ver;
-+ uint32_t text_size;
-+ uint32_t data_size;
-+ uint32_t data_location;
-+};
-+
-+int
-+psb_wait_for_register (struct drm_psb_private *dev_priv,
-+ uint32_t ui32Offset,
-+ uint32_t ui32Value, uint32_t ui32Enable)
-+{
-+ uint32_t ui32Temp;
-+ uint32_t ui32PollCount = 1000;
-+ while (ui32PollCount)
-+ {
-+ ui32Temp = PSB_RMSVDX32 (ui32Offset);
-+ if (ui32Value == (ui32Temp & ui32Enable)) /* All the bits are reset */
-+ return 0; /* So exit */
-+
-+ /* Wait a bit */
-+ DRM_UDELAY (100);
-+ ui32PollCount--;
-+ }
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: Timeout while waiting for register %08x: expecting %08x (mask %08x), got %08x\n",
-+ ui32Offset, ui32Value, ui32Enable, ui32Temp);
-+ return 1;
-+}
-+
-+int
-+psb_poll_mtx_irq (struct drm_psb_private *dev_priv)
-+{
-+ int ret = 0;
-+ uint32_t MtxInt = 0;
-+ REGIO_WRITE_FIELD_LITE (MtxInt, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
-+
-+ ret = psb_wait_for_register (dev_priv, MSVDX_INTERRUPT_STATUS, MtxInt, /* Required value */
-+ MtxInt /* Enabled bits */ );
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: Error Mtx did not return int within a resonable time\n");
-+
-+ return ret;
-+ }
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: Got MTX Int\n");
-+
-+ /* Got it so clear the bit */
-+ PSB_WMSVDX32 (MtxInt, MSVDX_INTERRUPT_CLEAR);
-+
-+ return ret;
-+}
-+
-+void
-+psb_write_mtx_core_reg (struct drm_psb_private *dev_priv,
-+ const uint32_t ui32CoreRegister,
-+ const uint32_t ui32Val)
-+{
-+ uint32_t ui32Reg = 0;
-+
-+ /* Put data in MTX_RW_DATA */
-+ PSB_WMSVDX32 (ui32Val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
-+
-+ /* DREADY is set to 0 and request a write */
-+ ui32Reg = ui32CoreRegister;
-+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
-+ MTX_RNW, 0);
-+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
-+ MTX_DREADY, 0);
-+ PSB_WMSVDX32 (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
-+
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK, /* Required Value */
-+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
-+}
-+
-+void
-+psb_upload_fw (struct drm_psb_private *dev_priv, const uint32_t ui32DataMem,
-+ uint32_t ui32RamBankSize, uint32_t ui32Address,
-+ const unsigned int uiWords, const uint32_t * const pui32Data)
-+{
-+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
-+ (uint32_t) ~ 0;
-+ uint32_t ui32AccessControl;
-+
-+ /* Save the access control register... */
-+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ /* Wait for MCMSTAT to become be idle 1 */
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
-+ 0xffffffff /* Enables */ );
-+
-+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
-+ {
-+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
-+
-+ if (ui32RamId != ui32CurrBank)
-+ {
-+ ui32Addr = ui32Address >> 2;
-+
-+ ui32Ctrl = 0;
-+
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL,
-+ MTX_MCMID, ui32RamId);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL,
-+ MTX_MCM_ADDR, ui32Addr);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
-+
-+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ ui32CurrBank = ui32RamId;
-+ }
-+ ui32Address += 4;
-+
-+ PSB_WMSVDX32 (pui32Data[ui32Loop], MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
-+
-+ /* Wait for MCMSTAT to become be idle 1 */
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
-+ 0xffffffff /* Enables */ );
-+ }
-+ PSB_DEBUG_GENERAL ("MSVDX: Upload done\n");
-+
-+ /* Restore the access control register... */
-+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+}
-+
-+static int
-+psb_verify_fw (struct drm_psb_private *dev_priv,
-+ const uint32_t ui32RamBankSize,
-+ const uint32_t ui32DataMem, uint32_t ui32Address,
-+ const uint32_t uiWords, const uint32_t * const pui32Data)
-+{
-+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
-+ (uint32_t) ~ 0;
-+ uint32_t ui32AccessControl;
-+ int ret = 0;
-+
-+ /* Save the access control register... */
-+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ /* Wait for MCMSTAT to become be idle 1 */
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
-+ 0xffffffff /* Enables */ );
-+
-+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
-+ {
-+ uint32_t ui32ReadBackVal;
-+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
-+
-+ if (ui32RamId != ui32CurrBank)
-+ {
-+ ui32Addr = ui32Address >> 2;
-+ ui32Ctrl = 0;
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL,
-+ MTX_MCMID, ui32RamId);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL,
-+ MTX_MCM_ADDR, ui32Addr);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMR, 1);
-+
-+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ ui32CurrBank = ui32RamId;
-+ }
-+ ui32Address += 4;
-+
-+ /* Wait for MCMSTAT to become be idle 1 */
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
-+ 0xffffffff /* Enables */ );
-+
-+ ui32ReadBackVal = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
-+ if (pui32Data[ui32Loop] != ui32ReadBackVal)
-+ {
-+ DRM_ERROR
-+ ("psb: Firmware validation fails at index=%08x\n", ui32Loop);
-+ ret = 1;
-+ break;
-+ }
-+ }
-+
-+ /* Restore the access control register... */
-+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ return ret;
-+}
-+
-+static uint32_t *
-+msvdx_get_fw (struct drm_device *dev,
-+ const struct firmware **raw, uint8_t * name)
-+{
-+ int rc;
-+ int *ptr = NULL;
-+
-+ rc = request_firmware (raw, name, &dev->pdev->dev);
-+ if (rc < 0)
-+ {
-+ DRM_ERROR ("MSVDX: %s request_firmware failed: Reason %d\n", name, rc);
-+ return NULL;
-+ }
-+
-+ if ((*raw)->size < sizeof (struct msvdx_fw))
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX: %s is is not correct size(%zd)\n",
-+ name, (*raw)->size);
-+ return NULL;
-+ }
-+
-+ ptr = (int *) ((*raw))->data;
-+
-+ if (!ptr)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX: Failed to load %s\n", name);
-+ return NULL;
-+ }
-+ /*another sanity check... */
-+ if ((*raw)->size !=
-+ (sizeof (struct msvdx_fw) +
-+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
-+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->data_size))
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX: %s is is not correct size(%zd)\n",
-+ name, (*raw)->size);
-+ return NULL;
-+ }
-+ return ptr;
-+}
-+
-+static int
-+psb_setup_fw (struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+
-+ uint32_t ram_bank_size;
-+ struct msvdx_fw *fw;
-+ uint32_t *fw_ptr = NULL;
-+ uint32_t *text_ptr = NULL;
-+ uint32_t *data_ptr = NULL;
-+ const struct firmware *raw = NULL;
-+ /* todo : Assert the clock is on - if not turn it on to upload code */
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: psb_setup_fw\n");
-+
-+ /* Reset MTX */
-+ PSB_WMSVDX32 (MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK, MSVDX_MTX_SOFT_RESET);
-+
-+ /* Initialses Communication controll area to 0 */
-+ if(dev_priv->psb_rev_id >= POULSBO_D1)
-+ {
-+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1 or later revision.\n");
-+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1, MSVDX_COMMS_OFFSET_FLAGS);
-+ }
-+ else
-+ {
-+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0 or earlier revision.\n");
-+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0, MSVDX_COMMS_OFFSET_FLAGS);
-+ }
-+
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_MSG_COUNTER);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_SIGNATURE);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_RD_INDEX);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_RD_INDEX);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_FW_STATUS);
-+
-+ /* read register bank size */
-+ {
-+ uint32_t ui32BankSize, ui32Reg;
-+ ui32Reg = PSB_RMSVDX32 (MSVDX_MTX_RAM_BANK);
-+ ui32BankSize =
-+ REGIO_READ_FIELD (ui32Reg, MSVDX_MTX_RAM_BANK, CR_MTX_RAM_BANK_SIZE);
-+ ram_bank_size = (uint32_t) (1 << (ui32BankSize + 2));
-+ }
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: RAM bank size = %d bytes\n", ram_bank_size);
-+
-+ fw_ptr = msvdx_get_fw (dev, &raw, "msvdx_fw.bin");
-+
-+ if (!fw_ptr)
-+ {
-+ DRM_ERROR ("psb: No valid msvdx_fw.bin firmware found.\n");
-+ ret = 1;
-+ goto out;
-+ }
-+
-+ fw = (struct msvdx_fw *) fw_ptr;
-+ if (fw->ver != 0x02)
-+ {
-+ DRM_ERROR
-+ ("psb: msvdx_fw.bin firmware version mismatch, got version=%02x expected version=%02x\n",
-+ fw->ver, 0x02);
-+ ret = 1;
-+ goto out;
-+ }
-+
-+ text_ptr = (uint32_t *) ((uint8_t *) fw_ptr + sizeof (struct msvdx_fw));
-+ data_ptr = text_ptr + fw->text_size;
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: Retrieved pointers for firmware\n");
-+ PSB_DEBUG_GENERAL ("MSVDX: text_size: %d\n", fw->text_size);
-+ PSB_DEBUG_GENERAL ("MSVDX: data_size: %d\n", fw->data_size);
-+ PSB_DEBUG_GENERAL ("MSVDX: data_location: 0x%x\n", fw->data_location);
-+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of text: 0x%x\n", *text_ptr);
-+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of data: 0x%x\n", *data_ptr);
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: Uploading firmware\n");
-+ psb_upload_fw (dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
-+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr);
-+ psb_upload_fw (dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
-+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr);
-+
-+ /*todo : Verify code upload possibly only in debug */
-+ if (psb_verify_fw
-+ (dev_priv, ram_bank_size, MTX_CORE_CODE_MEM,
-+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr))
-+ {
-+ /* Firmware code upload failed */
-+ ret = 1;
-+ goto out;
-+ }
-+ if (psb_verify_fw
-+ (dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
-+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr))
-+ {
-+ /* Firmware data upload failed */
-+ ret = 1;
-+ goto out;
-+ }
-+
-+ /* -- Set starting PC address */
-+ psb_write_mtx_core_reg (dev_priv, MTX_PC, PC_START_ADDRESS);
-+
-+ /* -- Turn on the thread */
-+ PSB_WMSVDX32 (MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
-+
-+ /* Wait for the signature value to be written back */
-+ ret = psb_wait_for_register (dev_priv, MSVDX_COMMS_SIGNATURE, MSVDX_COMMS_SIGNATURE_VALUE, /* Required value */
-+ 0xffffffff /* Enabled bits */ );
-+ if (ret)
-+ {
-+ DRM_ERROR ("psb: MSVDX firmware fails to initialize.\n");
-+ goto out;
-+ }
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: MTX Initial indications OK\n");
-+ PSB_DEBUG_GENERAL ("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
-+ MSVDX_COMMS_AREA_ADDR);
-+out:
-+ if (raw)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX releasing firmware resouces....\n");
-+ release_firmware (raw);
-+ }
-+ return ret;
-+}
-+
-+static void
-+psb_free_ccb (struct drm_buffer_object **ccb)
-+{
-+ drm_bo_usage_deref_unlocked (ccb);
-+ *ccb = NULL;
-+}
-+
-+/*******************************************************************************
-+
-+ @Function psb_msvdx_reset
-+
-+ @Description
-+
-+ Reset chip and disable interrupts.
-+
-+ @Input psDeviceNode - device info. structure
-+
-+ @Return 0 - Success
-+ 1 - Failure
-+
-+******************************************************************************/
-+int
-+psb_msvdx_reset (struct drm_psb_private *dev_priv)
-+{
-+ int ret = 0;
-+
-+ /* Issue software reset */
-+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
-+
-+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0, /* Required value */
-+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK
-+ /* Enabled bits */ );
-+
-+ if (!ret)
-+ {
-+ /* Clear interrupt enabled flag */
-+ PSB_WMSVDX32 (0, MSVDX_HOST_INTERRUPT_ENABLE);
-+
-+ /* Clear any pending interrupt flags */
-+ PSB_WMSVDX32 (0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
-+ }
-+
-+ mutex_destroy (&dev_priv->msvdx_mutex);
-+
-+ return ret;
-+}
-+
-+static int
-+psb_allocate_ccb (struct drm_device *dev,
-+ struct drm_buffer_object **ccb,
-+ uint32_t * base_addr, int size)
-+{
-+ int ret;
-+ struct drm_bo_kmap_obj tmp_kmap;
-+ int is_iomem;
-+
-+ ret = drm_buffer_object_create (dev, size,
-+ drm_bo_type_kernel,
-+ DRM_BO_FLAG_READ |
-+ DRM_PSB_FLAG_MEM_KERNEL |
-+ DRM_BO_FLAG_NO_EVICT,
-+ DRM_BO_HINT_DONT_FENCE, 0, 0, ccb);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("Failed to allocate CCB.\n");
-+ *ccb = NULL;
-+ return 1;
-+ }
-+
-+ ret = drm_bo_kmap (*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("drm_bo_kmap failed ret: %d\n", ret);
-+ drm_bo_usage_deref_unlocked (ccb);
-+ *ccb = NULL;
-+ return 1;
-+ }
-+
-+ memset (drm_bmo_virtual (&tmp_kmap, &is_iomem), 0, size);
-+ drm_bo_kunmap (&tmp_kmap);
-+
-+ *base_addr = (*ccb)->offset;
-+ return 0;
-+}
-+
-+int
-+psb_msvdx_init (struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ uint32_t ui32Cmd;
-+ int ret;
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: psb_msvdx_init\n");
-+
-+ /*Initialize command msvdx queueing */
-+ INIT_LIST_HEAD (&dev_priv->msvdx_queue);
-+ mutex_init (&dev_priv->msvdx_mutex);
-+ spin_lock_init (&dev_priv->msvdx_lock);
-+ dev_priv->msvdx_busy = 0;
-+
-+ /*figure out the stepping*/
-+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &dev_priv->psb_rev_id );
-+
-+ /* Enable Clocks */
-+ PSB_DEBUG_GENERAL ("Enabling clocks\n");
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ /* Enable MMU by removing all bypass bits */
-+ PSB_WMSVDX32 (0, MSVDX_MMU_CONTROL0);
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: Setting up RENDEC\n");
-+ /* Allocate device virtual memory as required by rendec.... */
-+ if (!dev_priv->ccb0)
-+ {
-+ ret =
-+ psb_allocate_ccb (dev, &dev_priv->ccb0,
-+ &dev_priv->base_addr0, RENDEC_A_SIZE);
-+ if (ret)
-+ goto err_exit;
-+ }
-+
-+ if (!dev_priv->ccb1)
-+ {
-+ ret =
-+ psb_allocate_ccb (dev, &dev_priv->ccb1,
-+ &dev_priv->base_addr1, RENDEC_B_SIZE);
-+ if (ret)
-+ goto err_exit;
-+ }
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
-+ dev_priv->base_addr0, dev_priv->base_addr1);
-+
-+ PSB_WMSVDX32 (dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
-+ PSB_WMSVDX32 (dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
-+
-+ ui32Cmd = 0;
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
-+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
-+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE);
-+
-+ ui32Cmd = 0;
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
-+ RENDEC_DECODE_START_SIZE, 0);
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_W, 1);
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_R, 1);
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
-+ RENDEC_EXTERNAL_MEMORY, 1);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL1);
-+
-+ ui32Cmd = 0x00101010;
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT0);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT1);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT2);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT3);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT4);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT5);
-+
-+ ui32Cmd = 0;
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, 1);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL0);
-+
-+ ret = psb_setup_fw (dev);
-+ if (ret)
-+ goto err_exit;
-+
-+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
-+
-+ return 0;
-+
-+err_exit:
-+ if (dev_priv->ccb0)
-+ psb_free_ccb (&dev_priv->ccb0);
-+ if (dev_priv->ccb1)
-+ psb_free_ccb (&dev_priv->ccb1);
-+
-+ return 1;
-+}
-+
-+int
-+psb_msvdx_uninit (struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+
-+ /*Reset MSVDX chip */
-+ psb_msvdx_reset (dev_priv);
-+
-+// PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
-+ printk("set the msvdx clock to 0 in the %s\n", __FUNCTION__);
-+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
-+
-+ /*Clean up resources...*/
-+ if (dev_priv->ccb0)
-+ psb_free_ccb (&dev_priv->ccb0);
-+ if (dev_priv->ccb1)
-+ psb_free_ccb (&dev_priv->ccb1);
-+
-+ return 0;
-+}
-+
-+int psb_hw_info_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct drm_psb_hw_info *hw_info = data;
-+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
-+
-+ hw_info->rev_id = dev_priv->psb_rev_id;
-+
-+ /*read the fuse info to determine the caps*/
-+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
-+ pci_read_config_dword(pci_root, 0xD4, &hw_info->caps);
-+
-+ PSB_DEBUG_GENERAL("MSVDX: PSB caps: 0x%x\n", hw_info->caps);
-+ return 0;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_reg.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_reg.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,562 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+#ifndef _PSB_REG_H_
-+#define _PSB_REG_H_
-+
-+#define PSB_CR_CLKGATECTL 0x0000
-+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
-+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
-+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
-+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
-+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
-+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
-+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
-+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
-+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
-+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
-+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
-+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
-+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
-+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
-+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
-+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
-+
-+#define PSB_CR_CORE_ID 0x0010
-+#define _PSB_CC_ID_ID_SHIFT (16)
-+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
-+#define _PSB_CC_ID_CONFIG_SHIFT (0)
-+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
-+
-+#define PSB_CR_CORE_REVISION 0x0014
-+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
-+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
-+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
-+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
-+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
-+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
-+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
-+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
-+
-+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
-+
-+#define PSB_CR_SOFT_RESET 0x0080
-+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
-+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
-+#define _PSB_CS_RESET_USE_RESET (1 << 4)
-+#define _PSB_CS_RESET_TA_RESET (1 << 3)
-+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
-+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
-+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
-+
-+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
-+
-+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
-+
-+#define PSB_CR_EVENT_STATUS2 0x0118
-+
-+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
-+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
-+
-+#define PSB_CR_EVENT_STATUS 0x012C
-+
-+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
-+
-+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
-+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
-+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
-+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
-+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
-+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
-+#define _PSB_CE_SW_EVENT (1 << 14)
-+#define _PSB_CE_TA_FINISHED (1 << 13)
-+#define _PSB_CE_TA_TERMINATE (1 << 12)
-+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
-+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
-+
-+
-+#define PSB_USE_OFFSET_MASK 0x0007FFFF
-+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
-+#define PSB_CR_USE_CODE_BASE0 0x0A0C
-+#define PSB_CR_USE_CODE_BASE1 0x0A10
-+#define PSB_CR_USE_CODE_BASE2 0x0A14
-+#define PSB_CR_USE_CODE_BASE3 0x0A18
-+#define PSB_CR_USE_CODE_BASE4 0x0A1C
-+#define PSB_CR_USE_CODE_BASE5 0x0A20
-+#define PSB_CR_USE_CODE_BASE6 0x0A24
-+#define PSB_CR_USE_CODE_BASE7 0x0A28
-+#define PSB_CR_USE_CODE_BASE8 0x0A2C
-+#define PSB_CR_USE_CODE_BASE9 0x0A30
-+#define PSB_CR_USE_CODE_BASE10 0x0A34
-+#define PSB_CR_USE_CODE_BASE11 0x0A38
-+#define PSB_CR_USE_CODE_BASE12 0x0A3C
-+#define PSB_CR_USE_CODE_BASE13 0x0A40
-+#define PSB_CR_USE_CODE_BASE14 0x0A44
-+#define PSB_CR_USE_CODE_BASE15 0x0A48
-+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
-+#define _PSB_CUC_BASE_DM_SHIFT (25)
-+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
-+#define _PSB_CUC_BASE_ADDR_SHIFT (0) // 1024-bit aligned address?
-+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
-+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
-+#define _PSB_CUC_DM_VERTEX (0)
-+#define _PSB_CUC_DM_PIXEL (1)
-+#define _PSB_CUC_DM_RESERVED (2)
-+#define _PSB_CUC_DM_EDM (3)
-+
-+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
-+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) // 1MB aligned address
-+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
-+
-+#define PSB_CR_EVENT_KICKER 0x0AC4
-+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) // 128-bit aligned address
-+
-+#define PSB_CR_EVENT_KICK 0x0AC8
-+#define _PSB_CE_KICK_NOW (1 << 0)
-+
-+
-+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
-+
-+#define PSB_CR_BIF_CTRL 0x0C00
-+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
-+#define _PSB_CB_CTRL_INVALDC (1 << 3)
-+#define _PSB_CB_CTRL_FLUSH (1 << 2)
-+
-+#define PSB_CR_BIF_INT_STAT 0x0C04
-+
-+#define PSB_CR_BIF_FAULT 0x0C08
-+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
-+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
-+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
-+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
-+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
-+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
-+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
-+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
-+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
-+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
-+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
-+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
-+
-+#define PSB_CR_BIF_BANK0 0x0C78
-+
-+#define PSB_CR_BIF_BANK1 0x0C7C
-+
-+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
-+
-+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
-+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
-+
-+#define PSB_CR_2D_SOCIF 0x0E18
-+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
-+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
-+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
-+
-+#define PSB_CR_2D_BLIT_STATUS 0x0E04
-+#define _PSB_C2B_STATUS_BUSY (1 << 24)
-+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
-+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
-+
-+/*
-+ * 2D defs.
-+ */
-+
-+/*
-+ * 2D Slave Port Data : Block Header's Object Type
-+ */
-+
-+#define PSB_2D_CLIP_BH (0x00000000)
-+#define PSB_2D_PAT_BH (0x10000000)
-+#define PSB_2D_CTRL_BH (0x20000000)
-+#define PSB_2D_SRC_OFF_BH (0x30000000)
-+#define PSB_2D_MASK_OFF_BH (0x40000000)
-+#define PSB_2D_RESERVED1_BH (0x50000000)
-+#define PSB_2D_RESERVED2_BH (0x60000000)
-+#define PSB_2D_FENCE_BH (0x70000000)
-+#define PSB_2D_BLIT_BH (0x80000000)
-+#define PSB_2D_SRC_SURF_BH (0x90000000)
-+#define PSB_2D_DST_SURF_BH (0xA0000000)
-+#define PSB_2D_PAT_SURF_BH (0xB0000000)
-+#define PSB_2D_SRC_PAL_BH (0xC0000000)
-+#define PSB_2D_PAT_PAL_BH (0xD0000000)
-+#define PSB_2D_MASK_SURF_BH (0xE0000000)
-+#define PSB_2D_FLUSH_BH (0xF0000000)
-+
-+/*
-+ * Clip Definition block (PSB_2D_CLIP_BH)
-+ */
-+#define PSB_2D_CLIPCOUNT_MAX (1)
-+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
-+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
-+#define PSB_2D_CLIPCOUNT_SHIFT (0)
-+// clip rectangle min & max
-+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
-+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
-+#define PSB_2D_CLIP_XMAX_SHIFT (12)
-+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
-+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
-+#define PSB_2D_CLIP_XMIN_SHIFT (0)
-+// clip rectangle offset
-+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
-+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
-+#define PSB_2D_CLIP_YMAX_SHIFT (12)
-+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
-+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
-+#define PSB_2D_CLIP_YMIN_SHIFT (0)
-+
-+/*
-+ * Pattern Control (PSB_2D_PAT_BH)
-+ */
-+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
-+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
-+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
-+#define PSB_2D_PAT_WIDTH_SHIFT (5)
-+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
-+#define PSB_2D_PAT_YSTART_SHIFT (10)
-+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
-+#define PSB_2D_PAT_XSTART_SHIFT (15)
-+
-+/*
-+ * 2D Control block (PSB_2D_CTRL_BH)
-+ */
-+// Present Flags
-+#define PSB_2D_SRCCK_CTRL (0x00000001)
-+#define PSB_2D_DSTCK_CTRL (0x00000002)
-+#define PSB_2D_ALPHA_CTRL (0x00000004)
-+// Colour Key Colour (SRC/DST)
-+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
-+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
-+#define PSB_2D_CK_COL_SHIFT (0)
-+// Colour Key Mask (SRC/DST)
-+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
-+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
-+#define PSB_2D_CK_MASK_SHIFT (0)
-+// Alpha Control (Alpha/RGB)
-+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
-+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
-+#define PSB_2D_GBLALPHA_SHIFT (12)
-+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
-+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
-+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
-+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
-+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
-+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
-+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
-+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
-+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
-+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
-+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
-+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
-+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
-+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
-+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
-+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
-+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
-+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
-+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
-+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
-+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
-+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
-+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
-+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
-+
-+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
-+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
-+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
-+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
-+
-+/*
-+ *Source Offset (PSB_2D_SRC_OFF_BH)
-+ */
-+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
-+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
-+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
-+
-+/*
-+ * Mask Offset (PSB_2D_MASK_OFF_BH)
-+ */
-+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
-+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
-+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
-+
-+/*
-+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
-+ */
-+
-+/*
-+ *Blit Rectangle (PSB_2D_BLIT_BH)
-+ */
-+
-+#define PSB_2D_ROT_MASK (3<<25)
-+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
-+#define PSB_2D_ROT_NONE (0<<25)
-+#define PSB_2D_ROT_90DEGS (1<<25)
-+#define PSB_2D_ROT_180DEGS (2<<25)
-+#define PSB_2D_ROT_270DEGS (3<<25)
-+
-+#define PSB_2D_COPYORDER_MASK (3<<23)
-+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
-+#define PSB_2D_COPYORDER_TL2BR (0<<23)
-+#define PSB_2D_COPYORDER_BR2TL (1<<23)
-+#define PSB_2D_COPYORDER_TR2BL (2<<23)
-+#define PSB_2D_COPYORDER_BL2TR (3<<23)
-+
-+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
-+#define PSB_2D_DSTCK_DISABLE (0x00000000)
-+#define PSB_2D_DSTCK_PASS (0x00200000)
-+#define PSB_2D_DSTCK_REJECT (0x00400000)
-+
-+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
-+#define PSB_2D_SRCCK_DISABLE (0x00000000)
-+#define PSB_2D_SRCCK_PASS (0x00080000)
-+#define PSB_2D_SRCCK_REJECT (0x00100000)
-+
-+#define PSB_2D_CLIP_ENABLE (0x00040000)
-+
-+#define PSB_2D_ALPHA_ENABLE (0x00020000)
-+
-+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
-+#define PSB_2D_PAT_MASK (0x00010000)
-+#define PSB_2D_USE_PAT (0x00010000)
-+#define PSB_2D_USE_FILL (0x00000000)
-+/*
-+ * Tungsten Graphics note on rop codes: If rop A and rop B are
-+ * identical, the mask surface will not be read and need not be
-+ * set up.
-+ */
-+
-+#define PSB_2D_ROP3B_MASK (0x0000FF00)
-+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
-+#define PSB_2D_ROP3B_SHIFT (8)
-+// rop code A
-+#define PSB_2D_ROP3A_MASK (0x000000FF)
-+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
-+#define PSB_2D_ROP3A_SHIFT (0)
-+
-+#define PSB_2D_ROP4_MASK (0x0000FFFF)
-+/*
-+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
-+ * Fill Colour RGBA8888
-+ */
-+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
-+#define PSB_2D_FILLCOLOUR_SHIFT (0)
-+/*
-+ * DWORD1: (Always Present)
-+ * X Start (Dest)
-+ * Y Start (Dest)
-+ */
-+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
-+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
-+#define PSB_2D_DST_XSTART_SHIFT (12)
-+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
-+#define PSB_2D_DST_YSTART_SHIFT (0)
-+/*
-+ * DWORD2: (Always Present)
-+ * X Size (Dest)
-+ * Y Size (Dest)
-+ */
-+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
-+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
-+#define PSB_2D_DST_XSIZE_SHIFT (12)
-+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
-+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
-+#define PSB_2D_DST_YSIZE_SHIFT (0)
-+
-+/*
-+ * Source Surface (PSB_2D_SRC_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
-+#define PSB_2D_SRC_1_PAL (0x00000000)
-+#define PSB_2D_SRC_2_PAL (0x00008000)
-+#define PSB_2D_SRC_4_PAL (0x00010000)
-+#define PSB_2D_SRC_8_PAL (0x00018000)
-+#define PSB_2D_SRC_8_ALPHA (0x00020000)
-+#define PSB_2D_SRC_4_ALPHA (0x00028000)
-+#define PSB_2D_SRC_332RGB (0x00030000)
-+#define PSB_2D_SRC_4444ARGB (0x00038000)
-+#define PSB_2D_SRC_555RGB (0x00040000)
-+#define PSB_2D_SRC_1555ARGB (0x00048000)
-+#define PSB_2D_SRC_565RGB (0x00050000)
-+#define PSB_2D_SRC_0888ARGB (0x00058000)
-+#define PSB_2D_SRC_8888ARGB (0x00060000)
-+#define PSB_2D_SRC_8888UYVY (0x00068000)
-+#define PSB_2D_SRC_RESERVED (0x00070000)
-+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
-+
-+
-+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_SRC_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_SRC_ADDR_SHIFT (2)
-+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
-+#define PSB_2D_PAT_1_PAL (0x00000000)
-+#define PSB_2D_PAT_2_PAL (0x00008000)
-+#define PSB_2D_PAT_4_PAL (0x00010000)
-+#define PSB_2D_PAT_8_PAL (0x00018000)
-+#define PSB_2D_PAT_8_ALPHA (0x00020000)
-+#define PSB_2D_PAT_4_ALPHA (0x00028000)
-+#define PSB_2D_PAT_332RGB (0x00030000)
-+#define PSB_2D_PAT_4444ARGB (0x00038000)
-+#define PSB_2D_PAT_555RGB (0x00040000)
-+#define PSB_2D_PAT_1555ARGB (0x00048000)
-+#define PSB_2D_PAT_565RGB (0x00050000)
-+#define PSB_2D_PAT_0888ARGB (0x00058000)
-+#define PSB_2D_PAT_8888ARGB (0x00060000)
-+
-+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_PAT_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_PAT_ADDR_SHIFT (2)
-+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Destination Surface (PSB_2D_DST_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
-+#define PSB_2D_DST_332RGB (0x00030000)
-+#define PSB_2D_DST_4444ARGB (0x00038000)
-+#define PSB_2D_DST_555RGB (0x00040000)
-+#define PSB_2D_DST_1555ARGB (0x00048000)
-+#define PSB_2D_DST_565RGB (0x00050000)
-+#define PSB_2D_DST_0888ARGB (0x00058000)
-+#define PSB_2D_DST_8888ARGB (0x00060000)
-+#define PSB_2D_DST_8888AYUV (0x00070000)
-+
-+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_DST_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_DST_ADDR_SHIFT (2)
-+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Mask Surface (PSB_2D_MASK_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_MASK_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_MASK_ADDR_SHIFT (2)
-+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Source Palette (PSB_2D_SRC_PAL_BH)
-+ */
-+
-+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
-+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
-+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
-+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
-+
-+/*
-+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
-+ */
-+
-+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
-+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
-+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
-+#define PSB_2D_PATPAL_BYTEALIGN (1024)
-+
-+/*
-+ * Rop3 Codes (2 LS bytes)
-+ */
-+
-+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
-+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
-+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
-+#define PSB_2D_ROP3_BLACKNESS (0x0000)
-+#define PSB_2D_ROP3_SRC (0xCC)
-+#define PSB_2D_ROP3_PAT (0xF0)
-+#define PSB_2D_ROP3_DST (0xAA)
-+
-+
-+/*
-+ * Sizes.
-+ */
-+
-+#define PSB_SCENE_HW_COOKIE_SIZE 16
-+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
-+
-+/*
-+ * Scene stuff.
-+ */
-+
-+#define PSB_NUM_HW_SCENES 2
-+
-+/*
-+ * Scheduler completion actions.
-+ */
-+
-+#define PSB_RASTER_BLOCK 0
-+#define PSB_RASTER 1
-+#define PSB_RETURN 2
-+#define PSB_TA 3
-+
-+
-+#endif
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_regman.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_regman.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,175 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+
-+struct psb_use_reg {
-+ struct drm_reg reg;
-+ struct drm_psb_private *dev_priv;
-+ uint32_t reg_seq;
-+ uint32_t base;
-+ uint32_t data_master;
-+};
-+
-+struct psb_use_reg_data {
-+ uint32_t base;
-+ uint32_t size;
-+ uint32_t data_master;
-+};
-+
-+static int psb_use_reg_reusable(const struct drm_reg *reg, const void *data)
-+{
-+ struct psb_use_reg *use_reg =
-+ container_of(reg, struct psb_use_reg, reg);
-+ struct psb_use_reg_data *use_data = (struct psb_use_reg_data *)data;
-+
-+ return ((use_reg->base <= use_data->base) &&
-+ (use_reg->base + PSB_USE_OFFSET_SIZE >
-+ use_data->base + use_data->size) &&
-+ use_reg->data_master == use_data->data_master);
-+}
-+
-+static int psb_use_reg_set(struct psb_use_reg *use_reg,
-+ const struct psb_use_reg_data *use_data)
-+{
-+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
-+
-+ if (use_reg->reg.fence == NULL)
-+ use_reg->data_master = use_data->data_master;
-+
-+ if (use_reg->reg.fence == NULL &&
-+ !psb_use_reg_reusable(&use_reg->reg, (const void *)use_data)) {
-+
-+ use_reg->base = use_data->base & ~PSB_USE_OFFSET_MASK;
-+ use_reg->data_master = use_data->data_master;
-+
-+ if (!psb_use_reg_reusable(&use_reg->reg,
-+ (const void *)use_data)) {
-+ DRM_ERROR("USE base mechanism didn't support "
-+ "buffer size or alignment\n");
-+ return -EINVAL;
-+ }
-+
-+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
-+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
-+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
-+ }
-+ return 0;
-+
-+}
-+
-+int psb_grab_use_base(struct drm_psb_private *dev_priv,
-+ unsigned long base,
-+ unsigned long size,
-+ unsigned int data_master,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int no_wait,
-+ int interruptible, int *r_reg, uint32_t * r_offset)
-+{
-+ struct psb_use_reg_data use_data = {
-+ .base = base,
-+ .size = size,
-+ .data_master = data_master
-+ };
-+ int ret;
-+
-+ struct drm_reg *reg;
-+ struct psb_use_reg *use_reg;
-+
-+ ret = drm_regs_alloc(&dev_priv->use_manager,
-+ (const void *)&use_data,
-+ fence_class,
-+ fence_type, interruptible, no_wait, &reg);
-+ if (ret)
-+ return ret;
-+
-+ use_reg = container_of(reg, struct psb_use_reg, reg);
-+ ret = psb_use_reg_set(use_reg, &use_data);
-+
-+ if (ret)
-+ return ret;
-+
-+ *r_reg = use_reg->reg_seq;
-+ *r_offset = base - use_reg->base;
-+
-+ return 0;
-+};
-+
-+static void psb_use_reg_destroy(struct drm_reg *reg)
-+{
-+ struct psb_use_reg *use_reg =
-+ container_of(reg, struct psb_use_reg, reg);
-+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
-+
-+ PSB_WSGX32(PSB_ALPL(0, _PSB_CUC_BASE_ADDR),
-+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
-+
-+ drm_free(use_reg, sizeof(*use_reg), DRM_MEM_DRIVER);
-+}
-+
-+int psb_init_use_base(struct drm_psb_private *dev_priv,
-+ unsigned int reg_start, unsigned int reg_num)
-+{
-+ struct psb_use_reg *use_reg;
-+ int i;
-+ int ret = 0;
-+
-+ mutex_lock(&dev_priv->cmdbuf_mutex);
-+
-+ drm_regs_init(&dev_priv->use_manager,
-+ &psb_use_reg_reusable, &psb_use_reg_destroy);
-+
-+ for (i = reg_start; i < reg_start + reg_num; ++i) {
-+ use_reg = drm_calloc(1, sizeof(*use_reg), DRM_MEM_DRIVER);
-+ if (!use_reg) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ use_reg->dev_priv = dev_priv;
-+ use_reg->reg_seq = i;
-+ use_reg->base = 0;
-+ use_reg->data_master = _PSB_CUC_DM_PIXEL;
-+
-+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
-+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
-+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
-+
-+ drm_regs_add(&dev_priv->use_manager, &use_reg->reg);
-+ }
-+ out:
-+ mutex_unlock(&dev_priv->cmdbuf_mutex);
-+
-+ return ret;
-+
-+}
-+
-+void psb_takedown_use_base(struct drm_psb_private *dev_priv)
-+{
-+ mutex_lock(&dev_priv->cmdbuf_mutex);
-+ drm_regs_free(&dev_priv->use_manager);
-+ mutex_unlock(&dev_priv->cmdbuf_mutex);
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_reset.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_reset.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,374 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors:
-+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "psb_scene.h"
-+#include "psb_msvdx.h"
-+
-+#define PSB_2D_TIMEOUT_MSEC 100
-+
-+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
-+{
-+ uint32_t val;
-+
-+ val = _PSB_CS_RESET_BIF_RESET |
-+ _PSB_CS_RESET_DPM_RESET |
-+ _PSB_CS_RESET_TA_RESET |
-+ _PSB_CS_RESET_USE_RESET |
-+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
-+
-+ if (reset_2d)
-+ val |= _PSB_CS_RESET_TWOD_RESET;
-+
-+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
-+ (void)PSB_RSGX32(PSB_CR_SOFT_RESET);
-+
-+ msleep(1);
-+
-+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
-+ wmb();
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
-+ PSB_CR_BIF_CTRL);
-+ wmb();
-+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-+
-+ msleep(1);
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
-+ PSB_CR_BIF_CTRL);
-+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-+}
-+
-+void psb_print_pagefault(struct drm_psb_private *dev_priv)
-+{
-+ uint32_t val;
-+ uint32_t addr;
-+
-+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
-+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
-+
-+ if (val) {
-+ if (val & _PSB_CBI_STAT_PF_N_RW)
-+ DRM_ERROR("Poulsbo MMU page fault:\n");
-+ else
-+ DRM_ERROR("Poulsbo MMU read / write "
-+ "protection fault:\n");
-+
-+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
-+ DRM_ERROR("\tCache requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_TA)
-+ DRM_ERROR("\tTA requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_VDM)
-+ DRM_ERROR("\tVDM requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_2D)
-+ DRM_ERROR("\t2D requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_PBE)
-+ DRM_ERROR("\tPBE requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_TSP)
-+ DRM_ERROR("\tTSP requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_ISP)
-+ DRM_ERROR("\tISP requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
-+ DRM_ERROR("\tUSSEPDS requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_HOST)
-+ DRM_ERROR("\tHost requestor.\n");
-+
-+ DRM_ERROR("\tMMU failing address is 0x%08x.\n", (unsigned)addr);
-+ }
-+}
-+
-+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
-+{
-+ struct timer_list *wt = &dev_priv->watchdog_timer;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ if (dev_priv->timer_available && !timer_pending(wt)) {
-+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
-+ add_timer(wt);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+}
-+
-+#if 0
-+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
-+ unsigned int engine, int *lockup, int *idle)
-+{
-+ uint32_t received_seq;
-+
-+ received_seq = dev_priv->comm[engine << 4];
-+ spin_lock(&dev_priv->sequence_lock);
-+ *idle = (received_seq == dev_priv->sequence[engine]);
-+ spin_unlock(&dev_priv->sequence_lock);
-+
-+ if (*idle) {
-+ dev_priv->idle[engine] = 1;
-+ *lockup = 0;
-+ return;
-+ }
-+
-+ if (dev_priv->idle[engine]) {
-+ dev_priv->idle[engine] = 0;
-+ dev_priv->last_sequence[engine] = received_seq;
-+ *lockup = 0;
-+ return;
-+ }
-+
-+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
-+}
-+
-+#endif
-+static void psb_watchdog_func(unsigned long data)
-+{
-+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
-+ int lockup;
-+ int msvdx_lockup;
-+ int msvdx_idle;
-+ int lockup_2d;
-+ int idle_2d;
-+ int idle;
-+ unsigned long irq_flags;
-+
-+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
-+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
-+#if 0
-+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
-+#else
-+ lockup_2d = 0;
-+ idle_2d = 1;
-+#endif
-+ if (lockup || msvdx_lockup || lockup_2d) {
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 0;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+ if (lockup) {
-+ psb_print_pagefault(dev_priv);
-+ schedule_work(&dev_priv->watchdog_wq);
-+ }
-+ if (msvdx_lockup)
-+ schedule_work(&dev_priv->msvdx_watchdog_wq);
-+ }
-+ if (!idle || !msvdx_idle || !idle_2d)
-+ psb_schedule_watchdog(dev_priv);
-+}
-+
-+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct psb_msvdx_cmd_queue *msvdx_cmd;
-+ struct list_head *list, *next;
-+ /*Flush the msvdx cmd queue and signal all fences in the queue */
-+ list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
-+ msvdx_cmd = list_entry(list, struct psb_msvdx_cmd_queue, head);
-+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
-+ msvdx_cmd->sequence);
-+ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
-+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
-+ dev_priv->msvdx_current_sequence,
-+ DRM_FENCE_TYPE_EXE, DRM_CMD_HANG);
-+ list_del(list);
-+ kfree(msvdx_cmd->cmd);
-+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
-+ DRM_MEM_DRIVER);
-+ }
-+}
-+
-+static void psb_msvdx_reset_wq(struct work_struct *work)
-+{
-+ struct drm_psb_private *dev_priv =
-+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
-+
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ mutex_lock(&dev_priv->msvdx_mutex);
-+ dev_priv->msvdx_needs_reset = 1;
-+ dev_priv->msvdx_current_sequence++;
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
-+ dev_priv->msvdx_current_sequence);
-+
-+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
-+ dev_priv->msvdx_current_sequence, DRM_FENCE_TYPE_EXE,
-+ DRM_CMD_HANG);
-+
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 1;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+
-+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
-+ psb_msvdx_flush_cmd_queue(scheduler->dev);
-+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
-+
-+ psb_schedule_watchdog(dev_priv);
-+ mutex_unlock(&dev_priv->msvdx_mutex);
-+}
-+
-+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_xhw_buf buf;
-+ uint32_t bif_ctrl;
-+
-+ INIT_LIST_HEAD(&buf.head);
-+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
-+ PSB_WSGX32(bif_ctrl |
-+ _PSB_CB_CTRL_CLEAR_FAULT |
-+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
-+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-+ msleep(1);
-+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
-+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-+ return psb_xhw_reset_dpm(dev_priv, &buf);
-+}
-+
-+/*
-+ * Block command submission and reset hardware and schedulers.
-+ */
-+
-+static void psb_reset_wq(struct work_struct *work)
-+{
-+ struct drm_psb_private *dev_priv =
-+ container_of(work, struct drm_psb_private, watchdog_wq);
-+ int lockup_2d;
-+ int idle_2d;
-+ unsigned long irq_flags;
-+ int ret;
-+ int reset_count = 0;
-+ struct psb_xhw_buf buf;
-+ uint32_t xhw_lockup;
-+
-+ /*
-+ * Block command submission.
-+ */
-+
-+ mutex_lock(&dev_priv->reset_mutex);
-+
-+ INIT_LIST_HEAD(&buf.head);
-+ if (psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup) == 0) {
-+ if (xhw_lockup == 0 && psb_extend_raster_timeout(dev_priv) == 0) {
-+ /*
-+ * no lockup, just re-schedule
-+ */
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 1;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
-+ irq_flags);
-+ psb_schedule_watchdog(dev_priv);
-+ mutex_unlock(&dev_priv->reset_mutex);
-+ return;
-+ }
-+ }
-+#if 0
-+ msleep(PSB_2D_TIMEOUT_MSEC);
-+
-+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
-+
-+ if (lockup_2d) {
-+ uint32_t seq_2d;
-+ spin_lock(&dev_priv->sequence_lock);
-+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
-+ spin_unlock(&dev_priv->sequence_lock);
-+ psb_fence_error(dev_priv->scheduler.dev,
-+ PSB_ENGINE_2D,
-+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
-+ DRM_INFO("Resetting 2D engine.\n");
-+ }
-+
-+ psb_reset(dev_priv, lockup_2d);
-+#else
-+ (void)lockup_2d;
-+ (void)idle_2d;
-+ psb_reset(dev_priv, 0);
-+#endif
-+ (void)psb_xhw_mmu_reset(dev_priv);
-+ DRM_INFO("Resetting scheduler.\n");
-+ psb_scheduler_pause(dev_priv);
-+ psb_scheduler_reset(dev_priv, -EBUSY);
-+ psb_scheduler_ta_mem_check(dev_priv);
-+
-+ while (dev_priv->ta_mem &&
-+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
-+
-+ /*
-+ * TA memory is currently fenced so offsets
-+ * are valid. Reload offsets into the dpm now.
-+ */
-+
-+ struct psb_xhw_buf buf;
-+ INIT_LIST_HEAD(&buf.head);
-+
-+ msleep(100);
-+ DRM_INFO("Trying to reload TA memory.\n");
-+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
-+ PSB_TA_MEM_FLAG_TA |
-+ PSB_TA_MEM_FLAG_RASTER |
-+ PSB_TA_MEM_FLAG_HOSTA |
-+ PSB_TA_MEM_FLAG_HOSTD |
-+ PSB_TA_MEM_FLAG_INIT,
-+ dev_priv->ta_mem->ta_memory->offset,
-+ dev_priv->ta_mem->hw_data->offset,
-+ dev_priv->ta_mem->hw_cookie);
-+ if (!ret)
-+ break;
-+
-+ psb_reset(dev_priv, 0);
-+ (void)psb_xhw_mmu_reset(dev_priv);
-+ }
-+
-+ psb_scheduler_restart(dev_priv);
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 1;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+ mutex_unlock(&dev_priv->reset_mutex);
-+}
-+
-+void psb_watchdog_init(struct drm_psb_private *dev_priv)
-+{
-+ struct timer_list *wt = &dev_priv->watchdog_timer;
-+ unsigned long irq_flags;
-+
-+ dev_priv->watchdog_lock = SPIN_LOCK_UNLOCKED;
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ init_timer(wt);
-+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
-+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
-+ wt->data = (unsigned long)dev_priv;
-+ wt->function = &psb_watchdog_func;
-+ dev_priv->timer_available = 1;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+}
-+
-+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 0;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+ (void)del_timer_sync(&dev_priv->watchdog_timer);
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_scene.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_scene.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,531 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_scene.h"
-+
-+void psb_clear_scene_atomic(struct psb_scene *scene)
-+{
-+ int i;
-+ struct page *page;
-+ void *v;
-+
-+ for (i = 0; i < scene->clear_num_pages; ++i) {
-+ page = drm_ttm_get_page(scene->hw_data->ttm,
-+ scene->clear_p_start + i);
-+ if (in_irq())
-+ v = kmap_atomic(page, KM_IRQ0);
-+ else
-+ v = kmap_atomic(page, KM_USER0);
-+
-+ memset(v, 0, PAGE_SIZE);
-+
-+ if (in_irq())
-+ kunmap_atomic(v, KM_IRQ0);
-+ else
-+ kunmap_atomic(v, KM_USER0);
-+ }
-+}
-+
-+int psb_clear_scene(struct psb_scene *scene)
-+{
-+ struct drm_bo_kmap_obj bmo;
-+ int is_iomem;
-+ void *addr;
-+
-+ int ret = drm_bo_kmap(scene->hw_data, scene->clear_p_start,
-+ scene->clear_num_pages, &bmo);
-+
-+ PSB_DEBUG_RENDER("Scene clear\n");
-+ if (ret)
-+ return ret;
-+
-+ addr = drm_bmo_virtual(&bmo, &is_iomem);
-+ BUG_ON(is_iomem);
-+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
-+ drm_bo_kunmap(&bmo);
-+
-+ return 0;
-+}
-+
-+static void psb_destroy_scene_devlocked(struct psb_scene *scene)
-+{
-+ if (!scene)
-+ return;
-+
-+ PSB_DEBUG_RENDER("Scene destroy\n");
-+ drm_bo_usage_deref_locked(&scene->hw_data);
-+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
-+}
-+
-+void psb_scene_unref_devlocked(struct psb_scene **scene)
-+{
-+ struct psb_scene *tmp_scene = *scene;
-+
-+ PSB_DEBUG_RENDER("Scene unref\n");
-+ *scene = NULL;
-+ if (atomic_dec_and_test(&tmp_scene->ref_count)) {
-+ psb_scheduler_remove_scene_refs(tmp_scene);
-+ psb_destroy_scene_devlocked(tmp_scene);
-+ }
-+}
-+
-+struct psb_scene *psb_scene_ref(struct psb_scene *src)
-+{
-+ PSB_DEBUG_RENDER("Scene ref\n");
-+ atomic_inc(&src->ref_count);
-+ return src;
-+}
-+
-+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
-+ uint32_t w, uint32_t h)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ int ret = -EINVAL;
-+ struct psb_scene *scene;
-+ uint32_t bo_size;
-+ struct psb_xhw_buf buf;
-+
-+ PSB_DEBUG_RENDER("Alloc scene w %u h %u\n", w, h);
-+
-+ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
-+
-+ if (!scene) {
-+ DRM_ERROR("Out of memory allocating scene object.\n");
-+ return NULL;
-+ }
-+
-+ scene->dev = dev;
-+ scene->w = w;
-+ scene->h = h;
-+ scene->hw_scene = NULL;
-+ atomic_set(&scene->ref_count, 1);
-+
-+ INIT_LIST_HEAD(&buf.head);
-+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
-+ scene->hw_cookie, &bo_size,
-+ &scene->clear_p_start,
-+ &scene->clear_num_pages);
-+ if (ret)
-+ goto out_err;
-+
-+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
-+ DRM_PSB_FLAG_MEM_MMU |
-+ DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_CACHED |
-+ PSB_BO_FLAG_SCENE |
-+ DRM_BO_FLAG_WRITE,
-+ DRM_BO_HINT_DONT_FENCE,
-+ 0, 0, &scene->hw_data);
-+ if (ret)
-+ goto out_err;
-+
-+ return scene;
-+ out_err:
-+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
-+ return NULL;
-+}
-+
-+int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
-+ uint64_t mask,
-+ uint32_t hint,
-+ uint32_t w,
-+ uint32_t h,
-+ int final_pass, struct psb_scene **scene_p)
-+{
-+ struct drm_device *dev = pool->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
-+ int ret;
-+ unsigned long irq_flags;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ uint32_t bin_pt_offset;
-+ uint32_t bin_param_offset;
-+
-+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", pool->cur_scene);
-+
-+ if (unlikely(!dev_priv->ta_mem)) {
-+ dev_priv->ta_mem =
-+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
-+ if (!dev_priv->ta_mem)
-+ return -ENOMEM;
-+
-+ bin_pt_offset = ~0;
-+ bin_param_offset = ~0;
-+ } else {
-+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
-+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
-+ }
-+
-+ pool->w = w;
-+ pool->h = h;
-+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ DRM_ERROR("Trying to resize a dirty scene.\n");
-+ return -EINVAL;
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ mutex_lock(&dev->struct_mutex);
-+ psb_scene_unref_devlocked(&pool->scenes[pool->cur_scene]);
-+ mutex_unlock(&dev->struct_mutex);
-+ scene = NULL;
-+ }
-+
-+ if (!scene) {
-+ pool->scenes[pool->cur_scene] = scene =
-+ psb_alloc_scene(pool->dev, pool->w, pool->h);
-+
-+ if (!scene)
-+ return -ENOMEM;
-+
-+ scene->flags = PSB_SCENE_FLAG_CLEARED;
-+ }
-+
-+ /*
-+ * FIXME: We need atomic bit manipulation here for the
-+ * scheduler. For now use the spinlock.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
-+ mutex_lock(&scene->hw_data->mutex);
-+ ret = drm_bo_wait(scene->hw_data, 0, 0, 0);
-+ mutex_unlock(&scene->hw_data->mutex);
-+ if (ret)
-+ return ret;
-+
-+ ret = psb_clear_scene(scene);
-+
-+ if (ret)
-+ return ret;
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ ret = drm_bo_do_validate(scene->hw_data, flags, mask, hint,
-+ PSB_ENGINE_TA, 0, NULL);
-+ if (ret)
-+ return ret;
-+ ret = drm_bo_do_validate(dev_priv->ta_mem->hw_data, 0, 0, 0,
-+ PSB_ENGINE_TA, 0, NULL);
-+ if (ret)
-+ return ret;
-+ ret = drm_bo_do_validate(dev_priv->ta_mem->ta_memory, 0, 0, 0,
-+ PSB_ENGINE_TA, 0, NULL);
-+ if (ret)
-+ return ret;
-+
-+ if (unlikely(bin_param_offset !=
-+ dev_priv->ta_mem->ta_memory->offset ||
-+ bin_pt_offset !=
-+ dev_priv->ta_mem->hw_data->offset ||
-+ dev_priv->force_ta_mem_load)) {
-+
-+ struct psb_xhw_buf buf;
-+
-+ INIT_LIST_HEAD(&buf.head);
-+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
-+ PSB_TA_MEM_FLAG_TA |
-+ PSB_TA_MEM_FLAG_RASTER |
-+ PSB_TA_MEM_FLAG_HOSTA |
-+ PSB_TA_MEM_FLAG_HOSTD |
-+ PSB_TA_MEM_FLAG_INIT,
-+ dev_priv->ta_mem->ta_memory->offset,
-+ dev_priv->ta_mem->hw_data->offset,
-+ dev_priv->ta_mem->hw_cookie);
-+ if (ret)
-+ return ret;
-+
-+ dev_priv->force_ta_mem_load = 0;
-+ }
-+
-+ if (final_pass) {
-+
-+ /*
-+ * Clear the scene on next use. Advance the scene counter.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
-+ }
-+
-+ *scene_p = psb_scene_ref(scene);
-+ return 0;
-+}
-+
-+static void psb_scene_pool_destroy_devlocked(struct psb_scene_pool *pool)
-+{
-+ int i;
-+
-+ if (!pool)
-+ return;
-+
-+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
-+ for (i = 0; i < pool->num_scenes; ++i) {
-+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
-+ (unsigned long)pool->scenes[i]);
-+ if (pool->scenes[i])
-+ psb_scene_unref_devlocked(&pool->scenes[i]);
-+ }
-+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
-+}
-+
-+void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool)
-+{
-+ struct psb_scene_pool *tmp_pool = *pool;
-+ struct drm_device *dev = tmp_pool->dev;
-+
-+ PSB_DEBUG_RENDER("Scene pool unref\n");
-+ (void)dev;
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ *pool = NULL;
-+ if (--tmp_pool->ref_count == 0)
-+ psb_scene_pool_destroy_devlocked(tmp_pool);
-+}
-+
-+struct psb_scene_pool *psb_scene_pool_ref_devlocked(struct psb_scene_pool *src)
-+{
-+ ++src->ref_count;
-+ return src;
-+}
-+
-+/*
-+ * Callback for user object manager.
-+ */
-+
-+static void psb_scene_pool_destroy(struct drm_file *priv,
-+ struct drm_user_object *base)
-+{
-+ struct psb_scene_pool *pool =
-+ drm_user_object_entry(base, struct psb_scene_pool, user);
-+
-+ psb_scene_pool_unref_devlocked(&pool);
-+}
-+
-+struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file *priv,
-+ uint32_t handle,
-+ int check_owner)
-+{
-+ struct drm_user_object *uo;
-+ struct psb_scene_pool *pool;
-+
-+ uo = drm_lookup_user_object(priv, handle);
-+ if (!uo || (uo->type != PSB_USER_OBJECT_SCENE_POOL)) {
-+ DRM_ERROR("Could not find scene pool object 0x%08x\n", handle);
-+ return NULL;
-+ }
-+
-+ if (check_owner && priv != uo->owner) {
-+ if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
-+ return NULL;
-+ }
-+
-+ pool = drm_user_object_entry(uo, struct psb_scene_pool, user);
-+ return psb_scene_pool_ref_devlocked(pool);
-+}
-+
-+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
-+ int shareable,
-+ uint32_t num_scenes,
-+ uint32_t w, uint32_t h)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct psb_scene_pool *pool;
-+ int ret;
-+
-+ PSB_DEBUG_RENDER("Scene pool alloc\n");
-+ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
-+ if (!pool) {
-+ DRM_ERROR("Out of memory allocating scene pool object.\n");
-+ return NULL;
-+ }
-+ pool->w = w;
-+ pool->h = h;
-+ pool->dev = dev;
-+ pool->num_scenes = num_scenes;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_user_object(priv, &pool->user, shareable);
-+ if (ret)
-+ goto out_err;
-+
-+ pool->user.type = PSB_USER_OBJECT_SCENE_POOL;
-+ pool->user.remove = &psb_scene_pool_destroy;
-+ pool->ref_count = 2;
-+ mutex_unlock(&dev->struct_mutex);
-+ return pool;
-+ out_err:
-+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
-+ return NULL;
-+}
-+
-+/*
-+ * Code to support multiple ta memory buffers.
-+ */
-+
-+static void psb_destroy_ta_mem_devlocked(struct psb_ta_mem *ta_mem)
-+{
-+ if (!ta_mem)
-+ return;
-+
-+ drm_bo_usage_deref_locked(&ta_mem->hw_data);
-+ drm_bo_usage_deref_locked(&ta_mem->ta_memory);
-+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
-+}
-+
-+void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem)
-+{
-+ struct psb_ta_mem *tmp_ta_mem = *ta_mem;
-+ struct drm_device *dev = tmp_ta_mem->dev;
-+
-+ (void)dev;
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ *ta_mem = NULL;
-+ if (--tmp_ta_mem->ref_count == 0)
-+ psb_destroy_ta_mem_devlocked(tmp_ta_mem);
-+}
-+
-+void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst, struct psb_ta_mem *src)
-+{
-+ struct drm_device *dev = src->dev;
-+
-+ (void)dev;
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ *dst = src;
-+ ++src->ref_count;
-+}
-+
-+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ int ret = -EINVAL;
-+ struct psb_ta_mem *ta_mem;
-+ uint32_t bo_size;
-+ struct psb_xhw_buf buf;
-+
-+ INIT_LIST_HEAD(&buf.head);
-+
-+ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
-+
-+ if (!ta_mem) {
-+ DRM_ERROR("Out of memory allocating parameter memory.\n");
-+ return NULL;
-+ }
-+
-+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
-+ ta_mem->hw_cookie, &bo_size);
-+ if (ret == -ENOMEM) {
-+ DRM_ERROR("Parameter memory size is too small.\n");
-+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
-+ (unsigned int)(pages * (PAGE_SIZE / 1024)));
-+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
-+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
-+ (unsigned int)(bo_size / 1024));
-+ DRM_INFO("\"ta_mem_size\" parameter!\n");
-+ }
-+ if (ret)
-+ goto out_err0;
-+
-+ bo_size = pages * PAGE_SIZE;
-+ ta_mem->dev = dev;
-+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
-+ DRM_PSB_FLAG_MEM_MMU | DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_WRITE |
-+ PSB_BO_FLAG_SCENE,
-+ DRM_BO_HINT_DONT_FENCE, 0, 0,
-+ &ta_mem->hw_data);
-+ if (ret)
-+ goto out_err0;
-+
-+ ret =
-+ drm_buffer_object_create(dev, pages << PAGE_SHIFT,
-+ drm_bo_type_kernel,
-+ DRM_PSB_FLAG_MEM_RASTGEOM |
-+ DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_WRITE |
-+ PSB_BO_FLAG_SCENE,
-+ DRM_BO_HINT_DONT_FENCE, 0,
-+ 1024 * 1024 >> PAGE_SHIFT,
-+ &ta_mem->ta_memory);
-+ if (ret)
-+ goto out_err1;
-+
-+ ta_mem->ref_count = 1;
-+ return ta_mem;
-+ out_err1:
-+ drm_bo_usage_deref_unlocked(&ta_mem->hw_data);
-+ out_err0:
-+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
-+ return NULL;
-+}
-+
-+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_psb_scene *scene = (struct drm_psb_scene *)data;
-+ struct drm_user_object *uo;
-+ struct drm_ref_object *ro;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (!scene->handle_valid)
-+ goto out_unlock;
-+
-+ uo = drm_lookup_user_object(file_priv, scene->handle);
-+ if (!uo) {
-+ ret = -EINVAL;
-+ goto out_unlock;
-+ }
-+ if (uo->type != PSB_USER_OBJECT_SCENE_POOL) {
-+ DRM_ERROR("Not a scene pool object.\n");
-+ ret = -EINVAL;
-+ goto out_unlock;
-+ }
-+ if (uo->owner != file_priv) {
-+ DRM_ERROR("Not owner of scene pool object.\n");
-+ ret = -EPERM;
-+ goto out_unlock;
-+ }
-+
-+ scene->handle_valid = 0;
-+ ro = drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE);
-+ BUG_ON(!ro);
-+ drm_remove_ref_object(file_priv, ro);
-+
-+ out_unlock:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_scene.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_scene.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,112 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+
-+#ifndef _PSB_SCENE_H_
-+#define _PSB_SCENE_H_
-+
-+#define PSB_USER_OBJECT_SCENE_POOL drm_driver_type0
-+#define PSB_USER_OBJECT_TA_MEM drm_driver_type1
-+#define PSB_MAX_NUM_SCENES 8
-+
-+struct psb_hw_scene;
-+struct psb_hw_ta_mem;
-+
-+struct psb_scene_pool {
-+ struct drm_device *dev;
-+ struct drm_user_object user;
-+ uint32_t ref_count;
-+ uint32_t w;
-+ uint32_t h;
-+ uint32_t cur_scene;
-+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
-+ uint32_t num_scenes;
-+};
-+
-+struct psb_scene {
-+ struct drm_device *dev;
-+ atomic_t ref_count;
-+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
-+ uint32_t bo_size;
-+ uint32_t w;
-+ uint32_t h;
-+ struct psb_ta_mem *ta_mem;
-+ struct psb_hw_scene *hw_scene;
-+ struct drm_buffer_object *hw_data;
-+ uint32_t flags;
-+ uint32_t clear_p_start;
-+ uint32_t clear_num_pages;
-+};
-+
-+struct psb_scene_entry {
-+ struct list_head head;
-+ struct psb_scene *scene;
-+};
-+
-+struct psb_user_scene {
-+ struct drm_device *dev;
-+ struct drm_user_object user;
-+};
-+
-+struct psb_ta_mem {
-+ struct drm_device *dev;
-+ struct drm_user_object user;
-+ uint32_t ref_count;
-+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
-+ uint32_t bo_size;
-+ struct drm_buffer_object *ta_memory;
-+ struct drm_buffer_object *hw_data;
-+ int is_deallocating;
-+ int deallocating_scheduled;
-+};
-+
-+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
-+ int shareable,
-+ uint32_t num_scenes,
-+ uint32_t w, uint32_t h);
-+extern void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool);
-+extern struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file
-+ *priv,
-+ uint32_t handle,
-+ int check_owner);
-+extern int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
-+ uint64_t mask, uint32_t hint, uint32_t w,
-+ uint32_t h, int final_pass,
-+ struct psb_scene **scene_p);
-+extern void psb_scene_unref_devlocked(struct psb_scene **scene);
-+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
-+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+
-+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
-+{
-+ return pool->user.hash.key;
-+}
-+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
-+ uint32_t pages);
-+extern void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst,
-+ struct psb_ta_mem *src);
-+extern void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem);
-+
-+#endif
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,1445 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drm.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "psb_scene.h"
-+
-+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
-+#define PSB_RASTER_TIMEOUT (DRM_HZ / 2)
-+#define PSB_TA_TIMEOUT (DRM_HZ / 5)
-+
-+#undef PSB_SOFTWARE_WORKAHEAD
-+
-+#ifdef PSB_STABLE_SETTING
-+
-+/*
-+ * Software blocks completely while the engines are working so there can be no
-+ * overlap.
-+ */
-+
-+#define PSB_WAIT_FOR_RASTER_COMPLETION
-+#define PSB_WAIT_FOR_TA_COMPLETION
-+
-+#elif defined(PSB_PARANOID_SETTING)
-+/*
-+ * Software blocks "almost" while the engines are working so there can be no
-+ * overlap.
-+ */
-+
-+#define PSB_WAIT_FOR_RASTER_COMPLETION
-+#define PSB_WAIT_FOR_TA_COMPLETION
-+#define PSB_BE_PARANOID
-+
-+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
-+/*
-+ * Software leaps ahead while the rasterizer is running and prepares
-+ * a new ta job that can be scheduled before the rasterizer has
-+ * finished.
-+ */
-+
-+#define PSB_WAIT_FOR_TA_COMPLETION
-+
-+#elif defined(PSB_SOFTWARE_WORKAHEAD)
-+/*
-+ * Don't sync, but allow software to work ahead. and queue a number of jobs.
-+ * But block overlapping in the scheduler.
-+ */
-+
-+#define PSB_BLOCK_OVERLAP
-+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
-+
-+#endif
-+
-+/*
-+ * Avoid pixelbe pagefaults on C0.
-+ */
-+#if 0
-+#define PSB_BLOCK_OVERLAP
-+#endif
-+
-+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler,
-+ uint32_t reply_flag);
-+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler,
-+ uint32_t reply_flag);
-+
-+#ifdef FIX_TG_16
-+
-+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
-+static int psb_2d_trylock(struct drm_psb_private *dev_priv);
-+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
-+
-+#endif
-+
-+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
-+ int *lockup, int *idle)
-+{
-+ unsigned long irq_flags;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+
-+ *lockup = 0;
-+ *idle = 1;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
-+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
-+ *lockup = 1;
-+ }
-+ if (!*lockup
-+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
-+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
-+ *lockup = 1;
-+ }
-+ if (!*lockup)
-+ *idle = scheduler->idle;
-+
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+static inline void psb_set_idle(struct psb_scheduler *scheduler)
-+{
-+ scheduler->idle =
-+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
-+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
-+ if (scheduler->idle)
-+ wake_up(&scheduler->idle_queue);
-+}
-+
-+/*
-+ * Call with the scheduler spinlock held.
-+ * Assigns a scene context to either the ta or the rasterizer,
-+ * flushing out other scenes to memory if necessary.
-+ */
-+
-+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
-+ struct psb_scene *scene,
-+ int engine, struct psb_task *task)
-+{
-+ uint32_t flags = 0;
-+ struct psb_hw_scene *hw_scene;
-+ struct drm_device *dev = scene->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ hw_scene = scene->hw_scene;
-+ if (hw_scene && hw_scene->last_scene == scene) {
-+
-+ /*
-+ * Reuse the last hw scene context and delete it from the
-+ * free list.
-+ */
-+
-+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
-+ hw_scene->context_number);
-+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
-+
-+ /*
-+ * No hw context initialization to be done.
-+ */
-+
-+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
-+ }
-+
-+ list_del_init(&hw_scene->head);
-+
-+ } else {
-+ struct list_head *list;
-+ hw_scene = NULL;
-+
-+ /*
-+ * Grab a new hw scene context.
-+ */
-+
-+ list_for_each(list, &scheduler->hw_scenes) {
-+ hw_scene = list_entry(list, struct psb_hw_scene, head);
-+ break;
-+ }
-+ BUG_ON(!hw_scene);
-+ PSB_DEBUG_RENDER("New hw scene %d.\n",
-+ hw_scene->context_number);
-+
-+ list_del_init(list);
-+ }
-+ scene->hw_scene = hw_scene;
-+ hw_scene->last_scene = scene;
-+
-+ flags |= PSB_SCENE_FLAG_SETUP;
-+
-+ /*
-+ * Switch context and setup the engine.
-+ */
-+
-+ return psb_xhw_scene_bind_fire(dev_priv,
-+ &task->buf,
-+ task->flags,
-+ hw_scene->context_number,
-+ scene->hw_cookie,
-+ task->oom_cmds,
-+ task->oom_cmd_size,
-+ scene->hw_data->offset,
-+ engine, flags | scene->flags);
-+}
-+
-+static inline void psb_report_fence(struct psb_scheduler *scheduler,
-+ uint32_t class,
-+ uint32_t sequence,
-+ uint32_t type, int call_handler)
-+{
-+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
-+
-+ seq->sequence = sequence;
-+ seq->reported = 0;
-+ if (call_handler)
-+ psb_fence_handler(scheduler->dev, class);
-+}
-+
-+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler);
-+
-+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task = NULL;
-+ struct list_head *list, *next;
-+ int pushed_raster_task = 0;
-+
-+ PSB_DEBUG_RENDER("schedule ta\n");
-+
-+ if (scheduler->idle_count != 0)
-+ return;
-+
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
-+ return;
-+
-+ if (scheduler->ta_state)
-+ return;
-+
-+ /*
-+ * Skip the ta stage for rasterization-only
-+ * tasks. They arrive here to make sure we're rasterizing
-+ * tasks in the correct order.
-+ */
-+
-+ list_for_each_safe(list, next, &scheduler->ta_queue) {
-+ task = list_entry(list, struct psb_task, head);
-+ if (task->task_type != psb_raster_task)
-+ break;
-+
-+ list_del_init(list);
-+ list_add_tail(list, &scheduler->raster_queue);
-+ psb_report_fence(scheduler, task->engine, task->sequence,
-+ _PSB_FENCE_TA_DONE_SHIFT, 1);
-+ task = NULL;
-+ pushed_raster_task = 1;
-+ }
-+
-+ if (pushed_raster_task)
-+ psb_schedule_raster(dev_priv, scheduler);
-+
-+ if (!task)
-+ return;
-+
-+ /*
-+ * Still waiting for a vistest?
-+ */
-+
-+ if (scheduler->feedback_task == task)
-+ return;
-+
-+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
-+
-+ /*
-+ * Block ta from trying to use both hardware contexts
-+ * without the rasterizer starting to render from one of them.
-+ */
-+
-+ if (!list_empty(&scheduler->raster_queue)) {
-+ return;
-+ }
-+#endif
-+
-+#ifdef PSB_BLOCK_OVERLAP
-+ /*
-+ * Make sure rasterizer isn't doing anything.
-+ */
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
-+ return;
-+#endif
-+ if (list_empty(&scheduler->hw_scenes))
-+ return;
-+
-+#ifdef FIX_TG_16
-+ if (psb_check_2d_idle(dev_priv))
-+ return;
-+#endif
-+
-+ list_del_init(&task->head);
-+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
-+ scheduler->ta_state = 1;
-+
-+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
-+ scheduler->idle = 0;
-+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
-+
-+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
-+ 0x00000000 : PSB_RF_FIRE_TA;
-+
-+ (void)psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
-+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA, task);
-+ psb_schedule_watchdog(dev_priv);
-+}
-+
-+static int psb_fire_raster(struct psb_scheduler *scheduler,
-+ struct psb_task *task)
-+{
-+ struct drm_device *dev = scheduler->dev;
-+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
-+ dev->dev_private;
-+
-+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
-+
-+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
-+}
-+
-+/*
-+ * Take the first rasterization task from the hp raster queue or from the
-+ * raster queue and fire the rasterizer.
-+ */
-+
-+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task;
-+ struct list_head *list;
-+
-+ if (scheduler->idle_count != 0)
-+ return;
-+
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
-+ PSB_DEBUG_RENDER("Raster busy.\n");
-+ return;
-+ }
-+#ifdef PSB_BLOCK_OVERLAP
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
-+ PSB_DEBUG_RENDER("TA busy.\n");
-+ return;
-+ }
-+#endif
-+
-+ if (!list_empty(&scheduler->hp_raster_queue))
-+ list = scheduler->hp_raster_queue.next;
-+ else if (!list_empty(&scheduler->raster_queue))
-+ list = scheduler->raster_queue.next;
-+ else {
-+ PSB_DEBUG_RENDER("Nothing in list\n");
-+ return;
-+ }
-+
-+ task = list_entry(list, struct psb_task, head);
-+
-+ /*
-+ * Sometimes changing ZLS format requires an ISP reset.
-+ * Doesn't seem to consume too much time.
-+ */
-+
-+ if (task->scene)
-+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
-+
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
-+
-+ list_del_init(list);
-+ scheduler->idle = 0;
-+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
-+ scheduler->total_raster_jiffies = 0;
-+
-+ if (task->scene)
-+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
-+
-+ (void)psb_reg_submit(dev_priv, task->raster_cmds,
-+ task->raster_cmd_size);
-+
-+ if (task->scene) {
-+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
-+ 0x00000000 : PSB_RF_FIRE_RASTER;
-+ psb_set_scene_fire(scheduler,
-+ task->scene, PSB_SCENE_ENGINE_RASTER, task);
-+ } else {
-+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
-+ psb_fire_raster(scheduler, task);
-+ }
-+ psb_schedule_watchdog(dev_priv);
-+}
-+
-+int psb_extend_raster_timeout(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ int ret;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ scheduler->total_raster_jiffies +=
-+ jiffies - scheduler->raster_end_jiffies + PSB_RASTER_TIMEOUT;
-+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
-+ ret = (scheduler->total_raster_jiffies > PSB_ALLOWED_RASTER_RUNTIME) ?
-+ -EBUSY : 0;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ return ret;
-+}
-+
-+/*
-+ * TA done handler.
-+ */
-+
-+static void psb_ta_done(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ struct psb_scene *scene = task->scene;
-+
-+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
-+
-+ switch (task->ta_complete_action) {
-+ case PSB_RASTER_BLOCK:
-+ scheduler->ta_state = 1;
-+ scene->flags |=
-+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
-+ list_add_tail(&task->head, &scheduler->raster_queue);
-+ break;
-+ case PSB_RASTER:
-+ scene->flags |=
-+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
-+ list_add_tail(&task->head, &scheduler->raster_queue);
-+ break;
-+ case PSB_RETURN:
-+ scheduler->ta_state = 0;
-+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
-+ list_add_tail(&scene->hw_scene->head, &scheduler->hw_scenes);
-+
-+ break;
-+ }
-+
-+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
-+
-+#ifdef FIX_TG_16
-+ psb_2d_atomic_unlock(dev_priv);
-+#endif
-+
-+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
-+ psb_report_fence(scheduler, task->engine, task->sequence,
-+ _PSB_FENCE_TA_DONE_SHIFT, 1);
-+
-+ psb_schedule_raster(dev_priv, scheduler);
-+ psb_schedule_ta(dev_priv, scheduler);
-+ psb_set_idle(scheduler);
-+
-+ if (task->ta_complete_action != PSB_RETURN)
-+ return;
-+
-+ list_add_tail(&task->head, &scheduler->task_done_queue);
-+ schedule_delayed_work(&scheduler->wq, 1);
-+}
-+
-+/*
-+ * Rasterizer done handler.
-+ */
-+
-+static void psb_raster_done(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task =
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
-+ struct psb_scene *scene = task->scene;
-+ uint32_t complete_action = task->raster_complete_action;
-+
-+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
-+
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
-+
-+ if (complete_action != PSB_RASTER)
-+ psb_schedule_raster(dev_priv, scheduler);
-+
-+ if (scene) {
-+ if (task->feedback.page) {
-+ if (unlikely(scheduler->feedback_task)) {
-+ /*
-+ * This should never happen, since the previous
-+ * feedback query will return before the next
-+ * raster task is fired.
-+ */
-+ DRM_ERROR("Feedback task busy.\n");
-+ }
-+ scheduler->feedback_task = task;
-+ psb_xhw_vistest(dev_priv, &task->buf);
-+ }
-+ switch (complete_action) {
-+ case PSB_RETURN:
-+ scene->flags &=
-+ ~(PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
-+ list_add_tail(&scene->hw_scene->head,
-+ &scheduler->hw_scenes);
-+ psb_report_fence(scheduler, task->engine,
-+ task->sequence,
-+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
-+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM) {
-+ scheduler->ta_state = 0;
-+ }
-+ break;
-+ case PSB_RASTER:
-+ list_add(&task->head, &scheduler->raster_queue);
-+ task->raster_complete_action = PSB_RETURN;
-+ psb_schedule_raster(dev_priv, scheduler);
-+ break;
-+ case PSB_TA:
-+ list_add(&task->head, &scheduler->ta_queue);
-+ scheduler->ta_state = 0;
-+ task->raster_complete_action = PSB_RETURN;
-+ task->ta_complete_action = PSB_RASTER;
-+ break;
-+
-+ }
-+ }
-+ psb_schedule_ta(dev_priv, scheduler);
-+ psb_set_idle(scheduler);
-+
-+ if (complete_action == PSB_RETURN) {
-+ if (task->scene == NULL) {
-+ psb_report_fence(scheduler, task->engine,
-+ task->sequence,
-+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
-+ }
-+ if (!task->feedback.page) {
-+ list_add_tail(&task->head, &scheduler->task_done_queue);
-+ schedule_delayed_work(&scheduler->wq, 1);
-+ }
-+ }
-+}
-+
-+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ scheduler->idle_count++;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (--scheduler->idle_count == 0) {
-+ psb_schedule_ta(dev_priv, scheduler);
-+ psb_schedule_raster(dev_priv, scheduler);
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ int ret;
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ ret = scheduler->idle_count != 0 && scheduler->idle;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ return ret;
-+}
-+
-+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ int ret;
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ ret = (scheduler->idle &&
-+ list_empty(&scheduler->raster_queue) &&
-+ list_empty(&scheduler->ta_queue) &&
-+ list_empty(&scheduler->hp_raster_queue));
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ return ret;
-+}
-+
-+static void psb_ta_oom(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ if (!task)
-+ return;
-+
-+ if (task->aborting)
-+ return;
-+ task->aborting = 1;
-+
-+ DRM_INFO("Info: TA out of parameter memory.\n");
-+
-+ (void)psb_xhw_ta_oom(dev_priv, &task->buf, task->scene->hw_cookie);
-+}
-+
-+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ uint32_t flags;
-+ if (!task)
-+ return;
-+
-+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
-+ task->scene->hw_cookie,
-+ &task->ta_complete_action,
-+ &task->raster_complete_action, &flags);
-+ task->flags |= flags;
-+ task->aborting = 0;
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
-+}
-+
-+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ DRM_ERROR("TA hw scene freed.\n");
-+}
-+
-+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task = scheduler->feedback_task;
-+ uint8_t *feedback_map;
-+ uint32_t add;
-+ uint32_t cur;
-+ struct drm_psb_vistest *vistest;
-+ int i;
-+
-+ scheduler->feedback_task = NULL;
-+ if (!task) {
-+ DRM_ERROR("No Poulsbo feedback task.\n");
-+ return;
-+ }
-+ if (!task->feedback.page) {
-+ DRM_ERROR("No Poulsbo feedback page.\n");
-+ goto out;
-+ }
-+
-+ if (in_irq())
-+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
-+ else
-+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
-+
-+ /*
-+ * Loop over all requested vistest components here.
-+ * Only one (vistest) currently.
-+ */
-+
-+ vistest = (struct drm_psb_vistest *)
-+ (feedback_map + task->feedback.offset);
-+
-+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
-+ add = task->buf.arg.arg.feedback[i];
-+ cur = vistest->vt[i];
-+
-+ /*
-+ * Vistest saturates.
-+ */
-+
-+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
-+ }
-+ if (in_irq())
-+ kunmap_atomic(feedback_map, KM_IRQ0);
-+ else
-+ kunmap_atomic(feedback_map, KM_USER0);
-+ out:
-+ psb_report_fence(scheduler, task->engine, task->sequence,
-+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
-+
-+ if (list_empty(&task->head)) {
-+ list_add_tail(&task->head, &scheduler->task_done_queue);
-+ schedule_delayed_work(&scheduler->wq, 1);
-+ } else
-+ psb_schedule_ta(dev_priv, scheduler);
-+}
-+
-+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+
-+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
-+
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
-+}
-+
-+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task =
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
-+ uint32_t reply_flags;
-+
-+ if (!task) {
-+ DRM_ERROR("Null task.\n");
-+ return;
-+ }
-+
-+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
-+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
-+
-+ reply_flags = PSB_RF_FIRE_RASTER;
-+ if (task->raster_complete_action == PSB_RASTER)
-+ reply_flags |= PSB_RF_DEALLOC;
-+
-+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
-+}
-+
-+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ uint32_t type;
-+ int ret;
-+ unsigned long irq_flags;
-+
-+ /*
-+ * Xhw cannot write directly to the comm page, so
-+ * do it here. Firmware would have written directly.
-+ */
-+
-+ ret = psb_xhw_handler(dev_priv);
-+ if (unlikely(ret))
-+ return ret;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
-+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
-+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
-+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
-+ DRM_ERROR("Lost Poulsbo hardware event.\n");
-+ }
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+
-+ if (type == 0)
-+ return 0;
-+
-+ switch (type) {
-+ case PSB_UIRQ_VISTEST:
-+ psb_vistest_reply(dev_priv, scheduler);
-+ break;
-+ case PSB_UIRQ_OOM_REPLY:
-+ psb_ta_oom_reply(dev_priv, scheduler);
-+ break;
-+ case PSB_UIRQ_FIRE_TA_REPLY:
-+ psb_ta_fire_reply(dev_priv, scheduler);
-+ break;
-+ case PSB_UIRQ_FIRE_RASTER_REPLY:
-+ psb_raster_fire_reply(dev_priv, scheduler);
-+ break;
-+ default:
-+ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
-+ }
-+ return 0;
-+}
-+
-+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ int ret;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ ret = psb_user_interrupt(dev_priv, scheduler);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ return ret;
-+}
-+
-+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler,
-+ uint32_t reply_flag)
-+{
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ uint32_t flags;
-+ uint32_t mask;
-+
-+ task->reply_flags |= reply_flag;
-+ flags = task->reply_flags;
-+ mask = PSB_RF_FIRE_TA;
-+
-+ if (!(flags & mask))
-+ return;
-+
-+ mask = PSB_RF_TA_DONE;
-+ if ((flags & mask) == mask) {
-+ task->reply_flags &= ~mask;
-+ psb_ta_done(dev_priv, scheduler);
-+ }
-+
-+ mask = PSB_RF_OOM;
-+ if ((flags & mask) == mask) {
-+ task->reply_flags &= ~mask;
-+ psb_ta_oom(dev_priv, scheduler);
-+ }
-+
-+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
-+ if ((flags & mask) == mask) {
-+ task->reply_flags &= ~mask;
-+ psb_ta_done(dev_priv, scheduler);
-+ }
-+}
-+
-+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler,
-+ uint32_t reply_flag)
-+{
-+ struct psb_task *task =
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
-+ uint32_t flags;
-+ uint32_t mask;
-+
-+ task->reply_flags |= reply_flag;
-+ flags = task->reply_flags;
-+ mask = PSB_RF_FIRE_RASTER;
-+
-+ if (!(flags & mask))
-+ return;
-+
-+ /*
-+ * For rasterizer-only tasks, don't report fence done here,
-+ * as this is time consuming and the rasterizer wants a new
-+ * task immediately. For other tasks, the hardware is probably
-+ * still busy deallocating TA memory, so we can report
-+ * fence done in parallel.
-+ */
-+
-+ if (task->raster_complete_action == PSB_RETURN &&
-+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
-+ psb_report_fence(scheduler, task->engine, task->sequence,
-+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
-+ }
-+
-+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
-+ if ((flags & mask) == mask) {
-+ task->reply_flags &= ~mask;
-+ psb_raster_done(dev_priv, scheduler);
-+ }
-+}
-+
-+void psb_scheduler_handler(struct drm_psb_private *dev_priv, uint32_t status)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+
-+ spin_lock(&scheduler->lock);
-+
-+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
-+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_RASTER_DONE);
-+ }
-+ if (status & _PSB_CE_DPM_3D_MEM_FREE) {
-+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
-+ }
-+ if (status & _PSB_CE_TA_FINISHED) {
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
-+ }
-+ if (status & _PSB_CE_TA_TERMINATE) {
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
-+ }
-+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
-+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
-+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
-+ }
-+ if (status & _PSB_CE_DPM_TA_MEM_FREE) {
-+ psb_ta_hw_scene_freed(dev_priv, scheduler);
-+ }
-+ if (status & _PSB_CE_SW_EVENT) {
-+ psb_user_interrupt(dev_priv, scheduler);
-+ }
-+ spin_unlock(&scheduler->lock);
-+}
-+
-+static void psb_free_task_wq(struct work_struct *work)
-+{
-+ struct psb_scheduler *scheduler =
-+ container_of(work, struct psb_scheduler, wq.work);
-+
-+ struct drm_device *dev = scheduler->dev;
-+ struct list_head *list, *next;
-+ unsigned long irq_flags;
-+ struct psb_task *task;
-+
-+ if (!mutex_trylock(&scheduler->task_wq_mutex))
-+ return;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
-+ task = list_entry(list, struct psb_task, head);
-+ list_del_init(list);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
-+ "Feedback bo 0x%08lx, done %d\n",
-+ task->sequence, (unsigned long)task->scene,
-+ (unsigned long)task->feedback.bo,
-+ atomic_read(&task->buf.done));
-+
-+ if (task->scene) {
-+ mutex_lock(&dev->struct_mutex);
-+ PSB_DEBUG_RENDER("Unref scene %d\n", task->sequence);
-+ psb_scene_unref_devlocked(&task->scene);
-+ if (task->feedback.bo) {
-+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
-+ task->sequence);
-+ drm_bo_usage_deref_locked(&task->feedback.bo);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+
-+ if (atomic_read(&task->buf.done)) {
-+ PSB_DEBUG_RENDER("Deleting task %d\n", task->sequence);
-+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
-+ task = NULL;
-+ }
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (task != NULL)
-+ list_add(list, &scheduler->task_done_queue);
-+ }
-+ if (!list_empty(&scheduler->task_done_queue)) {
-+ PSB_DEBUG_RENDER("Rescheduling wq\n");
-+ schedule_delayed_work(&scheduler->wq, 1);
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ mutex_unlock(&scheduler->task_wq_mutex);
-+}
-+
-+/*
-+ * Check if any of the tasks in the queues is using a scene.
-+ * In that case we know the TA memory buffer objects are
-+ * fenced and will not be evicted until that fence is signaled.
-+ */
-+
-+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ struct psb_task *task;
-+ struct psb_task *next_task;
-+
-+ dev_priv->force_ta_mem_load = 1;
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue, head) {
-+ if (task->scene) {
-+ dev_priv->force_ta_mem_load = 0;
-+ break;
-+ }
-+ }
-+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
-+ head) {
-+ if (task->scene) {
-+ dev_priv->force_ta_mem_load = 0;
-+ break;
-+ }
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+void psb_scheduler_reset(struct drm_psb_private *dev_priv, int error_condition)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long wait_jiffies;
-+ unsigned long cur_jiffies;
-+ struct psb_task *task;
-+ struct psb_task *next_task;
-+ unsigned long irq_flags;
-+
-+ psb_scheduler_pause(dev_priv);
-+ if (!psb_scheduler_idle(dev_priv)) {
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+
-+ cur_jiffies = jiffies;
-+ wait_jiffies = cur_jiffies;
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
-+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
-+ wait_jiffies = scheduler->ta_end_jiffies;
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
-+ time_after_eq(scheduler->raster_end_jiffies, wait_jiffies))
-+ wait_jiffies = scheduler->raster_end_jiffies;
-+
-+ wait_jiffies -= cur_jiffies;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ (void)wait_event_timeout(scheduler->idle_queue,
-+ psb_scheduler_idle(dev_priv),
-+ wait_jiffies);
-+ }
-+
-+ if (!psb_scheduler_idle(dev_priv)) {
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
-+ if (task) {
-+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
-+ if (task->engine == PSB_ENGINE_HPRAST) {
-+ psb_fence_error(scheduler->dev,
-+ PSB_ENGINE_HPRAST,
-+ task->sequence,
-+ _PSB_FENCE_TYPE_RASTER_DONE,
-+ error_condition);
-+
-+ list_del(&task->head);
-+ psb_xhw_clean_buf(dev_priv, &task->buf);
-+ list_add_tail(&task->head,
-+ &scheduler->task_done_queue);
-+ } else {
-+ list_add(&task->head, &scheduler->raster_queue);
-+ }
-+ }
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
-+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ if (task) {
-+ DRM_ERROR("Detected Poulsbo ta lockup.\n");
-+ list_add_tail(&task->head, &scheduler->raster_queue);
-+#ifdef FIX_TG_16
-+ psb_2d_atomic_unlock(dev_priv);
-+#endif
-+ }
-+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
-+ scheduler->ta_state = 0;
-+
-+#ifdef FIX_TG_16
-+ atomic_set(&dev_priv->ta_wait_2d, 0);
-+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
-+ wake_up(&dev_priv->queue_2d);
-+#endif
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ }
-+
-+ /*
-+ * Empty raster queue.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
-+ head) {
-+ struct psb_scene *scene = task->scene;
-+
-+ psb_fence_error(scheduler->dev,
-+ task->engine,
-+ task->sequence,
-+ _PSB_FENCE_TYPE_TA_DONE |
-+ _PSB_FENCE_TYPE_RASTER_DONE |
-+ _PSB_FENCE_TYPE_SCENE_DONE |
-+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
-+ if (scene) {
-+ scene->flags = 0;
-+ if (scene->hw_scene) {
-+ list_add_tail(&scene->hw_scene->head,
-+ &scheduler->hw_scenes);
-+ scene->hw_scene = NULL;
-+ }
-+ }
-+
-+ psb_xhw_clean_buf(dev_priv, &task->buf);
-+ list_del(&task->head);
-+ list_add_tail(&task->head, &scheduler->task_done_queue);
-+ }
-+
-+ schedule_delayed_work(&scheduler->wq, 1);
-+ scheduler->idle = 1;
-+ wake_up(&scheduler->idle_queue);
-+
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ psb_scheduler_restart(dev_priv);
-+
-+}
-+
-+int psb_scheduler_init(struct drm_device *dev, struct psb_scheduler *scheduler)
-+{
-+ struct psb_hw_scene *hw_scene;
-+ int i;
-+
-+ memset(scheduler, 0, sizeof(*scheduler));
-+ scheduler->dev = dev;
-+ mutex_init(&scheduler->task_wq_mutex);
-+ scheduler->lock = SPIN_LOCK_UNLOCKED;
-+ scheduler->idle = 1;
-+
-+ INIT_LIST_HEAD(&scheduler->ta_queue);
-+ INIT_LIST_HEAD(&scheduler->raster_queue);
-+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
-+ INIT_LIST_HEAD(&scheduler->hw_scenes);
-+ INIT_LIST_HEAD(&scheduler->task_done_queue);
-+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
-+ init_waitqueue_head(&scheduler->idle_queue);
-+
-+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
-+ hw_scene = &scheduler->hs[i];
-+ hw_scene->context_number = i;
-+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
-+ }
-+
-+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
-+ scheduler->seq[i].reported = 0;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Scene references maintained by the scheduler are not refcounted.
-+ * Remove all references to a particular scene here.
-+ */
-+
-+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)scene->dev->dev_private;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ struct psb_hw_scene *hw_scene;
-+ unsigned long irq_flags;
-+ unsigned int i;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
-+ hw_scene = &scheduler->hs[i];
-+ if (hw_scene->last_scene == scene) {
-+ BUG_ON(list_empty(&hw_scene->head));
-+ hw_scene->last_scene = NULL;
-+ }
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
-+{
-+ flush_scheduled_work();
-+}
-+
-+static int psb_setup_task_devlocked(struct drm_device *dev,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *raster_cmd_buffer,
-+ struct drm_buffer_object *ta_cmd_buffer,
-+ struct drm_buffer_object *oom_cmd_buffer,
-+ struct psb_scene *scene,
-+ enum psb_task_type task_type,
-+ uint32_t engine,
-+ uint32_t flags, struct psb_task **task_p)
-+{
-+ struct psb_task *task;
-+ int ret;
-+
-+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
-+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
-+ return -EINVAL;
-+ }
-+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
-+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
-+ return -EINVAL;
-+ }
-+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
-+ DRM_ERROR("Too many raster cmds %d.\n", arg->oom_size);
-+ return -EINVAL;
-+ }
-+
-+ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
-+ if (!task)
-+ return -ENOMEM;
-+
-+ atomic_set(&task->buf.done, 1);
-+ task->engine = engine;
-+ INIT_LIST_HEAD(&task->head);
-+ INIT_LIST_HEAD(&task->buf.head);
-+ if (ta_cmd_buffer && arg->ta_size != 0) {
-+ task->ta_cmd_size = arg->ta_size;
-+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
-+ arg->ta_offset,
-+ arg->ta_size,
-+ PSB_ENGINE_TA, task->ta_cmds);
-+ if (ret)
-+ goto out_err;
-+ }
-+ if (raster_cmd_buffer) {
-+ task->raster_cmd_size = arg->cmdbuf_size;
-+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
-+ arg->cmdbuf_offset,
-+ arg->cmdbuf_size,
-+ PSB_ENGINE_TA, task->raster_cmds);
-+ if (ret)
-+ goto out_err;
-+ }
-+ if (oom_cmd_buffer && arg->oom_size != 0) {
-+ task->oom_cmd_size = arg->oom_size;
-+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
-+ arg->oom_offset,
-+ arg->oom_size,
-+ PSB_ENGINE_TA, task->oom_cmds);
-+ if (ret)
-+ goto out_err;
-+ }
-+ task->task_type = task_type;
-+ task->flags = flags;
-+ if (scene)
-+ task->scene = psb_scene_ref(scene);
-+
-+ *task_p = task;
-+ return 0;
-+ out_err:
-+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
-+ *task_p = NULL;
-+ return ret;
-+}
-+
-+int psb_cmdbuf_ta(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_buffer_object *ta_buffer,
-+ struct drm_buffer_object *oom_buffer,
-+ struct psb_scene *scene,
-+ struct psb_feedback_info *feedback,
-+ struct drm_fence_arg *fence_arg)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct drm_fence_object *fence = NULL;
-+ struct psb_task *task = NULL;
-+ int ret;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
-+
-+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
-+ if (ret)
-+ return -EAGAIN;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, ta_buffer,
-+ oom_buffer, scene,
-+ psb_ta_task, PSB_ENGINE_TA,
-+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (ret)
-+ goto out_err;
-+
-+ task->feedback = *feedback;
-+
-+ /*
-+ * Hand the task over to the scheduler.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
-+
-+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
-+
-+ task->ta_complete_action = PSB_RASTER;
-+ task->raster_complete_action = PSB_RETURN;
-+
-+ list_add_tail(&task->head, &scheduler->ta_queue);
-+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
-+
-+ psb_schedule_ta(dev_priv, scheduler);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
-+ drm_regs_fence(&dev_priv->use_manager, fence);
-+ if (fence)
-+ fence_arg->signaled |= 0x1;
-+
-+ out_err:
-+ if (ret && ret != -EAGAIN)
-+ DRM_ERROR("TA task queue job failed.\n");
-+
-+ if (fence) {
-+#ifdef PSB_WAIT_FOR_TA_COMPLETION
-+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
-+ _PSB_FENCE_TYPE_TA_DONE);
-+#ifdef PSB_BE_PARANOID
-+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
-+ _PSB_FENCE_TYPE_SCENE_DONE);
-+#endif
-+#endif
-+ drm_fence_usage_deref_unlocked(&fence);
-+ }
-+ mutex_unlock(&dev_priv->reset_mutex);
-+
-+ return ret;
-+}
-+
-+int psb_cmdbuf_raster(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_fence_arg *fence_arg)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct drm_fence_object *fence = NULL;
-+ struct psb_task *task = NULL;
-+ int ret;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
-+
-+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
-+ if (ret)
-+ return -EAGAIN;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, NULL, NULL,
-+ NULL, psb_raster_task,
-+ PSB_ENGINE_TA, 0, &task);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (ret)
-+ goto out_err;
-+
-+ /*
-+ * Hand the task over to the scheduler.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
-+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
-+ task->ta_complete_action = PSB_RASTER;
-+ task->raster_complete_action = PSB_RETURN;
-+
-+ list_add_tail(&task->head, &scheduler->ta_queue);
-+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
-+ psb_schedule_ta(dev_priv, scheduler);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
-+ drm_regs_fence(&dev_priv->use_manager, fence);
-+ if (fence)
-+ fence_arg->signaled |= 0x1;
-+ out_err:
-+ if (ret && ret != -EAGAIN)
-+ DRM_ERROR("Raster task queue job failed.\n");
-+
-+ if (fence) {
-+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
-+ drm_fence_object_wait(fence, 1, 1, fence->type);
-+#endif
-+ drm_fence_usage_deref_unlocked(&fence);
-+ }
-+
-+ mutex_unlock(&dev_priv->reset_mutex);
-+
-+ return ret;
-+}
-+
-+#ifdef FIX_TG_16
-+
-+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
-+{
-+ if (psb_2d_trylock(dev_priv)) {
-+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
-+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
-+ _PSB_C2B_STATUS_BUSY))) {
-+ return 0;
-+ }
-+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
-+ psb_2D_irq_on(dev_priv);
-+
-+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
-+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
-+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
-+
-+ psb_2d_atomic_unlock(dev_priv);
-+ }
-+
-+ atomic_set(&dev_priv->ta_wait_2d, 1);
-+ return -EBUSY;
-+}
-+
-+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+
-+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
-+ psb_schedule_ta(dev_priv, scheduler);
-+ if (atomic_read(&dev_priv->waiters_2d) != 0)
-+ wake_up(&dev_priv->queue_2d);
-+ }
-+}
-+
-+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
-+ atomic_set(&dev_priv->ta_wait_2d, 0);
-+ psb_2D_irq_off(dev_priv);
-+ psb_schedule_ta(dev_priv, scheduler);
-+ if (atomic_read(&dev_priv->waiters_2d) != 0)
-+ wake_up(&dev_priv->queue_2d);
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+/*
-+ * 2D locking functions. Can't use a mutex since the trylock() and
-+ * unlock() methods need to be accessible from interrupt context.
-+ */
-+
-+static int psb_2d_trylock(struct drm_psb_private *dev_priv)
-+{
-+ return (atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0);
-+}
-+
-+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
-+{
-+ atomic_set(&dev_priv->lock_2d, 0);
-+ if (atomic_read(&dev_priv->waiters_2d) != 0)
-+ wake_up(&dev_priv->queue_2d);
-+}
-+
-+void psb_2d_unlock(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ psb_2d_atomic_unlock(dev_priv);
-+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
-+ psb_atomic_resume_ta_2d_idle(dev_priv);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+void psb_2d_lock(struct drm_psb_private *dev_priv)
-+{
-+ atomic_inc(&dev_priv->waiters_2d);
-+ wait_event(dev_priv->queue_2d, atomic_read(&dev_priv->ta_wait_2d) == 0);
-+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
-+ atomic_dec(&dev_priv->waiters_2d);
-+}
-+
-+#endif
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_schedule.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,170 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+
-+#ifndef _PSB_SCHEDULE_H_
-+#define _PSB_SCHEDULE_H_
-+
-+#include "drmP.h"
-+
-+enum psb_task_type {
-+ psb_ta_midscene_task,
-+ psb_ta_task,
-+ psb_raster_task,
-+ psb_freescene_task
-+};
-+
-+#define PSB_MAX_TA_CMDS 60
-+#define PSB_MAX_RASTER_CMDS 60
-+#define PSB_MAX_OOM_CMDS 6
-+
-+struct psb_xhw_buf {
-+ struct list_head head;
-+ int copy_back;
-+ atomic_t done;
-+ struct drm_psb_xhw_arg arg;
-+
-+};
-+
-+struct psb_feedback_info {
-+ struct drm_buffer_object *bo;
-+ struct page *page;
-+ uint32_t offset;
-+};
-+
-+struct psb_task {
-+ struct list_head head;
-+ struct psb_scene *scene;
-+ struct psb_feedback_info feedback;
-+ enum psb_task_type task_type;
-+ uint32_t engine;
-+ uint32_t sequence;
-+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
-+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
-+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
-+ uint32_t ta_cmd_size;
-+ uint32_t raster_cmd_size;
-+ uint32_t oom_cmd_size;
-+ uint32_t feedback_offset;
-+ uint32_t ta_complete_action;
-+ uint32_t raster_complete_action;
-+ uint32_t hw_cookie;
-+ uint32_t flags;
-+ uint32_t reply_flags;
-+ uint32_t aborting;
-+ struct psb_xhw_buf buf;
-+};
-+
-+struct psb_hw_scene {
-+ struct list_head head;
-+ uint32_t context_number;
-+
-+ /*
-+ * This pointer does not refcount the last_scene_buffer,
-+ * so we must make sure it is set to NULL before destroying
-+ * the corresponding task.
-+ */
-+
-+ struct psb_scene *last_scene;
-+};
-+
-+struct psb_scene;
-+struct drm_psb_private;
-+
-+struct psb_scheduler_seq {
-+ uint32_t sequence;
-+ int reported;
-+};
-+
-+struct psb_scheduler {
-+ struct drm_device *dev;
-+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
-+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
-+ struct mutex task_wq_mutex;
-+ spinlock_t lock;
-+ struct list_head hw_scenes;
-+ struct list_head ta_queue;
-+ struct list_head raster_queue;
-+ struct list_head hp_raster_queue;
-+ struct list_head task_done_queue;
-+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
-+ struct psb_task *feedback_task;
-+ int ta_state;
-+ struct psb_hw_scene *pending_hw_scene;
-+ uint32_t pending_hw_scene_seq;
-+ struct delayed_work wq;
-+ struct psb_scene_pool *pool;
-+ uint32_t idle_count;
-+ int idle;
-+ wait_queue_head_t idle_queue;
-+ unsigned long ta_end_jiffies;
-+ unsigned long raster_end_jiffies;
-+ unsigned long total_raster_jiffies;
-+};
-+
-+#define PSB_RF_FIRE_TA (1 << 0)
-+#define PSB_RF_OOM (1 << 1)
-+#define PSB_RF_OOM_REPLY (1 << 2)
-+#define PSB_RF_TERMINATE (1 << 3)
-+#define PSB_RF_TA_DONE (1 << 4)
-+#define PSB_RF_FIRE_RASTER (1 << 5)
-+#define PSB_RF_RASTER_DONE (1 << 6)
-+#define PSB_RF_DEALLOC (1 << 7)
-+
-+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
-+ int shareable, uint32_t w,
-+ uint32_t h);
-+extern uint32_t psb_scene_handle(struct psb_scene *scene);
-+extern int psb_scheduler_init(struct drm_device *dev,
-+ struct psb_scheduler *scheduler);
-+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
-+extern int psb_cmdbuf_ta(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_buffer_object *ta_buffer,
-+ struct drm_buffer_object *oom_buffer,
-+ struct psb_scene *scene,
-+ struct psb_feedback_info *feedback,
-+ struct drm_fence_arg *fence_arg);
-+extern int psb_cmdbuf_raster(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_fence_arg *fence_arg);
-+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
-+ uint32_t status);
-+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
-+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
-+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
-+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
-+
-+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
-+ int *lockup, int *idle);
-+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
-+ int error_condition);
-+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
-+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
-+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
-+extern int psb_extend_raster_timeout(struct drm_psb_private *dev_priv);
-+
-+#endif
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_setup.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_setup.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,17 @@
-+#include "drmP.h"
-+#include "drm.h"
-+#include "drm_crtc.h"
-+#include "drm_edid.h"
-+#include "intel_drv.h"
-+#include "psb_drv.h"
-+#include "i915_reg.h"
-+#include "intel_crt.c"
-+
-+/* Fixed name */
-+#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
-+#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
-+
-+#include "intel_lvds.c"
-+#include "intel_sdvo.c"
-+#include "intel_display.c"
-+#include "intel_modes.c"
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_sgx.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_sgx.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,1422 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_drm.h"
-+#include "psb_reg.h"
-+#include "psb_scene.h"
-+
-+#include "psb_msvdx.h"
-+
-+int psb_submit_video_cmdbuf(struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset, unsigned long cmd_size,
-+ struct drm_fence_object *fence);
-+
-+struct psb_dstbuf_cache {
-+ unsigned int dst;
-+ uint32_t *use_page;
-+ unsigned int use_index;
-+ uint32_t use_background;
-+ struct drm_buffer_object *dst_buf;
-+ unsigned long dst_offset;
-+ uint32_t *dst_page;
-+ unsigned int dst_page_offset;
-+ struct drm_bo_kmap_obj dst_kmap;
-+ int dst_is_iomem;
-+};
-+
-+struct psb_buflist_item {
-+ struct drm_buffer_object *bo;
-+ void __user *data;
-+ int ret;
-+ int presumed_offset_correct;
-+};
-+
-+
-+#define PSB_REG_GRAN_SHIFT 2
-+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
-+#define PSB_MAX_REG 0x1000
-+
-+static const uint32_t disallowed_ranges[][2] = {
-+ {0x0000, 0x0200},
-+ {0x0208, 0x0214},
-+ {0x021C, 0x0224},
-+ {0x0230, 0x0234},
-+ {0x0248, 0x024C},
-+ {0x0254, 0x0358},
-+ {0x0428, 0x0428},
-+ {0x0430, 0x043C},
-+ {0x0498, 0x04B4},
-+ {0x04CC, 0x04D8},
-+ {0x04E0, 0x07FC},
-+ {0x0804, 0x0A58},
-+ {0x0A68, 0x0A80},
-+ {0x0AA0, 0x0B1C},
-+ {0x0B2C, 0x0CAC},
-+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
-+};
-+
-+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
-+ (PSB_REG_GRANULARITY *
-+ (sizeof(uint32_t) << 3))];
-+
-+static inline int psb_disallowed(uint32_t reg)
-+{
-+ reg >>= PSB_REG_GRAN_SHIFT;
-+ return ((psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0);
-+}
-+
-+void psb_init_disallowed(void)
-+{
-+ int i;
-+ uint32_t reg, tmp;
-+ static int initialized = 0;
-+
-+ if (initialized)
-+ return;
-+
-+ initialized = 1;
-+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
-+
-+ for (i = 0; i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
-+ ++i) {
-+ for (reg = disallowed_ranges[i][0];
-+ reg <= disallowed_ranges[i][1]; reg += 4) {
-+ tmp = reg >> 2;
-+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
-+ }
-+ }
-+}
-+
-+static int psb_memcpy_check(uint32_t * dst, const uint32_t * src, uint32_t size)
-+{
-+ size >>= 3;
-+ while (size--) {
-+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
-+ DRM_ERROR("Forbidden SGX register access: "
-+ "0x%04x.\n", *src);
-+ return -EPERM;
-+ }
-+ *dst++ = *src++;
-+ *dst++ = *src++;
-+ }
-+ return 0;
-+}
-+
-+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
-+ unsigned size)
-+{
-+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
-+ int ret = 0;
-+
-+ retry:
-+ if (avail < size) {
-+#if 0
-+ /* We'd ideally
-+ * like to have an IRQ-driven event here.
-+ */
-+
-+ psb_2D_irq_on(dev_priv);
-+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
-+ ((avail = PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
-+ psb_2D_irq_off(dev_priv);
-+ if (ret == 0)
-+ return 0;
-+ if (ret == -EINTR) {
-+ ret = 0;
-+ goto retry;
-+ }
-+#else
-+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
-+ goto retry;
-+#endif
-+ }
-+ return ret;
-+}
-+
-+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t * cmdbuf,
-+ unsigned size)
-+{
-+ int ret = 0;
-+ int i;
-+ unsigned submit_size;
-+
-+ while (size > 0) {
-+ submit_size = (size < 0x60) ? size : 0x60;
-+ size -= submit_size;
-+ ret = psb_2d_wait_available(dev_priv, submit_size);
-+ if (ret)
-+ return ret;
-+
-+ submit_size <<= 2;
-+
-+ for (i = 0; i < submit_size; i += 4) {
-+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
-+ }
-+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
-+ }
-+ return 0;
-+}
-+
-+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
-+{
-+ uint32_t buffer[8];
-+ uint32_t *bufp = buffer;
-+ int ret;
-+
-+ *bufp++ = PSB_2D_FENCE_BH;
-+
-+ *bufp++ = PSB_2D_DST_SURF_BH |
-+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
-+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
-+
-+ *bufp++ = PSB_2D_BLIT_BH |
-+ PSB_2D_ROT_NONE |
-+ PSB_2D_COPYORDER_TL2BR |
-+ PSB_2D_DSTCK_DISABLE |
-+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
-+
-+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
-+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
-+ (0 << PSB_2D_DST_YSTART_SHIFT);
-+ *bufp++ = (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
-+
-+ *bufp++ = PSB_2D_FLUSH_BH;
-+
-+ psb_2d_lock(dev_priv);
-+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
-+ psb_2d_unlock(dev_priv);
-+
-+ if (!ret)
-+ psb_schedule_watchdog(dev_priv);
-+ return ret;
-+}
-+
-+int psb_emit_2d_copy_blit(struct drm_device *dev,
-+ uint32_t src_offset,
-+ uint32_t dst_offset, uint32_t pages, int direction)
-+{
-+ uint32_t cur_pages;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ uint32_t buf[10];
-+ uint32_t *bufp;
-+ uint32_t xstart;
-+ uint32_t ystart;
-+ uint32_t blit_cmd;
-+ uint32_t pg_add;
-+ int ret = 0;
-+
-+ if (!dev_priv)
-+ return 0;
-+
-+ if (direction) {
-+ pg_add = (pages - 1) << PAGE_SHIFT;
-+ src_offset += pg_add;
-+ dst_offset += pg_add;
-+ }
-+
-+ blit_cmd = PSB_2D_BLIT_BH |
-+ PSB_2D_ROT_NONE |
-+ PSB_2D_DSTCK_DISABLE |
-+ PSB_2D_SRCCK_DISABLE |
-+ PSB_2D_USE_PAT |
-+ PSB_2D_ROP3_SRCCOPY |
-+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
-+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
-+
-+ psb_2d_lock(dev_priv);
-+ while (pages > 0) {
-+ cur_pages = pages;
-+ if (cur_pages > 2048)
-+ cur_pages = 2048;
-+ pages -= cur_pages;
-+ ystart = (direction) ? cur_pages - 1 : 0;
-+
-+ bufp = buf;
-+ *bufp++ = PSB_2D_FENCE_BH;
-+
-+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
-+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
-+ *bufp++ = dst_offset;
-+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
-+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
-+ *bufp++ = src_offset;
-+ *bufp++ =
-+ PSB_2D_SRC_OFF_BH | (xstart << PSB_2D_SRCOFF_XSTART_SHIFT) |
-+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
-+ *bufp++ = blit_cmd;
-+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
-+ (ystart << PSB_2D_DST_YSTART_SHIFT);
-+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
-+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
-+
-+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
-+ if (ret)
-+ goto out;
-+ pg_add = (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
-+ src_offset += pg_add;
-+ dst_offset += pg_add;
-+ }
-+ out:
-+ psb_2d_unlock(dev_priv);
-+ return ret;
-+}
-+
-+void psb_init_2d(struct drm_psb_private *dev_priv)
-+{
-+ dev_priv->sequence_lock = SPIN_LOCK_UNLOCKED;
-+ psb_reset(dev_priv, 1);
-+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
-+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
-+ (void)PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
-+}
-+
-+int psb_idle_2d(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned long _end = jiffies + DRM_HZ;
-+ int busy = 0;
-+
-+ /*
-+ * First idle the 2D engine.
-+ */
-+
-+ if (dev_priv->engine_lockup_2d)
-+ return -EBUSY;
-+
-+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
-+ goto out;
-+
-+ do {
-+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
-+ } while (busy && !time_after_eq(jiffies, _end));
-+
-+ if (busy)
-+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
-+ if (busy)
-+ goto out;
-+
-+ do {
-+ busy =
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
-+ != 0);
-+ } while (busy && !time_after_eq(jiffies, _end));
-+ if (busy)
-+ busy =
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
-+ != 0);
-+
-+ out:
-+ if (busy)
-+ dev_priv->engine_lockup_2d = 1;
-+
-+ return (busy) ? -EBUSY : 0;
-+}
-+
-+int psb_idle_3d(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ int ret;
-+
-+ ret = wait_event_timeout(scheduler->idle_queue,
-+ psb_scheduler_finished(dev_priv), DRM_HZ * 10);
-+
-+ return (ret < 1) ? -EBUSY : 0;
-+}
-+
-+static void psb_dereference_buffers_locked(struct psb_buflist_item *buffers,
-+ unsigned num_buffers)
-+{
-+ while (num_buffers--)
-+ drm_bo_usage_deref_locked(&((buffers++)->bo));
-+
-+}
-+
-+static int psb_check_presumed(struct drm_bo_op_arg *arg,
-+ struct drm_buffer_object *bo,
-+ uint32_t __user * data, int *presumed_ok)
-+{
-+ struct drm_bo_op_req *req = &arg->d.req;
-+ uint32_t hint_offset;
-+ uint32_t hint = req->bo_req.hint;
-+
-+ *presumed_ok = 0;
-+
-+ if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
-+ return 0;
-+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
-+ *presumed_ok = 1;
-+ return 0;
-+ }
-+ if (bo->offset == req->bo_req.presumed_offset) {
-+ *presumed_ok = 1;
-+ return 0;
-+ }
-+
-+ /*
-+ * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
-+ * the user-space IOCTL argument list, since the buffer has moved,
-+ * we're about to apply relocations and we might subsequently
-+ * hit an -EAGAIN. In that case the argument list will be reused by
-+ * user-space, but the presumed offset is no longer valid.
-+ *
-+ * Needless to say, this is a bit ugly.
-+ */
-+
-+ hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
-+ hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
-+ return __put_user(hint, data + hint_offset);
-+}
-+
-+static int psb_validate_buffer_list(struct drm_file *file_priv,
-+ unsigned fence_class,
-+ unsigned long data,
-+ struct psb_buflist_item *buffers,
-+ unsigned *num_buffers)
-+{
-+ struct drm_bo_op_arg arg;
-+ struct drm_bo_op_req *req = &arg.d.req;
-+ int ret = 0;
-+ unsigned buf_count = 0;
-+ struct psb_buflist_item *item = buffers;
-+
-+ do {
-+ if (buf_count >= *num_buffers) {
-+ DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ item = buffers + buf_count;
-+ item->bo = NULL;
-+
-+ if (copy_from_user(&arg, (void __user *)data, sizeof(arg))) {
-+ ret = -EFAULT;
-+ DRM_ERROR("Error copying validate list.\n"
-+ "\tbuffer %d, user addr 0x%08lx %d\n",
-+ buf_count, (unsigned long)data, sizeof(arg));
-+ goto out_err;
-+ }
-+
-+ ret = 0;
-+ if (req->op != drm_bo_validate) {
-+ DRM_ERROR
-+ ("Buffer object operation wasn't \"validate\".\n");
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ item->ret = 0;
-+ item->data = (void *)__user data;
-+ ret = drm_bo_handle_validate(file_priv,
-+ req->bo_req.handle,
-+ fence_class,
-+ req->bo_req.flags,
-+ req->bo_req.mask,
-+ req->bo_req.hint,
-+ 0, NULL, &item->bo);
-+ if (ret)
-+ goto out_err;
-+
-+ PSB_DEBUG_GENERAL("Validated buffer at 0x%08lx\n",
-+ buffers[buf_count].bo->offset);
-+
-+ buf_count++;
-+
-+
-+ ret = psb_check_presumed(&arg, item->bo,
-+ (uint32_t __user *)
-+ (unsigned long) data,
-+ &item->presumed_offset_correct);
-+
-+ if (ret)
-+ goto out_err;
-+
-+ data = arg.next;
-+ } while (data);
-+
-+ *num_buffers = buf_count;
-+
-+ return 0;
-+ out_err:
-+
-+ *num_buffers = buf_count;
-+ item->ret = (ret != -EAGAIN) ? ret : 0;
-+ return ret;
-+}
-+
-+int
-+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
-+ unsigned int cmds)
-+{
-+ int i;
-+
-+ /*
-+ * cmds is 32-bit words.
-+ */
-+
-+ cmds >>= 1;
-+ for (i = 0; i < cmds; ++i) {
-+ PSB_WSGX32(regs[1], regs[0]);
-+ regs += 2;
-+ }
-+ wmb();
-+ return 0;
-+}
-+
-+/*
-+ * Security: Block user-space writing to MMU mapping registers.
-+ * This is important for security and brings Poulsbo DRM
-+ * up to par with the other DRM drivers. Using this,
-+ * user-space should not be able to map arbitrary memory
-+ * pages to graphics memory, but all user-space processes
-+ * basically have access to all buffer objects mapped to
-+ * graphics memory.
-+ */
-+
-+int
-+psb_submit_copy_cmdbuf(struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset,
-+ unsigned long cmd_size,
-+ int engine, uint32_t * copy_buffer)
-+{
-+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned long cmd_page_offset = cmd_offset - (cmd_offset & PAGE_MASK);
-+ unsigned long cmd_next;
-+ struct drm_bo_kmap_obj cmd_kmap;
-+ uint32_t *cmd_page;
-+ unsigned cmds;
-+ int is_iomem;
-+ int ret = 0;
-+
-+ if (cmd_size == 0)
-+ return 0;
-+
-+ if (engine == PSB_ENGINE_2D)
-+ psb_2d_lock(dev_priv);
-+
-+ do {
-+ cmd_next = drm_bo_offset_end(cmd_offset, cmd_end);
-+ ret = drm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
-+ 1, &cmd_kmap);
-+
-+ if (ret)
-+ return ret;
-+ cmd_page = drm_bmo_virtual(&cmd_kmap, &is_iomem);
-+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
-+ cmds = (cmd_next - cmd_offset) >> 2;
-+
-+ switch (engine) {
-+ case PSB_ENGINE_2D:
-+ ret =
-+ psb_2d_submit(dev_priv, cmd_page + cmd_page_offset,
-+ cmds);
-+ break;
-+ case PSB_ENGINE_RASTERIZER:
-+ case PSB_ENGINE_TA:
-+ case PSB_ENGINE_HPRAST:
-+ PSB_DEBUG_GENERAL("Reg copy.\n");
-+ ret = psb_memcpy_check(copy_buffer,
-+ cmd_page + cmd_page_offset,
-+ cmds * sizeof(uint32_t));
-+ copy_buffer += cmds;
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ }
-+ drm_bo_kunmap(&cmd_kmap);
-+ if (ret)
-+ break;
-+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
-+
-+ if (engine == PSB_ENGINE_2D)
-+ psb_2d_unlock(dev_priv);
-+
-+ return ret;
-+}
-+
-+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
-+{
-+ if (dst_cache->dst_page) {
-+ drm_bo_kunmap(&dst_cache->dst_kmap);
-+ dst_cache->dst_page = NULL;
-+ }
-+ dst_cache->dst_buf = NULL;
-+ dst_cache->dst = ~0;
-+ dst_cache->use_page = NULL;
-+}
-+
-+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
-+ struct psb_buflist_item *buffers,
-+ unsigned int dst, unsigned long dst_offset)
-+{
-+ int ret;
-+
-+ PSB_DEBUG_RELOC("Destination buffer is %d.\n", dst);
-+
-+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
-+ psb_clear_dstbuf_cache(dst_cache);
-+ dst_cache->dst = dst;
-+ dst_cache->dst_buf = buffers[dst].bo;
-+ }
-+
-+ if (unlikely(dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
-+ DRM_ERROR("Relocation destination out of bounds.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!drm_bo_same_page(dst_cache->dst_offset, dst_offset) ||
-+ NULL == dst_cache->dst_page) {
-+ if (NULL != dst_cache->dst_page) {
-+ drm_bo_kunmap(&dst_cache->dst_kmap);
-+ dst_cache->dst_page = NULL;
-+ }
-+
-+ ret = drm_bo_kmap(dst_cache->dst_buf, dst_offset >> PAGE_SHIFT,
-+ 1, &dst_cache->dst_kmap);
-+ if (ret) {
-+ DRM_ERROR("Could not map destination buffer for "
-+ "relocation.\n");
-+ return ret;
-+ }
-+
-+ dst_cache->dst_page = drm_bmo_virtual(&dst_cache->dst_kmap,
-+ &dst_cache->dst_is_iomem);
-+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
-+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
-+ }
-+ return 0;
-+}
-+
-+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
-+ uint32_t fence_class,
-+ const struct drm_psb_reloc *reloc,
-+ struct psb_buflist_item *buffers,
-+ int num_buffers,
-+ struct psb_dstbuf_cache *dst_cache,
-+ int no_wait, int interruptible)
-+{
-+ int reg;
-+ uint32_t val;
-+ uint32_t background;
-+ unsigned int index;
-+ int ret;
-+ unsigned int shift;
-+ unsigned int align_shift;
-+ uint32_t fence_type;
-+ struct drm_buffer_object *reloc_bo;
-+
-+ PSB_DEBUG_RELOC("Reloc type %d\n"
-+ "\t where 0x%04x\n"
-+ "\t buffer 0x%04x\n"
-+ "\t mask 0x%08x\n"
-+ "\t shift 0x%08x\n"
-+ "\t pre_add 0x%08x\n"
-+ "\t background 0x%08x\n"
-+ "\t dst_buffer 0x%08x\n"
-+ "\t arg0 0x%08x\n"
-+ "\t arg1 0x%08x\n",
-+ reloc->reloc_op,
-+ reloc->where,
-+ reloc->buffer,
-+ reloc->mask,
-+ reloc->shift,
-+ reloc->pre_add,
-+ reloc->background,
-+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
-+
-+ if (unlikely(reloc->buffer >= num_buffers)) {
-+ DRM_ERROR("Illegal relocation buffer %d.\n", reloc->buffer);
-+ return -EINVAL;
-+ }
-+
-+ if (buffers[reloc->buffer].presumed_offset_correct)
-+ return 0;
-+
-+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
-+ DRM_ERROR("Illegal destination buffer for relocation %d.\n",
-+ reloc->dst_buffer);
-+ return -EINVAL;
-+ }
-+
-+ ret = psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
-+ reloc->where << 2);
-+ if (ret)
-+ return ret;
-+
-+ reloc_bo = buffers[reloc->buffer].bo;
-+
-+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
-+ DRM_ERROR("Illegal relocation offset add.\n");
-+ return -EINVAL;
-+ }
-+
-+ switch (reloc->reloc_op) {
-+ case PSB_RELOC_OP_OFFSET:
-+ val = reloc_bo->offset + reloc->pre_add;
-+ break;
-+ case PSB_RELOC_OP_2D_OFFSET:
-+ val = reloc_bo->offset + reloc->pre_add -
-+ dev_priv->mmu_2d_offset;
-+ if (unlikely(val >= PSB_2D_SIZE)) {
-+ DRM_ERROR("2D relocation out of bounds\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PSB_RELOC_OP_PDS_OFFSET:
-+ val = reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
-+ if (unlikely(val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
-+ DRM_ERROR("PDS relocation out of bounds\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PSB_RELOC_OP_USE_OFFSET:
-+ case PSB_RELOC_OP_USE_REG:
-+
-+ /*
-+ * Security:
-+ * Only allow VERTEX or PIXEL data masters, as
-+ * shaders run under other data masters may in theory
-+ * alter MMU mappings.
-+ */
-+
-+ if (unlikely(reloc->arg1 != _PSB_CUC_DM_PIXEL &&
-+ reloc->arg1 != _PSB_CUC_DM_VERTEX)) {
-+ DRM_ERROR("Invalid data master in relocation. %d\n",
-+ reloc->arg1);
-+ return -EPERM;
-+ }
-+
-+ fence_type = reloc_bo->fence_type;
-+ ret = psb_grab_use_base(dev_priv,
-+ reloc_bo->offset +
-+ reloc->pre_add, reloc->arg0,
-+ reloc->arg1, fence_class,
-+ fence_type, no_wait,
-+ interruptible, &reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ val = (reloc->reloc_op == PSB_RELOC_OP_USE_REG) ? reg : val;
-+ break;
-+ default:
-+ DRM_ERROR("Unimplemented relocation.\n");
-+ return -EINVAL;
-+ }
-+
-+ shift = (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
-+ align_shift = (reloc->shift & PSB_RELOC_ALSHIFT_MASK) >>
-+ PSB_RELOC_ALSHIFT_SHIFT;
-+
-+ val = ((val >> align_shift) << shift);
-+ index = reloc->where - dst_cache->dst_page_offset;
-+
-+ background = reloc->background;
-+
-+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET) {
-+ if (dst_cache->use_page == dst_cache->dst_page &&
-+ dst_cache->use_index == index)
-+ background = dst_cache->use_background;
-+ else
-+ background = dst_cache->dst_page[index];
-+ }
-+#if 0
-+ if (dst_cache->dst_page[index] != PSB_RELOC_MAGIC &&
-+ reloc->reloc_op != PSB_RELOC_OP_USE_OFFSET)
-+ DRM_ERROR("Inconsistent relocation 0x%08lx.\n",
-+ (unsigned long)dst_cache->dst_page[index]);
-+#endif
-+
-+ val = (background & ~reloc->mask) | (val & reloc->mask);
-+ dst_cache->dst_page[index] = val;
-+
-+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET ||
-+ reloc->reloc_op == PSB_RELOC_OP_USE_REG) {
-+ dst_cache->use_page = dst_cache->dst_page;
-+ dst_cache->use_index = index;
-+ dst_cache->use_background = val;
-+ }
-+
-+ PSB_DEBUG_RELOC("Reloc buffer %d index 0x%08x, value 0x%08x\n",
-+ reloc->dst_buffer, index, dst_cache->dst_page[index]);
-+
-+ return 0;
-+}
-+
-+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
-+ unsigned int num_pages)
-+{
-+ int ret = 0;
-+
-+ spin_lock(&dev_priv->reloc_lock);
-+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
-+ dev_priv->rel_mapped_pages += num_pages;
-+ ret = 1;
-+ }
-+ spin_unlock(&dev_priv->reloc_lock);
-+ return ret;
-+}
-+
-+static int psb_fixup_relocs(struct drm_file *file_priv,
-+ uint32_t fence_class,
-+ unsigned int num_relocs,
-+ unsigned int reloc_offset,
-+ uint32_t reloc_handle,
-+ struct psb_buflist_item *buffers,
-+ unsigned int num_buffers,
-+ int no_wait, int interruptible)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_buffer_object *reloc_buffer = NULL;
-+ unsigned int reloc_num_pages;
-+ unsigned int reloc_first_page;
-+ unsigned int reloc_last_page;
-+ struct psb_dstbuf_cache dst_cache;
-+ struct drm_psb_reloc *reloc;
-+ struct drm_bo_kmap_obj reloc_kmap;
-+ int reloc_is_iomem;
-+ int count;
-+ int ret = 0;
-+ int registered = 0;
-+ int short_circuit = 1;
-+ int i;
-+
-+ if (num_relocs == 0)
-+ return 0;
-+
-+ for (i=0; i<num_buffers; ++i) {
-+ if (!buffers[i].presumed_offset_correct) {
-+ short_circuit = 0;
-+ break;
-+ }
-+ }
-+
-+ if (short_circuit)
-+ return 0;
-+
-+ memset(&dst_cache, 0, sizeof(dst_cache));
-+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
-+
-+ mutex_lock(&dev->struct_mutex);
-+ reloc_buffer = drm_lookup_buffer_object(file_priv, reloc_handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!reloc_buffer)
-+ goto out;
-+
-+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
-+ reloc_last_page =
-+ (reloc_offset +
-+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
-+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
-+ reloc_offset &= ~PAGE_MASK;
-+
-+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
-+ DRM_ERROR("Relocation buffer is too large\n");
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
-+ (registered =
-+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
-+
-+ if (ret == -EINTR) {
-+ ret = -EAGAIN;
-+ goto out;
-+ }
-+ if (ret) {
-+ DRM_ERROR("Error waiting for space to map "
-+ "relocation buffer.\n");
-+ goto out;
-+ }
-+
-+ ret = drm_bo_kmap(reloc_buffer, reloc_first_page,
-+ reloc_num_pages, &reloc_kmap);
-+
-+ if (ret) {
-+ DRM_ERROR("Could not map relocation buffer.\n"
-+ "\tReloc buffer id 0x%08x.\n"
-+ "\tReloc first page %d.\n"
-+ "\tReloc num pages %d.\n",
-+ reloc_handle, reloc_first_page, reloc_num_pages);
-+ goto out;
-+ }
-+
-+ reloc = (struct drm_psb_reloc *)
-+ ((unsigned long)drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem) +
-+ reloc_offset);
-+
-+ for (count = 0; count < num_relocs; ++count) {
-+ ret = psb_apply_reloc(dev_priv, fence_class,
-+ reloc, buffers,
-+ num_buffers, &dst_cache,
-+ no_wait, interruptible);
-+ if (ret)
-+ goto out1;
-+ reloc++;
-+ }
-+
-+ out1:
-+ drm_bo_kunmap(&reloc_kmap);
-+ out:
-+ if (registered) {
-+ spin_lock(&dev_priv->reloc_lock);
-+ dev_priv->rel_mapped_pages -= reloc_num_pages;
-+ spin_unlock(&dev_priv->reloc_lock);
-+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
-+ }
-+
-+ psb_clear_dstbuf_cache(&dst_cache);
-+ if (reloc_buffer)
-+ drm_bo_usage_deref_unlocked(&reloc_buffer);
-+ return ret;
-+}
-+
-+static int psb_cmdbuf_2d(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_fence_arg *fence_arg)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ int ret;
-+
-+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
-+ if (ret)
-+ return -EAGAIN;
-+
-+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
-+ arg->cmdbuf_size, PSB_ENGINE_2D, NULL);
-+ if (ret)
-+ goto out_unlock;
-+
-+ psb_fence_or_sync(priv, PSB_ENGINE_2D, arg, fence_arg, NULL);
-+
-+ mutex_lock(&cmd_buffer->mutex);
-+ if (cmd_buffer->fence != NULL)
-+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
-+ mutex_unlock(&cmd_buffer->mutex);
-+ out_unlock:
-+ mutex_unlock(&dev_priv->reset_mutex);
-+ return ret;
-+}
-+
-+#if 0
-+static int psb_dump_page(struct drm_buffer_object *bo,
-+ unsigned int page_offset, unsigned int num)
-+{
-+ struct drm_bo_kmap_obj kmobj;
-+ int is_iomem;
-+ uint32_t *p;
-+ int ret;
-+ unsigned int i;
-+
-+ ret = drm_bo_kmap(bo, page_offset, 1, &kmobj);
-+ if (ret)
-+ return ret;
-+
-+ p = drm_bmo_virtual(&kmobj, &is_iomem);
-+ for (i = 0; i < num; ++i)
-+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
-+
-+ drm_bo_kunmap(&kmobj);
-+ return 0;
-+}
-+#endif
-+
-+static void psb_idle_engine(struct drm_device *dev, int engine)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ uint32_t dummy;
-+
-+ switch (engine) {
-+ case PSB_ENGINE_2D:
-+
-+ /*
-+ * Make sure we flush 2D properly using a dummy
-+ * fence sequence emit.
-+ */
-+
-+ (void)psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
-+ &dummy, &dummy);
-+ psb_2d_lock(dev_priv);
-+ (void)psb_idle_2d(dev);
-+ psb_2d_unlock(dev_priv);
-+ break;
-+ case PSB_ENGINE_TA:
-+ case PSB_ENGINE_RASTERIZER:
-+ case PSB_ENGINE_HPRAST:
-+ (void)psb_idle_3d(dev);
-+ break;
-+ default:
-+
-+ /*
-+ * FIXME: Insert video engine idle command here.
-+ */
-+
-+ break;
-+ }
-+}
-+
-+void psb_fence_or_sync(struct drm_file *priv,
-+ int engine,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_fence_arg *fence_arg,
-+ struct drm_fence_object **fence_p)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ int ret;
-+ struct drm_fence_object *fence;
-+
-+ ret = drm_fence_buffer_objects(dev, NULL, arg->fence_flags,
-+ NULL, &fence);
-+
-+ if (ret) {
-+
-+ /*
-+ * Fence creation failed.
-+ * Fall back to synchronous operation and idle the engine.
-+ */
-+
-+ psb_idle_engine(dev, engine);
-+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-+
-+ /*
-+ * Communicate to user-space that
-+ * fence creation has failed and that
-+ * the engine is idle.
-+ */
-+
-+ fence_arg->handle = ~0;
-+ fence_arg->error = ret;
-+ }
-+
-+ drm_putback_buffer_objects(dev);
-+ if (fence_p)
-+ *fence_p = NULL;
-+ return;
-+ }
-+
-+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-+
-+ ret = drm_fence_add_user_object(priv, fence,
-+ arg->fence_flags &
-+ DRM_FENCE_FLAG_SHAREABLE);
-+ if (!ret)
-+ drm_fence_fill_arg(fence, fence_arg);
-+ else {
-+ /*
-+ * Fence user object creation failed.
-+ * We must idle the engine here as well, as user-
-+ * space expects a fence object to wait on. Since we
-+ * have a fence object we wait for it to signal
-+ * to indicate engine "sufficiently" idle.
-+ */
-+
-+ (void)drm_fence_object_wait(fence, 0, 1, fence->type);
-+ drm_fence_usage_deref_unlocked(&fence);
-+ fence_arg->handle = ~0;
-+ fence_arg->error = ret;
-+ }
-+ }
-+
-+ if (fence_p)
-+ *fence_p = fence;
-+ else if (fence)
-+ drm_fence_usage_deref_unlocked(&fence);
-+}
-+
-+int psb_handle_copyback(struct drm_device *dev,
-+ struct psb_buflist_item *buffers,
-+ unsigned int num_buffers, int ret, void *data)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_bo_op_arg arg;
-+ struct psb_buflist_item *item = buffers;
-+ struct drm_buffer_object *bo;
-+ int err = ret;
-+ int i;
-+
-+ /*
-+ * Clear the unfenced use base register lists and buffer lists.
-+ */
-+
-+ if (ret) {
-+ drm_regs_fence(&dev_priv->use_manager, NULL);
-+ drm_putback_buffer_objects(dev);
-+ }
-+
-+ if (ret != -EAGAIN) {
-+ for (i = 0; i < num_buffers; ++i) {
-+ arg.handled = 1;
-+ arg.d.rep.ret = item->ret;
-+ bo = item->bo;
-+ mutex_lock(&bo->mutex);
-+ drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
-+ mutex_unlock(&bo->mutex);
-+ if (copy_to_user(item->data, &arg, sizeof(arg)))
-+ err = -EFAULT;
-+ ++item;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+static int psb_cmdbuf_video(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ unsigned int num_buffers,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_fence_arg *fence_arg)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_fence_object *fence;
-+ int ret;
-+
-+ /*
-+ * Check this. Doesn't seem right. Have fencing done AFTER command
-+ * submission and make sure drm_psb_idle idles the MSVDX completely.
-+ */
-+
-+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, arg, fence_arg, &fence);
-+ ret = psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
-+ arg->cmdbuf_size, fence);
-+
-+ if (ret)
-+ return ret;
-+
-+ drm_fence_usage_deref_unlocked(&fence);
-+ mutex_lock(&cmd_buffer->mutex);
-+ if (cmd_buffer->fence != NULL)
-+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
-+ mutex_unlock(&cmd_buffer->mutex);
-+ return 0;
-+}
-+
-+int psb_feedback_buf(struct drm_file *file_priv,
-+ uint32_t feedback_ops,
-+ uint32_t handle,
-+ uint32_t offset,
-+ uint32_t feedback_breakpoints,
-+ uint32_t feedback_size, struct psb_feedback_info *feedback)
-+{
-+ struct drm_buffer_object *bo;
-+ struct page *page;
-+ uint32_t page_no;
-+ uint32_t page_offset;
-+ int ret;
-+
-+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
-+ DRM_ERROR("Illegal feedback op.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (feedback_breakpoints != 0) {
-+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
-+ DRM_ERROR("Feedback buffer size too small.\n");
-+ return -EINVAL;
-+ }
-+
-+ page_offset = offset & ~PAGE_MASK;
-+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
-+ < page_offset) {
-+ DRM_ERROR("Illegal feedback buffer alignment.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_handle_validate(file_priv,
-+ handle,
-+ PSB_ENGINE_TA,
-+ DRM_BO_FLAG_MEM_LOCAL |
-+ DRM_BO_FLAG_CACHED |
-+ DRM_BO_FLAG_WRITE |
-+ PSB_BO_FLAG_FEEDBACK,
-+ DRM_BO_MASK_MEM |
-+ DRM_BO_FLAG_CACHED |
-+ DRM_BO_FLAG_WRITE |
-+ PSB_BO_FLAG_FEEDBACK, 0, 0, NULL, &bo);
-+ if (ret)
-+ return ret;
-+
-+ page_no = offset >> PAGE_SHIFT;
-+ if (page_no >= bo->num_pages) {
-+ ret = -EINVAL;
-+ DRM_ERROR("Illegal feedback buffer offset.\n");
-+ goto out_unref;
-+ }
-+
-+ if (bo->ttm == NULL) {
-+ ret = -EINVAL;
-+ DRM_ERROR("Vistest buffer without TTM.\n");
-+ goto out_unref;
-+ }
-+
-+ page = drm_ttm_get_page(bo->ttm, page_no);
-+ if (!page) {
-+ ret = -ENOMEM;
-+ goto out_unref;
-+ }
-+
-+ feedback->page = page;
-+ feedback->bo = bo;
-+ feedback->offset = page_offset;
-+ return 0;
-+
-+ out_unref:
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return ret;
-+}
-+
-+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ drm_psb_cmdbuf_arg_t *arg = data;
-+ int ret = 0;
-+ unsigned num_buffers;
-+ struct drm_buffer_object *cmd_buffer = NULL;
-+ struct drm_buffer_object *ta_buffer = NULL;
-+ struct drm_buffer_object *oom_buffer = NULL;
-+ struct drm_fence_arg fence_arg;
-+ struct drm_psb_scene user_scene;
-+ struct psb_scene_pool *pool = NULL;
-+ struct psb_scene *scene = NULL;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
-+ int engine;
-+ struct psb_feedback_info feedback;
-+
-+ if (!dev_priv)
-+ return -EINVAL;
-+
-+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
-+ if (ret)
-+ return ret;
-+
-+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
-+
-+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-+ if (ret) {
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return -EAGAIN;
-+ }
-+ if (unlikely(dev_priv->buffers == NULL)) {
-+ dev_priv->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
-+ sizeof(*dev_priv->buffers));
-+ if (dev_priv->buffers == NULL) {
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return -ENOMEM;
-+ }
-+ }
-+
-+
-+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
-+ PSB_ENGINE_TA : arg->engine;
-+
-+ ret =
-+ psb_validate_buffer_list(file_priv, engine,
-+ (unsigned long)arg->buffer_list,
-+ dev_priv->buffers, &num_buffers);
-+ if (ret)
-+ goto out_err0;
-+
-+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
-+ arg->reloc_offset, arg->reloc_handle,
-+ dev_priv->buffers, num_buffers, 0, 1);
-+ if (ret)
-+ goto out_err0;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ cmd_buffer = drm_lookup_buffer_object(file_priv, arg->cmdbuf_handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!cmd_buffer) {
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+
-+ switch (arg->engine) {
-+ case PSB_ENGINE_2D:
-+ ret = psb_cmdbuf_2d(file_priv, arg, cmd_buffer, &fence_arg);
-+ if (ret)
-+ goto out_err0;
-+ break;
-+ case PSB_ENGINE_VIDEO:
-+ ret =
-+ psb_cmdbuf_video(file_priv, arg, num_buffers, cmd_buffer,
-+ &fence_arg);
-+ if (ret)
-+ goto out_err0;
-+ break;
-+ case PSB_ENGINE_RASTERIZER:
-+ ret = psb_cmdbuf_raster(file_priv, arg, cmd_buffer, &fence_arg);
-+ if (ret)
-+ goto out_err0;
-+ break;
-+ case PSB_ENGINE_TA:
-+ if (arg->ta_handle == arg->cmdbuf_handle) {
-+ mutex_lock(&dev->struct_mutex);
-+ atomic_inc(&cmd_buffer->usage);
-+ ta_buffer = cmd_buffer;
-+ mutex_unlock(&dev->struct_mutex);
-+ } else {
-+ mutex_lock(&dev->struct_mutex);
-+ ta_buffer =
-+ drm_lookup_buffer_object(file_priv,
-+ arg->ta_handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!ta_buffer) {
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+ }
-+ if (arg->oom_size != 0) {
-+ if (arg->oom_handle == arg->cmdbuf_handle) {
-+ mutex_lock(&dev->struct_mutex);
-+ atomic_inc(&cmd_buffer->usage);
-+ oom_buffer = cmd_buffer;
-+ mutex_unlock(&dev->struct_mutex);
-+ } else {
-+ mutex_lock(&dev->struct_mutex);
-+ oom_buffer =
-+ drm_lookup_buffer_object(file_priv,
-+ arg->oom_handle,
-+ 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!oom_buffer) {
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+ }
-+ }
-+
-+ ret = copy_from_user(&user_scene, (void __user *)
-+ ((unsigned long)arg->scene_arg),
-+ sizeof(user_scene));
-+ if (ret)
-+ goto out_err0;
-+
-+ if (!user_scene.handle_valid) {
-+ pool = psb_scene_pool_alloc(file_priv, 0,
-+ user_scene.num_buffers,
-+ user_scene.w, user_scene.h);
-+ if (!pool) {
-+ ret = -ENOMEM;
-+ goto out_err0;
-+ }
-+
-+ user_scene.handle = psb_scene_pool_handle(pool);
-+ user_scene.handle_valid = 1;
-+ ret = copy_to_user((void __user *)
-+ ((unsigned long)arg->scene_arg),
-+ &user_scene, sizeof(user_scene));
-+
-+ if (ret)
-+ goto out_err0;
-+ } else {
-+ mutex_lock(&dev->struct_mutex);
-+ pool = psb_scene_pool_lookup_devlocked(file_priv,
-+ user_scene.
-+ handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!pool) {
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+ }
-+
-+ mutex_lock(&dev_priv->reset_mutex);
-+ ret = psb_validate_scene_pool(pool, 0, 0, 0,
-+ user_scene.w,
-+ user_scene.h,
-+ arg->ta_flags &
-+ PSB_TA_FLAG_LASTPASS, &scene);
-+ mutex_unlock(&dev_priv->reset_mutex);
-+
-+ if (ret)
-+ goto out_err0;
-+
-+ memset(&feedback, 0, sizeof(feedback));
-+ if (arg->feedback_ops) {
-+ ret = psb_feedback_buf(file_priv,
-+ arg->feedback_ops,
-+ arg->feedback_handle,
-+ arg->feedback_offset,
-+ arg->feedback_breakpoints,
-+ arg->feedback_size, &feedback);
-+ if (ret)
-+ goto out_err0;
-+ }
-+ ret = psb_cmdbuf_ta(file_priv, arg, cmd_buffer, ta_buffer,
-+ oom_buffer, scene, &feedback, &fence_arg);
-+ if (ret)
-+ goto out_err0;
-+ break;
-+ default:
-+ DRM_ERROR("Unimplemented command submission mechanism (%x).\n",
-+ arg->engine);
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+
-+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-+ ret = copy_to_user((void __user *)
-+ ((unsigned long)arg->fence_arg),
-+ &fence_arg, sizeof(fence_arg));
-+ }
-+
-+ out_err0:
-+ ret =
-+ psb_handle_copyback(dev, dev_priv->buffers, num_buffers, ret, data);
-+ mutex_lock(&dev->struct_mutex);
-+ if (scene)
-+ psb_scene_unref_devlocked(&scene);
-+ if (pool)
-+ psb_scene_pool_unref_devlocked(&pool);
-+ if (cmd_buffer)
-+ drm_bo_usage_deref_locked(&cmd_buffer);
-+ if (ta_buffer)
-+ drm_bo_usage_deref_locked(&ta_buffer);
-+ if (oom_buffer)
-+ drm_bo_usage_deref_locked(&oom_buffer);
-+
-+ psb_dereference_buffers_locked(dev_priv->buffers, num_buffers);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_unlock(&dev_priv->cmdbuf_mutex);
-+
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return ret;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/psb/psb_xhw.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/drivers/gpu/drm/psb/psb_xhw.c 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,614 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Make calls into closed source X server code.
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+
-+void
-+psb_xhw_clean_buf(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
-+{
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ list_del_init(&buf->head);
-+ if (dev_priv->xhw_cur_buf == buf)
-+ dev_priv->xhw_cur_buf = NULL;
-+ atomic_set(&buf->done, 1);
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+}
-+
-+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf)
-+{
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ atomic_set(&buf->done, 0);
-+ if (unlikely(!dev_priv->xhw_submit_ok)) {
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ DRM_ERROR("No Xpsb 3D extension available.\n");
-+ return -EINVAL;
-+ }
-+ if (!list_empty(&buf->head)) {
-+ DRM_ERROR("Recursive list adding.\n");
-+ goto out;
-+ }
-+ list_add_tail(&buf->head, &dev_priv->xhw_in);
-+ wake_up_interruptible(&dev_priv->xhw_queue);
-+ out:
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return 0;
-+}
-+
-+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t w,
-+ uint32_t h,
-+ uint32_t * hw_cookie,
-+ uint32_t * bo_size,
-+ uint32_t * clear_p_start, uint32_t * clear_num_pages)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_SCENE_INFO;
-+ xa->irq_op = 0;
-+ xa->issue_irq = 0;
-+ xa->arg.si.w = w;
-+ xa->arg.si.h = h;
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), DRM_HZ);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ if (!xa->ret) {
-+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
-+ *bo_size = xa->arg.si.size;
-+ *clear_p_start = xa->arg.si.clear_p_start;
-+ *clear_num_pages = xa->arg.si.clear_num_pages;
-+ }
-+ return xa->ret;
-+}
-+
-+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t fire_flags)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ buf->copy_back = 0;
-+ xa->op = PSB_XHW_FIRE_RASTER;
-+ xa->issue_irq = 0;
-+ xa->arg.sb.fire_flags = 0;
-+
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+int psb_xhw_vistest(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_VISTEST;
-+ /*
-+ * Could perhaps decrease latency somewhat by
-+ * issuing an irq in this case.
-+ */
-+ xa->issue_irq = 0;
-+ xa->irq_op = PSB_UIRQ_VISTEST;
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t fire_flags,
-+ uint32_t hw_context,
-+ uint32_t * cookie,
-+ uint32_t * oom_cmds,
-+ uint32_t num_oom_cmds,
-+ uint32_t offset, uint32_t engine, uint32_t flags)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
-+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
-+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
-+ if (unlikely(buf->copy_back))
-+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
-+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
-+ else
-+ xa->irq_op = 0;
-+ xa->arg.sb.fire_flags = fire_flags;
-+ xa->arg.sb.hw_context = hw_context;
-+ xa->arg.sb.offset = offset;
-+ xa->arg.sb.engine = engine;
-+ xa->arg.sb.flags = flags;
-+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
-+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
-+ if (num_oom_cmds)
-+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
-+ sizeof(uint32_t) * num_oom_cmds);
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_RESET_DPM;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), 3 * DRM_HZ);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ return xa->ret;
-+}
-+
-+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * value)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ *value = 0;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_CHECK_LOCKUP;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), DRM_HZ * 3);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ if (!xa->ret)
-+ *value = xa->arg.cl.value;
-+
-+ return xa->ret;
-+}
-+
-+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ unsigned long irq_flags;
-+
-+ buf->copy_back = 0;
-+ xa->op = PSB_XHW_TERMINATE;
-+ xa->issue_irq = 0;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ dev_priv->xhw_submit_ok = 0;
-+ atomic_set(&buf->done, 0);
-+ if (!list_empty(&buf->head)) {
-+ DRM_ERROR("Recursive list adding.\n");
-+ goto out;
-+ }
-+ list_add_tail(&buf->head, &dev_priv->xhw_in);
-+ out:
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ wake_up_interruptible(&dev_priv->xhw_queue);
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), DRM_HZ / 10);
-+
-+ if (!atomic_read(&buf->done)) {
-+ DRM_ERROR("Xpsb terminate timeout.\n");
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ return 0;
-+}
-+
-+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t pages, uint32_t * hw_cookie, uint32_t * size)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_TA_MEM_INFO;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+ xa->arg.bi.pages = pages;
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), DRM_HZ);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ if (!xa->ret)
-+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
-+
-+ *size = xa->arg.bi.size;
-+ return xa->ret;
-+}
-+
-+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t flags,
-+ uint32_t param_offset,
-+ uint32_t pt_offset, uint32_t * hw_cookie)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_TA_MEM_LOAD;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+ xa->arg.bl.flags = flags;
-+ xa->arg.bl.param_offset = param_offset;
-+ xa->arg.bl.pt_offset = pt_offset;
-+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), 3 * DRM_HZ);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ if (!xa->ret)
-+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
-+
-+ return xa->ret;
-+}
-+
-+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ /*
-+ * This calls the extensive closed source
-+ * OOM handler, which resolves the condition and
-+ * sends a reply telling the scheduler what to do
-+ * with the task.
-+ */
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_OOM;
-+ xa->issue_irq = 1;
-+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
-+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
-+
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t * cookie,
-+ uint32_t * bca, uint32_t * rca, uint32_t * flags)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ /*
-+ * Get info about how to schedule an OOM task.
-+ */
-+
-+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
-+ *bca = xa->arg.oom.bca;
-+ *rca = xa->arg.oom.rca;
-+ *flags = xa->arg.oom.flags;
-+}
-+
-+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
-+}
-+
-+int psb_xhw_resume(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ buf->copy_back = 0;
-+ xa->op = PSB_XHW_RESUME;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
-+{
-+}
-+
-+int psb_xhw_init(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ unsigned long irq_flags;
-+
-+ INIT_LIST_HEAD(&dev_priv->xhw_in);
-+ dev_priv->xhw_lock = SPIN_LOCK_UNLOCKED;
-+ atomic_set(&dev_priv->xhw_client, 0);
-+ init_waitqueue_head(&dev_priv->xhw_queue);
-+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
-+ mutex_init(&dev_priv->xhw_mutex);
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ dev_priv->xhw_on = 0;
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+
-+ return 0;
-+}
-+
-+static int psb_xhw_init_init(struct drm_device *dev,
-+ struct drm_file *file_priv,
-+ struct drm_psb_xhw_init_arg *arg)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ int ret;
-+ int is_iomem;
-+
-+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
-+ unsigned long irq_flags;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ dev_priv->xhw_bo =
-+ drm_lookup_buffer_object(file_priv, arg->buffer_handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!dev_priv->xhw_bo) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ ret = drm_bo_kmap(dev_priv->xhw_bo, 0,
-+ dev_priv->xhw_bo->num_pages,
-+ &dev_priv->xhw_kmap);
-+ if (ret) {
-+ DRM_ERROR("Failed mapping X server "
-+ "communications buffer.\n");
-+ goto out_err0;
-+ }
-+ dev_priv->xhw = drm_bmo_virtual(&dev_priv->xhw_kmap, &is_iomem);
-+ if (is_iomem) {
-+ DRM_ERROR("X server communications buffer"
-+ "is in device memory.\n");
-+ ret = -EINVAL;
-+ goto out_err1;
-+ }
-+ dev_priv->xhw_file = file_priv;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ dev_priv->xhw_on = 1;
-+ dev_priv->xhw_submit_ok = 1;
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return 0;
-+ } else {
-+ DRM_ERROR("Xhw is already initialized.\n");
-+ return -EBUSY;
-+ }
-+ out_err1:
-+ dev_priv->xhw = NULL;
-+ drm_bo_kunmap(&dev_priv->xhw_kmap);
-+ out_err0:
-+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
-+ out_err:
-+ atomic_dec(&dev_priv->xhw_client);
-+ return ret;
-+}
-+
-+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_xhw_buf *cur_buf, *next;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ dev_priv->xhw_submit_ok = 0;
-+
-+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
-+ list_del_init(&cur_buf->head);
-+ if (cur_buf->copy_back) {
-+ cur_buf->arg.ret = -EINVAL;
-+ }
-+ atomic_set(&cur_buf->done, 1);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ wake_up(&dev_priv->xhw_caller_queue);
-+}
-+
-+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
-+ struct drm_file *file_priv, int closing)
-+{
-+
-+ if (dev_priv->xhw_file == file_priv &&
-+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
-+
-+ if (closing)
-+ psb_xhw_queue_empty(dev_priv);
-+ else {
-+ struct psb_xhw_buf buf;
-+ INIT_LIST_HEAD(&buf.head);
-+
-+ psb_xhw_terminate(dev_priv, &buf);
-+ psb_xhw_queue_empty(dev_priv);
-+ }
-+
-+ dev_priv->xhw = NULL;
-+ drm_bo_kunmap(&dev_priv->xhw_kmap);
-+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
-+ dev_priv->xhw_file = NULL;
-+ }
-+}
-+
-+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_psb_xhw_init_arg *arg = (struct drm_psb_xhw_init_arg *)data;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ switch (arg->operation) {
-+ case PSB_XHW_INIT:
-+ return psb_xhw_init_init(dev, file_priv, arg);
-+ case PSB_XHW_TAKEDOWN:
-+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
-+ }
-+ return 0;
-+}
-+
-+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
-+{
-+ int empty;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ empty = list_empty(&dev_priv->xhw_in);
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return empty;
-+}
-+
-+int psb_xhw_handler(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irq_flags;
-+ struct drm_psb_xhw_arg *xa;
-+ struct psb_xhw_buf *buf;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+
-+ if (!dev_priv->xhw_on) {
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return -EINVAL;
-+ }
-+
-+ buf = dev_priv->xhw_cur_buf;
-+ if (buf && buf->copy_back) {
-+ xa = &buf->arg;
-+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
-+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
-+ atomic_set(&buf->done, 1);
-+ wake_up(&dev_priv->xhw_caller_queue);
-+ } else
-+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
-+
-+ dev_priv->xhw_cur_buf = 0;
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return 0;
-+}
-+
-+int psb_xhw_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ unsigned long irq_flags;
-+ struct drm_psb_xhw_arg *xa;
-+ int ret;
-+ struct list_head *list;
-+ struct psb_xhw_buf *buf;
-+
-+ if (!dev_priv)
-+ return -EINVAL;
-+
-+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
-+ return -EAGAIN;
-+
-+ if (psb_forced_user_interrupt(dev_priv)) {
-+ mutex_unlock(&dev_priv->xhw_mutex);
-+ return -EINVAL;
-+ }
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ while (list_empty(&dev_priv->xhw_in)) {
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
-+ !psb_xhw_in_empty
-+ (dev_priv), DRM_HZ);
-+ if (ret == -ERESTARTSYS || ret == 0) {
-+ mutex_unlock(&dev_priv->xhw_mutex);
-+ return -EAGAIN;
-+ }
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ }
-+
-+ list = dev_priv->xhw_in.next;
-+ list_del_init(list);
-+
-+ buf = list_entry(list, struct psb_xhw_buf, head);
-+ xa = &buf->arg;
-+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
-+
-+ if (unlikely(buf->copy_back))
-+ dev_priv->xhw_cur_buf = buf;
-+ else {
-+ atomic_set(&buf->done, 1);
-+ dev_priv->xhw_cur_buf = NULL;
-+ }
-+
-+ if (xa->op == PSB_XHW_TERMINATE) {
-+ dev_priv->xhw_on = 0;
-+ wake_up(&dev_priv->xhw_caller_queue);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+
-+ mutex_unlock(&dev_priv->xhw_mutex);
-+
-+ return 0;
-+}
-Index: linux-2.6.27/drivers/gpu/drm/Kconfig
-===================================================================
---- linux-2.6.27.orig/drivers/gpu/drm/Kconfig 2008-10-09 23:13:53.000000000 +0100
-+++ linux-2.6.27/drivers/gpu/drm/Kconfig 2009-02-05 13:29:33.000000000 +0000
-@@ -105,3 +105,9 @@
- help
- Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
- chipset. If M is selected the module will be called savage.
-+
-+config DRM_PSB
-+ tristate "Intel Poulsbo"
-+ depends on DRM && PCI && I2C_ALGOBIT
-+ help
-+ Choose this option if you have an Intel Poulsbo chipset.
-Index: linux-2.6.27/include/drm/drm_crtc.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/include/drm/drm_crtc.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,592 @@
-+/*
-+ * Copyright © 2006 Keith Packard
-+ * Copyright © 2007 Intel Corporation
-+ * Jesse Barnes <jesse.barnes@intel.com>
-+ */
-+#ifndef __DRM_CRTC_H__
-+#define __DRM_CRTC_H__
-+
-+#include <linux/i2c.h>
-+#include <linux/spinlock.h>
-+#include <linux/types.h>
-+#include <linux/idr.h>
-+
-+#include <linux/fb.h>
-+
-+struct drm_device;
-+
-+/*
-+ * Note on terminology: here, for brevity and convenience, we refer to output
-+ * control chips as 'CRTCs'. They can control any type of output, VGA, LVDS,
-+ * DVI, etc. And 'screen' refers to the whole of the visible display, which
-+ * may span multiple monitors (and therefore multiple CRTC and output
-+ * structures).
-+ */
-+
-+enum drm_mode_status {
-+ MODE_OK = 0, /* Mode OK */
-+ MODE_HSYNC, /* hsync out of range */
-+ MODE_VSYNC, /* vsync out of range */
-+ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
-+ MODE_V_ILLEGAL, /* mode has illegal horizontal timings */
-+ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
-+ MODE_NOMODE, /* no mode with a maching name */
-+ MODE_NO_INTERLACE, /* interlaced mode not supported */
-+ MODE_NO_DBLESCAN, /* doublescan mode not supported */
-+ MODE_NO_VSCAN, /* multiscan mode not supported */
-+ MODE_MEM, /* insufficient video memory */
-+ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
-+ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
-+ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
-+ MODE_NOCLOCK, /* no fixed clock available */
-+ MODE_CLOCK_HIGH, /* clock required is too high */
-+ MODE_CLOCK_LOW, /* clock required is too low */
-+ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
-+ MODE_BAD_HVALUE, /* horizontal timing was out of range */
-+ MODE_BAD_VVALUE, /* vertical timing was out of range */
-+ MODE_BAD_VSCAN, /* VScan value out of range */
-+ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
-+ MODE_HSYNC_WIDE, /* horizontal sync too wide */
-+ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
-+ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
-+ MODE_VSYNC_NARROW, /* vertical sync too narrow */
-+ MODE_VSYNC_WIDE, /* vertical sync too wide */
-+ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
-+ MODE_VBLANK_WIDE, /* vertical blanking too wide */
-+ MODE_PANEL, /* exceeds panel dimensions */
-+ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
-+ MODE_ONE_WIDTH, /* only one width is supported */
-+ MODE_ONE_HEIGHT, /* only one height is supported */
-+ MODE_ONE_SIZE, /* only one resolution is supported */
-+ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
-+ MODE_UNVERIFIED = -3, /* mode needs to reverified */
-+ MODE_BAD = -2, /* unspecified reason */
-+ MODE_ERROR = -1 /* error condition */
-+};
-+
-+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
-+ DRM_MODE_TYPE_CRTC_C)
-+
-+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
-+ .name = nm, .status = 0, .type = (t), .clock = (c), \
-+ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
-+ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
-+ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
-+ .vscan = (vs), .flags = (f), .vrefresh = 0
-+
-+struct drm_display_mode {
-+ /* Header */
-+ struct list_head head;
-+ char name[DRM_DISPLAY_MODE_LEN];
-+ int mode_id;
-+ int output_count;
-+ enum drm_mode_status status;
-+ int type;
-+
-+ /* Proposed mode values */
-+ int clock;
-+ int hdisplay;
-+ int hsync_start;
-+ int hsync_end;
-+ int htotal;
-+ int hskew;
-+ int vdisplay;
-+ int vsync_start;
-+ int vsync_end;
-+ int vtotal;
-+ int vscan;
-+ unsigned int flags;
-+
-+ /* Actual mode we give to hw */
-+ int clock_index;
-+ int synth_clock;
-+ int crtc_hdisplay;
-+ int crtc_hblank_start;
-+ int crtc_hblank_end;
-+ int crtc_hsync_start;
-+ int crtc_hsync_end;
-+ int crtc_htotal;
-+ int crtc_hskew;
-+ int crtc_vdisplay;
-+ int crtc_vblank_start;
-+ int crtc_vblank_end;
-+ int crtc_vsync_start;
-+ int crtc_vsync_end;
-+ int crtc_vtotal;
-+ int crtc_hadjusted;
-+ int crtc_vadjusted;
-+
-+ /* Driver private mode info */
-+ int private_size;
-+ int *private;
-+ int private_flags;
-+
-+ int vrefresh;
-+ float hsync;
-+};
-+
-+/* Video mode flags */
-+#define V_PHSYNC (1<<0)
-+#define V_NHSYNC (1<<1)
-+#define V_PVSYNC (1<<2)
-+#define V_NVSYNC (1<<3)
-+#define V_INTERLACE (1<<4)
-+#define V_DBLSCAN (1<<5)
-+#define V_CSYNC (1<<6)
-+#define V_PCSYNC (1<<7)
-+#define V_NCSYNC (1<<8)
-+#define V_HSKEW (1<<9) /* hskew provided */
-+#define V_BCAST (1<<10)
-+#define V_PIXMUX (1<<11)
-+#define V_DBLCLK (1<<12)
-+#define V_CLKDIV2 (1<<13)
-+
-+#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
-+#define DPMSModeOn 0
-+#define DPMSModeStandby 1
-+#define DPMSModeSuspend 2
-+#define DPMSModeOff 3
-+
-+enum drm_output_status {
-+ output_status_connected = 1,
-+ output_status_disconnected = 2,
-+ output_status_unknown = 3,
-+};
-+
-+enum subpixel_order {
-+ SubPixelUnknown = 0,
-+ SubPixelHorizontalRGB,
-+ SubPixelHorizontalBGR,
-+ SubPixelVerticalRGB,
-+ SubPixelVerticalBGR,
-+ SubPixelNone,
-+};
-+
-+/*
-+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
-+ */
-+struct drm_display_info {
-+ char name[DRM_DISPLAY_INFO_LEN];
-+ /* Input info */
-+ bool serration_vsync;
-+ bool sync_on_green;
-+ bool composite_sync;
-+ bool separate_syncs;
-+ bool blank_to_black;
-+ unsigned char video_level;
-+ bool digital;
-+ /* Physical size */
-+ unsigned int width_mm;
-+ unsigned int height_mm;
-+
-+ /* Display parameters */
-+ unsigned char gamma; /* FIXME: storage format */
-+ bool gtf_supported;
-+ bool standard_color;
-+ enum {
-+ monochrome,
-+ rgb,
-+ other,
-+ unknown,
-+ } display_type;
-+ bool active_off_supported;
-+ bool suspend_supported;
-+ bool standby_supported;
-+
-+ /* Color info FIXME: storage format */
-+ unsigned short redx, redy;
-+ unsigned short greenx, greeny;
-+ unsigned short bluex, bluey;
-+ unsigned short whitex, whitey;
-+
-+ /* Clock limits FIXME: storage format */
-+ unsigned int min_vfreq, max_vfreq;
-+ unsigned int min_hfreq, max_hfreq;
-+ unsigned int pixel_clock;
-+
-+ /* White point indices FIXME: storage format */
-+ unsigned int wpx1, wpy1;
-+ unsigned int wpgamma1;
-+ unsigned int wpx2, wpy2;
-+ unsigned int wpgamma2;
-+
-+ /* Preferred mode (if any) */
-+ struct drm_display_mode *preferred_mode;
-+ char *raw_edid; /* if any */
-+};
-+
-+struct drm_framebuffer {
-+ struct drm_device *dev;
-+ struct list_head head;
-+ int id; /* idr assigned */
-+ unsigned int pitch;
-+ unsigned long offset;
-+ unsigned int width;
-+ unsigned int height;
-+ /* depth can be 15 or 16 */
-+ unsigned int depth;
-+ int bits_per_pixel;
-+ int flags;
-+ struct drm_buffer_object *bo;
-+ void *fbdev;
-+ u32 pseudo_palette[16];
-+ struct drm_bo_kmap_obj kmap;
-+ struct list_head filp_head;
-+};
-+
-+struct drm_property_enum {
-+ struct list_head head;
-+ uint32_t value;
-+ unsigned char name[DRM_PROP_NAME_LEN];
-+};
-+
-+struct drm_property {
-+ struct list_head head;
-+ int id; /* idr assigned */
-+ uint32_t flags;
-+ char name[DRM_PROP_NAME_LEN];
-+ uint32_t num_values;
-+ uint32_t *values;
-+
-+ struct list_head enum_list;
-+};
-+
-+struct drm_crtc;
-+struct drm_output;
-+
-+/**
-+ * drm_crtc_funcs - control CRTCs for a given device
-+ * @dpms: control display power levels
-+ * @save: save CRTC state
-+ * @resore: restore CRTC state
-+ * @lock: lock the CRTC
-+ * @unlock: unlock the CRTC
-+ * @shadow_allocate: allocate shadow pixmap
-+ * @shadow_create: create shadow pixmap for rotation support
-+ * @shadow_destroy: free shadow pixmap
-+ * @mode_fixup: fixup proposed mode
-+ * @mode_set: set the desired mode on the CRTC
-+ * @gamma_set: specify color ramp for CRTC
-+ * @cleanup: cleanup driver private state prior to close
-+ *
-+ * The drm_crtc_funcs structure is the central CRTC management structure
-+ * in the DRM. Each CRTC controls one or more outputs (note that the name
-+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
-+ * outputs, not just CRTs).
-+ *
-+ * Each driver is responsible for filling out this structure at startup time,
-+ * in addition to providing other modesetting features, like i2c and DDC
-+ * bus accessors.
-+ */
-+struct drm_crtc_funcs {
-+ /*
-+ * Control power levels on the CRTC. If the mode passed in is
-+ * unsupported, the provider must use the next lowest power level.
-+ */
-+ void (*dpms)(struct drm_crtc *crtc, int mode);
-+
-+ /* JJJ: Are these needed? */
-+ /* Save CRTC state */
-+ void (*save)(struct drm_crtc *crtc); /* suspend? */
-+ /* Restore CRTC state */
-+ void (*restore)(struct drm_crtc *crtc); /* resume? */
-+ bool (*lock)(struct drm_crtc *crtc);
-+ void (*unlock)(struct drm_crtc *crtc);
-+
-+ void (*prepare)(struct drm_crtc *crtc);
-+ void (*commit)(struct drm_crtc *crtc);
-+
-+ /* Provider can fixup or change mode timings before modeset occurs */
-+ bool (*mode_fixup)(struct drm_crtc *crtc,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode);
-+ /* Actually set the mode */
-+ void (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode, int x, int y);
-+ /* Set gamma on the CRTC */
-+ void (*gamma_set)(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
-+ int regno);
-+ /* Driver cleanup routine */
-+ void (*cleanup)(struct drm_crtc *crtc);
-+};
-+
-+/**
-+ * drm_crtc - central CRTC control structure
-+ * @enabled: is this CRTC enabled?
-+ * @x: x position on screen
-+ * @y: y position on screen
-+ * @desired_mode: new desired mode
-+ * @desired_x: desired x for desired_mode
-+ * @desired_y: desired y for desired_mode
-+ * @funcs: CRTC control functions
-+ * @driver_private: arbitrary driver data
-+ *
-+ * Each CRTC may have one or more outputs associated with it. This structure
-+ * allows the CRTC to be controlled.
-+ */
-+struct drm_crtc {
-+ struct drm_device *dev;
-+ struct list_head head;
-+
-+ int id; /* idr assigned */
-+
-+ /* framebuffer the output is currently bound to */
-+ struct drm_framebuffer *fb;
-+
-+ bool enabled;
-+
-+ /* JJJ: are these needed? */
-+ bool cursor_in_range;
-+ bool cursor_shown;
-+
-+ struct drm_display_mode mode;
-+
-+ int x, y;
-+ struct drm_display_mode *desired_mode;
-+ int desired_x, desired_y;
-+ const struct drm_crtc_funcs *funcs;
-+ void *driver_private;
-+
-+ /* RRCrtcPtr randr_crtc? */
-+};
-+
-+extern struct drm_crtc *drm_crtc_create(struct drm_device *dev,
-+ const struct drm_crtc_funcs *funcs);
-+
-+/**
-+ * drm_output_funcs - control outputs on a given device
-+ * @init: setup this output
-+ * @dpms: set power state (see drm_crtc_funcs above)
-+ * @save: save output state
-+ * @restore: restore output state
-+ * @mode_valid: is this mode valid on the given output?
-+ * @mode_fixup: try to fixup proposed mode for this output
-+ * @mode_set: set this mode
-+ * @detect: is this output active?
-+ * @get_modes: get mode list for this output
-+ * @set_property: property for this output may need update
-+ * @cleanup: output is going away, cleanup
-+ *
-+ * Each CRTC may have one or more outputs attached to it. The functions
-+ * below allow the core DRM code to control outputs, enumerate available modes,
-+ * etc.
-+ */
-+struct drm_output_funcs {
-+ void (*init)(struct drm_output *output);
-+ void (*dpms)(struct drm_output *output, int mode);
-+ void (*save)(struct drm_output *output);
-+ void (*restore)(struct drm_output *output);
-+ int (*mode_valid)(struct drm_output *output,
-+ struct drm_display_mode *mode);
-+ bool (*mode_fixup)(struct drm_output *output,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode);
-+ void (*prepare)(struct drm_output *output);
-+ void (*commit)(struct drm_output *output);
-+ void (*mode_set)(struct drm_output *output,
-+ struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode);
-+ enum drm_output_status (*detect)(struct drm_output *output);
-+ int (*get_modes)(struct drm_output *output);
-+ /* JJJ: type checking for properties via property value type */
-+ bool (*set_property)(struct drm_output *output, int prop, void *val);
-+ void (*cleanup)(struct drm_output *output);
-+};
-+
-+#define DRM_OUTPUT_MAX_UMODES 16
-+#define DRM_OUTPUT_MAX_PROPERTY 16
-+#define DRM_OUTPUT_LEN 32
-+/**
-+ * drm_output - central DRM output control structure
-+ * @crtc: CRTC this output is currently connected to, NULL if none
-+ * @possible_crtcs: bitmap of CRTCS this output could be attached to
-+ * @possible_clones: bitmap of possible outputs this output could clone
-+ * @interlace_allowed: can this output handle interlaced modes?
-+ * @doublescan_allowed: can this output handle doublescan?
-+ * @available_modes: modes available on this output (from get_modes() + user)
-+ * @initial_x: initial x position for this output
-+ * @initial_y: initial y position for this output
-+ * @status: output connected?
-+ * @subpixel_order: for this output
-+ * @mm_width: displayable width of output in mm
-+ * @mm_height: displayable height of output in mm
-+ * @name: name of output (should be one of a few standard names)
-+ * @funcs: output control functions
-+ * @driver_private: private driver data
-+ *
-+ * Each output may be connected to one or more CRTCs, or may be clonable by
-+ * another output if they can share a CRTC. Each output also has a specific
-+ * position in the broader display (referred to as a 'screen' though it could
-+ * span multiple monitors).
-+ */
-+struct drm_output {
-+ struct drm_device *dev;
-+ struct list_head head;
-+ struct drm_crtc *crtc;
-+ int id; /* idr assigned */
-+ unsigned long possible_crtcs;
-+ unsigned long possible_clones;
-+ bool interlace_allowed;
-+ bool doublescan_allowed;
-+ struct list_head modes; /* list of modes on this output */
-+
-+ /*
-+ OptionInfoPtr options;
-+ XF86ConfMonitorPtr conf_monitor;
-+ */
-+ int initial_x, initial_y;
-+ enum drm_output_status status;
-+
-+ /* these are modes added by probing with DDC or the BIOS */
-+ struct list_head probed_modes;
-+
-+ /* xf86MonPtr MonInfo; */
-+ enum subpixel_order subpixel_order;
-+ int mm_width, mm_height;
-+ struct drm_display_info *monitor_info; /* if any */
-+ char name[DRM_OUTPUT_LEN];
-+ const struct drm_output_funcs *funcs;
-+ void *driver_private;
-+
-+ u32 user_mode_ids[DRM_OUTPUT_MAX_UMODES];
-+
-+ u32 property_ids[DRM_OUTPUT_MAX_PROPERTY];
-+ u32 property_values[DRM_OUTPUT_MAX_PROPERTY];
-+};
-+
-+/**
-+ * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
-+ * @resize: adjust CRTCs as necessary for the proposed layout
-+ *
-+ * Currently only a resize hook is available. DRM will call back into the
-+ * driver with a new screen width and height. If the driver can't support
-+ * the proposed size, it can return false. Otherwise it should adjust
-+ * the CRTC<->output mappings as needed and update its view of the screen.
-+ */
-+struct drm_mode_config_funcs {
-+ bool (*resize)(struct drm_device *dev, int width, int height);
-+};
-+
-+/**
-+ * drm_mode_config - Mode configuration control structure
-+ *
-+ */
-+struct drm_mode_config {
-+ struct mutex mutex; /* protects configuration and IDR */
-+ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, output, modes - just makes life easier */
-+ /* this is limited to one for now */
-+ int num_fb;
-+ struct list_head fb_list;
-+ int num_output;
-+ struct list_head output_list;
-+
-+ /* int compat_output? */
-+ int num_crtc;
-+ struct list_head crtc_list;
-+
-+ struct list_head usermode_list;
-+
-+ struct list_head property_list;
-+
-+ int min_width, min_height;
-+ int max_width, max_height;
-+ /* DamagePtr rotationDamage? */
-+ /* DGA stuff? */
-+ struct drm_mode_config_funcs *funcs;
-+ unsigned long fb_base;
-+};
-+
-+struct drm_output *drm_output_create(struct drm_device *dev,
-+ const struct drm_output_funcs *funcs,
-+ const char *name);
-+extern void drm_output_destroy(struct drm_output *output);
-+extern bool drm_output_rename(struct drm_output *output, const char *name);
-+extern void drm_fb_release(struct file *filp);
-+
-+extern struct edid *drm_get_edid(struct drm_output *output,
-+ struct i2c_adapter *adapter);
-+extern int drm_add_edid_modes(struct drm_output *output, struct edid *edid);
-+extern void drm_mode_probed_add(struct drm_output *output, struct drm_display_mode *mode);
-+extern void drm_mode_remove(struct drm_output *output, struct drm_display_mode *mode);
-+extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
-+ struct drm_display_mode *mode);
-+extern void drm_mode_debug_printmodeline(struct drm_device *dev,
-+ struct drm_display_mode *mode);
-+extern void drm_mode_config_init(struct drm_device *dev);
-+extern void drm_mode_config_cleanup(struct drm_device *dev);
-+extern void drm_mode_set_name(struct drm_display_mode *mode);
-+extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
-+extern void drm_disable_unused_functions(struct drm_device *dev);
-+
-+extern void drm_mode_addmode(struct drm_device *dev, struct drm_display_mode *user_mode);
-+extern int drm_mode_rmmode(struct drm_device *dev, struct drm_display_mode *mode);
-+
-+/* for us by fb module */
-+extern int drm_mode_attachmode_crtc(struct drm_device *dev,
-+ struct drm_crtc *crtc,
-+ struct drm_display_mode *mode);
-+extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
-+
-+extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
-+extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
-+extern void drm_mode_list_concat(struct list_head *head,
-+ struct list_head *new);
-+extern void drm_mode_validate_size(struct drm_device *dev,
-+ struct list_head *mode_list,
-+ int maxX, int maxY, int maxPitch);
-+extern void drm_mode_prune_invalid(struct drm_device *dev,
-+ struct list_head *mode_list, bool verbose);
-+extern void drm_mode_sort(struct list_head *mode_list);
-+extern int drm_mode_vrefresh(struct drm_display_mode *mode);
-+extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
-+ int adjust_flags);
-+extern void drm_mode_output_list_update(struct drm_output *output);
-+
-+extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
-+extern bool drm_initial_config(struct drm_device *dev, bool cangrow);
-+extern void drm_framebuffer_set_object(struct drm_device *dev,
-+ unsigned long handle);
-+extern struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev);
-+extern void drm_framebuffer_destroy(struct drm_framebuffer *fb);
-+extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
-+extern int drmfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
-+extern bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
-+ int x, int y);
-+
-+extern int drm_output_attach_property(struct drm_output *output,
-+ struct drm_property *property, int init_val);
-+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
-+ const char *name, int num_values);
-+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
-+extern int drm_property_add_enum(struct drm_property *property, int index,
-+ uint32_t value, const char *name);
-+
-+/* IOCTLs */
-+extern int drm_mode_getresources(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+
-+extern int drm_mode_getcrtc(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_getoutput(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_setcrtc(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_addfb(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_rmfb(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_getfb(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_addmode_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+
-+extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+#endif /* __DRM_CRTC_H__ */
-+
-Index: linux-2.6.27/include/drm/drm_edid.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/include/drm/drm_edid.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,179 @@
-+#ifndef __DRM_EDID_H__
-+#define __DRM_EDID_H__
-+
-+#include <linux/types.h>
-+
-+#define EDID_LENGTH 128
-+#define DDC_ADDR 0x50
-+
-+#ifdef BIG_ENDIAN
-+#error "EDID structure is little endian, need big endian versions"
-+#endif
-+
-+struct est_timings {
-+ u8 t1;
-+ u8 t2;
-+ u8 mfg_rsvd;
-+} __attribute__((packed));
-+
-+struct std_timing {
-+ u8 hsize; /* need to multiply by 8 then add 248 */
-+ u8 vfreq:6; /* need to add 60 */
-+ u8 aspect_ratio:2; /* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
-+} __attribute__((packed));
-+
-+/* If detailed data is pixel timing */
-+struct detailed_pixel_timing {
-+ u8 hactive_lo;
-+ u8 hblank_lo;
-+ u8 hblank_hi:4;
-+ u8 hactive_hi:4;
-+ u8 vactive_lo;
-+ u8 vblank_lo;
-+ u8 vblank_hi:4;
-+ u8 vactive_hi:4;
-+ u8 hsync_offset_lo;
-+ u8 hsync_pulse_width_lo;
-+ u8 vsync_pulse_width_lo:4;
-+ u8 vsync_offset_lo:4;
-+ u8 hsync_pulse_width_hi:2;
-+ u8 hsync_offset_hi:2;
-+ u8 vsync_pulse_width_hi:2;
-+ u8 vsync_offset_hi:2;
-+ u8 width_mm_lo;
-+ u8 height_mm_lo;
-+ u8 height_mm_hi:4;
-+ u8 width_mm_hi:4;
-+ u8 hborder;
-+ u8 vborder;
-+ u8 unknown0:1;
-+ u8 vsync_positive:1;
-+ u8 hsync_positive:1;
-+ u8 separate_sync:2;
-+ u8 stereo:1;
-+ u8 unknown6:1;
-+ u8 interlaced:1;
-+} __attribute__((packed));
-+
-+/* If it's not pixel timing, it'll be one of the below */
-+struct detailed_data_string {
-+ u8 str[13];
-+} __attribute__((packed));
-+
-+struct detailed_data_monitor_range {
-+ u8 min_vfreq;
-+ u8 max_vfreq;
-+ u8 min_hfreq_khz;
-+ u8 max_hfreq_khz;
-+ u8 pixel_clock_mhz; /* need to multiply by 10 */
-+ u16 sec_gtf_toggle; /* A000=use above, 20=use below */ /* FIXME: byte order */
-+ u8 hfreq_start_khz; /* need to multiply by 2 */
-+ u8 c; /* need to divide by 2 */
-+ u16 m; /* FIXME: byte order */
-+ u8 k;
-+ u8 j; /* need to divide by 2 */
-+} __attribute__((packed));
-+
-+struct detailed_data_wpindex {
-+ u8 white_y_lo:2;
-+ u8 white_x_lo:2;
-+ u8 pad:4;
-+ u8 white_x_hi;
-+ u8 white_y_hi;
-+ u8 gamma; /* need to divide by 100 then add 1 */
-+} __attribute__((packed));
-+
-+struct detailed_data_color_point {
-+ u8 windex1;
-+ u8 wpindex1[3];
-+ u8 windex2;
-+ u8 wpindex2[3];
-+} __attribute__((packed));
-+
-+struct detailed_non_pixel {
-+ u8 pad1;
-+ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
-+ fb=color point data, fa=standard timing data,
-+ f9=undefined, f8=mfg. reserved */
-+ u8 pad2;
-+ union {
-+ struct detailed_data_string str;
-+ struct detailed_data_monitor_range range;
-+ struct detailed_data_wpindex color;
-+ struct std_timing timings[5];
-+ } data;
-+} __attribute__((packed));
-+
-+#define EDID_DETAIL_STD_MODES 0xfa
-+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
-+#define EDID_DETAIL_MONITOR_NAME 0xfc
-+#define EDID_DETAIL_MONITOR_RANGE 0xfd
-+#define EDID_DETAIL_MONITOR_STRING 0xfe
-+#define EDID_DETAIL_MONITOR_SERIAL 0xff
-+
-+struct detailed_timing {
-+ u16 pixel_clock; /* need to multiply by 10 KHz */ /* FIXME: byte order */
-+ union {
-+ struct detailed_pixel_timing pixel_data;
-+ struct detailed_non_pixel other_data;
-+ } data;
-+} __attribute__((packed));
-+
-+struct edid {
-+ u8 header[8];
-+ /* Vendor & product info */
-+ u16 mfg_id; /* FIXME: byte order */
-+ u16 prod_code; /* FIXME: byte order */
-+ u32 serial; /* FIXME: byte order */
-+ u8 mfg_week;
-+ u8 mfg_year;
-+ /* EDID version */
-+ u8 version;
-+ u8 revision;
-+ /* Display info: */
-+ /* input definition */
-+ u8 serration_vsync:1;
-+ u8 sync_on_green:1;
-+ u8 composite_sync:1;
-+ u8 separate_syncs:1;
-+ u8 blank_to_black:1;
-+ u8 video_level:2;
-+ u8 digital:1; /* bits below must be zero if set */
-+ u8 width_cm;
-+ u8 height_cm;
-+ u8 gamma;
-+ /* feature support */
-+ u8 default_gtf:1;
-+ u8 preferred_timing:1;
-+ u8 standard_color:1;
-+ u8 display_type:2; /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
-+ u8 pm_active_off:1;
-+ u8 pm_suspend:1;
-+ u8 pm_standby:1;
-+ /* Color characteristics */
-+ u8 red_green_lo;
-+ u8 black_white_lo;
-+ u8 red_x;
-+ u8 red_y;
-+ u8 green_x;
-+ u8 green_y;
-+ u8 blue_x;
-+ u8 blue_y;
-+ u8 white_x;
-+ u8 white_y;
-+ /* Est. timings and mfg rsvd timings*/
-+ struct est_timings established_timings;
-+ /* Standard timings 1-8*/
-+ struct std_timing standard_timings[8];
-+ /* Detailing timings 1-4 */
-+ struct detailed_timing detailed_timings[4];
-+ /* Number of 128 byte ext. blocks */
-+ u8 extensions;
-+ /* Checksum */
-+ u8 checksum;
-+} __attribute__((packed));
-+
-+extern unsigned char *drm_ddc_read(struct i2c_adapter *adapter);
-+extern int drm_get_acpi_edid(char *method, char *edid, ssize_t length);
-+
-+#endif /* __DRM_EDID_H__ */
-Index: linux-2.6.27/include/drm/drm_objects.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.27/include/drm/drm_objects.h 2009-02-05 13:29:33.000000000 +0000
-@@ -0,0 +1,717 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#ifndef _DRM_OBJECTS_H
-+#define _DRM_OBJECTS_H
-+
-+struct drm_device;
-+struct drm_bo_mem_reg;
-+
-+/***************************************************
-+ * User space objects. (drm_object.c)
-+ */
-+
-+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-+
-+enum drm_object_type {
-+ drm_fence_type,
-+ drm_buffer_type,
-+ drm_lock_type,
-+ /*
-+ * Add other user space object types here.
-+ */
-+ drm_driver_type0 = 256,
-+ drm_driver_type1,
-+ drm_driver_type2,
-+ drm_driver_type3,
-+ drm_driver_type4
-+};
-+
-+/*
-+ * A user object is a structure that helps the drm give out user handles
-+ * to kernel internal objects and to keep track of these objects so that
-+ * they can be destroyed, for example when the user space process exits.
-+ * Designed to be accessible using a user space 32-bit handle.
-+ */
-+
-+struct drm_user_object {
-+ struct drm_hash_item hash;
-+ struct list_head list;
-+ enum drm_object_type type;
-+ atomic_t refcount;
-+ int shareable;
-+ struct drm_file *owner;
-+ void (*ref_struct_locked) (struct drm_file *priv,
-+ struct drm_user_object *obj,
-+ enum drm_ref_type ref_action);
-+ void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
-+ enum drm_ref_type unref_action);
-+ void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
-+};
-+
-+/*
-+ * A ref object is a structure which is used to
-+ * keep track of references to user objects and to keep track of these
-+ * references so that they can be destroyed for example when the user space
-+ * process exits. Designed to be accessible using a pointer to the _user_ object.
-+ */
-+
-+struct drm_ref_object {
-+ struct drm_hash_item hash;
-+ struct list_head list;
-+ atomic_t refcount;
-+ enum drm_ref_type unref_action;
-+};
-+
-+/**
-+ * Must be called with the struct_mutex held.
-+ */
-+
-+extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
-+ int shareable);
-+/**
-+ * Must be called with the struct_mutex held.
-+ */
-+
-+extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
-+ uint32_t key);
-+
-+/*
-+ * Must be called with the struct_mutex held. May temporarily release it.
-+ */
-+
-+extern int drm_add_ref_object(struct drm_file *priv,
-+ struct drm_user_object *referenced_object,
-+ enum drm_ref_type ref_action);
-+
-+/*
-+ * Must be called with the struct_mutex held.
-+ */
-+
-+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
-+ struct drm_user_object *referenced_object,
-+ enum drm_ref_type ref_action);
-+/*
-+ * Must be called with the struct_mutex held.
-+ * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
-+ * release the struct_mutex before calling drm_remove_ref_object.
-+ * This function may temporarily release the struct_mutex.
-+ */
-+
-+extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
-+extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
-+ enum drm_object_type type,
-+ struct drm_user_object **object);
-+extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
-+ enum drm_object_type type);
-+
-+/***************************************************
-+ * Fence objects. (drm_fence.c)
-+ */
-+
-+struct drm_fence_object {
-+ struct drm_user_object base;
-+ struct drm_device *dev;
-+ atomic_t usage;
-+
-+ /*
-+ * The below three fields are protected by the fence manager spinlock.
-+ */
-+
-+ struct list_head ring;
-+ int fence_class;
-+ uint32_t native_types;
-+ uint32_t type;
-+ uint32_t signaled_types;
-+ uint32_t sequence;
-+ uint32_t waiting_types;
-+ uint32_t error;
-+};
-+
-+#define _DRM_FENCE_CLASSES 8
-+
-+struct drm_fence_class_manager {
-+ struct list_head ring;
-+ uint32_t pending_flush;
-+ uint32_t waiting_types;
-+ wait_queue_head_t fence_queue;
-+ uint32_t highest_waiting_sequence;
-+ uint32_t latest_queued_sequence;
-+};
-+
-+struct drm_fence_manager {
-+ int initialized;
-+ rwlock_t lock;
-+ struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
-+ uint32_t num_classes;
-+ atomic_t count;
-+};
-+
-+struct drm_fence_driver {
-+ unsigned long *waiting_jiffies;
-+ uint32_t num_classes;
-+ uint32_t wrap_diff;
-+ uint32_t flush_diff;
-+ uint32_t sequence_mask;
-+
-+ /*
-+ * Driver implemented functions:
-+ * has_irq() : 1 if the hardware can update the indicated type_flags using an
-+ * irq handler. 0 if polling is required.
-+ *
-+ * emit() : Emit a sequence number to the command stream.
-+ * Return the sequence number.
-+ *
-+ * flush() : Make sure the flags indicated in fc->pending_flush will eventually
-+ * signal for fc->highest_received_sequence and all preceding sequences.
-+ * Acknowledge by clearing the flags fc->pending_flush.
-+ *
-+ * poll() : Call drm_fence_handler with any new information.
-+ *
-+ * needed_flush() : Given the current state of the fence->type flags and previusly
-+ * executed or queued flushes, return the type_flags that need flushing.
-+ *
-+ * wait(): Wait for the "mask" flags to signal on a given fence, performing
-+ * whatever's necessary to make this happen.
-+ */
-+
-+ int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
-+ uint32_t flags);
-+ int (*emit) (struct drm_device *dev, uint32_t fence_class,
-+ uint32_t flags, uint32_t *breadcrumb,
-+ uint32_t *native_type);
-+ void (*flush) (struct drm_device *dev, uint32_t fence_class);
-+ void (*poll) (struct drm_device *dev, uint32_t fence_class,
-+ uint32_t types);
-+ uint32_t (*needed_flush) (struct drm_fence_object *fence);
-+ int (*wait) (struct drm_fence_object *fence, int lazy,
-+ int interruptible, uint32_t mask);
-+};
-+
-+extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
-+ int interruptible, uint32_t mask,
-+ unsigned long end_jiffies);
-+extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t sequence, uint32_t type,
-+ uint32_t error);
-+extern void drm_fence_manager_init(struct drm_device *dev);
-+extern void drm_fence_manager_takedown(struct drm_device *dev);
-+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t sequence);
-+extern int drm_fence_object_flush(struct drm_fence_object *fence,
-+ uint32_t type);
-+extern int drm_fence_object_signaled(struct drm_fence_object *fence,
-+ uint32_t type);
-+extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
-+extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
-+extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
-+extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
-+ struct drm_fence_object *src);
-+extern int drm_fence_object_wait(struct drm_fence_object *fence,
-+ int lazy, int ignore_signals, uint32_t mask);
-+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
-+ uint32_t fence_flags, uint32_t fence_class,
-+ struct drm_fence_object **c_fence);
-+extern int drm_fence_object_emit(struct drm_fence_object *fence,
-+ uint32_t fence_flags, uint32_t class,
-+ uint32_t type);
-+extern void drm_fence_fill_arg(struct drm_fence_object *fence,
-+ struct drm_fence_arg *arg);
-+
-+extern int drm_fence_add_user_object(struct drm_file *priv,
-+ struct drm_fence_object *fence,
-+ int shareable);
-+
-+extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+/**************************************************
-+ *TTMs
-+ */
-+
-+/*
-+ * The ttm backend GTT interface. (In our case AGP).
-+ * Any similar type of device (PCIE?)
-+ * needs only to implement these functions to be usable with the TTM interface.
-+ * The AGP backend implementation lives in drm_agpsupport.c
-+ * basically maps these calls to available functions in agpgart.
-+ * Each drm device driver gets an
-+ * additional function pointer that creates these types,
-+ * so that the device can choose the correct aperture.
-+ * (Multiple AGP apertures, etc.)
-+ * Most device drivers will let this point to the standard AGP implementation.
-+ */
-+
-+#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
-+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
-+
-+struct drm_ttm_backend;
-+struct drm_ttm_backend_func {
-+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
-+ int (*populate) (struct drm_ttm_backend *backend,
-+ unsigned long num_pages, struct page **pages);
-+ void (*clear) (struct drm_ttm_backend *backend);
-+ int (*bind) (struct drm_ttm_backend *backend,
-+ struct drm_bo_mem_reg *bo_mem);
-+ int (*unbind) (struct drm_ttm_backend *backend);
-+ void (*destroy) (struct drm_ttm_backend *backend);
-+};
-+
-+
-+struct drm_ttm_backend {
-+ struct drm_device *dev;
-+ uint32_t flags;
-+ struct drm_ttm_backend_func *func;
-+};
-+
-+struct drm_ttm {
-+ struct page *dummy_read_page;
-+ struct page **pages;
-+ uint32_t page_flags;
-+ unsigned long num_pages;
-+ atomic_t vma_count;
-+ struct drm_device *dev;
-+ int destroy;
-+ uint32_t mapping_offset;
-+ struct drm_ttm_backend *be;
-+ enum {
-+ ttm_bound,
-+ ttm_evicted,
-+ ttm_unbound,
-+ ttm_unpopulated,
-+ } state;
-+
-+};
-+
-+extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
-+extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
-+extern void drm_ttm_unbind(struct drm_ttm *ttm);
-+extern void drm_ttm_evict(struct drm_ttm *ttm);
-+extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
-+extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
-+extern void drm_ttm_cache_flush(void);
-+extern int drm_ttm_populate(struct drm_ttm *ttm);
-+extern int drm_ttm_set_user(struct drm_ttm *ttm,
-+ struct task_struct *tsk,
-+ int write,
-+ unsigned long start,
-+ unsigned long num_pages,
-+ struct page *dummy_read_page);
-+unsigned long drm_ttm_size(struct drm_device *dev,
-+ unsigned long num_pages,
-+ int user_bo);
-+
-+
-+/*
-+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
-+ * this which calls this function iff there are no vmas referencing it anymore.
-+ * Otherwise it is called when the last vma exits.
-+ */
-+
-+extern int drm_destroy_ttm(struct drm_ttm *ttm);
-+
-+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
-+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
-+}
-+
-+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
-+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
-+
-+/*
-+ * Page flags.
-+ */
-+
-+#define DRM_TTM_PAGE_UNCACHED (1 << 0)
-+#define DRM_TTM_PAGE_USED (1 << 1)
-+#define DRM_TTM_PAGE_BOUND (1 << 2)
-+#define DRM_TTM_PAGE_PRESENT (1 << 3)
-+#define DRM_TTM_PAGE_VMALLOC (1 << 4)
-+#define DRM_TTM_PAGE_USER (1 << 5)
-+#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
-+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
-+#define DRM_TTM_PAGE_USER_DMA (1 << 8)
-+
-+/***************************************************
-+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
-+ */
-+
-+struct drm_bo_mem_reg {
-+ struct drm_mm_node *mm_node;
-+ unsigned long size;
-+ unsigned long num_pages;
-+ uint32_t page_alignment;
-+ uint32_t mem_type;
-+ uint64_t flags;
-+ uint64_t mask;
-+ uint32_t desired_tile_stride;
-+ uint32_t hw_tile_stride;
-+};
-+
-+enum drm_bo_type {
-+ drm_bo_type_dc,
-+ drm_bo_type_user,
-+ drm_bo_type_kernel, /* for initial kernel allocations */
-+};
-+
-+struct drm_buffer_object {
-+ struct drm_device *dev;
-+ struct drm_user_object base;
-+
-+ /*
-+ * If there is a possibility that the usage variable is zero,
-+ * then dev->struct_mutext should be locked before incrementing it.
-+ */
-+
-+ atomic_t usage;
-+ unsigned long buffer_start;
-+ enum drm_bo_type type;
-+ unsigned long offset;
-+ atomic_t mapped;
-+ struct drm_bo_mem_reg mem;
-+
-+ struct list_head lru;
-+ struct list_head ddestroy;
-+
-+ uint32_t fence_type;
-+ uint32_t fence_class;
-+ uint32_t new_fence_type;
-+ uint32_t new_fence_class;
-+ struct drm_fence_object *fence;
-+ uint32_t priv_flags;
-+ wait_queue_head_t event_queue;
-+ struct mutex mutex;
-+ unsigned long num_pages;
-+ unsigned long reserved_size;
-+
-+ /* For pinned buffers */
-+ struct drm_mm_node *pinned_node;
-+ uint32_t pinned_mem_type;
-+ struct list_head pinned_lru;
-+
-+ /* For vm */
-+ struct drm_ttm *ttm;
-+ struct drm_map_list map_list;
-+ uint32_t memory_type;
-+ unsigned long bus_offset;
-+ uint32_t vm_flags;
-+ void *iomap;
-+
-+#ifdef DRM_ODD_MM_COMPAT
-+ /* dev->struct_mutex only protected. */
-+ struct list_head vma_list;
-+ struct list_head p_mm_list;
-+#endif
-+
-+};
-+
-+#define _DRM_BO_FLAG_UNFENCED 0x00000001
-+#define _DRM_BO_FLAG_EVICTED 0x00000002
-+
-+struct drm_mem_type_manager {
-+ int has_type;
-+ int use_type;
-+ struct drm_mm manager;
-+ struct list_head lru;
-+ struct list_head pinned;
-+ uint32_t flags;
-+ uint32_t drm_bus_maptype;
-+ unsigned long gpu_offset;
-+ unsigned long io_offset;
-+ unsigned long io_size;
-+ void *io_addr;
-+};
-+
-+struct drm_bo_lock {
-+ struct drm_user_object base;
-+ wait_queue_head_t queue;
-+ atomic_t write_lock_pending;
-+ atomic_t readers;
-+};
-+
-+#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
-+#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
-+#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
-+#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
-+ before kernel access. */
-+#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
-+#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
-+
-+struct drm_buffer_manager {
-+ struct drm_bo_lock bm_lock;
-+ struct mutex evict_mutex;
-+ int nice_mode;
-+ int initialized;
-+ struct drm_file *last_to_validate;
-+ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
-+ struct list_head unfenced;
-+ struct list_head ddestroy;
-+ struct delayed_work wq;
-+ uint32_t fence_type;
-+ unsigned long cur_pages;
-+ atomic_t count;
-+ struct page *dummy_read_page;
-+};
-+
-+struct drm_bo_driver {
-+ const uint32_t *mem_type_prio;
-+ const uint32_t *mem_busy_prio;
-+ uint32_t num_mem_type_prio;
-+ uint32_t num_mem_busy_prio;
-+ struct drm_ttm_backend *(*create_ttm_backend_entry)
-+ (struct drm_device *dev);
-+ int (*backend_size) (struct drm_device *dev,
-+ unsigned long num_pages);
-+ int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
-+ uint32_t *type);
-+ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
-+ int (*init_mem_type) (struct drm_device *dev, uint32_t type,
-+ struct drm_mem_type_manager *man);
-+ uint32_t(*evict_mask) (struct drm_buffer_object *bo);
-+ int (*move) (struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
-+ void (*ttm_cache_flush)(struct drm_ttm *ttm);
-+
-+ /*
-+ * command_stream_barrier
-+ *
-+ * @dev: The drm device.
-+ *
-+ * @bo: The buffer object to validate.
-+ *
-+ * @new_fence_class: The new fence class for the buffer object.
-+ *
-+ * @new_fence_type: The new fence type for the buffer object.
-+ *
-+ * @no_wait: whether this should give up and return -EBUSY
-+ * if this operation would require sleeping
-+ *
-+ * Insert a command stream barrier that makes sure that the
-+ * buffer is idle once the commands associated with the
-+ * current validation are starting to execute. If an error
-+ * condition is returned, or the function pointer is NULL,
-+ * the drm core will force buffer idle
-+ * during validation.
-+ */
-+
-+ int (*command_stream_barrier) (struct drm_buffer_object *bo,
-+ uint32_t new_fence_class,
-+ uint32_t new_fence_type,
-+ int no_wait);
-+};
-+
-+/*
-+ * buffer objects (drm_bo.c)
-+ */
-+extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
-+extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_driver_finish(struct drm_device *dev);
-+extern int drm_bo_driver_init(struct drm_device *dev);
-+extern int drm_bo_pci_offset(struct drm_device *dev,
-+ struct drm_bo_mem_reg *mem,
-+ unsigned long *bus_base,
-+ unsigned long *bus_offset,
-+ unsigned long *bus_size);
-+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
-+
-+extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
-+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
-+extern void drm_putback_buffer_objects(struct drm_device *dev);
-+extern int drm_fence_buffer_objects(struct drm_device *dev,
-+ struct list_head *list,
-+ uint32_t fence_flags,
-+ struct drm_fence_object *fence,
-+ struct drm_fence_object **used_fence);
-+extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
-+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
-+ enum drm_bo_type type, uint64_t mask,
-+ uint32_t hint, uint32_t page_alignment,
-+ unsigned long buffer_start,
-+ struct drm_buffer_object **bo);
-+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
-+ int no_wait);
-+extern int drm_bo_mem_space(struct drm_buffer_object *bo,
-+ struct drm_bo_mem_reg *mem, int no_wait);
-+extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
-+ uint64_t new_mem_flags,
-+ int no_wait, int move_unfenced);
-+extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
-+extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
-+ unsigned long p_offset, unsigned long p_size);
-+extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-+ uint32_t fence_class, uint64_t flags,
-+ uint64_t mask, uint32_t hint,
-+ int use_old_fence_class,
-+ struct drm_bo_info_rep *rep,
-+ struct drm_buffer_object **bo_rep);
-+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
-+ uint32_t handle,
-+ int check_owner);
-+extern int drm_bo_do_validate(struct drm_buffer_object *bo,
-+ uint64_t flags, uint64_t mask, uint32_t hint,
-+ uint32_t fence_class,
-+ int no_wait,
-+ struct drm_bo_info_rep *rep);
-+extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-+ struct drm_bo_info_rep *rep);
-+/*
-+ * Buffer object memory move- and map helpers.
-+ * drm_bo_move.c
-+ */
-+
-+extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
-+ int evict, int no_wait,
-+ struct drm_bo_mem_reg *new_mem);
-+extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
-+ int evict,
-+ int no_wait, struct drm_bo_mem_reg *new_mem);
-+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
-+ int evict, int no_wait,
-+ uint32_t fence_class, uint32_t fence_type,
-+ uint32_t fence_flags,
-+ struct drm_bo_mem_reg *new_mem);
-+extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
-+extern unsigned long drm_bo_offset_end(unsigned long offset,
-+ unsigned long end);
-+
-+struct drm_bo_kmap_obj {
-+ void *virtual;
-+ struct page *page;
-+ enum {
-+ bo_map_iomap,
-+ bo_map_vmap,
-+ bo_map_kmap,
-+ bo_map_premapped,
-+ } bo_kmap_type;
-+};
-+
-+static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
-+{
-+ *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
-+ map->bo_kmap_type == bo_map_premapped);
-+ return map->virtual;
-+}
-+extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
-+extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
-+ unsigned long num_pages, struct drm_bo_kmap_obj *map);
-+
-+
-+/*
-+ * drm_regman.c
-+ */
-+
-+struct drm_reg {
-+ struct list_head head;
-+ struct drm_fence_object *fence;
-+ uint32_t fence_type;
-+ uint32_t new_fence_type;
-+};
-+
-+struct drm_reg_manager {
-+ struct list_head free;
-+ struct list_head lru;
-+ struct list_head unfenced;
-+
-+ int (*reg_reusable)(const struct drm_reg *reg, const void *data);
-+ void (*reg_destroy)(struct drm_reg *reg);
-+};
-+
-+extern int drm_regs_alloc(struct drm_reg_manager *manager,
-+ const void *data,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int interruptible,
-+ int no_wait,
-+ struct drm_reg **reg);
-+
-+extern void drm_regs_fence(struct drm_reg_manager *regs,
-+ struct drm_fence_object *fence);
-+
-+extern void drm_regs_free(struct drm_reg_manager *manager);
-+extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
-+extern void drm_regs_init(struct drm_reg_manager *manager,
-+ int (*reg_reusable)(const struct drm_reg *,
-+ const void *),
-+ void (*reg_destroy)(struct drm_reg *));
-+
-+extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
-+ void **virtual);
-+extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
-+ void *virtual);
-+/*
-+ * drm_bo_lock.c
-+ * Simple replacement for the hardware lock on buffer manager init and clean.
-+ */
-+
-+
-+extern void drm_bo_init_lock(struct drm_bo_lock *lock);
-+extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
-+extern int drm_bo_read_lock(struct drm_bo_lock *lock);
-+extern int drm_bo_write_lock(struct drm_bo_lock *lock,
-+ struct drm_file *file_priv);
-+
-+extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
-+ struct drm_file *file_priv);
-+
-+#ifdef CONFIG_DEBUG_MUTEXES
-+#define DRM_ASSERT_LOCKED(_mutex) \
-+ BUG_ON(!mutex_is_locked(_mutex) || \
-+ ((_mutex)->owner != current_thread_info()))
-+#else
-+#define DRM_ASSERT_LOCKED(_mutex)
-+#endif
-+#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/i915_split.patch b/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/i915_split.patch
deleted file mode 100644
index 1841a681d..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/i915_split.patch
+++ /dev/null
@@ -1,1627 +0,0 @@
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_tv.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_tv.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_tv.c 2009-02-19 12:59:28.000000000 +0000
-@@ -902,7 +902,7 @@
- intel_tv_dpms(struct drm_encoder *encoder, int mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
- switch(mode) {
- case DRM_MODE_DPMS_ON:
-@@ -920,7 +920,7 @@
- intel_tv_save(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- int i;
-@@ -970,7 +970,7 @@
- intel_tv_restore(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_tv_priv *tv_priv = intel_output->dev_priv;
- struct drm_crtc *crtc = connector->encoder->crtc;
-@@ -1117,7 +1117,7 @@
- struct drm_display_mode *adjusted_mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
-@@ -1362,6 +1362,7 @@
- struct drm_encoder *encoder = &intel_output->enc;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- unsigned long irqflags;
- u32 tv_ctl, save_tv_ctl;
- u32 tv_dac, save_tv_dac;
-@@ -1626,6 +1627,7 @@
- intel_tv_init(struct drm_device *dev)
- {
- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_connector *connector;
- struct intel_output *intel_output;
- struct intel_tv_priv *tv_priv;
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_modes.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_modes.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_modes.c 2009-02-19 12:59:28.000000000 +0000
-@@ -81,3 +81,6 @@
-
- return ret;
- }
-+EXPORT_SYMBOL(intel_ddc_get_modes);
-+
-+MODULE_LICENSE("GPL and additional rights");
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_i2c.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_i2c.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_i2c.c 2009-02-20 14:50:20.000000000 +0000
-@@ -43,7 +43,7 @@
- static int get_clock(void *data)
- {
- struct intel_i2c_chan *chan = data;
-- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
-@@ -53,7 +53,7 @@
- static int get_data(void *data)
- {
- struct intel_i2c_chan *chan = data;
-- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = chan->drm_dev->dev_private;
- u32 val;
-
- val = I915_READ(chan->reg);
-@@ -64,7 +64,7 @@
- {
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
-- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = chan->drm_dev->dev_private;
- u32 reserved = 0, clock_bits;
-
- /* On most chips, these bits must be preserved in software. */
-@@ -85,7 +85,7 @@
- {
- struct intel_i2c_chan *chan = data;
- struct drm_device *dev = chan->drm_dev;
-- struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = chan->drm_dev->dev_private;
- u32 reserved = 0, data_bits;
-
- /* On most chips, these bits must be preserved in software. */
-@@ -167,6 +167,7 @@
- kfree(chan);
- return NULL;
- }
-+EXPORT_SYMBOL(intel_i2c_create);
-
- /**
- * intel_i2c_destroy - unregister and free i2c bus resources
-@@ -182,3 +183,4 @@
- i2c_del_adapter(&chan->adapter);
- kfree(chan);
- }
-+EXPORT_SYMBOL(intel_i2c_destroy);
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_dvo.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_dvo.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_dvo.c 2009-02-19 15:14:20.000000000 +0000
-@@ -78,7 +78,7 @@
-
- static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
- {
-- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = encoder->dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
- u32 dvo_reg = dvo->dvo_reg;
-@@ -98,15 +98,16 @@
- static void intel_dvo_save(struct drm_connector *connector)
- {
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = connector->dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
-
- /* Each output should probably just save the registers it touches,
- * but for now, use more overkill.
- */
-- dev_priv->saveDVOA = I915_READ(DVOA);
-- dev_priv->saveDVOB = I915_READ(DVOB);
-- dev_priv->saveDVOC = I915_READ(DVOC);
-+ dev_priv->common.saveDVOA = I915_READ(DVOA);
-+ dev_priv->common.saveDVOB = I915_READ(DVOB);
-+ dev_priv->common.saveDVOC = I915_READ(DVOC);
-
- dvo->dev_ops->save(dvo);
- }
-@@ -114,14 +115,15 @@
- static void intel_dvo_restore(struct drm_connector *connector)
- {
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = connector->dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
-
- dvo->dev_ops->restore(dvo);
-
-- I915_WRITE(DVOA, dev_priv->saveDVOA);
-- I915_WRITE(DVOB, dev_priv->saveDVOB);
-- I915_WRITE(DVOC, dev_priv->saveDVOC);
-+ I915_WRITE(DVOA, dev_priv->common.saveDVOA);
-+ I915_WRITE(DVOB, dev_priv->common.saveDVOB);
-+ I915_WRITE(DVOC, dev_priv->common.saveDVOC);
- }
-
- static int intel_dvo_mode_valid(struct drm_connector *connector,
-@@ -183,7 +185,7 @@
- struct drm_display_mode *adjusted_mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
-@@ -349,7 +351,7 @@
- intel_dvo_get_current_mode (struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_dvo_device *dvo = intel_output->dev_priv;
- uint32_t dvo_reg = dvo->dvo_reg;
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_hdmi.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_hdmi.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_hdmi.c 2009-02-19 12:59:28.000000000 +0000
-@@ -46,7 +46,7 @@
- struct drm_display_mode *adjusted_mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
-@@ -71,7 +71,7 @@
- static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
- u32 temp;
-@@ -89,7 +89,7 @@
- static void intel_hdmi_save(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
-
-@@ -99,7 +99,7 @@
- static void intel_hdmi_restore(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
-
-@@ -132,7 +132,7 @@
- intel_hdmi_detect(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
- u32 temp, bit;
-@@ -220,7 +220,7 @@
-
- void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_connector *connector;
- struct intel_output *intel_output;
- struct intel_hdmi_priv *hdmi_priv;
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_suspend.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_suspend.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_suspend.c 2009-02-19 12:59:28.000000000 +0000
-@@ -31,7 +31,7 @@
-
- static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
- if (pipe == PIPE_A)
- return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
-@@ -41,7 +41,7 @@
-
- static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
- u32 *array;
- int i;
-@@ -50,9 +50,9 @@
- return;
-
- if (pipe == PIPE_A)
-- array = dev_priv->save_palette_a;
-+ array = dev_priv_common->save_palette_a;
- else
-- array = dev_priv->save_palette_b;
-+ array = dev_priv_common->save_palette_b;
-
- for(i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-@@ -60,7 +60,7 @@
-
- static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
- u32 *array;
- int i;
-@@ -69,9 +69,9 @@
- return;
-
- if (pipe == PIPE_A)
-- array = dev_priv->save_palette_a;
-+ array = dev_priv_common->save_palette_a;
- else
-- array = dev_priv->save_palette_b;
-+ array = dev_priv_common->save_palette_b;
-
- for(i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-@@ -79,7 +79,7 @@
-
- static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
- I915_WRITE8(index_port, reg);
- return I915_READ8(data_port);
-@@ -87,7 +87,7 @@
-
- static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
-@@ -96,7 +96,7 @@
-
- static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
- I915_READ8(st01);
- I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
-@@ -105,7 +105,7 @@
-
- static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
- I915_WRITE8(index_port, reg);
- I915_WRITE8(data_port, val);
-@@ -113,7 +113,8 @@
-
- static void i915_save_vga(struct drm_device *dev)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
-@@ -176,7 +177,8 @@
-
- static void i915_restore_vga(struct drm_device *dev)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
-@@ -235,7 +237,8 @@
-
- int i915_save_state(struct drm_device *dev)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv = dev->dev_private;
- int i;
-
- pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
-@@ -367,7 +370,8 @@
-
- int i915_restore_state(struct drm_device *dev)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv = dev->dev_private;
- int i;
-
- pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_opregion.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_opregion.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_opregion.c 2009-02-19 12:59:28.000000000 +0000
-@@ -139,6 +139,7 @@
- static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
- {
- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 blc_pwm_ctl, blc_pwm_ctl2;
-
-@@ -172,7 +173,8 @@
-
- static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+
- if (pfmb & ASLE_PFMB_PWM_VALID) {
- u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_gem.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_gem.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_gem.c 2009-02-19 12:59:28.000000000 +0000
-@@ -877,6 +877,7 @@
- i915_add_request(struct drm_device *dev, uint32_t flush_domains)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_gem_request *request;
- uint32_t seqno;
- int was_empty;
-@@ -942,6 +943,7 @@
- static uint32_t
- i915_retire_commands(struct drm_device *dev)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- uint32_t flush_domains = 0;
-@@ -1049,12 +1051,14 @@
- void
- i915_gem_retire_work_handler(struct work_struct *work)
- {
-+ struct drm_i915_common_private *dev_priv_common;
- drm_i915_private_t *dev_priv;
- struct drm_device *dev;
-
- dev_priv = container_of(work, drm_i915_private_t,
- mm.retire_work.work);
- dev = dev_priv->dev;
-+ dev_priv_common = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev);
-@@ -1109,6 +1113,7 @@
- uint32_t invalidate_domains,
- uint32_t flush_domains)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd;
- RING_LOCALS;
-@@ -1422,7 +1427,7 @@
- {
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int regnum = obj_priv->fence_reg;
- uint64_t val;
-@@ -1442,8 +1447,8 @@
- {
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- int regnum = obj_priv->fence_reg;
- uint32_t val;
- uint32_t pitch_val;
-@@ -1475,7 +1480,7 @@
- {
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
-- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int regnum = obj_priv->fence_reg;
- uint32_t val;
-@@ -1605,6 +1610,7 @@
- {
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (IS_I965G(dev))
-@@ -2327,6 +2333,7 @@
- uint64_t exec_offset)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
- (uintptr_t) exec->cliprects_ptr;
- int nbox = exec->num_cliprects;
-@@ -3035,6 +3042,7 @@
- i915_gem_init_hws(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-@@ -3081,6 +3089,7 @@
- i915_gem_init_ringbuffer(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- drm_i915_ring_buffer_t *ring = &dev_priv->ring;
-@@ -3186,6 +3195,7 @@
- void
- i915_gem_cleanup_ringbuffer(struct drm_device *dev)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (dev_priv->ring.ring_obj == NULL)
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_gem_proc.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_gem_proc.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_gem_proc.c 2009-02-19 12:59:28.000000000 +0000
-@@ -213,6 +213,7 @@
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_gem_tiling.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_gem_tiling.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_gem_tiling.c 2009-02-19 12:59:28.000000000 +0000
-@@ -87,6 +87,7 @@
- i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_irq.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_irq.c 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_irq.c 2009-02-20 14:53:08.000000000 +0000
-@@ -64,6 +64,8 @@
- void
- i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
- {
-+ struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
-+
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
-@@ -74,6 +76,8 @@
- static inline void
- i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
- {
-+ struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
-+
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
-@@ -94,6 +98,8 @@
- void
- i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
- {
-+ struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
-+
- if ((dev_priv->pipestat[pipe] & mask) != mask) {
- u32 reg = i915_pipestat(pipe);
-
-@@ -107,6 +113,8 @@
- void
- i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
- {
-+ struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
-+
- if ((dev_priv->pipestat[pipe] & mask) != 0) {
- u32 reg = i915_pipestat(pipe);
-
-@@ -128,7 +136,7 @@
- static int
- i915_pipe_enabled(struct drm_device *dev, int pipe)
- {
-- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-
- if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
-@@ -142,7 +150,7 @@
- */
- u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
- {
-- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- unsigned long high_frame;
- unsigned long low_frame;
- u32 high1, high2, low, count;
-@@ -178,6 +186,7 @@
- {
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- u32 iir, new_iir;
- u32 pipea_stats, pipeb_stats;
-@@ -284,6 +293,7 @@
- static int i915_emit_irq(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
-
-@@ -409,6 +419,7 @@
- */
- int i915_enable_vblank(struct drm_device *dev, int pipe)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-@@ -510,6 +521,7 @@
- */
- void i915_driver_irq_preinstall(struct drm_device * dev)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- atomic_set(&dev_priv->irq_received, 0);
-@@ -554,6 +566,7 @@
-
- void i915_driver_irq_uninstall(struct drm_device * dev)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- if (!dev_priv)
-Index: linux-2.6.28/drivers/gpu/drm/i915/Makefile
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/Makefile 2009-02-19 12:59:23.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/Makefile 2009-02-19 12:59:28.000000000 +0000
-@@ -9,24 +9,29 @@
- i915_gem_debug.o \
- i915_gem_proc.o \
- i915_gem_tiling.o \
-- intel_display.o \
-- intel_crt.o \
-- intel_lvds.o \
- intel_bios.o \
-- intel_hdmi.o \
-- intel_sdvo.o \
-- intel_modes.o \
-- intel_i2c.o \
- intel_fb.o \
- intel_tv.o \
-+
-+intel_gfx_common-y := \
-+ intel_display.o \
-+ intel_modes.o \
-+ intel_i2c.o \
-+ intel_crt.o \
- intel_dvo.o \
-+ intel_hdmi.o \
-+ intel_lvds.o \
-+ intel_sdvo.o \
- dvo_ch7xxx.o \
- dvo_ch7017.o \
- dvo_ivch.o \
- dvo_tfp410.o \
- dvo_sil164.o
-
-+
- i915-$(CONFIG_ACPI) += i915_opregion.o
- i915-$(CONFIG_COMPAT) += i915_ioc32.o
-
- obj-$(CONFIG_DRM_I915) += i915.o
-+
-+obj-$(CONFIG_DRM_INTEL_COMMON) += intel_gfx_common.o
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_common.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_common.h 2009-02-20 14:49:42.000000000 +0000
-@@ -0,0 +1,184 @@
-+/*
-+ *
-+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
-+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ */
-+
-+#ifndef _I915_COMMON_H_
-+#define _I915_COMMON_H_
-+
-+typedef struct drm_i915_common_private {
-+ //struct drm_device *dev;
-+
-+ void __iomem *regs;
-+
-+ //drm_dma_handle_t *status_page_dmah;
-+ //void *hw_status_page;
-+ //dma_addr_t dma_status_page;
-+ //uint32_t counter;
-+ //unsigned int status_gfx_addr;
-+ //drm_local_map_t hws_map;
-+ //struct drm_gem_object *hws_obj;
-+
-+ //unsigned int cpp;
-+ //int back_offset;
-+ //int front_offset;
-+ //int current_page;
-+ //int page_flipping;
-+
-+ //wait_queue_head_t irq_queue;
-+ //atomic_t irq_received;
-+ /** Protects user_irq_refcount and irq_mask_reg */
-+ //spinlock_t user_irq_lock;
-+ /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-+ //int user_irq_refcount;
-+ /** Cached value of IMR to avoid reads in updating the bitfield */
-+ //u32 irq_mask_reg;
-+ //u32 pipestat[2];
-+
-+ //int tex_lru_log_granularity;
-+ //int allow_batchbuffer;
-+ //struct mem_block *agp_heap;
-+ //unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
-+ //int vblank_pipe;
-+
-+ //bool cursor_needs_physical;
-+
-+ //struct drm_mm vram;
-+
-+ //int irq_enabled;
-+
-+ /* LVDS info */
-+ int backlight_duty_cycle; /* restore backlight to this value */
-+ bool panel_wants_dither;
-+ struct drm_display_mode *panel_fixed_mode;
-+ //struct drm_display_mode *vbt_mode; /* if any */
-+
-+ /* Feature bits from the VBIOS */
-+ //unsigned int int_tv_support:1;
-+ //unsigned int lvds_dither:1;
-+ //unsigned int lvds_vbt:1;
-+ //unsigned int int_crt_support:1;
-+
-+ //int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
-+ //int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-+
-+ /* Register state */
-+ u8 saveLBB;
-+ u32 saveDSPACNTR;
-+ u32 saveDSPBCNTR;
-+ u32 saveDSPARB;
-+ u32 saveRENDERSTANDBY;
-+ u32 saveHWS;
-+ u32 savePIPEACONF;
-+ u32 savePIPEBCONF;
-+ u32 savePIPEASRC;
-+ u32 savePIPEBSRC;
-+ u32 saveFPA0;
-+ u32 saveFPA1;
-+ u32 saveDPLL_A;
-+ u32 saveDPLL_A_MD;
-+ u32 saveHTOTAL_A;
-+ u32 saveHBLANK_A;
-+ u32 saveHSYNC_A;
-+ u32 saveVTOTAL_A;
-+ u32 saveVBLANK_A;
-+ u32 saveVSYNC_A;
-+ u32 saveBCLRPAT_A;
-+ u32 savePIPEASTAT;
-+ u32 saveDSPASTRIDE;
-+ u32 saveDSPASIZE;
-+ u32 saveDSPAPOS;
-+ u32 saveDSPAADDR;
-+ u32 saveDSPASURF;
-+ u32 saveDSPATILEOFF;
-+ u32 savePFIT_PGM_RATIOS;
-+ u32 saveBLC_PWM_CTL;
-+ u32 saveBLC_PWM_CTL2;
-+ u32 saveFPB0;
-+ u32 saveFPB1;
-+ u32 saveDPLL_B;
-+ u32 saveDPLL_B_MD;
-+ u32 saveHTOTAL_B;
-+ u32 saveHBLANK_B;
-+ u32 saveHSYNC_B;
-+ u32 saveVTOTAL_B;
-+ u32 saveVBLANK_B;
-+ u32 saveVSYNC_B;
-+ u32 saveBCLRPAT_B;
-+ u32 savePIPEBSTAT;
-+ u32 saveDSPBSTRIDE;
-+ u32 saveDSPBSIZE;
-+ u32 saveDSPBPOS;
-+ u32 saveDSPBADDR;
-+ u32 saveDSPBSURF;
-+ u32 saveDSPBTILEOFF;
-+ u32 saveVGA0;
-+ u32 saveVGA1;
-+ u32 saveVGA_PD;
-+ u32 saveVGACNTRL;
-+ u32 saveADPA;
-+ u32 saveLVDS;
-+ u32 savePP_ON_DELAYS;
-+ u32 savePP_OFF_DELAYS;
-+ u32 saveDVOA;
-+ u32 saveDVOB;
-+ u32 saveDVOC;
-+ u32 savePP_ON;
-+ u32 savePP_OFF;
-+ u32 savePP_CONTROL;
-+ u32 savePP_DIVISOR;
-+ u32 savePFIT_CONTROL;
-+ u32 save_palette_a[256];
-+ u32 save_palette_b[256];
-+ u32 saveFBC_CFB_BASE;
-+ u32 saveFBC_LL_BASE;
-+ u32 saveFBC_CONTROL;
-+ u32 saveFBC_CONTROL2;
-+ u32 saveIER;
-+ u32 saveIIR;
-+ u32 saveIMR;
-+ u32 saveCACHE_MODE_0;
-+ u32 saveD_STATE;
-+ u32 saveCG_2D_DIS;
-+ u32 saveMI_ARB_STATE;
-+ u32 saveSWF0[16];
-+ u32 saveSWF1[16];
-+ u32 saveSWF2[3];
-+ u8 saveMSR;
-+ u8 saveSR[8];
-+ u8 saveGR[25];
-+ u8 saveAR_INDEX;
-+ u8 saveAR[21];
-+ u8 saveDACMASK;
-+ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
-+ u8 saveCR[37];
-+} drm_i915_common_private_t;
-+
-+struct drm_i915_master_private {
-+ drm_local_map_t *sarea;
-+ struct _drm_i915_sarea *sarea_priv;
-+};
-+
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_drv.h
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_drv.h 2009-02-19 12:59:23.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_drv.h 2009-02-19 16:30:19.000000000 +0000
-@@ -32,6 +32,7 @@
-
- #include "i915_reg.h"
- #include "intel_bios.h"
-+#include "i915_common.h"
- #include <linux/io-mapping.h>
-
- /* General customization:
-@@ -116,10 +117,6 @@
- int enabled;
- };
-
--struct drm_i915_master_private {
-- drm_local_map_t *sarea;
-- struct _drm_i915_sarea *sarea_priv;
--};
- #define I915_FENCE_REG_NONE -1
-
- struct drm_i915_fence_reg {
-@@ -127,12 +124,15 @@
- };
-
- typedef struct drm_i915_private {
-- struct drm_device *dev;
-+ /* common is assumed to be the first item in this structure */
-+ struct drm_i915_common_private common;
-
-- void __iomem *regs;
--
-- drm_i915_ring_buffer_t ring;
-+ struct drm_device *dev;
-
-+ //void __iomem *regs;
-+
-+ drm_i915_ring_buffer_t ring;
-+
- drm_dma_handle_t *status_page_dmah;
- void *hw_status_page;
- dma_addr_t dma_status_page;
-@@ -169,12 +169,12 @@
-
- int irq_enabled;
-
-- struct intel_opregion opregion;
--
-+ struct intel_opregion opregion;
-+
- /* LVDS info */
-- int backlight_duty_cycle; /* restore backlight to this value */
-- bool panel_wants_dither;
-- struct drm_display_mode *panel_fixed_mode;
-+ //int backlight_duty_cycle; /* restore backlight to this value */
-+ //bool panel_wants_dither;
-+ //struct drm_display_mode *panel_fixed_mode;
- struct drm_display_mode *vbt_mode; /* if any */
-
- /* Feature bits from the VBIOS */
-@@ -183,101 +183,10 @@
- unsigned int lvds_vbt:1;
- unsigned int int_crt_support:1;
-
-- struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
-+ struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
- int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
-- /* Register state */
-- u8 saveLBB;
-- u32 saveDSPACNTR;
-- u32 saveDSPBCNTR;
-- u32 saveDSPARB;
-- u32 saveRENDERSTANDBY;
-- u32 saveHWS;
-- u32 savePIPEACONF;
-- u32 savePIPEBCONF;
-- u32 savePIPEASRC;
-- u32 savePIPEBSRC;
-- u32 saveFPA0;
-- u32 saveFPA1;
-- u32 saveDPLL_A;
-- u32 saveDPLL_A_MD;
-- u32 saveHTOTAL_A;
-- u32 saveHBLANK_A;
-- u32 saveHSYNC_A;
-- u32 saveVTOTAL_A;
-- u32 saveVBLANK_A;
-- u32 saveVSYNC_A;
-- u32 saveBCLRPAT_A;
-- u32 savePIPEASTAT;
-- u32 saveDSPASTRIDE;
-- u32 saveDSPASIZE;
-- u32 saveDSPAPOS;
-- u32 saveDSPAADDR;
-- u32 saveDSPASURF;
-- u32 saveDSPATILEOFF;
-- u32 savePFIT_PGM_RATIOS;
-- u32 saveBLC_PWM_CTL;
-- u32 saveBLC_PWM_CTL2;
-- u32 saveFPB0;
-- u32 saveFPB1;
-- u32 saveDPLL_B;
-- u32 saveDPLL_B_MD;
-- u32 saveHTOTAL_B;
-- u32 saveHBLANK_B;
-- u32 saveHSYNC_B;
-- u32 saveVTOTAL_B;
-- u32 saveVBLANK_B;
-- u32 saveVSYNC_B;
-- u32 saveBCLRPAT_B;
-- u32 savePIPEBSTAT;
-- u32 saveDSPBSTRIDE;
-- u32 saveDSPBSIZE;
-- u32 saveDSPBPOS;
-- u32 saveDSPBADDR;
-- u32 saveDSPBSURF;
-- u32 saveDSPBTILEOFF;
-- u32 saveVGA0;
-- u32 saveVGA1;
-- u32 saveVGA_PD;
-- u32 saveVGACNTRL;
-- u32 saveADPA;
-- u32 saveLVDS;
-- u32 savePP_ON_DELAYS;
-- u32 savePP_OFF_DELAYS;
-- u32 saveDVOA;
-- u32 saveDVOB;
-- u32 saveDVOC;
-- u32 savePP_ON;
-- u32 savePP_OFF;
-- u32 savePP_CONTROL;
-- u32 savePP_DIVISOR;
-- u32 savePFIT_CONTROL;
-- u32 save_palette_a[256];
-- u32 save_palette_b[256];
-- u32 saveFBC_CFB_BASE;
-- u32 saveFBC_LL_BASE;
-- u32 saveFBC_CONTROL;
-- u32 saveFBC_CONTROL2;
-- u32 saveIER;
-- u32 saveIIR;
-- u32 saveIMR;
-- u32 saveCACHE_MODE_0;
-- u32 saveD_STATE;
-- u32 saveCG_2D_DIS;
-- u32 saveMI_ARB_STATE;
-- u32 saveSWF0[16];
-- u32 saveSWF1[16];
-- u32 saveSWF2[3];
-- u8 saveMSR;
-- u8 saveSR[8];
-- u8 saveGR[25];
-- u8 saveAR_INDEX;
-- u8 saveAR[21];
-- u8 saveDACMASK;
-- u8 saveDACDATA[256*3]; /* 256 3-byte colors */
-- u8 saveCR[37];
--
- struct {
- struct drm_mm gtt_space;
-
-@@ -672,17 +581,18 @@
- LOCK_TEST_WITH_RETURN(dev, file_priv); \
- } while (0)
-
--#define I915_READ(reg) readl(dev_priv->regs + (reg))
--#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg))
--#define I915_READ16(reg) readw(dev_priv->regs + (reg))
--#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
--#define I915_READ8(reg) readb(dev_priv->regs + (reg))
--#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
-+
-+#define I915_READ(reg) readl(dev_priv_common->regs + (reg))
-+#define I915_WRITE(reg, val) writel(val, dev_priv_common->regs + (reg))
-+#define I915_READ16(reg) readw(dev_priv_common->regs + (reg))
-+#define I915_WRITE16(reg, val) writel(val, dev_priv_common->regs + (reg))
-+#define I915_READ8(reg) readb(dev_priv_common->regs + (reg))
-+#define I915_WRITE8(reg, val) writeb(val, dev_priv_common->regs + (reg))
- #ifdef writeq
--#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
-+#define I915_WRITE64(reg, val) writeq(val, dev_priv_common->regs + (reg))
- #else
--#define I915_WRITE64(reg, val) (writel(val, dev_priv->regs + (reg)), \
-- writel(upper_32_bits(val), dev_priv->regs + \
-+#define I915_WRITE64(reg, val) (writel(val, dev_priv_common->regs + (reg)), \
-+ writel(upper_32_bits(val), dev_priv_common->regs + \
- (reg) + 4))
- #endif
- #define POSTING_READ(reg) (void)I915_READ(reg)
-@@ -776,10 +686,15 @@
- (dev)->pci_device == 0x29D2)
-
- #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
-- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
-+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
-+ IS_POULSBO(dev))
-+
-+#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
-+ ((dev)->pci_device == 0x8109))
-
- #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
-+ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-+ IS_POULSBO(dev))
-
- #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
- #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev))
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_display.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_display.c 2009-02-19 12:59:23.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_display.c 2009-02-20 14:53:08.000000000 +0000
-@@ -282,7 +282,7 @@
- int refclk, intel_clock_t *best_clock)
- {
- struct drm_device *dev = crtc->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- intel_clock_t clock;
- const intel_limit_t *limit = intel_limit(crtc);
- int err = target;
-@@ -475,7 +475,7 @@
- {
- struct drm_device *dev = crtc->dev;
- struct drm_i915_master_private *master_priv;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-@@ -613,6 +613,7 @@
- /* lvds has its own version of prepare see intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
- }
-+EXPORT_SYMBOL(intel_encoder_prepare);
-
- void intel_encoder_commit (struct drm_encoder *encoder)
- {
-@@ -620,6 +621,7 @@
- /* lvds has its own version of commit see intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- }
-+EXPORT_SYMBOL(intel_encoder_commit);
-
- static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
-@@ -687,7 +689,7 @@
- */
- static int intel_panel_fitter_pipe (struct drm_device *dev)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- u32 pfit_control;
-
- /* i830 doesn't have a panel fitter */
-@@ -715,7 +717,7 @@
- struct drm_framebuffer *old_fb)
- {
- struct drm_device *dev = crtc->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
-@@ -980,7 +982,7 @@
- uint32_t width, uint32_t height)
- {
- struct drm_device *dev = crtc->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_gem_object *bo;
- struct drm_i915_gem_object *obj_priv;
-@@ -1071,7 +1073,7 @@
- static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
- {
- struct drm_device *dev = crtc->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- uint32_t temp = 0;
-@@ -1106,6 +1108,7 @@
- intel_crtc->lut_g[regno] = green >> 8;
- intel_crtc->lut_b[regno] = blue >> 8;
- }
-+EXPORT_SYMBOL(intel_crtc_fb_gamma_set);
-
- static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
-@@ -1228,6 +1231,7 @@
-
- return crtc;
- }
-+EXPORT_SYMBOL(intel_get_load_detect_pipe);
-
- void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode)
- {
-@@ -1251,11 +1255,12 @@
- crtc_funcs->dpms(crtc, dpms_mode);
- }
- }
-+EXPORT_SYMBOL(intel_release_load_detect_pipe);
-
- /* Returns the clock of the currently programmed mode of the given pipe. */
- static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
-@@ -1333,7 +1338,7 @@
- struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- struct drm_crtc *crtc)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- struct drm_display_mode *mode;
-@@ -1361,6 +1366,7 @@
-
- return mode;
- }
-+EXPORT_SYMBOL(intel_crtc_mode_get);
-
- static void intel_crtc_destroy(struct drm_crtc *crtc)
- {
-@@ -1415,11 +1421,6 @@
- intel_crtc->mode_set.connectors = (struct drm_connector **)(intel_crtc + 1);
- intel_crtc->mode_set.num_connectors = 0;
-
-- if (i915_fbpercrtc) {
--
--
--
-- }
- }
-
- struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
-@@ -1433,6 +1434,7 @@
- }
- return crtc;
- }
-+EXPORT_SYMBOL(intel_get_crtc_from_pipe);
-
- static int intel_connector_clones(struct drm_device *dev, int type_mask)
- {
-@@ -1575,7 +1577,7 @@
-
- return 0;
- }
--
-+EXPORT_SYMBOL(intel_framebuffer_create);
-
- static struct drm_framebuffer *
- intel_user_framebuffer_create(struct drm_device *dev,
-@@ -1643,12 +1645,13 @@
-
- intel_setup_outputs(dev);
- }
-+EXPORT_SYMBOL(intel_modeset_init);
-
- void intel_modeset_cleanup(struct drm_device *dev)
- {
- drm_mode_config_cleanup(dev);
- }
--
-+EXPORT_SYMBOL(intel_modeset_cleanup);
-
- /* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-@@ -1659,3 +1662,5 @@
-
- return &intel_output->enc;
- }
-+EXPORT_SYMBOL(intel_best_encoder);
-+
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_crt.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_crt.c 2009-02-19 12:59:23.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_crt.c 2009-02-20 14:53:08.000000000 +0000
-@@ -36,7 +36,7 @@
- static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- u32 temp;
-
- temp = I915_READ(ADPA);
-@@ -88,7 +88,7 @@
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- int dpll_md_reg;
- u32 adpa, dpll_md;
-
-@@ -132,7 +132,7 @@
- static bool intel_crt_detect_hotplug(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- u32 temp;
-
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_dma.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_dma.c 2009-02-19 12:59:23.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_dma.c 2009-02-20 12:12:41.000000000 +0000
-@@ -41,6 +41,7 @@
- int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
- u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-@@ -82,6 +83,7 @@
- static int i915_init_phys_hws(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- /* Program Hardware Status Page */
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-@@ -107,6 +109,8 @@
- static void i915_free_hws(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
-@@ -124,6 +128,7 @@
- void i915_kernel_lost_context(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_master_private *master_priv;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-
-@@ -231,6 +236,7 @@
- static int i915_dma_resume(struct drm_device * dev)
- {
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
-
-@@ -358,6 +364,7 @@
-
- static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
-@@ -401,6 +408,7 @@
- int i, int DR1, int DR4)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_clip_rect box;
- RING_LOCALS;
-
-@@ -442,6 +450,7 @@
- static void i915_emit_breadcrumb(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- RING_LOCALS;
-
-@@ -495,6 +504,7 @@
- drm_i915_batchbuffer_t * batch)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_clip_rect __user *boxes = batch->cliprects;
- int nbox = batch->num_cliprects;
- int i = 0, count;
-@@ -544,6 +554,7 @@
-
- static int i915_dispatch_flip(struct drm_device * dev)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv =
- dev->primary->master->driver_priv;
-@@ -775,6 +786,7 @@
- static int i915_set_status_page(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_hws_addr_t *hws = data;
-
-@@ -930,6 +942,7 @@
-
- static int i915_load_modeset_init(struct drm_device *dev)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long agp_size, prealloc_size;
- int fb_bar = IS_I9XX(dev) ? 2 : 0;
-@@ -1073,8 +1086,8 @@
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
-
-- dev_priv->regs = ioremap(base, size);
-- if (!dev_priv->regs) {
-+ dev_priv->common.regs = ioremap(base, size);
-+ if (!dev_priv->common.regs) {
- DRM_ERROR("failed to map registers\n");
- ret = -EIO;
- goto free_priv;
-@@ -1126,7 +1139,7 @@
- return 0;
-
- out_rmmap:
-- iounmap(dev_priv->regs);
-+ iounmap(dev_priv->common.regs);
- free_priv:
- drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
- return ret;
-@@ -1144,8 +1157,8 @@
- if (dev->pdev->msi_enabled)
- pci_disable_msi(dev->pdev);
-
-- if (dev_priv->regs != NULL)
-- iounmap(dev_priv->regs);
-+ if (dev_priv->common.regs != NULL)
-+ iounmap(dev_priv->common.regs);
-
- intel_opregion_free(dev);
-
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_sdvo.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_sdvo.c 2009-02-19 12:59:23.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_sdvo.c 2009-02-20 14:53:08.000000000 +0000
-@@ -62,7 +62,7 @@
- static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
- {
- struct drm_device *dev = intel_output->base.dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- u32 bval = val, cval = val;
- int i;
-@@ -552,7 +552,7 @@
- struct drm_display_mode *adjusted_mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_output *intel_output = enc_to_intel_output(encoder);
-@@ -659,7 +659,7 @@
- if (IS_I965G(dev)) {
- /* done in crtc_mode_set as the dpll_md reg must be written
- early */
-- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
-+ } else if (IS_POULSBO(dev) || IS_I945G(dev) || IS_I945GM(dev)) {
- /* done in crtc_mode_set as it lives inside the
- dpll register */
- } else {
-@@ -672,7 +672,7 @@
- static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = enc_to_intel_output(encoder);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- u32 temp;
-@@ -722,7 +722,7 @@
- static void intel_sdvo_save(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int o;
-@@ -759,7 +759,7 @@
- static void intel_sdvo_restore(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
- int o;
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_lvds.c 2009-02-19 12:59:23.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c 2009-02-20 14:53:08.000000000 +0000
-@@ -67,7 +67,7 @@
- */
- static void intel_lvds_set_power(struct drm_device *dev, bool on)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- u32 pp_status;
-
- if (on) {
-@@ -104,35 +104,35 @@
- static void intel_lvds_save(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
-- dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS);
-- dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS);
-- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
-- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-+ dev_priv_common->savePP_ON = I915_READ(PP_ON_DELAYS);
-+ dev_priv_common->savePP_OFF = I915_READ(PP_OFF_DELAYS);
-+ dev_priv_common->savePP_CONTROL = I915_READ(PP_CONTROL);
-+ dev_priv_common->savePP_DIVISOR = I915_READ(PP_DIVISOR);
-+ dev_priv_common->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-+ dev_priv_common->backlight_duty_cycle = (dev_priv_common->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
-
- /*
- * If the light is off at server startup, just make it full brightness
- */
-- if (dev_priv->backlight_duty_cycle == 0)
-- dev_priv->backlight_duty_cycle =
-+ if (dev_priv_common->backlight_duty_cycle == 0)
-+ lvds_backlight=
- intel_lvds_get_max_backlight(dev);
- }
-
- static void intel_lvds_restore(struct drm_connector *connector)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
-- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
-- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
-- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
-- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
-- if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
-+ I915_WRITE(BLC_PWM_CTL, dev_priv_common->saveBLC_PWM_CTL);
-+ I915_WRITE(PP_ON_DELAYS, dev_priv_common->savePP_ON);
-+ I915_WRITE(PP_OFF_DELAYS, dev_priv_common->savePP_OFF);
-+ I915_WRITE(PP_DIVISOR, dev_priv_common->savePP_DIVISOR);
-+ I915_WRITE(PP_CONTROL, dev_priv_common->savePP_CONTROL);
-+ if (dev_priv_common->savePP_CONTROL & POWER_TARGET_ON)
- intel_lvds_set_power(dev, true);
- else
- intel_lvds_set_power(dev, false);
-@@ -142,8 +142,8 @@
- struct drm_display_mode *mode)
- {
- struct drm_device *dev = connector->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-- struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+ struct drm_display_mode *fixed_mode = dev_priv_common->panel_fixed_mode;
-
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
-@@ -160,7 +160,7 @@
- struct drm_display_mode *adjusted_mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct drm_encoder *tmp_encoder;
-
-@@ -240,7 +240,7 @@
- struct drm_display_mode *adjusted_mode)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 pfit_control;
-
-@@ -264,7 +264,7 @@
- pfit_control = 0;
-
- if (!IS_I965G(dev)) {
-- if (dev_priv->panel_wants_dither)
-+ if (dev_priv_common->panel_wants_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- }
- else
-@@ -475,16 +475,16 @@
- crtc = intel_get_crtc_from_pipe(dev, pipe);
-
- if (crtc && (lvds & LVDS_PORT_EN)) {
-- dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
-- if (dev_priv->panel_fixed_mode) {
-- dev_priv->panel_fixed_mode->type |=
-+ dev_priv_common->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
-+ if (dev_priv_common->panel_fixed_mode) {
-+ dev_priv_common->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
- goto out; /* FIXME: check for quirks */
- }
- }
-
- /* If we still don't have a mode after all that, give up. */
-- if (!dev_priv->panel_fixed_mode)
-+ if (!dev_priv_common->panel_fixed_mode)
- goto failed;
-
- /* FIXME: detect aopen & mac mini type stuff automatically? */
-@@ -509,9 +509,9 @@
- * 800x600 display.
- */
-
-- if (dev_priv->panel_fixed_mode != NULL &&
-- dev_priv->panel_fixed_mode->hdisplay == 800 &&
-- dev_priv->panel_fixed_mode->vdisplay == 600) {
-+ if (dev_priv_common->panel_fixed_mode != NULL &&
-+ dev_priv_common->panel_fixed_mode->hdisplay == 800 &&
-+ dev_priv_common->panel_fixed_mode->vdisplay == 600) {
- DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
- goto failed;
- }
-Index: linux-2.6.28/drivers/gpu/drm/Kconfig
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/Kconfig 2009-02-19 12:59:22.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/Kconfig 2009-02-20 14:53:08.000000000 +0000
-@@ -43,6 +43,11 @@
-
- If M is selected, the module will be called radeon.
-
-+config DRM_INTEL_COMMON
-+ tristate
-+ help
-+ Code common to several Intel drivers (autoselected)
-+
- config DRM_I810
- tristate "Intel I810"
- depends on DRM && AGP && AGP_INTEL
-@@ -70,6 +75,7 @@
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
-+ select DRM_INTEL_COMMON
- depends on FB
- tristate "i915 driver"
- help
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/psb-driver.patch
deleted file mode 100644
index 5b20badff..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/psb-driver.patch
+++ /dev/null
@@ -1,21564 +0,0 @@
-Index: linux-2.6.28/include/drm/drm.h
-===================================================================
---- linux-2.6.28.orig/include/drm/drm.h 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/include/drm/drm.h 2009-02-20 12:23:06.000000000 +0000
-@@ -174,6 +174,7 @@
- _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
- _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
- _DRM_GEM = 6, /**< GEM object */
-+ _DRM_TTM = 7,
- };
-
- /**
-@@ -601,6 +602,271 @@
-
- #include "drm_mode.h"
-
-+#define DRM_FENCE_FLAG_EMIT 0x00000001
-+#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
-+#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
-+#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
-+#define DRM_FENCE_FLAG_NO_USER 0x00000010
-+
-+/* Reserved for driver use */
-+#define DRM_FENCE_MASK_DRIVER 0xFF000000
-+
-+#define DRM_FENCE_TYPE_EXE 0x00000001
-+
-+struct drm_fence_arg {
-+ unsigned int handle;
-+ unsigned int fence_class;
-+ unsigned int type;
-+ unsigned int flags;
-+ unsigned int signaled;
-+ unsigned int error;
-+ unsigned int sequence;
-+ unsigned int pad64;
-+ uint64_t expand_pad[2]; /*Future expansion */
-+};
-+
-+/* Buffer permissions, referring to how the GPU uses the buffers.
-+ * these translate to fence types used for the buffers.
-+ * Typically a texture buffer is read, A destination buffer is write and
-+ * a command (batch-) buffer is exe. Can be or-ed together.
-+ */
-+
-+#define DRM_BO_FLAG_READ (1ULL << 0)
-+#define DRM_BO_FLAG_WRITE (1ULL << 1)
-+#define DRM_BO_FLAG_EXE (1ULL << 2)
-+
-+/*
-+ * Status flags. Can be read to determine the actual state of a buffer.
-+ * Can also be set in the buffer mask before validation.
-+ */
-+
-+/*
-+ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
-+ * available to root and must be manually removed before buffer manager shutdown
-+ * or lock.
-+ * Flags: Acknowledge
-+ */
-+#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
-+
-+/*
-+ * Mask: Require that the buffer is placed in mappable memory when validated.
-+ * If not set the buffer may or may not be in mappable memory when validated.
-+ * Flags: If set, the buffer is in mappable memory.
-+ */
-+#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
-+
-+/* Mask: The buffer should be shareable with other processes.
-+ * Flags: The buffer is shareable with other processes.
-+ */
-+#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
-+
-+/* Mask: If set, place the buffer in cache-coherent memory if available.
-+ * If clear, never place the buffer in cache coherent memory if validated.
-+ * Flags: The buffer is currently in cache-coherent memory.
-+ */
-+#define DRM_BO_FLAG_CACHED (1ULL << 7)
-+
-+/* Mask: Make sure that every time this buffer is validated,
-+ * it ends up on the same location provided that the memory mask is the same.
-+ * The buffer will also not be evicted when claiming space for
-+ * other buffers. Basically a pinned buffer but it may be thrown out as
-+ * part of buffer manager shutdown or locking.
-+ * Flags: Acknowledge.
-+ */
-+#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
-+
-+/* Mask: Make sure the buffer is in cached memory when mapped
-+ * Flags: Acknowledge.
-+ * Buffers allocated with this flag should not be used for suballocators
-+ * This type may have issues on CPUs with over-aggressive caching
-+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
-+ */
-+#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
-+
-+
-+/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
-+ * Flags: Acknowledge.
-+ */
-+#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
-+
-+/*
-+ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
-+ * Flags: Acknowledge.
-+ */
-+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
-+#define DRM_BO_FLAG_TILE (1ULL << 15)
-+
-+/*
-+ * Memory type flags that can be or'ed together in the mask, but only
-+ * one appears in flags.
-+ */
-+
-+/* System memory */
-+#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
-+/* Translation table memory */
-+#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
-+/* Vram memory */
-+#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
-+/* Up to the driver to define. */
-+#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
-+#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
-+#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
-+#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
-+#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
-+/* We can add more of these now with a 64-bit flag type */
-+
-+/* Memory flag mask */
-+#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
-+#define DRM_BO_MASK_MEMTYPE 0x00000000FF0800A0ULL
-+
-+/* Driver-private flags */
-+#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
-+
-+/* Don't block on validate and map */
-+#define DRM_BO_HINT_DONT_BLOCK 0x00000002
-+/* Don't place this buffer on the unfenced list.*/
-+#define DRM_BO_HINT_DONT_FENCE 0x00000004
-+#define DRM_BO_HINT_WAIT_LAZY 0x00000008
-+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
-+
-+#define DRM_BO_INIT_MAGIC 0xfe769812
-+#define DRM_BO_INIT_MAJOR 1
-+#define DRM_BO_INIT_MINOR 0
-+#define DRM_BO_INIT_PATCH 0
-+
-+
-+struct drm_bo_info_req {
-+ uint64_t mask;
-+ uint64_t flags;
-+ unsigned int handle;
-+ unsigned int hint;
-+ unsigned int fence_class;
-+ unsigned int desired_tile_stride;
-+ unsigned int tile_info;
-+ unsigned int pad64;
-+ uint64_t presumed_offset;
-+};
-+
-+struct drm_bo_create_req {
-+ uint64_t mask;
-+ uint64_t size;
-+ uint64_t buffer_start;
-+ unsigned int hint;
-+ unsigned int page_alignment;
-+};
-+
-+
-+/*
-+ * Reply flags
-+ */
-+
-+#define DRM_BO_REP_BUSY 0x00000001
-+
-+struct drm_bo_info_rep {
-+ uint64_t flags;
-+ uint64_t mask;
-+ uint64_t size;
-+ uint64_t offset;
-+ uint64_t arg_handle;
-+ uint64_t buffer_start;
-+ unsigned int handle;
-+ unsigned int fence_flags;
-+ unsigned int rep_flags;
-+ unsigned int page_alignment;
-+ unsigned int desired_tile_stride;
-+ unsigned int hw_tile_stride;
-+ unsigned int tile_info;
-+ unsigned int pad64;
-+ uint64_t expand_pad[4]; /*Future expansion */
-+};
-+
-+struct drm_bo_arg_rep {
-+ struct drm_bo_info_rep bo_info;
-+ int ret;
-+ unsigned int pad64;
-+};
-+
-+struct drm_bo_create_arg {
-+ union {
-+ struct drm_bo_create_req req;
-+ struct drm_bo_info_rep rep;
-+ } d;
-+};
-+
-+struct drm_bo_handle_arg {
-+ unsigned int handle;
-+};
-+
-+struct drm_bo_reference_info_arg {
-+ union {
-+ struct drm_bo_handle_arg req;
-+ struct drm_bo_info_rep rep;
-+ } d;
-+};
-+
-+struct drm_bo_map_wait_idle_arg {
-+ union {
-+ struct drm_bo_info_req req;
-+ struct drm_bo_info_rep rep;
-+ } d;
-+};
-+
-+struct drm_bo_op_req {
-+ enum {
-+ drm_bo_validate,
-+ drm_bo_fence,
-+ drm_bo_ref_fence,
-+ } op;
-+ unsigned int arg_handle;
-+ struct drm_bo_info_req bo_req;
-+};
-+
-+
-+struct drm_bo_op_arg {
-+ uint64_t next;
-+ union {
-+ struct drm_bo_op_req req;
-+ struct drm_bo_arg_rep rep;
-+ } d;
-+ int handled;
-+ unsigned int pad64;
-+};
-+
-+
-+#define DRM_BO_MEM_LOCAL 0
-+#define DRM_BO_MEM_TT 1
-+#define DRM_BO_MEM_VRAM 2
-+#define DRM_BO_MEM_PRIV0 3
-+#define DRM_BO_MEM_PRIV1 4
-+#define DRM_BO_MEM_PRIV2 5
-+#define DRM_BO_MEM_PRIV3 6
-+#define DRM_BO_MEM_PRIV4 7
-+
-+#define DRM_BO_MEM_TYPES 8 /* For now. */
-+
-+#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
-+#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
-+
-+struct drm_bo_version_arg {
-+ uint32_t major;
-+ uint32_t minor;
-+ uint32_t patchlevel;
-+};
-+
-+struct drm_mm_type_arg {
-+ unsigned int mem_type;
-+ unsigned int lock_flags;
-+};
-+
-+struct drm_mm_init_arg {
-+ unsigned int magic;
-+ unsigned int major;
-+ unsigned int minor;
-+ unsigned int mem_type;
-+ uint64_t p_offset;
-+ uint64_t p_size;
-+};
-+
- #define DRM_IOCTL_BASE 'd'
- #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
- #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
-@@ -688,6 +954,39 @@
- #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
- #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
-
-+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
-+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
-+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
-+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
-+
-+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
-+
-+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
-+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
-+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
-+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
-+#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
-+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
-+
-+
-+#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
-+#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
-+#define DRM_IOCTL_MODE_GETOUTPUT DRM_IOWR(0xA2, struct drm_mode_get_output)
-+
-+#define DRM_IOCTL_MODE_ADDMODE DRM_IOWR(0xA7, struct drm_mode_modeinfo)
-+#define DRM_IOCTL_MODE_RMMODE DRM_IOWR(0xA8, unsigned int)
-+/*@}*/
-+
- /**
- * Device specific ioctls should only be in their respective headers
- * The device specific ioctl range is from 0x40 to 0x99.
-@@ -742,6 +1041,11 @@
- typedef struct drm_agp_info drm_agp_info_t;
- typedef struct drm_scatter_gather drm_scatter_gather_t;
- typedef struct drm_set_version drm_set_version_t;
-+
-+typedef struct drm_fence_arg drm_fence_arg_t;
-+typedef struct drm_mm_type_arg drm_mm_type_arg_t;
-+typedef struct drm_mm_init_arg drm_mm_init_arg_t;
-+typedef enum drm_bo_type drm_bo_type_t;
- #endif
-
- #endif
-Index: linux-2.6.28/include/drm/drmP.h
-===================================================================
---- linux-2.6.28.orig/include/drm/drmP.h 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/include/drm/drmP.h 2009-02-20 12:30:10.000000000 +0000
-@@ -57,6 +57,7 @@
- #include <linux/dma-mapping.h>
- #include <linux/mm.h>
- #include <linux/cdev.h>
-+#include <linux/i2c.h>
- #include <linux/mutex.h>
- #if defined(__alpha__) || defined(__powerpc__)
- #include <asm/pgtable.h> /* For pte_wrprotect */
-@@ -147,9 +148,24 @@
- #define DRM_MEM_CTXLIST 21
- #define DRM_MEM_MM 22
- #define DRM_MEM_HASHTAB 23
-+#define DRM_MEM_OBJECTS 24
-+#define DRM_MEM_FENCE 25
-+#define DRM_MEM_TTM 26
-+#define DRM_MEM_BUFOBJ 27
-
- #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
- #define DRM_MAP_HASH_OFFSET 0x10000000
-+#define DRM_MAP_HASH_ORDER 12
-+#define DRM_OBJECT_HASH_ORDER 12
-+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-+/*
-+ * This should be small enough to allow the use of kmalloc for hash tables
-+ * instead of vmalloc.
-+ */
-+
-+#define DRM_FILE_HASH_ORDER 8
-+#define DRM_MM_INIT_MAX_PAGES 256
-
- /*@}*/
-
-@@ -378,6 +394,14 @@
- struct drm_freelist freelist;
- };
-
-+
-+enum drm_ref_type {
-+ _DRM_REF_USE = 0,
-+ _DRM_REF_TYPE1,
-+ _DRM_NO_REF_TYPES
-+};
-+
-+
- /** File private data */
- struct drm_file {
- int authenticated;
-@@ -387,6 +411,7 @@
- unsigned long ioctl_count;
- struct list_head lhead;
- struct drm_minor *minor;
-+ int remove_auth_on_close;
- unsigned long lock_count;
-
- /** Mapping of mm object handles to object pointers. */
-@@ -394,6 +419,16 @@
- /** Lock for synchronization of access to object_idr. */
- spinlock_t table_lock;
-
-+ /*
-+ * The user object hash table is global and resides in the
-+ * drm_device structure. We protect the lists and hash tables with the
-+ * device struct_mutex. A bit coarse-grained but probably the best
-+ * option.
-+ */
-+
-+ struct list_head refd_objects;
-+
-+ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
- struct file *filp;
- void *driver_priv;
-
-@@ -659,6 +694,10 @@
- void *driver_priv; /**< Private structure for driver to use */
- };
-
-+#include "drm_objects.h"
-+#include "drm_edid.h"
-+#include "drm_crtc.h"
-+
- /**
- * DRM driver structure. This structure represent the common code for
- * a family of cards. There will one drm_device for each card present
-@@ -766,6 +805,13 @@
- int (*proc_init)(struct drm_minor *minor);
- void (*proc_cleanup)(struct drm_minor *minor);
-
-+ /* FB routines, if present */
-+ int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
-+ int (*fb_remove)(struct drm_device *dev, struct drm_crtc *crtc);
-+
-+ struct drm_fence_driver *fence_driver;
-+ struct drm_bo_driver *bo_driver;
-+
- /**
- * Driver-specific constructor for drm_gem_objects, to set up
- * obj->driver_private.
-@@ -821,8 +867,11 @@
- */
- struct drm_device {
- struct list_head driver_item; /**< list of devices per driver */
-+ //char *unique; /**< Unique identifier: e.g., busid */
-+ //int unique_len; /**< Length of unique field */
- char *devname; /**< For /proc/interrupts */
- int if_version; /**< Highest interface version set */
-+ //int blocked; /**< Blocked due to VC switch? */
-
- /** \name Locks */
- /*@{ */
-@@ -847,12 +896,18 @@
- /*@} */
-
- struct list_head filelist;
-+ struct drm_open_hash magiclist; /**< magic hash table */
-+ struct list_head magicfree;
-
- /** \name Memory management */
- /*@{ */
- struct list_head maplist; /**< Linked list of regions */
- int map_count; /**< Number of mappable regions */
- struct drm_open_hash map_hash; /**< User token hash table for maps */
-+ struct drm_mm offset_manager; /**< User token manager */
-+ struct drm_open_hash object_hash; /**< User token hash table for objects */
-+ struct address_space *dev_mapping; /**< For unmap_mapping_range() */
-+ struct page *ttm_dummy_page;
-
- /** \name Context handle management */
- /*@{ */
-@@ -864,6 +919,7 @@
-
- struct list_head vmalist; /**< List of vmas (for debugging) */
-
-+ struct drm_lock_data lock; /**< Information on hardware lock */
- /*@} */
-
- /** \name DMA queues (contexts) */
-@@ -936,7 +992,6 @@
- int num_crtcs; /**< Number of CRTCs on this device */
- void *dev_private; /**< device private data */
- void *mm_private;
-- struct address_space *dev_mapping;
- struct drm_sigdata sigdata; /**< For block_all_signals */
- sigset_t sigmask;
-
-@@ -945,6 +1000,8 @@
- unsigned int agp_buffer_token;
- struct drm_minor *control; /**< Control node for card */
- struct drm_minor *primary; /**< render type primary screen head */
-+ struct drm_fence_manager fm;
-+ struct drm_buffer_manager bm;
-
- /** \name Drawable information */
- /*@{ */
-@@ -976,6 +1033,27 @@
- return dev->pdev->irq;
- }
-
-+#if __OS_HAS_AGP
-+struct drm_agp_ttm_backend {
-+ struct drm_ttm_backend backend;
-+ DRM_AGP_MEM *mem;
-+ struct agp_bridge_data *bridge;
-+ int populated;
-+};
-+#endif
-+
-+typedef struct ati_pcigart_ttm_backend {
-+ struct drm_ttm_backend backend;
-+ int populated;
-+ void (*gart_flush_fn)(struct drm_device *dev);
-+ struct drm_ati_pcigart_info *gart_info;
-+ unsigned long offset;
-+ struct page **pages;
-+ int num_pages;
-+ int bound;
-+ struct drm_device *dev;
-+} ati_pcigart_ttm_backend_t;
-+
- static __inline__ int drm_core_check_feature(struct drm_device *dev,
- int feature)
- {
-@@ -1042,6 +1120,9 @@
- /* Driver support (drm_drv.h) */
- extern int drm_init(struct drm_driver *driver);
- extern void drm_exit(struct drm_driver *driver);
-+extern void drm_cleanup_pci(struct pci_dev *pdev);
-+extern void drm_vbl_send_signals(struct drm_device *dev, int crtc);
-+extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
- extern int drm_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
- extern long drm_compat_ioctl(struct file *filp,
-Index: linux-2.6.28/include/drm/drm_pciids.h
-===================================================================
---- linux-2.6.28.orig/include/drm/drm_pciids.h 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/include/drm/drm_pciids.h 2009-02-20 12:23:06.000000000 +0000
-@@ -419,3 +419,9 @@
- {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0, 0, 0}
-+
-+#define psb_PCI_IDS \
-+ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
-+ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
-+ {0, 0, 0}
-+
-Index: linux-2.6.28/drivers/gpu/drm/Makefile
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/Makefile 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/Makefile 2009-02-20 12:23:06.000000000 +0000
-@@ -10,6 +10,8 @@
- drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
- drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
-+ drm_fence.o drm_object.o drm_ttm.o drm_bo.o \
-+ drm_bo_lock.o drm_bo_move.o drm_regman.o \
- drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
-
- drm-$(CONFIG_COMPAT) += drm_ioc32.o
-@@ -22,6 +24,7 @@
- obj-$(CONFIG_DRM_I810) += i810/
- obj-$(CONFIG_DRM_I830) += i830/
- obj-$(CONFIG_DRM_I915) += i915/
-+obj-$(CONFIG_DRM_PSB) += psb/
- obj-$(CONFIG_DRM_SIS) += sis/
- obj-$(CONFIG_DRM_SAVAGE)+= savage/
- obj-$(CONFIG_DRM_VIA) +=via/
-Index: linux-2.6.28/drivers/gpu/drm/drm_agpsupport.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/drm_agpsupport.c 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_agpsupport.c 2009-02-20 12:23:06.000000000 +0000
-@@ -502,4 +502,156 @@
- }
- EXPORT_SYMBOL(drm_agp_chipset_flush);
-
-+/*
-+ * AGP ttm backend interface.
-+ */
-+
-+#ifndef AGP_USER_TYPES
-+#define AGP_USER_TYPES (1 << 16)
-+#define AGP_USER_MEMORY (AGP_USER_TYPES)
-+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
-+#endif
-+#define AGP_REQUIRED_MAJOR 0
-+#define AGP_REQUIRED_MINOR 102
-+
-+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
-+{
-+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
-+}
-+
-+
-+static int drm_agp_populate(struct drm_ttm_backend *backend,
-+ unsigned long num_pages, struct page **pages)
-+{
-+ struct drm_agp_ttm_backend *agp_be =
-+ container_of(backend, struct drm_agp_ttm_backend, backend);
-+ struct page **cur_page, **last_page = pages + num_pages;
-+ DRM_AGP_MEM *mem;
-+
-+ DRM_DEBUG("drm_agp_populate_ttm\n");
-+ mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
-+ if (!mem)
-+ return -ENOMEM;
-+
-+ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
-+ mem->page_count = 0;
-+ for (cur_page = pages; cur_page < last_page; ++cur_page)
-+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
-+ agp_be->mem = mem;
-+ return 0;
-+}
-+
-+static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
-+ struct drm_bo_mem_reg *bo_mem)
-+{
-+ struct drm_agp_ttm_backend *agp_be =
-+ container_of(backend, struct drm_agp_ttm_backend, backend);
-+ DRM_AGP_MEM *mem = agp_be->mem;
-+ int ret;
-+ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
-+
-+ DRM_DEBUG("drm_agp_bind_ttm\n");
-+ mem->is_flushed = 1;
-+ mem->type = AGP_USER_MEMORY;
-+ /* CACHED MAPPED implies not snooped memory */
-+ if (snooped)
-+ mem->type = AGP_USER_CACHED_MEMORY;
-+
-+ ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
-+ if (ret)
-+ DRM_ERROR("AGP Bind memory failed\n");
-+
-+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
-+ DRM_BE_FLAG_BOUND_CACHED : 0,
-+ DRM_BE_FLAG_BOUND_CACHED);
-+ return ret;
-+}
-+
-+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
-+{
-+ struct drm_agp_ttm_backend *agp_be =
-+ container_of(backend, struct drm_agp_ttm_backend, backend);
-+
-+ DRM_DEBUG("drm_agp_unbind_ttm\n");
-+ if (agp_be->mem->is_bound)
-+ return drm_agp_unbind_memory(agp_be->mem);
-+ else
-+ return 0;
-+}
-+
-+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
-+{
-+ struct drm_agp_ttm_backend *agp_be =
-+ container_of(backend, struct drm_agp_ttm_backend, backend);
-+ DRM_AGP_MEM *mem = agp_be->mem;
-+
-+ DRM_DEBUG("drm_agp_clear_ttm\n");
-+ if (mem) {
-+ backend->func->unbind(backend);
-+ agp_free_memory(mem);
-+ }
-+ agp_be->mem = NULL;
-+}
-+
-+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
-+{
-+ struct drm_agp_ttm_backend *agp_be;
-+
-+ if (backend) {
-+ DRM_DEBUG("drm_agp_destroy_ttm\n");
-+ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
-+ if (agp_be && agp_be->mem)
-+ backend->func->clear(backend);
-+ }
-+}
-+
-+static struct drm_ttm_backend_func agp_ttm_backend = {
-+ .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
-+ .populate = drm_agp_populate,
-+ .clear = drm_agp_clear_ttm,
-+ .bind = drm_agp_bind_ttm,
-+ .unbind = drm_agp_unbind_ttm,
-+ .destroy = drm_agp_destroy_ttm,
-+};
-+
-+struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
-+{
-+
-+ struct drm_agp_ttm_backend *agp_be;
-+ struct agp_kern_info *info;
-+
-+ if (!dev->agp) {
-+ DRM_ERROR("AGP is not initialized.\n");
-+ return NULL;
-+ }
-+ info = &dev->agp->agp_info;
-+
-+ if (info->version.major != AGP_REQUIRED_MAJOR ||
-+ info->version.minor < AGP_REQUIRED_MINOR) {
-+ DRM_ERROR("Wrong agpgart version %d.%d\n"
-+ "\tYou need at least version %d.%d.\n",
-+ info->version.major,
-+ info->version.minor,
-+ AGP_REQUIRED_MAJOR,
-+ AGP_REQUIRED_MINOR);
-+ return NULL;
-+ }
-+
-+
-+ agp_be = drm_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
-+ if (!agp_be)
-+ return NULL;
-+
-+ agp_be->mem = NULL;
-+
-+ agp_be->bridge = dev->agp->bridge;
-+ agp_be->populated = 0;
-+ agp_be->backend.func = &agp_ttm_backend;
-+ agp_be->backend.dev = dev;
-+
-+ return &agp_be->backend;
-+}
-+EXPORT_SYMBOL(drm_agp_init_ttm);
-+
-+
- #endif /* __OS_HAS_AGP */
-Index: linux-2.6.28/drivers/gpu/drm/drm_bo.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_bo.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,2660 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+/*
-+ * Locking may look a bit complicated but isn't really:
-+ *
-+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
-+ * when there is a chance that it can be zero before or after the operation.
-+ *
-+ * dev->struct_mutex also protects all lists and list heads,
-+ * Hash tables and hash heads.
-+ *
-+ * bo->mutex protects the buffer object itself excluding the usage field.
-+ * bo->mutex does also protect the buffer list heads, so to manipulate those,
-+ * we need both the bo->mutex and the dev->struct_mutex.
-+ *
-+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
-+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
-+ * the list traversal will, in general, need to be restarted.
-+ *
-+ */
-+
-+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
-+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
-+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
-+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
-+
-+static inline uint64_t drm_bo_type_flags(unsigned type)
-+{
-+ return (1ULL << (24 + type));
-+}
-+
-+/*
-+ * bo locked. dev->struct_mutex locked.
-+ */
-+
-+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
-+{
-+ struct drm_mem_type_manager *man;
-+
-+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-+ DRM_ASSERT_LOCKED(&bo->mutex);
-+
-+ man = &bo->dev->bm.man[bo->pinned_mem_type];
-+ list_add_tail(&bo->pinned_lru, &man->pinned);
-+}
-+
-+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
-+{
-+ struct drm_mem_type_manager *man;
-+
-+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-+
-+ if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
-+ || bo->mem.mem_type != bo->pinned_mem_type) {
-+ man = &bo->dev->bm.man[bo->mem.mem_type];
-+ list_add_tail(&bo->lru, &man->lru);
-+ } else {
-+ INIT_LIST_HEAD(&bo->lru);
-+ }
-+}
-+
-+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
-+{
-+#ifdef DRM_ODD_MM_COMPAT
-+ int ret;
-+
-+ if (!bo->map_list.map)
-+ return 0;
-+
-+ ret = drm_bo_lock_kmm(bo);
-+ if (ret)
-+ return ret;
-+ drm_bo_unmap_virtual(bo);
-+ if (old_is_pci)
-+ drm_bo_finish_unmap(bo);
-+#else
-+ if (!bo->map_list.map)
-+ return 0;
-+
-+ drm_bo_unmap_virtual(bo);
-+#endif
-+ return 0;
-+}
-+
-+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
-+{
-+#ifdef DRM_ODD_MM_COMPAT
-+ int ret;
-+
-+ if (!bo->map_list.map)
-+ return;
-+
-+ ret = drm_bo_remap_bound(bo);
-+ if (ret) {
-+ DRM_ERROR("Failed to remap a bound buffer object.\n"
-+ "\tThis might cause a sigbus later.\n");
-+ }
-+ drm_bo_unlock_kmm(bo);
-+#endif
-+}
-+
-+/*
-+ * Call bo->mutex locked.
-+ */
-+
-+static int drm_bo_add_ttm(struct drm_buffer_object *bo)
-+{
-+ struct drm_device *dev = bo->dev;
-+ int ret = 0;
-+
-+ DRM_ASSERT_LOCKED(&bo->mutex);
-+ bo->ttm = NULL;
-+
-+ switch (bo->type) {
-+ case drm_bo_type_dc:
-+ case drm_bo_type_kernel:
-+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
-+ if (!bo->ttm)
-+ ret = -ENOMEM;
-+ break;
-+ case drm_bo_type_user:
-+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
-+ if (!bo->ttm)
-+ ret = -ENOMEM;
-+
-+ ret = drm_ttm_set_user(bo->ttm, current,
-+ bo->mem.mask & DRM_BO_FLAG_WRITE,
-+ bo->buffer_start,
-+ bo->num_pages,
-+ dev->bm.dummy_read_page);
-+ if (ret)
-+ return ret;
-+
-+ break;
-+ default:
-+ DRM_ERROR("Illegal buffer object type\n");
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
-+ struct drm_bo_mem_reg *mem,
-+ int evict, int no_wait)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
-+ int new_is_pci = drm_mem_reg_is_pci(dev, mem);
-+ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
-+ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
-+ int ret = 0;
-+
-+ if (old_is_pci || new_is_pci ||
-+ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
-+ ret = drm_bo_vm_pre_move(bo, old_is_pci);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Create and bind a ttm if required.
-+ */
-+
-+ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
-+ ret = drm_bo_add_ttm(bo);
-+ if (ret)
-+ goto out_err;
-+
-+ if (mem->mem_type != DRM_BO_MEM_LOCAL) {
-+ ret = drm_bind_ttm(bo->ttm, mem);
-+ if (ret)
-+ goto out_err;
-+ }
-+
-+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
-+
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ uint64_t save_flags = old_mem->flags;
-+ uint64_t save_mask = old_mem->mask;
-+
-+ *old_mem = *mem;
-+ mem->mm_node = NULL;
-+ old_mem->mask = save_mask;
-+ DRM_FLAG_MASKED(save_flags, mem->flags,
-+ DRM_BO_MASK_MEMTYPE);
-+ goto moved;
-+ }
-+
-+ }
-+
-+ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
-+ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
-+
-+ ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
-+
-+ } else if (dev->driver->bo_driver->move) {
-+ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
-+
-+ } else {
-+
-+ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
-+
-+ }
-+
-+ if (ret)
-+ goto out_err;
-+
-+moved:
-+ if (old_is_pci || new_is_pci)
-+ drm_bo_vm_post_move(bo);
-+
-+ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
-+ ret =
-+ dev->driver->bo_driver->invalidate_caches(dev,
-+ bo->mem.flags);
-+ if (ret)
-+ DRM_ERROR("Can not flush read caches\n");
-+ }
-+
-+ DRM_FLAG_MASKED(bo->priv_flags,
-+ (evict) ? _DRM_BO_FLAG_EVICTED : 0,
-+ _DRM_BO_FLAG_EVICTED);
-+
-+ if (bo->mem.mm_node)
-+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
-+ bm->man[bo->mem.mem_type].gpu_offset;
-+
-+
-+ return 0;
-+
-+out_err:
-+ if (old_is_pci || new_is_pci)
-+ drm_bo_vm_post_move(bo);
-+
-+ new_man = &bm->man[bo->mem.mem_type];
-+ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
-+ drm_ttm_unbind(bo->ttm);
-+ drm_destroy_ttm(bo->ttm);
-+ bo->ttm = NULL;
-+ }
-+
-+ return ret;
-+}
-+
-+/*
-+ * Call bo->mutex locked.
-+ * Wait until the buffer is idle.
-+ */
-+
-+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
-+ int no_wait)
-+{
-+ int ret;
-+
-+ DRM_ASSERT_LOCKED(&bo->mutex);
-+
-+ if (bo->fence) {
-+ if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ return 0;
-+ }
-+ if (no_wait)
-+ return -EBUSY;
-+
-+ ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
-+ bo->fence_type);
-+ if (ret)
-+ return ret;
-+
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_wait);
-+
-+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+
-+ if (bo->fence) {
-+ if (bm->nice_mode) {
-+ unsigned long _end = jiffies + 3 * DRM_HZ;
-+ int ret;
-+ do {
-+ ret = drm_bo_wait(bo, 0, 1, 0);
-+ if (ret && allow_errors)
-+ return ret;
-+
-+ } while (ret && !time_after_eq(jiffies, _end));
-+
-+ if (bo->fence) {
-+ bm->nice_mode = 0;
-+ DRM_ERROR("Detected GPU lockup or "
-+ "fence driver was taken down. "
-+ "Evicting buffer.\n");
-+ }
-+ }
-+ if (bo->fence)
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Call dev->struct_mutex locked.
-+ * Attempts to remove all private references to a buffer by expiring its
-+ * fence object and removing from lru lists and memory managers.
-+ */
-+
-+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+
-+ atomic_inc(&bo->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_lock(&bo->mutex);
-+
-+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-+
-+ if (bo->fence && drm_fence_object_signaled(bo->fence,
-+ bo->fence_type))
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+
-+ if (bo->fence && remove_all)
-+ (void)drm_bo_expire_fence(bo, 0);
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (!atomic_dec_and_test(&bo->usage))
-+ goto out;
-+
-+ if (!bo->fence) {
-+ list_del_init(&bo->lru);
-+ if (bo->mem.mm_node) {
-+ drm_mm_put_block(bo->mem.mm_node);
-+ if (bo->pinned_node == bo->mem.mm_node)
-+ bo->pinned_node = NULL;
-+ bo->mem.mm_node = NULL;
-+ }
-+ list_del_init(&bo->pinned_lru);
-+ if (bo->pinned_node) {
-+ drm_mm_put_block(bo->pinned_node);
-+ bo->pinned_node = NULL;
-+ }
-+ list_del_init(&bo->ddestroy);
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_destroy_locked(bo);
-+ return;
-+ }
-+
-+ if (list_empty(&bo->ddestroy)) {
-+ drm_fence_object_flush(bo->fence, bo->fence_type);
-+ list_add_tail(&bo->ddestroy, &bm->ddestroy);
-+ schedule_delayed_work(&bm->wq,
-+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
-+ }
-+
-+out:
-+ mutex_unlock(&bo->mutex);
-+ return;
-+}
-+
-+static void drm_bo_unreserve_size(unsigned long size)
-+{
-+ //drm_free_memctl(size);
-+}
-+
-+/*
-+ * Verify that refcount is 0 and that there are no internal references
-+ * to the buffer object. Then destroy it.
-+ */
-+
-+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ unsigned long reserved_size;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+
-+ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
-+ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
-+ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
-+ if (bo->fence != NULL) {
-+ DRM_ERROR("Fence was non-zero.\n");
-+ drm_bo_cleanup_refs(bo, 0);
-+ return;
-+ }
-+
-+#ifdef DRM_ODD_MM_COMPAT
-+ BUG_ON(!list_empty(&bo->vma_list));
-+ BUG_ON(!list_empty(&bo->p_mm_list));
-+#endif
-+
-+ if (bo->ttm) {
-+ drm_ttm_unbind(bo->ttm);
-+ drm_destroy_ttm(bo->ttm);
-+ bo->ttm = NULL;
-+ }
-+
-+ atomic_dec(&bm->count);
-+
-+ reserved_size = bo->reserved_size;
-+
-+ drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
-+ drm_bo_unreserve_size(reserved_size);
-+
-+ return;
-+ }
-+
-+ /*
-+ * Some stuff is still trying to reference the buffer object.
-+ * Get rid of those references.
-+ */
-+
-+ drm_bo_cleanup_refs(bo, 0);
-+
-+ return;
-+}
-+
-+/*
-+ * Call dev->struct_mutex locked.
-+ */
-+
-+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+
-+ struct drm_buffer_object *entry, *nentry;
-+ struct list_head *list, *next;
-+
-+ list_for_each_safe(list, next, &bm->ddestroy) {
-+ entry = list_entry(list, struct drm_buffer_object, ddestroy);
-+
-+ nentry = NULL;
-+ if (next != &bm->ddestroy) {
-+ nentry = list_entry(next, struct drm_buffer_object,
-+ ddestroy);
-+ atomic_inc(&nentry->usage);
-+ }
-+
-+ drm_bo_cleanup_refs(entry, remove_all);
-+
-+ if (nentry)
-+ atomic_dec(&nentry->usage);
-+ }
-+}
-+
-+static void drm_bo_delayed_workqueue(struct work_struct *work)
-+{
-+ struct drm_buffer_manager *bm =
-+ container_of(work, struct drm_buffer_manager, wq.work);
-+ struct drm_device *dev = container_of(bm, struct drm_device, bm);
-+
-+ DRM_DEBUG("Delayed delete Worker\n");
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (!bm->initialized) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return;
-+ }
-+ drm_bo_delayed_delete(dev, 0);
-+ if (bm->initialized && !list_empty(&bm->ddestroy)) {
-+ schedule_delayed_work(&bm->wq,
-+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
-+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
-+{
-+ struct drm_buffer_object *tmp_bo = *bo;
-+ bo = NULL;
-+
-+ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
-+
-+ if (atomic_dec_and_test(&tmp_bo->usage))
-+ drm_bo_destroy_locked(tmp_bo);
-+}
-+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
-+
-+static void drm_bo_base_deref_locked(struct drm_file *file_priv,
-+ struct drm_user_object *uo)
-+{
-+ struct drm_buffer_object *bo =
-+ drm_user_object_entry(uo, struct drm_buffer_object, base);
-+
-+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-+
-+ drm_bo_takedown_vm_locked(bo);
-+ drm_bo_usage_deref_locked(&bo);
-+}
-+
-+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
-+{
-+ struct drm_buffer_object *tmp_bo = *bo;
-+ struct drm_device *dev = tmp_bo->dev;
-+
-+ *bo = NULL;
-+ if (atomic_dec_and_test(&tmp_bo->usage)) {
-+ mutex_lock(&dev->struct_mutex);
-+ if (atomic_read(&tmp_bo->usage) == 0)
-+ drm_bo_destroy_locked(tmp_bo);
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+}
-+EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
-+
-+void drm_putback_buffer_objects(struct drm_device *dev)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct list_head *list = &bm->unfenced;
-+ struct drm_buffer_object *entry, *next;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ list_for_each_entry_safe(entry, next, list, lru) {
-+ atomic_inc(&entry->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ mutex_lock(&entry->mutex);
-+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-+ mutex_lock(&dev->struct_mutex);
-+
-+ list_del_init(&entry->lru);
-+ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-+ wake_up_all(&entry->event_queue);
-+
-+ /*
-+ * FIXME: Might want to put back on head of list
-+ * instead of tail here.
-+ */
-+
-+ drm_bo_add_to_lru(entry);
-+ mutex_unlock(&entry->mutex);
-+ drm_bo_usage_deref_locked(&entry);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+EXPORT_SYMBOL(drm_putback_buffer_objects);
-+
-+
-+/*
-+ * Note. The caller has to register (if applicable)
-+ * and deregister fence object usage.
-+ */
-+
-+int drm_fence_buffer_objects(struct drm_device *dev,
-+ struct list_head *list,
-+ uint32_t fence_flags,
-+ struct drm_fence_object *fence,
-+ struct drm_fence_object **used_fence)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_buffer_object *entry;
-+ uint32_t fence_type = 0;
-+ uint32_t fence_class = ~0;
-+ int count = 0;
-+ int ret = 0;
-+ struct list_head *l;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (!list)
-+ list = &bm->unfenced;
-+
-+ if (fence)
-+ fence_class = fence->fence_class;
-+
-+ list_for_each_entry(entry, list, lru) {
-+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-+ fence_type |= entry->new_fence_type;
-+ if (fence_class == ~0)
-+ fence_class = entry->new_fence_class;
-+ else if (entry->new_fence_class != fence_class) {
-+ DRM_ERROR("Unmatching fence classes on unfenced list: "
-+ "%d and %d.\n",
-+ fence_class,
-+ entry->new_fence_class);
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ count++;
-+ }
-+
-+ if (!count) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (fence) {
-+ if ((fence_type & fence->type) != fence_type ||
-+ (fence->fence_class != fence_class)) {
-+ DRM_ERROR("Given fence doesn't match buffers "
-+ "on unfenced list.\n");
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ } else {
-+ mutex_unlock(&dev->struct_mutex);
-+ ret = drm_fence_object_create(dev, fence_class, fence_type,
-+ fence_flags | DRM_FENCE_FLAG_EMIT,
-+ &fence);
-+ mutex_lock(&dev->struct_mutex);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ count = 0;
-+ l = list->next;
-+ while (l != list) {
-+ prefetch(l->next);
-+ entry = list_entry(l, struct drm_buffer_object, lru);
-+ atomic_inc(&entry->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_lock(&entry->mutex);
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(l);
-+ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-+ count++;
-+ if (entry->fence)
-+ drm_fence_usage_deref_locked(&entry->fence);
-+ entry->fence = drm_fence_reference_locked(fence);
-+ entry->fence_class = entry->new_fence_class;
-+ entry->fence_type = entry->new_fence_type;
-+ DRM_FLAG_MASKED(entry->priv_flags, 0,
-+ _DRM_BO_FLAG_UNFENCED);
-+ wake_up_all(&entry->event_queue);
-+ drm_bo_add_to_lru(entry);
-+ }
-+ mutex_unlock(&entry->mutex);
-+ drm_bo_usage_deref_locked(&entry);
-+ l = list->next;
-+ }
-+ DRM_DEBUG("Fenced %d buffers\n", count);
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ *used_fence = fence;
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_fence_buffer_objects);
-+
-+/*
-+ * bo->mutex locked
-+ */
-+
-+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
-+ int no_wait)
-+{
-+ int ret = 0;
-+ struct drm_device *dev = bo->dev;
-+ struct drm_bo_mem_reg evict_mem;
-+
-+ /*
-+ * Someone might have modified the buffer before we took the
-+ * buffer mutex.
-+ */
-+
-+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
-+ goto out;
-+ if (bo->mem.mem_type != mem_type)
-+ goto out;
-+
-+ ret = drm_bo_wait(bo, 0, 0, no_wait);
-+
-+ if (ret && ret != -EAGAIN) {
-+ DRM_ERROR("Failed to expire fence before "
-+ "buffer eviction.\n");
-+ goto out;
-+ }
-+
-+ evict_mem = bo->mem;
-+ evict_mem.mm_node = NULL;
-+
-+ evict_mem = bo->mem;
-+ evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
-+ ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
-+
-+ if (ret) {
-+ if (ret != -EAGAIN)
-+ DRM_ERROR("Failed to find memory space for "
-+ "buffer 0x%p eviction.\n", bo);
-+ goto out;
-+ }
-+
-+ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
-+
-+ if (ret) {
-+ if (ret != -EAGAIN)
-+ DRM_ERROR("Buffer eviction failed\n");
-+ goto out;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (evict_mem.mm_node) {
-+ if (evict_mem.mm_node != bo->pinned_node)
-+ drm_mm_put_block(evict_mem.mm_node);
-+ evict_mem.mm_node = NULL;
-+ }
-+ list_del(&bo->lru);
-+ drm_bo_add_to_lru(bo);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
-+ _DRM_BO_FLAG_EVICTED);
-+
-+out:
-+ return ret;
-+}
-+
-+/**
-+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
-+ * space, or we've evicted everything and there isn't enough space.
-+ */
-+static int drm_bo_mem_force_space(struct drm_device *dev,
-+ struct drm_bo_mem_reg *mem,
-+ uint32_t mem_type, int no_wait)
-+{
-+ struct drm_mm_node *node;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_buffer_object *entry;
-+ struct drm_mem_type_manager *man = &bm->man[mem_type];
-+ struct list_head *lru;
-+ unsigned long num_pages = mem->num_pages;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ do {
-+ node = drm_mm_search_free(&man->manager, num_pages,
-+ mem->page_alignment, 1);
-+ if (node)
-+ break;
-+
-+ lru = &man->lru;
-+ if (lru->next == lru)
-+ break;
-+
-+ entry = list_entry(lru->next, struct drm_buffer_object, lru);
-+ atomic_inc(&entry->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_lock(&entry->mutex);
-+ BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
-+
-+ ret = drm_bo_evict(entry, mem_type, no_wait);
-+ mutex_unlock(&entry->mutex);
-+ drm_bo_usage_deref_unlocked(&entry);
-+ if (ret)
-+ return ret;
-+ mutex_lock(&dev->struct_mutex);
-+ } while (1);
-+
-+ if (!node) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -ENOMEM;
-+ }
-+
-+ node = drm_mm_get_block(node, num_pages, mem->page_alignment);
-+ if (!node) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -ENOMEM;
-+ }
-+
-+ mutex_unlock(&dev->struct_mutex);
-+ mem->mm_node = node;
-+ mem->mem_type = mem_type;
-+ return 0;
-+}
-+
-+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
-+ int disallow_fixed,
-+ uint32_t mem_type,
-+ uint64_t mask, uint32_t *res_mask)
-+{
-+ uint64_t cur_flags = drm_bo_type_flags(mem_type);
-+ uint64_t flag_diff;
-+
-+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
-+ return 0;
-+ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
-+ cur_flags |= DRM_BO_FLAG_CACHED;
-+ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
-+ cur_flags |= DRM_BO_FLAG_MAPPABLE;
-+ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
-+ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
-+
-+ if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
-+ return 0;
-+
-+ if (mem_type == DRM_BO_MEM_LOCAL) {
-+ *res_mask = cur_flags;
-+ return 1;
-+ }
-+
-+ flag_diff = (mask ^ cur_flags);
-+ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
-+ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
-+
-+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-+ (!(mask & DRM_BO_FLAG_CACHED) ||
-+ (mask & DRM_BO_FLAG_FORCE_CACHING)))
-+ return 0;
-+
-+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
-+ ((mask & DRM_BO_FLAG_MAPPABLE) ||
-+ (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
-+ return 0;
-+
-+ *res_mask = cur_flags;
-+ return 1;
-+}
-+
-+/**
-+ * Creates space for memory region @mem according to its type.
-+ *
-+ * This function first searches for free space in compatible memory types in
-+ * the priority order defined by the driver. If free space isn't found, then
-+ * drm_bo_mem_force_space is attempted in priority order to evict and find
-+ * space.
-+ */
-+int drm_bo_mem_space(struct drm_buffer_object *bo,
-+ struct drm_bo_mem_reg *mem, int no_wait)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man;
-+
-+ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
-+ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
-+ uint32_t i;
-+ uint32_t mem_type = DRM_BO_MEM_LOCAL;
-+ uint32_t cur_flags;
-+ int type_found = 0;
-+ int type_ok = 0;
-+ int has_eagain = 0;
-+ struct drm_mm_node *node = NULL;
-+ int ret;
-+
-+ mem->mm_node = NULL;
-+ for (i = 0; i < num_prios; ++i) {
-+ mem_type = prios[i];
-+ man = &bm->man[mem_type];
-+
-+ type_ok = drm_bo_mt_compatible(man,
-+ bo->type == drm_bo_type_user,
-+ mem_type, mem->mask,
-+ &cur_flags);
-+
-+ if (!type_ok)
-+ continue;
-+
-+ if (mem_type == DRM_BO_MEM_LOCAL)
-+ break;
-+
-+ if ((mem_type == bo->pinned_mem_type) &&
-+ (bo->pinned_node != NULL)) {
-+ node = bo->pinned_node;
-+ break;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (man->has_type && man->use_type) {
-+ type_found = 1;
-+ node = drm_mm_search_free(&man->manager, mem->num_pages,
-+ mem->page_alignment, 1);
-+ if (node)
-+ node = drm_mm_get_block(node, mem->num_pages,
-+ mem->page_alignment);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ if (node)
-+ break;
-+ }
-+
-+ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
-+ mem->mm_node = node;
-+ mem->mem_type = mem_type;
-+ mem->flags = cur_flags;
-+ return 0;
-+ }
-+
-+ if (!type_found)
-+ return -EINVAL;
-+
-+ num_prios = dev->driver->bo_driver->num_mem_busy_prio;
-+ prios = dev->driver->bo_driver->mem_busy_prio;
-+
-+ for (i = 0; i < num_prios; ++i) {
-+ mem_type = prios[i];
-+ man = &bm->man[mem_type];
-+
-+ if (!man->has_type)
-+ continue;
-+
-+ if (!drm_bo_mt_compatible(man,
-+ bo->type == drm_bo_type_user,
-+ mem_type,
-+ mem->mask,
-+ &cur_flags))
-+ continue;
-+
-+ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
-+
-+ if (ret == 0 && mem->mm_node) {
-+ mem->flags = cur_flags;
-+ return 0;
-+ }
-+
-+ if (ret == -EAGAIN)
-+ has_eagain = 1;
-+ }
-+
-+ ret = (has_eagain) ? -EAGAIN : -ENOMEM;
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_mem_space);
-+
-+static int drm_bo_new_mask(struct drm_buffer_object *bo,
-+ uint64_t new_flags, uint64_t used_mask)
-+{
-+ uint32_t new_props;
-+
-+ if (bo->type == drm_bo_type_user &&
-+ ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
-+ (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
-+ DRM_ERROR("User buffers require cache-coherent memory.\n");
-+ return -EINVAL;
-+ }
-+
-+ if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
-+ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
-+ return -EPERM;
-+ }
-+
-+ if (likely(used_mask & DRM_BO_MASK_MEM) &&
-+ (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
-+ !DRM_SUSER(DRM_CURPROC)) {
-+ if (likely(bo->mem.flags & new_flags & used_mask &
-+ DRM_BO_MASK_MEM))
-+ new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
-+ (bo->mem.flags & DRM_BO_MASK_MEM);
-+ else {
-+ DRM_ERROR("Incompatible memory type specification "
-+ "for NO_EVICT buffer.\n");
-+ return -EPERM;
-+ }
-+ }
-+
-+ if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
-+ DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
-+ return -EPERM;
-+ }
-+
-+ new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
-+ DRM_BO_FLAG_READ);
-+
-+ if (!new_props) {
-+ DRM_ERROR("Invalid buffer object rwx properties\n");
-+ return -EINVAL;
-+ }
-+
-+ bo->mem.mask = new_flags;
-+ return 0;
-+}
-+
-+/*
-+ * Call dev->struct_mutex locked.
-+ */
-+
-+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
-+ uint32_t handle, int check_owner)
-+{
-+ struct drm_user_object *uo;
-+ struct drm_buffer_object *bo;
-+
-+ uo = drm_lookup_user_object(file_priv, handle);
-+
-+ if (!uo || (uo->type != drm_buffer_type)) {
-+ DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
-+ return NULL;
-+ }
-+
-+ if (check_owner && file_priv != uo->owner) {
-+ if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
-+ return NULL;
-+ }
-+
-+ bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-+ atomic_inc(&bo->usage);
-+ return bo;
-+}
-+EXPORT_SYMBOL(drm_lookup_buffer_object);
-+
-+/*
-+ * Call bo->mutex locked.
-+ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
-+ * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
-+ */
-+
-+static int drm_bo_quick_busy(struct drm_buffer_object *bo)
-+{
-+ struct drm_fence_object *fence = bo->fence;
-+
-+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ if (fence) {
-+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ return 0;
-+ }
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Call bo->mutex locked.
-+ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
-+ */
-+
-+static int drm_bo_busy(struct drm_buffer_object *bo)
-+{
-+ struct drm_fence_object *fence = bo->fence;
-+
-+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ if (fence) {
-+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ return 0;
-+ }
-+ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
-+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ return 0;
-+ }
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static int drm_bo_evict_cached(struct drm_buffer_object *bo)
-+{
-+ int ret = 0;
-+
-+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ if (bo->mem.mm_node)
-+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
-+ return ret;
-+}
-+
-+/*
-+ * Wait until a buffer is unmapped.
-+ */
-+
-+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
-+{
-+ int ret = 0;
-+
-+ if ((atomic_read(&bo->mapped) >= 0) && no_wait)
-+ return -EBUSY;
-+
-+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-+ atomic_read(&bo->mapped) == -1);
-+
-+ if (ret == -EINTR)
-+ ret = -EAGAIN;
-+
-+ return ret;
-+}
-+
-+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
-+{
-+ int ret;
-+
-+ mutex_lock(&bo->mutex);
-+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ mutex_unlock(&bo->mutex);
-+ return ret;
-+}
-+
-+/*
-+ * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
-+ * Until then, we cannot really do anything with it except delete it.
-+ */
-+
-+static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
-+ int eagain_if_wait)
-+{
-+ int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+
-+ if (ret && no_wait)
-+ return -EBUSY;
-+ else if (!ret)
-+ return 0;
-+
-+ ret = 0;
-+ mutex_unlock(&bo->mutex);
-+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-+ !drm_bo_check_unfenced(bo));
-+ mutex_lock(&bo->mutex);
-+ if (ret == -EINTR)
-+ return -EAGAIN;
-+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-+ if (ret) {
-+ DRM_ERROR("Timeout waiting for buffer to become fenced\n");
-+ return -EBUSY;
-+ }
-+ if (eagain_if_wait)
-+ return -EAGAIN;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Fill in the ioctl reply argument with buffer info.
-+ * Bo locked.
-+ */
-+
-+void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-+ struct drm_bo_info_rep *rep)
-+{
-+ if (!rep)
-+ return;
-+
-+ rep->handle = bo->base.hash.key;
-+ rep->flags = bo->mem.flags;
-+ rep->size = bo->num_pages * PAGE_SIZE;
-+ rep->offset = bo->offset;
-+
-+ if (bo->type == drm_bo_type_dc)
-+ rep->arg_handle = bo->map_list.user_token;
-+ else
-+ rep->arg_handle = 0;
-+
-+ rep->mask = bo->mem.mask;
-+ rep->buffer_start = bo->buffer_start;
-+ rep->fence_flags = bo->fence_type;
-+ rep->rep_flags = 0;
-+ rep->page_alignment = bo->mem.page_alignment;
-+
-+ if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
-+ DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
-+ DRM_BO_REP_BUSY);
-+ }
-+}
-+EXPORT_SYMBOL(drm_bo_fill_rep_arg);
-+
-+/*
-+ * Wait for buffer idle and register that we've mapped the buffer.
-+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
-+ * so that if the client dies, the mapping is automatically
-+ * unregistered.
-+ */
-+
-+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
-+ uint32_t map_flags, unsigned hint,
-+ struct drm_bo_info_rep *rep)
-+{
-+ struct drm_buffer_object *bo;
-+ struct drm_device *dev = file_priv->minor->dev;
-+ int ret = 0;
-+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!bo)
-+ return -EINVAL;
-+
-+ mutex_lock(&bo->mutex);
-+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-+ if (ret)
-+ goto out;
-+
-+ /*
-+ * If this returns true, we are currently unmapped.
-+ * We need to do this test, because unmapping can
-+ * be done without the bo->mutex held.
-+ */
-+
-+ while (1) {
-+ if (atomic_inc_and_test(&bo->mapped)) {
-+ if (no_wait && drm_bo_busy(bo)) {
-+ atomic_dec(&bo->mapped);
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+ ret = drm_bo_wait(bo, 0, 0, no_wait);
-+ if (ret) {
-+ atomic_dec(&bo->mapped);
-+ goto out;
-+ }
-+
-+ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
-+ drm_bo_evict_cached(bo);
-+
-+ break;
-+ } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
-+
-+ /*
-+ * We are already mapped with different flags.
-+ * need to wait for unmap.
-+ */
-+
-+ ret = drm_bo_wait_unmapped(bo, no_wait);
-+ if (ret)
-+ goto out;
-+
-+ continue;
-+ }
-+ break;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (ret) {
-+ if (atomic_add_negative(-1, &bo->mapped))
-+ wake_up_all(&bo->event_queue);
-+
-+ } else
-+ drm_bo_fill_rep_arg(bo, rep);
-+out:
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return ret;
-+}
-+
-+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_buffer_object *bo;
-+ struct drm_ref_object *ro;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ if (!bo) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-+ if (!ro) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ drm_remove_ref_object(file_priv, ro);
-+ drm_bo_usage_deref_locked(&bo);
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+/*
-+ * Call struct-sem locked.
-+ */
-+
-+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
-+ struct drm_user_object *uo,
-+ enum drm_ref_type action)
-+{
-+ struct drm_buffer_object *bo =
-+ drm_user_object_entry(uo, struct drm_buffer_object, base);
-+
-+ /*
-+ * We DON'T want to take the bo->lock here, because we want to
-+ * hold it when we wait for unmapped buffer.
-+ */
-+
-+ BUG_ON(action != _DRM_REF_TYPE1);
-+
-+ if (atomic_add_negative(-1, &bo->mapped))
-+ wake_up_all(&bo->event_queue);
-+}
-+
-+/*
-+ * bo->mutex locked.
-+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
-+ */
-+
-+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
-+ int no_wait, int move_unfenced)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int ret = 0;
-+ struct drm_bo_mem_reg mem;
-+ /*
-+ * Flush outstanding fences.
-+ */
-+
-+ drm_bo_busy(bo);
-+
-+ /*
-+ * Wait for outstanding fences.
-+ */
-+
-+ ret = drm_bo_wait(bo, 0, 0, no_wait);
-+ if (ret)
-+ return ret;
-+
-+ mem.num_pages = bo->num_pages;
-+ mem.size = mem.num_pages << PAGE_SHIFT;
-+ mem.mask = new_mem_flags;
-+ mem.page_alignment = bo->mem.page_alignment;
-+
-+ mutex_lock(&bm->evict_mutex);
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(&bo->lru);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ /*
-+ * Determine where to move the buffer.
-+ */
-+ ret = drm_bo_mem_space(bo, &mem, no_wait);
-+ if (ret)
-+ goto out_unlock;
-+
-+ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
-+
-+out_unlock:
-+ mutex_lock(&dev->struct_mutex);
-+ if (ret || !move_unfenced) {
-+ if (mem.mm_node) {
-+ if (mem.mm_node != bo->pinned_node)
-+ drm_mm_put_block(mem.mm_node);
-+ mem.mm_node = NULL;
-+ }
-+ drm_bo_add_to_lru(bo);
-+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-+ wake_up_all(&bo->event_queue);
-+ DRM_FLAG_MASKED(bo->priv_flags, 0,
-+ _DRM_BO_FLAG_UNFENCED);
-+ }
-+ } else {
-+ list_add_tail(&bo->lru, &bm->unfenced);
-+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
-+ _DRM_BO_FLAG_UNFENCED);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_unlock(&bm->evict_mutex);
-+ return ret;
-+}
-+
-+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
-+{
-+ uint32_t flag_diff = (mem->mask ^ mem->flags);
-+
-+ if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
-+ return 0;
-+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-+ (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
-+ (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
-+ return 0;
-+
-+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
-+ ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
-+ (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
-+ return 0;
-+ return 1;
-+}
-+
-+/*
-+ * bo locked.
-+ */
-+
-+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
-+ uint32_t fence_class,
-+ int move_unfenced, int no_wait)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ uint32_t ftype;
-+ int ret;
-+
-+ DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
-+ (unsigned long long) bo->mem.mask,
-+ (unsigned long long) bo->mem.flags);
-+
-+ ret = driver->fence_type(bo, &fence_class, &ftype);
-+
-+ if (ret) {
-+ DRM_ERROR("Driver did not support given buffer permissions\n");
-+ return ret;
-+ }
-+
-+ /*
-+ * We're switching command submission mechanism,
-+ * or cannot simply rely on the hardware serializing for us.
-+ *
-+ * Insert a driver-dependant barrier or wait for buffer idle.
-+ */
-+
-+ if ((fence_class != bo->fence_class) ||
-+ ((ftype ^ bo->fence_type) & bo->fence_type)) {
-+
-+ ret = -EINVAL;
-+ if (driver->command_stream_barrier) {
-+ ret = driver->command_stream_barrier(bo,
-+ fence_class,
-+ ftype,
-+ no_wait);
-+ }
-+ if (ret)
-+ ret = drm_bo_wait(bo, 0, 0, no_wait);
-+
-+ if (ret)
-+ return ret;
-+
-+ }
-+
-+ bo->new_fence_class = fence_class;
-+ bo->new_fence_type = ftype;
-+
-+ ret = drm_bo_wait_unmapped(bo, no_wait);
-+ if (ret) {
-+ DRM_ERROR("Timed out waiting for buffer unmap.\n");
-+ return ret;
-+ }
-+
-+ /*
-+ * Check whether we need to move buffer.
-+ */
-+
-+ if (!drm_bo_mem_compat(&bo->mem)) {
-+ ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
-+ move_unfenced);
-+ if (ret) {
-+ if (ret != -EAGAIN)
-+ DRM_ERROR("Failed moving buffer.\n");
-+ if (ret == -ENOMEM)
-+ DRM_ERROR("Out of aperture space.\n");
-+ return ret;
-+ }
-+ }
-+
-+ /*
-+ * Pinned buffers.
-+ */
-+
-+ if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
-+ bo->pinned_mem_type = bo->mem.mem_type;
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(&bo->pinned_lru);
-+ drm_bo_add_to_pinned_lru(bo);
-+
-+ if (bo->pinned_node != bo->mem.mm_node) {
-+ if (bo->pinned_node != NULL)
-+ drm_mm_put_block(bo->pinned_node);
-+ bo->pinned_node = bo->mem.mm_node;
-+ }
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ } else if (bo->pinned_node != NULL) {
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (bo->pinned_node != bo->mem.mm_node)
-+ drm_mm_put_block(bo->pinned_node);
-+
-+ list_del_init(&bo->pinned_lru);
-+ bo->pinned_node = NULL;
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ }
-+
-+ /*
-+ * We might need to add a TTM.
-+ */
-+
-+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
-+ ret = drm_bo_add_ttm(bo);
-+ if (ret)
-+ return ret;
-+ }
-+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
-+
-+ /*
-+ * Finally, adjust lru to be sure.
-+ */
-+
-+ mutex_lock(&dev->struct_mutex);
-+ list_del(&bo->lru);
-+ if (move_unfenced) {
-+ list_add_tail(&bo->lru, &bm->unfenced);
-+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
-+ _DRM_BO_FLAG_UNFENCED);
-+ } else {
-+ drm_bo_add_to_lru(bo);
-+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-+ wake_up_all(&bo->event_queue);
-+ DRM_FLAG_MASKED(bo->priv_flags, 0,
-+ _DRM_BO_FLAG_UNFENCED);
-+ }
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+}
-+
-+int drm_bo_do_validate(struct drm_buffer_object *bo,
-+ uint64_t flags, uint64_t mask, uint32_t hint,
-+ uint32_t fence_class,
-+ int no_wait,
-+ struct drm_bo_info_rep *rep)
-+{
-+ int ret;
-+
-+ mutex_lock(&bo->mutex);
-+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-+
-+ if (ret)
-+ goto out;
-+
-+ DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
-+ ret = drm_bo_new_mask(bo, flags, mask);
-+ if (ret)
-+ goto out;
-+
-+ ret = drm_buffer_object_validate(bo,
-+ fence_class,
-+ !(hint & DRM_BO_HINT_DONT_FENCE),
-+ no_wait);
-+out:
-+ if (rep)
-+ drm_bo_fill_rep_arg(bo, rep);
-+
-+ mutex_unlock(&bo->mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_do_validate);
-+
-+
-+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-+ uint32_t fence_class,
-+ uint64_t flags, uint64_t mask,
-+ uint32_t hint,
-+ int use_old_fence_class,
-+ struct drm_bo_info_rep *rep,
-+ struct drm_buffer_object **bo_rep)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_buffer_object *bo;
-+ int ret;
-+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!bo)
-+ return -EINVAL;
-+
-+ if (use_old_fence_class)
-+ fence_class = bo->fence_class;
-+
-+ /*
-+ * Only allow creator to change shared buffer mask.
-+ */
-+
-+ if (bo->base.owner != file_priv)
-+ mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-+
-+
-+ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
-+ no_wait, rep);
-+
-+ if (!ret && bo_rep)
-+ *bo_rep = bo;
-+ else
-+ drm_bo_usage_deref_unlocked(&bo);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_handle_validate);
-+
-+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
-+ struct drm_bo_info_rep *rep)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_buffer_object *bo;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!bo)
-+ return -EINVAL;
-+
-+ mutex_lock(&bo->mutex);
-+ if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
-+ (void)drm_bo_busy(bo);
-+ drm_bo_fill_rep_arg(bo, rep);
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return 0;
-+}
-+
-+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
-+ uint32_t hint,
-+ struct drm_bo_info_rep *rep)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_buffer_object *bo;
-+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!bo)
-+ return -EINVAL;
-+
-+ mutex_lock(&bo->mutex);
-+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-+ if (ret)
-+ goto out;
-+ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
-+ if (ret)
-+ goto out;
-+
-+ drm_bo_fill_rep_arg(bo, rep);
-+
-+out:
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return ret;
-+}
-+
-+static inline size_t drm_size_align(size_t size)
-+{
-+ size_t tmpSize = 4;
-+ if (size > PAGE_SIZE)
-+ return PAGE_ALIGN(size);
-+ while (tmpSize < size)
-+ tmpSize <<= 1;
-+
-+ return (size_t) tmpSize;
-+}
-+
-+static int drm_bo_reserve_size(struct drm_device *dev,
-+ int user_bo,
-+ unsigned long num_pages,
-+ unsigned long *size)
-+{
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+
-+ *size = drm_size_align(sizeof(struct drm_buffer_object)) +
-+ /* Always account for a TTM, even for fixed memory types */
-+ drm_ttm_size(dev, num_pages, user_bo) +
-+ /* user space mapping structure */
-+ drm_size_align(sizeof(drm_local_map_t)) +
-+ /* file offset space, aperture space, pinned space */
-+ 3*drm_size_align(sizeof(struct drm_mm_node *)) +
-+ /* ttm backend */
-+ driver->backend_size(dev, num_pages);
-+
-+ // FIXME - ENOMEM?
-+ return 0;
-+}
-+
-+int drm_buffer_object_create(struct drm_device *dev,
-+ unsigned long size,
-+ enum drm_bo_type type,
-+ uint64_t mask,
-+ uint32_t hint,
-+ uint32_t page_alignment,
-+ unsigned long buffer_start,
-+ struct drm_buffer_object **buf_obj)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_buffer_object *bo;
-+ int ret = 0;
-+ unsigned long num_pages;
-+ unsigned long reserved_size;
-+
-+ size += buffer_start & ~PAGE_MASK;
-+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+ if (num_pages == 0) {
-+ DRM_ERROR("Illegal buffer object size.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_reserve_size(dev, type == drm_bo_type_user,
-+ num_pages, &reserved_size);
-+
-+ if (ret) {
-+ DRM_DEBUG("Failed reserving space for buffer object.\n");
-+ return ret;
-+ }
-+
-+ bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
-+
-+ if (!bo) {
-+ drm_bo_unreserve_size(num_pages);
-+ return -ENOMEM;
-+ }
-+
-+ mutex_init(&bo->mutex);
-+ mutex_lock(&bo->mutex);
-+
-+ bo->reserved_size = reserved_size;
-+ atomic_set(&bo->usage, 1);
-+ atomic_set(&bo->mapped, -1);
-+ DRM_INIT_WAITQUEUE(&bo->event_queue);
-+ INIT_LIST_HEAD(&bo->lru);
-+ INIT_LIST_HEAD(&bo->pinned_lru);
-+ INIT_LIST_HEAD(&bo->ddestroy);
-+#ifdef DRM_ODD_MM_COMPAT
-+ INIT_LIST_HEAD(&bo->p_mm_list);
-+ INIT_LIST_HEAD(&bo->vma_list);
-+#endif
-+ bo->dev = dev;
-+ bo->type = type;
-+ bo->num_pages = num_pages;
-+ bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-+ bo->mem.num_pages = bo->num_pages;
-+ bo->mem.mm_node = NULL;
-+ bo->mem.page_alignment = page_alignment;
-+ bo->buffer_start = buffer_start & PAGE_MASK;
-+ bo->priv_flags = 0;
-+ bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
-+ DRM_BO_FLAG_MAPPABLE;
-+ bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
-+ DRM_BO_FLAG_MAPPABLE;
-+ atomic_inc(&bm->count);
-+ ret = drm_bo_new_mask(bo, mask, mask);
-+ if (ret)
-+ goto out_err;
-+
-+ if (bo->type == drm_bo_type_dc) {
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_bo_setup_vm_locked(bo);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (ret)
-+ goto out_err;
-+ }
-+
-+ ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
-+ if (ret)
-+ goto out_err;
-+
-+ mutex_unlock(&bo->mutex);
-+ *buf_obj = bo;
-+ return 0;
-+
-+out_err:
-+ mutex_unlock(&bo->mutex);
-+
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_buffer_object_create);
-+
-+
-+static int drm_bo_add_user_object(struct drm_file *file_priv,
-+ struct drm_buffer_object *bo, int shareable)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_user_object(file_priv, &bo->base, shareable);
-+ if (ret)
-+ goto out;
-+
-+ bo->base.remove = drm_bo_base_deref_locked;
-+ bo->base.type = drm_buffer_type;
-+ bo->base.ref_struct_locked = NULL;
-+ bo->base.unref = drm_buffer_user_object_unmap;
-+
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_create_arg *arg = data;
-+ struct drm_bo_create_req *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ struct drm_buffer_object *entry;
-+ enum drm_bo_type bo_type;
-+ int ret = 0;
-+
-+ DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
-+ (int)(req->size / 1024), req->page_alignment * 4);
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
-+
-+ if (bo_type == drm_bo_type_user)
-+ req->mask &= ~DRM_BO_FLAG_SHAREABLE;
-+
-+ ret = drm_buffer_object_create(file_priv->minor->dev,
-+ req->size, bo_type, req->mask,
-+ req->hint, req->page_alignment,
-+ req->buffer_start, &entry);
-+ if (ret)
-+ goto out;
-+
-+ ret = drm_bo_add_user_object(file_priv, entry,
-+ req->mask & DRM_BO_FLAG_SHAREABLE);
-+ if (ret) {
-+ drm_bo_usage_deref_unlocked(&entry);
-+ goto out;
-+ }
-+
-+ mutex_lock(&entry->mutex);
-+ drm_bo_fill_rep_arg(entry, rep);
-+ mutex_unlock(&entry->mutex);
-+
-+out:
-+ return ret;
-+}
-+
-+int drm_bo_setstatus_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_map_wait_idle_arg *arg = data;
-+ struct drm_bo_info_req *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ int ret;
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
-+ if (ret)
-+ return ret;
-+
-+ ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
-+ req->flags,
-+ req->mask,
-+ req->hint | DRM_BO_HINT_DONT_FENCE,
-+ 1,
-+ rep, NULL);
-+
-+ (void) drm_bo_read_unlock(&dev->bm.bm_lock);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_map_wait_idle_arg *arg = data;
-+ struct drm_bo_info_req *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ int ret;
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
-+ req->hint, rep);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_handle_arg *arg = data;
-+ int ret;
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_buffer_object_unmap(file_priv, arg->handle);
-+ return ret;
-+}
-+
-+
-+int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_reference_info_arg *arg = data;
-+ struct drm_bo_handle_arg *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ struct drm_user_object *uo;
-+ int ret;
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_user_object_ref(file_priv, req->handle,
-+ drm_buffer_type, &uo);
-+ if (ret)
-+ return ret;
-+
-+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_handle_arg *arg = data;
-+ int ret = 0;
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
-+ return ret;
-+}
-+
-+int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_reference_info_arg *arg = data;
-+ struct drm_bo_handle_arg *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ int ret;
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_bo_map_wait_idle_arg *arg = data;
-+ struct drm_bo_info_req *req = &arg->d.req;
-+ struct drm_bo_info_rep *rep = &arg->d.rep;
-+ int ret;
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_handle_wait(file_priv, req->handle,
-+ req->hint, rep);
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+static int drm_bo_leave_list(struct drm_buffer_object *bo,
-+ uint32_t mem_type,
-+ int free_pinned,
-+ int allow_errors)
-+{
-+ struct drm_device *dev = bo->dev;
-+ int ret = 0;
-+
-+ mutex_lock(&bo->mutex);
-+
-+ ret = drm_bo_expire_fence(bo, allow_errors);
-+ if (ret)
-+ goto out;
-+
-+ if (free_pinned) {
-+ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(&bo->pinned_lru);
-+ if (bo->pinned_node == bo->mem.mm_node)
-+ bo->pinned_node = NULL;
-+ if (bo->pinned_node != NULL) {
-+ drm_mm_put_block(bo->pinned_node);
-+ bo->pinned_node = NULL;
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+
-+ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
-+ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
-+ "cleanup. Removing flag and evicting.\n");
-+ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
-+ bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
-+ }
-+
-+ if (bo->mem.mem_type == mem_type)
-+ ret = drm_bo_evict(bo, mem_type, 0);
-+
-+ if (ret) {
-+ if (allow_errors) {
-+ goto out;
-+ } else {
-+ ret = 0;
-+ DRM_ERROR("Cleanup eviction failed\n");
-+ }
-+ }
-+
-+out:
-+ mutex_unlock(&bo->mutex);
-+ return ret;
-+}
-+
-+
-+static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
-+ int pinned_list)
-+{
-+ if (pinned_list)
-+ return list_entry(list, struct drm_buffer_object, pinned_lru);
-+ else
-+ return list_entry(list, struct drm_buffer_object, lru);
-+}
-+
-+/*
-+ * dev->struct_mutex locked.
-+ */
-+
-+static int drm_bo_force_list_clean(struct drm_device *dev,
-+ struct list_head *head,
-+ unsigned mem_type,
-+ int free_pinned,
-+ int allow_errors,
-+ int pinned_list)
-+{
-+ struct list_head *list, *next, *prev;
-+ struct drm_buffer_object *entry, *nentry;
-+ int ret;
-+ int do_restart;
-+
-+ /*
-+ * The list traversal is a bit odd here, because an item may
-+ * disappear from the list when we release the struct_mutex or
-+ * when we decrease the usage count. Also we're not guaranteed
-+ * to drain pinned lists, so we can't always restart.
-+ */
-+
-+restart:
-+ nentry = NULL;
-+ list_for_each_safe(list, next, head) {
-+ prev = list->prev;
-+
-+ entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
-+ atomic_inc(&entry->usage);
-+ if (nentry) {
-+ atomic_dec(&nentry->usage);
-+ nentry = NULL;
-+ }
-+
-+ /*
-+ * Protect the next item from destruction, so we can check
-+ * its list pointers later on.
-+ */
-+
-+ if (next != head) {
-+ nentry = drm_bo_entry(next, pinned_list);
-+ atomic_inc(&nentry->usage);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ ret = drm_bo_leave_list(entry, mem_type, free_pinned,
-+ allow_errors);
-+ mutex_lock(&dev->struct_mutex);
-+
-+ drm_bo_usage_deref_locked(&entry);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Has the next item disappeared from the list?
-+ */
-+
-+ do_restart = ((next->prev != list) && (next->prev != prev));
-+
-+ if (nentry != NULL && do_restart)
-+ drm_bo_usage_deref_locked(&nentry);
-+
-+ if (do_restart)
-+ goto restart;
-+ }
-+ return 0;
-+}
-+
-+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem_type];
-+ int ret = -EINVAL;
-+
-+ if (mem_type >= DRM_BO_MEM_TYPES) {
-+ DRM_ERROR("Illegal memory type %d\n", mem_type);
-+ return ret;
-+ }
-+
-+ if (!man->has_type) {
-+ DRM_ERROR("Trying to take down uninitialized "
-+ "memory manager type %u\n", mem_type);
-+ return ret;
-+ }
-+ man->use_type = 0;
-+ man->has_type = 0;
-+
-+ ret = 0;
-+ if (mem_type > 0) {
-+ BUG_ON(!list_empty(&bm->unfenced));
-+ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
-+ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
-+
-+ if (drm_mm_clean(&man->manager)) {
-+ drm_mm_takedown(&man->manager);
-+ } else {
-+ ret = -EBUSY;
-+ }
-+ }
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_clean_mm);
-+
-+/**
-+ *Evict all buffers of a particular mem_type, but leave memory manager
-+ *regions for NO_MOVE buffers intact. New buffers cannot be added at this
-+ *point since we have the hardware lock.
-+ */
-+
-+static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
-+{
-+ int ret;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem_type];
-+
-+ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
-+ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
-+ return -EINVAL;
-+ }
-+
-+ if (!man->has_type) {
-+ DRM_ERROR("Memory type %u has not been initialized.\n",
-+ mem_type);
-+ return 0;
-+ }
-+
-+ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
-+ if (ret)
-+ return ret;
-+ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
-+
-+ return ret;
-+}
-+
-+int drm_bo_init_mm(struct drm_device *dev,
-+ unsigned type,
-+ unsigned long p_offset, unsigned long p_size)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int ret = -EINVAL;
-+ struct drm_mem_type_manager *man;
-+
-+ if (type >= DRM_BO_MEM_TYPES) {
-+ DRM_ERROR("Illegal memory type %d\n", type);
-+ return ret;
-+ }
-+
-+ man = &bm->man[type];
-+ if (man->has_type) {
-+ DRM_ERROR("Memory manager already initialized for type %d\n",
-+ type);
-+ return ret;
-+ }
-+
-+ ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
-+ if (ret)
-+ return ret;
-+
-+ ret = 0;
-+ if (type != DRM_BO_MEM_LOCAL) {
-+ if (!p_size) {
-+ DRM_ERROR("Zero size memory manager type %d\n", type);
-+ return ret;
-+ }
-+ ret = drm_mm_init(&man->manager, p_offset, p_size);
-+ if (ret)
-+ return ret;
-+ }
-+ man->has_type = 1;
-+ man->use_type = 1;
-+
-+ INIT_LIST_HEAD(&man->lru);
-+ INIT_LIST_HEAD(&man->pinned);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_init_mm);
-+
-+/*
-+ * This function is intended to be called on drm driver unload.
-+ * If you decide to call it from lastclose, you must protect the call
-+ * from a potentially racing drm_bo_driver_init in firstopen.
-+ * (This may happen on X server restart).
-+ */
-+
-+int drm_bo_driver_finish(struct drm_device *dev)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int ret = 0;
-+ unsigned i = DRM_BO_MEM_TYPES;
-+ struct drm_mem_type_manager *man;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (!bm->initialized)
-+ goto out;
-+ bm->initialized = 0;
-+
-+ while (i--) {
-+ man = &bm->man[i];
-+ if (man->has_type) {
-+ man->use_type = 0;
-+ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
-+ ret = -EBUSY;
-+ DRM_ERROR("DRM memory manager type %d "
-+ "is not clean.\n", i);
-+ }
-+ man->has_type = 0;
-+ }
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (!cancel_delayed_work(&bm->wq))
-+ flush_scheduled_work();
-+
-+ mutex_lock(&dev->struct_mutex);
-+ drm_bo_delayed_delete(dev, 1);
-+ if (list_empty(&bm->ddestroy))
-+ DRM_DEBUG("Delayed destroy list was clean\n");
-+
-+ if (list_empty(&bm->man[0].lru))
-+ DRM_DEBUG("Swap list was clean\n");
-+
-+ if (list_empty(&bm->man[0].pinned))
-+ DRM_DEBUG("NO_MOVE list was clean\n");
-+
-+ if (list_empty(&bm->unfenced))
-+ DRM_DEBUG("Unfenced list was clean\n");
-+
-+ __free_page(bm->dummy_read_page);
-+
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_driver_finish);
-+
-+/*
-+ * This function is intended to be called on drm driver load.
-+ * If you decide to call it from firstopen, you must protect the call
-+ * from a potentially racing drm_bo_driver_finish in lastclose.
-+ * (This may happen on X server restart).
-+ */
-+
-+int drm_bo_driver_init(struct drm_device *dev)
-+{
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ int ret = -EINVAL;
-+
-+ bm->dummy_read_page = NULL;
-+ drm_bo_init_lock(&bm->bm_lock);
-+ mutex_lock(&dev->struct_mutex);
-+ if (!driver)
-+ goto out_unlock;
-+
-+ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-+ if (!bm->dummy_read_page) {
-+ ret = -ENOMEM;
-+ goto out_unlock;
-+ }
-+
-+
-+ /*
-+ * Initialize the system memory buffer type.
-+ * Other types need to be driver / IOCTL initialized.
-+ */
-+ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
-+ if (ret)
-+ goto out_unlock;
-+
-+ INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
-+
-+ bm->initialized = 1;
-+ bm->nice_mode = 1;
-+ atomic_set(&bm->count, 0);
-+ bm->cur_pages = 0;
-+ INIT_LIST_HEAD(&bm->unfenced);
-+ INIT_LIST_HEAD(&bm->ddestroy);
-+out_unlock:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_driver_init);
-+
-+int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mm_init_arg *arg = data;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ int ret;
-+
-+ if (!driver) {
-+ DRM_ERROR("Buffer objects are not supported by this driver\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
-+ if (ret)
-+ return ret;
-+
-+ ret = -EINVAL;
-+ if (arg->magic != DRM_BO_INIT_MAGIC) {
-+ DRM_ERROR("You are using an old libdrm that is not compatible with\n"
-+ "\tthe kernel DRM module. Please upgrade your libdrm.\n");
-+ return -EINVAL;
-+ }
-+ if (arg->major != DRM_BO_INIT_MAJOR) {
-+ DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
-+ "\tversion don't match. Got %d, expected %d.\n",
-+ arg->major, DRM_BO_INIT_MAJOR);
-+ return -EINVAL;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (!bm->initialized) {
-+ DRM_ERROR("DRM memory manager was not initialized.\n");
-+ goto out;
-+ }
-+ if (arg->mem_type == 0) {
-+ DRM_ERROR("System memory buffers already initialized.\n");
-+ goto out;
-+ }
-+ ret = drm_bo_init_mm(dev, arg->mem_type,
-+ arg->p_offset, arg->p_size);
-+
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-+
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mm_type_arg *arg = data;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ int ret;
-+
-+ if (!driver) {
-+ DRM_ERROR("Buffer objects are not supported by this driver\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
-+ if (ret)
-+ return ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = -EINVAL;
-+ if (!bm->initialized) {
-+ DRM_ERROR("DRM memory manager was not initialized\n");
-+ goto out;
-+ }
-+ if (arg->mem_type == 0) {
-+ DRM_ERROR("No takedown for System memory buffers.\n");
-+ goto out;
-+ }
-+ ret = 0;
-+ if (drm_bo_clean_mm(dev, arg->mem_type)) {
-+ DRM_ERROR("Memory manager type %d not clean. "
-+ "Delaying takedown\n", arg->mem_type);
-+ }
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-+
-+ if (ret)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ struct drm_mm_type_arg *arg = data;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ int ret;
-+
-+ if (!driver) {
-+ DRM_ERROR("Buffer objects are not supported by this driver\n");
-+ return -EINVAL;
-+ }
-+
-+ if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
-+ DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-+ ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_bo_lock_mm(dev, arg->mem_type);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (ret) {
-+ (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+int drm_mm_unlock_ioctl(struct drm_device *dev,
-+ void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_mm_type_arg *arg = data;
-+ struct drm_bo_driver *driver = dev->driver->bo_driver;
-+ int ret;
-+
-+ if (!driver) {
-+ DRM_ERROR("Buffer objects are not supported by this driver\n");
-+ return -EINVAL;
-+ }
-+
-+ if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-+ ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * buffer object vm functions.
-+ */
-+
-+int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
-+
-+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
-+ if (mem->mem_type == DRM_BO_MEM_LOCAL)
-+ return 0;
-+
-+ if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
-+ return 0;
-+
-+ if (mem->flags & DRM_BO_FLAG_CACHED)
-+ return 0;
-+ }
-+ return 1;
-+}
-+EXPORT_SYMBOL(drm_mem_reg_is_pci);
-+
-+/**
-+ * \c Get the PCI offset for the buffer object memory.
-+ *
-+ * \param bo The buffer object.
-+ * \param bus_base On return the base of the PCI region
-+ * \param bus_offset On return the byte offset into the PCI region
-+ * \param bus_size On return the byte size of the buffer object or zero if
-+ * the buffer object memory is not accessible through a PCI region.
-+ * \return Failure indication.
-+ *
-+ * Returns -EINVAL if the buffer object is currently not mappable.
-+ * Otherwise returns zero.
-+ */
-+
-+int drm_bo_pci_offset(struct drm_device *dev,
-+ struct drm_bo_mem_reg *mem,
-+ unsigned long *bus_base,
-+ unsigned long *bus_offset, unsigned long *bus_size)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
-+
-+ *bus_size = 0;
-+ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
-+ return -EINVAL;
-+
-+ if (drm_mem_reg_is_pci(dev, mem)) {
-+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
-+ *bus_size = mem->num_pages << PAGE_SHIFT;
-+ *bus_base = man->io_offset;
-+ }
-+
-+ return 0;
-+}
-+
-+/**
-+ * \c Kill all user-space virtual mappings of this buffer object.
-+ *
-+ * \param bo The buffer object.
-+ *
-+ * Call bo->mutex locked.
-+ */
-+
-+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
-+{
-+ struct drm_device *dev = bo->dev;
-+ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
-+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
-+
-+ if (!dev->dev_mapping)
-+ return;
-+
-+ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
-+}
-+
-+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
-+{
-+ struct drm_map_list *list;
-+ drm_local_map_t *map;
-+ struct drm_device *dev = bo->dev;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ if (bo->type != drm_bo_type_dc)
-+ return;
-+
-+ list = &bo->map_list;
-+ if (list->user_token) {
-+ drm_ht_remove_item(&dev->map_hash, &list->hash);
-+ list->user_token = 0;
-+ }
-+ if (list->file_offset_node) {
-+ drm_mm_put_block(list->file_offset_node);
-+ list->file_offset_node = NULL;
-+ }
-+
-+ map = list->map;
-+ if (!map)
-+ return;
-+
-+ drm_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
-+ list->map = NULL;
-+ list->user_token = 0ULL;
-+ drm_bo_usage_deref_locked(&bo);
-+}
-+
-+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
-+{
-+ struct drm_map_list *list = &bo->map_list;
-+ drm_local_map_t *map;
-+ struct drm_device *dev = bo->dev;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ list->map = drm_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
-+ if (!list->map)
-+ return -ENOMEM;
-+
-+ map = list->map;
-+ map->offset = 0;
-+ map->type = _DRM_TTM;
-+ map->flags = _DRM_REMOVABLE;
-+ map->size = bo->mem.num_pages * PAGE_SIZE;
-+ atomic_inc(&bo->usage);
-+ map->handle = (void *)bo;
-+
-+ list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
-+ bo->mem.num_pages, 0, 0);
-+
-+ if (!list->file_offset_node) {
-+ drm_bo_takedown_vm_locked(bo);
-+ return -ENOMEM;
-+ }
-+
-+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-+ bo->mem.num_pages, 0);
-+ if (!list->file_offset_node) {
-+ drm_bo_takedown_vm_locked(bo);
-+ return -ENOMEM;
-+ }
-+
-+ list->hash.key = list->file_offset_node->start;
-+ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
-+ drm_bo_takedown_vm_locked(bo);
-+ return -ENOMEM;
-+ }
-+
-+ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-+
-+ return 0;
-+}
-+
-+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
-+
-+ arg->major = DRM_BO_INIT_MAJOR;
-+ arg->minor = DRM_BO_INIT_MINOR;
-+ arg->patchlevel = DRM_BO_INIT_PATCH;
-+
-+ return 0;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/drm_bo_lock.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_bo_lock.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,175 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+/*
-+ * This file implements a simple replacement for the buffer manager use
-+ * of the heavyweight hardware lock.
-+ * The lock is a read-write lock. Taking it in read mode is fast, and
-+ * intended for in-kernel use only.
-+ * Taking it in write mode is slow.
-+ *
-+ * The write mode is used only when there is a need to block all
-+ * user-space processes from allocating a
-+ * new memory area.
-+ * Typical use in write mode is X server VT switching, and it's allowed
-+ * to leave kernel space with the write lock held. If a user-space process
-+ * dies while having the write-lock, it will be released during the file
-+ * descriptor release.
-+ *
-+ * The read lock is typically placed at the start of an IOCTL- or
-+ * user-space callable function that may end up allocating a memory area.
-+ * This includes setstatus, super-ioctls and no_pfn; the latter may move
-+ * unmappable regions to mappable. It's a bug to leave kernel space with the
-+ * read lock held.
-+ *
-+ * Both read- and write lock taking is interruptible for low signal-delivery
-+ * latency. The locking functions will return -EAGAIN if interrupted by a
-+ * signal.
-+ *
-+ * Locking order: The lock should be taken BEFORE any kernel mutexes
-+ * or spinlocks.
-+ */
-+
-+#include "drmP.h"
-+
-+void drm_bo_init_lock(struct drm_bo_lock *lock)
-+{
-+ DRM_INIT_WAITQUEUE(&lock->queue);
-+ atomic_set(&lock->write_lock_pending, 0);
-+ atomic_set(&lock->readers, 0);
-+}
-+
-+void drm_bo_read_unlock(struct drm_bo_lock *lock)
-+{
-+ if (unlikely(atomic_add_negative(-1, &lock->readers)))
-+ BUG();
-+ if (atomic_read(&lock->readers) == 0)
-+ wake_up_interruptible(&lock->queue);
-+}
-+EXPORT_SYMBOL(drm_bo_read_unlock);
-+
-+int drm_bo_read_lock(struct drm_bo_lock *lock)
-+{
-+ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
-+ int ret;
-+ ret = wait_event_interruptible
-+ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
-+ if (ret)
-+ return -EAGAIN;
-+ }
-+
-+ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
-+ int ret;
-+ ret = wait_event_interruptible
-+ (lock->queue, atomic_add_unless(&lock->readers, 1, -1));
-+ if (ret)
-+ return -EAGAIN;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_read_lock);
-+
-+static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
-+{
-+ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
-+ return -EINVAL;
-+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1))
-+ return -EINVAL;
-+ wake_up_interruptible(&lock->queue);
-+ return 0;
-+}
-+
-+static void drm_bo_write_lock_remove(struct drm_file *file_priv,
-+ struct drm_user_object *item)
-+{
-+ struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
-+ int ret;
-+
-+ ret = __drm_bo_write_unlock(lock);
-+ BUG_ON(ret);
-+}
-+
-+int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
-+{
-+ int ret = 0;
-+ struct drm_device *dev;
-+
-+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
-+ return -EINVAL;
-+
-+ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
-+ ret = wait_event_interruptible
-+ (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0);
-+
-+ if (ret) {
-+ atomic_set(&lock->write_lock_pending, 0);
-+ wake_up_interruptible(&lock->queue);
-+ return -EAGAIN;
-+ }
-+ }
-+
-+ /*
-+ * Add a dummy user-object, the destructor of which will
-+ * make sure the lock is released if the client dies
-+ * while holding it.
-+ */
-+
-+ dev = file_priv->minor->dev;
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_user_object(file_priv, &lock->base, 0);
-+ lock->base.remove = &drm_bo_write_lock_remove;
-+ lock->base.type = drm_lock_type;
-+ if (ret)
-+ (void)__drm_bo_write_unlock(lock);
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return ret;
-+}
-+
-+int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_ref_object *ro;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (lock->base.owner != file_priv) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return -EINVAL;
-+ }
-+ ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
-+ BUG_ON(!ro);
-+ drm_remove_ref_object(file_priv, ro);
-+ lock->base.owner = NULL;
-+
-+ mutex_unlock(&dev->struct_mutex);
-+ return 0;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/drm_bo_move.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_bo_move.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,590 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+/**
-+ * Free the old memory node unless it's a pinned region and we
-+ * have not been requested to free also pinned regions.
-+ */
-+
-+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
-+{
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+
-+ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
-+ mutex_lock(&bo->dev->struct_mutex);
-+ drm_mm_put_block(old_mem->mm_node);
-+ mutex_unlock(&bo->dev->struct_mutex);
-+ }
-+ old_mem->mm_node = NULL;
-+}
-+
-+int drm_bo_move_ttm(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_ttm *ttm = bo->ttm;
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ uint64_t save_flags = old_mem->flags;
-+ uint64_t save_mask = old_mem->mask;
-+ int ret;
-+
-+ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
-+ if (evict)
-+ drm_ttm_evict(ttm);
-+ else
-+ drm_ttm_unbind(ttm);
-+
-+ drm_bo_free_old_node(bo);
-+ DRM_FLAG_MASKED(old_mem->flags,
-+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
-+ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
-+ old_mem->mem_type = DRM_BO_MEM_LOCAL;
-+ save_flags = old_mem->flags;
-+ }
-+ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
-+ ret = drm_bind_ttm(ttm, new_mem);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ *old_mem = *new_mem;
-+ new_mem->mm_node = NULL;
-+ old_mem->mask = save_mask;
-+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_move_ttm);
-+
-+/**
-+ * \c Return a kernel virtual address to the buffer object PCI memory.
-+ *
-+ * \param bo The buffer object.
-+ * \return Failure indication.
-+ *
-+ * Returns -EINVAL if the buffer object is currently not mappable.
-+ * Returns -ENOMEM if the ioremap operation failed.
-+ * Otherwise returns zero.
-+ *
-+ * After a successfull call, bo->iomap contains the virtual address, or NULL
-+ * if the buffer object content is not accessible through PCI space.
-+ * Call bo->mutex locked.
-+ */
-+
-+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
-+ void **virtual)
-+{
-+ struct drm_buffer_manager *bm = &dev->bm;
-+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
-+ unsigned long bus_offset;
-+ unsigned long bus_size;
-+ unsigned long bus_base;
-+ int ret;
-+ void *addr;
-+
-+ *virtual = NULL;
-+ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
-+ if (ret || bus_size == 0)
-+ return ret;
-+
-+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
-+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-+ else {
-+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-+ if (!addr)
-+ return -ENOMEM;
-+ }
-+ *virtual = addr;
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_mem_reg_ioremap);
-+
-+/**
-+ * \c Unmap mapping obtained using drm_bo_ioremap
-+ *
-+ * \param bo The buffer object.
-+ *
-+ * Call bo->mutex locked.
-+ */
-+
-+void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
-+ void *virtual)
-+{
-+ struct drm_buffer_manager *bm;
-+ struct drm_mem_type_manager *man;
-+
-+ bm = &dev->bm;
-+ man = &bm->man[mem->mem_type];
-+
-+ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
-+ iounmap(virtual);
-+}
-+EXPORT_SYMBOL(drm_mem_reg_iounmap);
-+
-+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
-+{
-+ uint32_t *dstP =
-+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
-+ uint32_t *srcP =
-+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
-+
-+ int i;
-+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
-+ iowrite32(ioread32(srcP++), dstP++);
-+ return 0;
-+}
-+
-+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
-+ unsigned long page)
-+{
-+ struct page *d = drm_ttm_get_page(ttm, page);
-+ void *dst;
-+
-+ if (!d)
-+ return -ENOMEM;
-+
-+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-+ dst = kmap(d);
-+ if (!dst)
-+ return -ENOMEM;
-+
-+ memcpy_fromio(dst, src, PAGE_SIZE);
-+ kunmap(d);
-+ return 0;
-+}
-+
-+static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
-+{
-+ struct page *s = drm_ttm_get_page(ttm, page);
-+ void *src;
-+
-+ if (!s)
-+ return -ENOMEM;
-+
-+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-+ src = kmap(s);
-+ if (!src)
-+ return -ENOMEM;
-+
-+ memcpy_toio(dst, src, PAGE_SIZE);
-+ kunmap(s);
-+ return 0;
-+}
-+
-+int drm_bo_move_memcpy(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
-+ struct drm_ttm *ttm = bo->ttm;
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ struct drm_bo_mem_reg old_copy = *old_mem;
-+ void *old_iomap;
-+ void *new_iomap;
-+ int ret;
-+ uint64_t save_flags = old_mem->flags;
-+ uint64_t save_mask = old_mem->mask;
-+ unsigned long i;
-+ unsigned long page;
-+ unsigned long add = 0;
-+ int dir;
-+
-+ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
-+ if (ret)
-+ return ret;
-+ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
-+ if (ret)
-+ goto out;
-+
-+ if (old_iomap == NULL && new_iomap == NULL)
-+ goto out2;
-+ if (old_iomap == NULL && ttm == NULL)
-+ goto out2;
-+
-+ add = 0;
-+ dir = 1;
-+
-+ if ((old_mem->mem_type == new_mem->mem_type) &&
-+ (new_mem->mm_node->start <
-+ old_mem->mm_node->start + old_mem->mm_node->size)) {
-+ dir = -1;
-+ add = new_mem->num_pages - 1;
-+ }
-+
-+ for (i = 0; i < new_mem->num_pages; ++i) {
-+ page = i * dir + add;
-+ if (old_iomap == NULL)
-+ ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
-+ else if (new_iomap == NULL)
-+ ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
-+ else
-+ ret = drm_copy_io_page(new_iomap, old_iomap, page);
-+ if (ret)
-+ goto out1;
-+ }
-+ mb();
-+out2:
-+ drm_bo_free_old_node(bo);
-+
-+ *old_mem = *new_mem;
-+ new_mem->mm_node = NULL;
-+ old_mem->mask = save_mask;
-+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
-+
-+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
-+ drm_ttm_unbind(ttm);
-+ drm_destroy_ttm(ttm);
-+ bo->ttm = NULL;
-+ }
-+
-+out1:
-+ drm_mem_reg_iounmap(dev, new_mem, new_iomap);
-+out:
-+ drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_move_memcpy);
-+
-+/*
-+ * Transfer a buffer object's memory and LRU status to a newly
-+ * created object. User-space references remains with the old
-+ * object. Call bo->mutex locked.
-+ */
-+
-+int drm_buffer_object_transfer(struct drm_buffer_object *bo,
-+ struct drm_buffer_object **new_obj)
-+{
-+ struct drm_buffer_object *fbo;
-+ struct drm_device *dev = bo->dev;
-+ struct drm_buffer_manager *bm = &dev->bm;
-+
-+ fbo = drm_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
-+ if (!fbo)
-+ return -ENOMEM;
-+
-+ *fbo = *bo;
-+ mutex_init(&fbo->mutex);
-+ mutex_lock(&fbo->mutex);
-+ mutex_lock(&dev->struct_mutex);
-+
-+ DRM_INIT_WAITQUEUE(&bo->event_queue);
-+ INIT_LIST_HEAD(&fbo->ddestroy);
-+ INIT_LIST_HEAD(&fbo->lru);
-+ INIT_LIST_HEAD(&fbo->pinned_lru);
-+#ifdef DRM_ODD_MM_COMPAT
-+ INIT_LIST_HEAD(&fbo->vma_list);
-+ INIT_LIST_HEAD(&fbo->p_mm_list);
-+#endif
-+
-+ fbo->fence = drm_fence_reference_locked(bo->fence);
-+ fbo->pinned_node = NULL;
-+ fbo->mem.mm_node->private = (void *)fbo;
-+ atomic_set(&fbo->usage, 1);
-+ atomic_inc(&bm->count);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_unlock(&fbo->mutex);
-+ bo->reserved_size = 0;
-+ *new_obj = fbo;
-+ return 0;
-+}
-+
-+/*
-+ * Since move is underway, we need to block signals in this function.
-+ * We cannot restart until it has finished.
-+ */
-+
-+int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
-+ int evict, int no_wait, uint32_t fence_class,
-+ uint32_t fence_type, uint32_t fence_flags,
-+ struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ int ret;
-+ uint64_t save_flags = old_mem->flags;
-+ uint64_t save_mask = old_mem->mask;
-+ struct drm_buffer_object *old_obj;
-+
-+ if (bo->fence)
-+ drm_fence_usage_deref_unlocked(&bo->fence);
-+ ret = drm_fence_object_create(dev, fence_class, fence_type,
-+ fence_flags | DRM_FENCE_FLAG_EMIT,
-+ &bo->fence);
-+ bo->fence_type = fence_type;
-+ if (ret)
-+ return ret;
-+
-+#ifdef DRM_ODD_MM_COMPAT
-+ /*
-+ * In this mode, we don't allow pipelining a copy blit,
-+ * since the buffer will be accessible from user space
-+ * the moment we return and rebuild the page tables.
-+ *
-+ * With normal vm operation, page tables are rebuilt
-+ * on demand using fault(), which waits for buffer idle.
-+ */
-+ if (1)
-+#else
-+ if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
-+ bo->mem.mm_node != NULL))
-+#endif
-+ {
-+ ret = drm_bo_wait(bo, 0, 1, 0);
-+ if (ret)
-+ return ret;
-+
-+ drm_bo_free_old_node(bo);
-+
-+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
-+ drm_ttm_unbind(bo->ttm);
-+ drm_destroy_ttm(bo->ttm);
-+ bo->ttm = NULL;
-+ }
-+ } else {
-+
-+ /* This should help pipeline ordinary buffer moves.
-+ *
-+ * Hang old buffer memory on a new buffer object,
-+ * and leave it to be released when the GPU
-+ * operation has completed.
-+ */
-+
-+ ret = drm_buffer_object_transfer(bo, &old_obj);
-+
-+ if (ret)
-+ return ret;
-+
-+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
-+ old_obj->ttm = NULL;
-+ else
-+ bo->ttm = NULL;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ list_del_init(&old_obj->lru);
-+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-+ drm_bo_add_to_lru(old_obj);
-+
-+ drm_bo_usage_deref_locked(&old_obj);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ }
-+
-+ *old_mem = *new_mem;
-+ new_mem->mm_node = NULL;
-+ old_mem->mask = save_mask;
-+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
-+
-+int drm_bo_same_page(unsigned long offset,
-+ unsigned long offset2)
-+{
-+ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
-+}
-+EXPORT_SYMBOL(drm_bo_same_page);
-+
-+unsigned long drm_bo_offset_end(unsigned long offset,
-+ unsigned long end)
-+{
-+ offset = (offset + PAGE_SIZE) & PAGE_MASK;
-+ return (end < offset) ? end : offset;
-+}
-+EXPORT_SYMBOL(drm_bo_offset_end);
-+
-+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
-+{
-+ pgprot_t tmp = PAGE_KERNEL;
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
-+ pgprot_val(tmp) |= _PAGE_PCD;
-+ pgprot_val(tmp) &= ~_PAGE_PWT;
-+ }
-+#elif defined(__powerpc__)
-+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
-+ if (map_type == _DRM_REGISTERS)
-+ pgprot_val(tmp) |= _PAGE_GUARDED;
-+#endif
-+#if defined(__ia64__)
-+ if (map_type == _DRM_TTM)
-+ tmp = pgprot_writecombine(tmp);
-+ else
-+ tmp = pgprot_noncached(tmp);
-+#endif
-+ return tmp;
-+}
-+
-+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
-+ unsigned long bus_offset, unsigned long bus_size,
-+ struct drm_bo_kmap_obj *map)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_bo_mem_reg *mem = &bo->mem;
-+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
-+
-+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
-+ map->bo_kmap_type = bo_map_premapped;
-+ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
-+ } else {
-+ map->bo_kmap_type = bo_map_iomap;
-+ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
-+ }
-+ return (!map->virtual) ? -ENOMEM : 0;
-+}
-+
-+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
-+ unsigned long start_page, unsigned long num_pages,
-+ struct drm_bo_kmap_obj *map)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_bo_mem_reg *mem = &bo->mem;
-+ struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
-+ pgprot_t prot;
-+ struct drm_ttm *ttm = bo->ttm;
-+ struct page *d;
-+ int i;
-+
-+ BUG_ON(!ttm);
-+
-+ if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
-+
-+ /*
-+ * We're mapping a single page, and the desired
-+ * page protection is consistent with the bo.
-+ */
-+
-+ map->bo_kmap_type = bo_map_kmap;
-+ map->page = drm_ttm_get_page(ttm, start_page);
-+ map->virtual = kmap(map->page);
-+ } else {
-+ /*
-+ * Populate the part we're mapping;
-+ */
-+
-+ for (i = start_page; i < start_page + num_pages; ++i) {
-+ d = drm_ttm_get_page(ttm, i);
-+ if (!d)
-+ return -ENOMEM;
-+ }
-+
-+ /*
-+ * We need to use vmap to get the desired page protection
-+ * or to make the buffer object look contigous.
-+ */
-+
-+ prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
-+ PAGE_KERNEL :
-+ drm_kernel_io_prot(man->drm_bus_maptype);
-+ map->bo_kmap_type = bo_map_vmap;
-+ map->virtual = vmap(ttm->pages + start_page,
-+ num_pages, 0, prot);
-+ }
-+ return (!map->virtual) ? -ENOMEM : 0;
-+}
-+
-+/*
-+ * This function is to be used for kernel mapping of buffer objects.
-+ * It chooses the appropriate mapping method depending on the memory type
-+ * and caching policy the buffer currently has.
-+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
-+ * consumes vmalloc space. Be restrictive with such mappings.
-+ * Mapping single pages usually returns the logical kernel address,
-+ * (which is fast)
-+ * BUG may use slower temporary mappings for high memory pages or
-+ * uncached / write-combined pages.
-+ *
-+ * The function fills in a drm_bo_kmap_obj which can be used to return the
-+ * kernel virtual address of the buffer.
-+ *
-+ * Code servicing a non-priviliged user request is only allowed to map one
-+ * page at a time. We might need to implement a better scheme to stop such
-+ * processes from consuming all vmalloc space.
-+ */
-+
-+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
-+ unsigned long num_pages, struct drm_bo_kmap_obj *map)
-+{
-+ int ret;
-+ unsigned long bus_base;
-+ unsigned long bus_offset;
-+ unsigned long bus_size;
-+
-+ map->virtual = NULL;
-+
-+ if (num_pages > bo->num_pages)
-+ return -EINVAL;
-+ if (start_page > bo->num_pages)
-+ return -EINVAL;
-+#if 0
-+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
-+ return -EPERM;
-+#endif
-+ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
-+ &bus_offset, &bus_size);
-+
-+ if (ret)
-+ return ret;
-+
-+ if (bus_size == 0) {
-+ return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
-+ } else {
-+ bus_offset += start_page << PAGE_SHIFT;
-+ bus_size = num_pages << PAGE_SHIFT;
-+ return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
-+ }
-+}
-+EXPORT_SYMBOL(drm_bo_kmap);
-+
-+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
-+{
-+ if (!map->virtual)
-+ return;
-+
-+ switch (map->bo_kmap_type) {
-+ case bo_map_iomap:
-+ iounmap(map->virtual);
-+ break;
-+ case bo_map_vmap:
-+ vunmap(map->virtual);
-+ break;
-+ case bo_map_kmap:
-+ kunmap(map->page);
-+ break;
-+ case bo_map_premapped:
-+ break;
-+ default:
-+ BUG();
-+ }
-+ map->virtual = NULL;
-+ map->page = NULL;
-+}
-+EXPORT_SYMBOL(drm_bo_kunmap);
-Index: linux-2.6.28/drivers/gpu/drm/drm_bufs.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/drm_bufs.c 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_bufs.c 2009-02-20 12:23:06.000000000 +0000
-@@ -435,6 +435,8 @@
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
-+ case _DRM_TTM:
-+ BUG_ON(1);
- }
- drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-
-Index: linux-2.6.28/drivers/gpu/drm/drm_drv.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/drm_drv.c 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_drv.c 2009-02-20 12:27:53.000000000 +0000
-@@ -143,6 +143,34 @@
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-+
-+ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
-+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
-+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
-+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
-+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-+
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
-+
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
- };
-
- #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-@@ -317,6 +345,9 @@
- if (dev->driver->unload)
- dev->driver->unload(dev);
-
-+ drm_bo_driver_finish(dev);
-+ drm_fence_manager_takedown(dev);
-+
- if (drm_core_has_AGP(dev) && dev->agp) {
- drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
- dev->agp = NULL;
-@@ -324,6 +355,8 @@
-
- drm_ht_remove(&dev->map_hash);
- drm_ctxbitmap_cleanup(dev);
-+ drm_mm_takedown(&dev->offset_manager);
-+ drm_ht_remove(&dev->object_hash);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-@@ -336,6 +369,17 @@
- DRM_ERROR("Cannot unload module\n");
- }
-
-+void drm_cleanup_pci(struct pci_dev *pdev)
-+{
-+ struct drm_device *dev = pci_get_drvdata(pdev);
-+
-+ pci_set_drvdata(pdev, NULL);
-+ pci_release_regions(pdev);
-+ if (dev)
-+ drm_cleanup(dev);
-+}
-+EXPORT_SYMBOL(drm_cleanup_pci);
-+
- void drm_exit(struct drm_driver *driver)
- {
- struct drm_device *dev, *tmp;
-Index: linux-2.6.28/drivers/gpu/drm/drm_fence.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_fence.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,829 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+
-+/*
-+ * Convenience function to be called by fence::wait methods that
-+ * need polling.
-+ */
-+
-+int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
-+ int interruptible, uint32_t mask,
-+ unsigned long end_jiffies)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-+ uint32_t count = 0;
-+ int ret;
-+
-+ DECLARE_WAITQUEUE(entry, current);
-+ add_wait_queue(&fc->fence_queue, &entry);
-+
-+ ret = 0;
-+
-+ for (;;) {
-+ __set_current_state((interruptible) ?
-+ TASK_INTERRUPTIBLE :
-+ TASK_UNINTERRUPTIBLE);
-+ if (drm_fence_object_signaled(fence, mask))
-+ break;
-+ if (time_after_eq(jiffies, end_jiffies)) {
-+ ret = -EBUSY;
-+ break;
-+ }
-+ if (lazy)
-+ schedule_timeout(1);
-+ else if ((++count & 0x0F) == 0){
-+ __set_current_state(TASK_RUNNING);
-+ schedule();
-+ __set_current_state((interruptible) ?
-+ TASK_INTERRUPTIBLE :
-+ TASK_UNINTERRUPTIBLE);
-+ }
-+ if (interruptible && signal_pending(current)) {
-+ ret = -EAGAIN;
-+ break;
-+ }
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&fc->fence_queue, &entry);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_fence_wait_polling);
-+
-+/*
-+ * Typically called by the IRQ handler.
-+ */
-+
-+void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t sequence, uint32_t type, uint32_t error)
-+{
-+ int wake = 0;
-+ uint32_t diff;
-+ uint32_t relevant_type;
-+ uint32_t new_type;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ struct list_head *head;
-+ struct drm_fence_object *fence, *next;
-+ int found = 0;
-+
-+ if (list_empty(&fc->ring))
-+ return;
-+
-+ list_for_each_entry(fence, &fc->ring, ring) {
-+ diff = (sequence - fence->sequence) & driver->sequence_mask;
-+ if (diff > driver->wrap_diff) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+
-+ fc->waiting_types &= ~type;
-+ head = (found) ? &fence->ring : &fc->ring;
-+
-+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
-+ if (&fence->ring == &fc->ring)
-+ break;
-+
-+ if (error) {
-+ fence->error = error;
-+ fence->signaled_types = fence->type;
-+ list_del_init(&fence->ring);
-+ wake = 1;
-+ break;
-+ }
-+
-+ if (type & DRM_FENCE_TYPE_EXE)
-+ type |= fence->native_types;
-+
-+ relevant_type = type & fence->type;
-+ new_type = (fence->signaled_types | relevant_type) ^
-+ fence->signaled_types;
-+
-+ if (new_type) {
-+ fence->signaled_types |= new_type;
-+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
-+ fence->base.hash.key, fence->signaled_types);
-+
-+ if (driver->needed_flush)
-+ fc->pending_flush |= driver->needed_flush(fence);
-+
-+ if (new_type & fence->waiting_types)
-+ wake = 1;
-+ }
-+
-+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
-+
-+ if (!(fence->type & ~fence->signaled_types)) {
-+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
-+ fence->base.hash.key);
-+ list_del_init(&fence->ring);
-+ }
-+ }
-+
-+ /*
-+ * Reinstate lost waiting types.
-+ */
-+
-+ if ((fc->waiting_types & type) != type) {
-+ head = head->prev;
-+ list_for_each_entry(fence, head, ring) {
-+ if (&fence->ring == &fc->ring)
-+ break;
-+ diff = (fc->highest_waiting_sequence - fence->sequence) &
-+ driver->sequence_mask;
-+ if (diff > driver->wrap_diff)
-+ break;
-+
-+ fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
-+ }
-+ }
-+
-+ if (wake)
-+ wake_up_all(&fc->fence_queue);
-+}
-+EXPORT_SYMBOL(drm_fence_handler);
-+
-+static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ unsigned long flags;
-+
-+ write_lock_irqsave(&fm->lock, flags);
-+ list_del_init(ring);
-+ write_unlock_irqrestore(&fm->lock, flags);
-+}
-+
-+void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
-+{
-+ struct drm_fence_object *tmp_fence = *fence;
-+ struct drm_device *dev = tmp_fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ *fence = NULL;
-+ if (atomic_dec_and_test(&tmp_fence->usage)) {
-+ drm_fence_unring(dev, &tmp_fence->ring);
-+ DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
-+ tmp_fence->base.hash.key);
-+ atomic_dec(&fm->count);
-+ BUG_ON(!list_empty(&tmp_fence->base.list));
-+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
-+ }
-+}
-+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
-+
-+void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
-+{
-+ struct drm_fence_object *tmp_fence = *fence;
-+ struct drm_device *dev = tmp_fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+
-+ *fence = NULL;
-+ if (atomic_dec_and_test(&tmp_fence->usage)) {
-+ mutex_lock(&dev->struct_mutex);
-+ if (atomic_read(&tmp_fence->usage) == 0) {
-+ drm_fence_unring(dev, &tmp_fence->ring);
-+ atomic_dec(&fm->count);
-+ BUG_ON(!list_empty(&tmp_fence->base.list));
-+ drm_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+}
-+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
-+
-+struct drm_fence_object
-+*drm_fence_reference_locked(struct drm_fence_object *src)
-+{
-+ DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
-+
-+ atomic_inc(&src->usage);
-+ return src;
-+}
-+
-+void drm_fence_reference_unlocked(struct drm_fence_object **dst,
-+ struct drm_fence_object *src)
-+{
-+ mutex_lock(&src->dev->struct_mutex);
-+ *dst = src;
-+ atomic_inc(&src->usage);
-+ mutex_unlock(&src->dev->struct_mutex);
-+}
-+EXPORT_SYMBOL(drm_fence_reference_unlocked);
-+
-+static void drm_fence_object_destroy(struct drm_file *priv,
-+ struct drm_user_object *base)
-+{
-+ struct drm_fence_object *fence =
-+ drm_user_object_entry(base, struct drm_fence_object, base);
-+
-+ drm_fence_usage_deref_locked(&fence);
-+}
-+
-+int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
-+{
-+ unsigned long flags;
-+ int signaled;
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+
-+ mask &= fence->type;
-+ read_lock_irqsave(&fm->lock, flags);
-+ signaled = (mask & fence->signaled_types) == mask;
-+ read_unlock_irqrestore(&fm->lock, flags);
-+ if (!signaled && driver->poll) {
-+ write_lock_irqsave(&fm->lock, flags);
-+ driver->poll(dev, fence->fence_class, mask);
-+ signaled = (mask & fence->signaled_types) == mask;
-+ write_unlock_irqrestore(&fm->lock, flags);
-+ }
-+ return signaled;
-+}
-+EXPORT_SYMBOL(drm_fence_object_signaled);
-+
-+
-+int drm_fence_object_flush(struct drm_fence_object *fence,
-+ uint32_t type)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ unsigned long irq_flags;
-+ uint32_t saved_pending_flush;
-+ uint32_t diff;
-+ int call_flush;
-+
-+ if (type & ~fence->type) {
-+ DRM_ERROR("Flush trying to extend fence type, "
-+ "0x%x, 0x%x\n", type, fence->type);
-+ return -EINVAL;
-+ }
-+
-+ write_lock_irqsave(&fm->lock, irq_flags);
-+ fence->waiting_types |= type;
-+ fc->waiting_types |= fence->waiting_types;
-+ diff = (fence->sequence - fc->highest_waiting_sequence) &
-+ driver->sequence_mask;
-+
-+ if (diff < driver->wrap_diff)
-+ fc->highest_waiting_sequence = fence->sequence;
-+
-+ /*
-+ * fence->waiting_types has changed. Determine whether
-+ * we need to initiate some kind of flush as a result of this.
-+ */
-+
-+ saved_pending_flush = fc->pending_flush;
-+ if (driver->needed_flush)
-+ fc->pending_flush |= driver->needed_flush(fence);
-+
-+ if (driver->poll)
-+ driver->poll(dev, fence->fence_class, fence->waiting_types);
-+
-+ call_flush = fc->pending_flush;
-+ write_unlock_irqrestore(&fm->lock, irq_flags);
-+
-+ if (call_flush && driver->flush)
-+ driver->flush(dev, fence->fence_class);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fence_object_flush);
-+
-+/*
-+ * Make sure old fence objects are signaled before their fence sequences are
-+ * wrapped around and reused.
-+ */
-+
-+void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t sequence)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-+ struct drm_fence_object *fence;
-+ unsigned long irq_flags;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ int call_flush;
-+
-+ uint32_t diff;
-+
-+ write_lock_irqsave(&fm->lock, irq_flags);
-+
-+ list_for_each_entry_reverse(fence, &fc->ring, ring) {
-+ diff = (sequence - fence->sequence) & driver->sequence_mask;
-+ if (diff <= driver->flush_diff)
-+ break;
-+
-+ fence->waiting_types = fence->type;
-+ fc->waiting_types |= fence->type;
-+
-+ if (driver->needed_flush)
-+ fc->pending_flush |= driver->needed_flush(fence);
-+ }
-+
-+ if (driver->poll)
-+ driver->poll(dev, fence_class, fc->waiting_types);
-+
-+ call_flush = fc->pending_flush;
-+ write_unlock_irqrestore(&fm->lock, irq_flags);
-+
-+ if (call_flush && driver->flush)
-+ driver->flush(dev, fence->fence_class);
-+
-+ /*
-+ * FIXME: Shold we implement a wait here for really old fences?
-+ */
-+
-+}
-+EXPORT_SYMBOL(drm_fence_flush_old);
-+
-+int drm_fence_object_wait(struct drm_fence_object *fence,
-+ int lazy, int ignore_signals, uint32_t mask)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-+ int ret = 0;
-+ unsigned long _end = 3 * DRM_HZ;
-+
-+ if (mask & ~fence->type) {
-+ DRM_ERROR("Wait trying to extend fence type"
-+ " 0x%08x 0x%08x\n", mask, fence->type);
-+ BUG();
-+ return -EINVAL;
-+ }
-+
-+ if (driver->wait)
-+ return driver->wait(fence, lazy, !ignore_signals, mask);
-+
-+
-+ drm_fence_object_flush(fence, mask);
-+ if (driver->has_irq(dev, fence->fence_class, mask)) {
-+ if (!ignore_signals)
-+ ret = wait_event_interruptible_timeout
-+ (fc->fence_queue,
-+ drm_fence_object_signaled(fence, mask),
-+ 3 * DRM_HZ);
-+ else
-+ ret = wait_event_timeout
-+ (fc->fence_queue,
-+ drm_fence_object_signaled(fence, mask),
-+ 3 * DRM_HZ);
-+
-+ if (unlikely(ret == -ERESTARTSYS))
-+ return -EAGAIN;
-+
-+ if (unlikely(ret == 0))
-+ return -EBUSY;
-+
-+ return 0;
-+ }
-+
-+ return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
-+ _end);
-+}
-+EXPORT_SYMBOL(drm_fence_object_wait);
-+
-+
-+
-+int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
-+ uint32_t fence_class, uint32_t type)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
-+ unsigned long flags;
-+ uint32_t sequence;
-+ uint32_t native_types;
-+ int ret;
-+
-+ drm_fence_unring(dev, &fence->ring);
-+ ret = driver->emit(dev, fence_class, fence_flags, &sequence,
-+ &native_types);
-+ if (ret)
-+ return ret;
-+
-+ write_lock_irqsave(&fm->lock, flags);
-+ fence->fence_class = fence_class;
-+ fence->type = type;
-+ fence->waiting_types = 0;
-+ fence->signaled_types = 0;
-+ fence->error = 0;
-+ fence->sequence = sequence;
-+ fence->native_types = native_types;
-+ if (list_empty(&fc->ring))
-+ fc->highest_waiting_sequence = sequence - 1;
-+ list_add_tail(&fence->ring, &fc->ring);
-+ fc->latest_queued_sequence = sequence;
-+ write_unlock_irqrestore(&fm->lock, flags);
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fence_object_emit);
-+
-+static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t type,
-+ uint32_t fence_flags,
-+ struct drm_fence_object *fence)
-+{
-+ int ret = 0;
-+ unsigned long flags;
-+ struct drm_fence_manager *fm = &dev->fm;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ atomic_set(&fence->usage, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ write_lock_irqsave(&fm->lock, flags);
-+ INIT_LIST_HEAD(&fence->ring);
-+
-+ /*
-+ * Avoid hitting BUG() for kernel-only fence objects.
-+ */
-+
-+ INIT_LIST_HEAD(&fence->base.list);
-+ fence->fence_class = fence_class;
-+ fence->type = type;
-+ fence->signaled_types = 0;
-+ fence->waiting_types = 0;
-+ fence->sequence = 0;
-+ fence->error = 0;
-+ fence->dev = dev;
-+ write_unlock_irqrestore(&fm->lock, flags);
-+ if (fence_flags & DRM_FENCE_FLAG_EMIT) {
-+ ret = drm_fence_object_emit(fence, fence_flags,
-+ fence->fence_class, type);
-+ }
-+ return ret;
-+}
-+
-+int drm_fence_add_user_object(struct drm_file *priv,
-+ struct drm_fence_object *fence, int shareable)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_user_object(priv, &fence->base, shareable);
-+ if (ret)
-+ goto out;
-+ atomic_inc(&fence->usage);
-+ fence->base.type = drm_fence_type;
-+ fence->base.remove = &drm_fence_object_destroy;
-+ DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
-+out:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_fence_add_user_object);
-+
-+int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t type, unsigned flags,
-+ struct drm_fence_object **c_fence)
-+{
-+ struct drm_fence_object *fence;
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+
-+ fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
-+ if (!fence) {
-+ DRM_INFO("Out of memory creating fence object.\n");
-+ return -ENOMEM;
-+ }
-+ ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
-+ if (ret) {
-+ drm_fence_usage_deref_unlocked(&fence);
-+ return ret;
-+ }
-+ *c_fence = fence;
-+ atomic_inc(&fm->count);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_fence_object_create);
-+
-+void drm_fence_manager_init(struct drm_device *dev)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fence_class;
-+ struct drm_fence_driver *fed = dev->driver->fence_driver;
-+ int i;
-+ unsigned long flags;
-+
-+ rwlock_init(&fm->lock);
-+ write_lock_irqsave(&fm->lock, flags);
-+ fm->initialized = 0;
-+ if (!fed)
-+ goto out_unlock;
-+
-+ fm->initialized = 1;
-+ fm->num_classes = fed->num_classes;
-+ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
-+
-+ for (i = 0; i < fm->num_classes; ++i) {
-+ fence_class = &fm->fence_class[i];
-+
-+ memset(fence_class, 0, sizeof(*fence_class));
-+ INIT_LIST_HEAD(&fence_class->ring);
-+ DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
-+ }
-+
-+ atomic_set(&fm->count, 0);
-+ out_unlock:
-+ write_unlock_irqrestore(&fm->lock, flags);
-+}
-+
-+void drm_fence_fill_arg(struct drm_fence_object *fence,
-+ struct drm_fence_arg *arg)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ unsigned long irq_flags;
-+
-+ read_lock_irqsave(&fm->lock, irq_flags);
-+ arg->handle = fence->base.hash.key;
-+ arg->fence_class = fence->fence_class;
-+ arg->type = fence->type;
-+ arg->signaled = fence->signaled_types;
-+ arg->error = fence->error;
-+ arg->sequence = fence->sequence;
-+ read_unlock_irqrestore(&fm->lock, irq_flags);
-+}
-+EXPORT_SYMBOL(drm_fence_fill_arg);
-+
-+void drm_fence_manager_takedown(struct drm_device *dev)
-+{
-+}
-+
-+struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
-+ uint32_t handle)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_user_object *uo;
-+ struct drm_fence_object *fence;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ uo = drm_lookup_user_object(priv, handle);
-+ if (!uo || (uo->type != drm_fence_type)) {
-+ mutex_unlock(&dev->struct_mutex);
-+ return NULL;
-+ }
-+ fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
-+ mutex_unlock(&dev->struct_mutex);
-+ return fence;
-+}
-+
-+int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (arg->flags & DRM_FENCE_FLAG_EMIT)
-+ LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ ret = drm_fence_object_create(dev, arg->fence_class,
-+ arg->type, arg->flags, &fence);
-+ if (ret)
-+ return ret;
-+ ret = drm_fence_add_user_object(file_priv, fence,
-+ arg->flags &
-+ DRM_FENCE_FLAG_SHAREABLE);
-+ if (ret) {
-+ drm_fence_usage_deref_unlocked(&fence);
-+ return ret;
-+ }
-+
-+ /*
-+ * usage > 0. No need to lock dev->struct_mutex;
-+ */
-+
-+ arg->handle = fence->base.hash.key;
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ struct drm_user_object *uo;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
-+ if (ret)
-+ return ret;
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+
-+int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
-+}
-+
-+int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ if (!fence)
-+ return -EINVAL;
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ if (!fence)
-+ return -EINVAL;
-+ ret = drm_fence_object_flush(fence, arg->type);
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+
-+int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ if (!fence)
-+ return -EINVAL;
-+ ret = drm_fence_object_wait(fence,
-+ arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
-+ 0, arg->type);
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+
-+int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ fence = drm_lookup_fence_object(file_priv, arg->handle);
-+ if (!fence)
-+ return -EINVAL;
-+ ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
-+ arg->type);
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-+
-+int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-+{
-+ int ret;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_arg *arg = data;
-+ struct drm_fence_object *fence;
-+ ret = 0;
-+
-+ if (!fm->initialized) {
-+ DRM_ERROR("The DRM driver does not support fencing.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!dev->bm.initialized) {
-+ DRM_ERROR("Buffer object manager is not initialized\n");
-+ return -EINVAL;
-+ }
-+ LOCK_TEST_WITH_RETURN(dev, file_priv);
-+ ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
-+ NULL, &fence);
-+ if (ret)
-+ return ret;
-+
-+ if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
-+ ret = drm_fence_add_user_object(file_priv, fence,
-+ arg->flags &
-+ DRM_FENCE_FLAG_SHAREABLE);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ arg->handle = fence->base.hash.key;
-+
-+ drm_fence_fill_arg(fence, arg);
-+ drm_fence_usage_deref_unlocked(&fence);
-+
-+ return ret;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/drm_fops.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/drm_fops.c 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_fops.c 2009-02-20 12:31:55.000000000 +0000
-@@ -1,3 +1,4 @@
-+
- /**
- * \file drm_fops.c
- * File operations for DRM
-@@ -232,6 +233,7 @@
- int minor_id = iminor(inode);
- struct drm_file *priv;
- int ret;
-+ int i, j;
-
- if (filp->f_flags & O_EXCL)
- return -EBUSY; /* No exclusive opens */
-@@ -257,10 +259,24 @@
-
- INIT_LIST_HEAD(&priv->lhead);
- INIT_LIST_HEAD(&priv->fbs);
-+ INIT_LIST_HEAD(&priv->refd_objects);
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_open(dev, priv);
-
-+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
-+ ret = drm_ht_create(&priv->refd_object_hash[i],
-+ DRM_FILE_HASH_ORDER);
-+ if (ret)
-+ break;
-+ }
-+
-+ if (ret) {
-+ for (j = 0; j < i; ++j)
-+ drm_ht_remove(&priv->refd_object_hash[j]);
-+ goto out_free;
-+ }
-+
- if (dev->driver->open) {
- ret = dev->driver->open(dev, priv);
- if (ret < 0)
-Index: linux-2.6.28/drivers/gpu/drm/drm_irq.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/drm_irq.c 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_irq.c 2009-02-20 12:23:06.000000000 +0000
-@@ -124,6 +124,7 @@
-
- dev->num_crtcs = 0;
- }
-+EXPORT_SYMBOL(drm_vblank_cleanup);
-
- int drm_vblank_init(struct drm_device *dev, int num_crtcs)
- {
-@@ -697,7 +698,7 @@
- *
- * If a signal is not requested, then calls vblank_wait().
- */
--static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
-+void drm_vbl_send_signals(struct drm_device *dev, int crtc)
- {
- struct drm_vbl_sig *vbl_sig, *tmp;
- struct list_head *vbl_sigs;
-@@ -726,6 +727,7 @@
-
- spin_unlock_irqrestore(&dev->vbl_lock, flags);
- }
-+EXPORT_SYMBOL(drm_vbl_send_signals);
-
- /**
- * drm_handle_vblank - handle a vblank event
-Index: linux-2.6.28/drivers/gpu/drm/drm_object.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_object.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,294 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+
-+int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
-+ int shareable)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ int ret;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+
-+ /* The refcount will be bumped to 1 when we add the ref object below. */
-+ atomic_set(&item->refcount, 0);
-+ item->shareable = shareable;
-+ item->owner = priv;
-+
-+ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
-+ (unsigned long)item, 32, 0, 0);
-+ if (ret)
-+ return ret;
-+
-+ ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
-+ if (ret)
-+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_add_user_object);
-+
-+struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_hash_item *hash;
-+ int ret;
-+ struct drm_user_object *item;
-+
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+
-+ ret = drm_ht_find_item(&dev->object_hash, key, &hash);
-+ if (ret)
-+ return NULL;
-+
-+ item = drm_hash_entry(hash, struct drm_user_object, hash);
-+
-+ if (priv != item->owner) {
-+ struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
-+ ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
-+ if (ret) {
-+ DRM_ERROR("Object not registered for usage\n");
-+ return NULL;
-+ }
-+ }
-+ return item;
-+}
-+EXPORT_SYMBOL(drm_lookup_user_object);
-+
-+static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ int ret;
-+
-+ if (atomic_dec_and_test(&item->refcount)) {
-+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
-+ BUG_ON(ret);
-+ item->remove(priv, item);
-+ }
-+}
-+
-+static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
-+ enum drm_ref_type action)
-+{
-+ int ret = 0;
-+
-+ switch (action) {
-+ case _DRM_REF_USE:
-+ atomic_inc(&ro->refcount);
-+ break;
-+ default:
-+ if (!ro->ref_struct_locked) {
-+ break;
-+ } else {
-+ ro->ref_struct_locked(priv, ro, action);
-+ }
-+ }
-+ return ret;
-+}
-+
-+int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
-+ enum drm_ref_type ref_action)
-+{
-+ int ret = 0;
-+ struct drm_ref_object *item;
-+ struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
-+
-+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-+ if (!referenced_object->shareable && priv != referenced_object->owner) {
-+ DRM_ERROR("Not allowed to reference this object\n");
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * If this is not a usage reference, Check that usage has been registered
-+ * first. Otherwise strange things may happen on destruction.
-+ */
-+
-+ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
-+ item =
-+ drm_lookup_ref_object(priv, referenced_object,
-+ _DRM_REF_USE);
-+ if (!item) {
-+ DRM_ERROR
-+ ("Object not registered for usage by this client\n");
-+ return -EINVAL;
-+ }
-+ }
-+
-+ if (NULL !=
-+ (item =
-+ drm_lookup_ref_object(priv, referenced_object, ref_action))) {
-+ atomic_inc(&item->refcount);
-+ return drm_object_ref_action(priv, referenced_object,
-+ ref_action);
-+ }
-+
-+ item = drm_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
-+ if (item == NULL) {
-+ DRM_ERROR("Could not allocate reference object\n");
-+ return -ENOMEM;
-+ }
-+
-+ atomic_set(&item->refcount, 1);
-+ item->hash.key = (unsigned long)referenced_object;
-+ ret = drm_ht_insert_item(ht, &item->hash);
-+ item->unref_action = ref_action;
-+
-+ if (ret)
-+ goto out;
-+
-+ list_add(&item->list, &priv->refd_objects);
-+ ret = drm_object_ref_action(priv, referenced_object, ref_action);
-+out:
-+ return ret;
-+}
-+
-+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
-+ struct drm_user_object *referenced_object,
-+ enum drm_ref_type ref_action)
-+{
-+ struct drm_hash_item *hash;
-+ int ret;
-+
-+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-+ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
-+ (unsigned long)referenced_object, &hash);
-+ if (ret)
-+ return NULL;
-+
-+ return drm_hash_entry(hash, struct drm_ref_object, hash);
-+}
-+EXPORT_SYMBOL(drm_lookup_ref_object);
-+
-+static void drm_remove_other_references(struct drm_file *priv,
-+ struct drm_user_object *ro)
-+{
-+ int i;
-+ struct drm_open_hash *ht;
-+ struct drm_hash_item *hash;
-+ struct drm_ref_object *item;
-+
-+ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
-+ ht = &priv->refd_object_hash[i];
-+ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
-+ item = drm_hash_entry(hash, struct drm_ref_object, hash);
-+ drm_remove_ref_object(priv, item);
-+ }
-+ }
-+}
-+
-+void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
-+{
-+ int ret;
-+ struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
-+ struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
-+ enum drm_ref_type unref_action;
-+
-+ DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
-+ unref_action = item->unref_action;
-+ if (atomic_dec_and_test(&item->refcount)) {
-+ ret = drm_ht_remove_item(ht, &item->hash);
-+ BUG_ON(ret);
-+ list_del_init(&item->list);
-+ if (unref_action == _DRM_REF_USE)
-+ drm_remove_other_references(priv, user_object);
-+ drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
-+ }
-+
-+ switch (unref_action) {
-+ case _DRM_REF_USE:
-+ drm_deref_user_object(priv, user_object);
-+ break;
-+ default:
-+ BUG_ON(!user_object->unref);
-+ user_object->unref(priv, user_object, unref_action);
-+ break;
-+ }
-+
-+}
-+EXPORT_SYMBOL(drm_remove_ref_object);
-+
-+int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
-+ enum drm_object_type type, struct drm_user_object **object)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_user_object *uo;
-+ struct drm_hash_item *hash;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
-+ if (ret) {
-+ DRM_ERROR("Could not find user object to reference.\n");
-+ goto out_err;
-+ }
-+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
-+ if (uo->type != type) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
-+ if (ret)
-+ goto out_err;
-+ mutex_unlock(&dev->struct_mutex);
-+ *object = uo;
-+ return 0;
-+out_err:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
-+ enum drm_object_type type)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_user_object *uo;
-+ struct drm_ref_object *ro;
-+ int ret;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ uo = drm_lookup_user_object(priv, user_token);
-+ if (!uo || (uo->type != type)) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
-+ if (!ro) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ drm_remove_ref_object(priv, ro);
-+ mutex_unlock(&dev->struct_mutex);
-+ return 0;
-+out_err:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/drm_regman.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_regman.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,200 @@
-+/**************************************************************************
-+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * An allocate-fence manager implementation intended for sets of base-registers
-+ * or tiling-registers.
-+ */
-+
-+#include "drmP.h"
-+
-+/*
-+ * Allocate a compatible register and put it on the unfenced list.
-+ */
-+
-+int drm_regs_alloc(struct drm_reg_manager *manager,
-+ const void *data,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int interruptible, int no_wait, struct drm_reg **reg)
-+{
-+ struct drm_reg *entry, *next_entry;
-+ int ret;
-+
-+ *reg = NULL;
-+
-+ /*
-+ * Search the unfenced list.
-+ */
-+
-+ list_for_each_entry(entry, &manager->unfenced, head) {
-+ if (manager->reg_reusable(entry, data)) {
-+ entry->new_fence_type |= fence_type;
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * Search the lru list.
-+ */
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
-+ struct drm_fence_object *fence = entry->fence;
-+ if (fence->fence_class == fence_class &&
-+ (entry->fence_type & fence_type) == entry->fence_type &&
-+ manager->reg_reusable(entry, data)) {
-+ list_del(&entry->head);
-+ entry->new_fence_type = fence_type;
-+ list_add_tail(&entry->head, &manager->unfenced);
-+ goto out;
-+ }
-+ }
-+
-+ /*
-+ * Search the free list.
-+ */
-+
-+ list_for_each_entry(entry, &manager->free, head) {
-+ list_del(&entry->head);
-+ entry->new_fence_type = fence_type;
-+ list_add_tail(&entry->head, &manager->unfenced);
-+ goto out;
-+ }
-+
-+ if (no_wait)
-+ return -EBUSY;
-+
-+ /*
-+ * Go back to the lru list and try to expire fences.
-+ */
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
-+ BUG_ON(!entry->fence);
-+ ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
-+ entry->fence_type);
-+ if (ret)
-+ return ret;
-+
-+ drm_fence_usage_deref_unlocked(&entry->fence);
-+ list_del(&entry->head);
-+ entry->new_fence_type = fence_type;
-+ list_add_tail(&entry->head, &manager->unfenced);
-+ goto out;
-+ }
-+
-+ /*
-+ * Oops. All registers are used up :(.
-+ */
-+
-+ return -EBUSY;
-+out:
-+ *reg = entry;
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_regs_alloc);
-+
-+void drm_regs_fence(struct drm_reg_manager *manager,
-+ struct drm_fence_object *fence)
-+{
-+ struct drm_reg *entry;
-+ struct drm_reg *next_entry;
-+
-+ if (!fence) {
-+
-+ /*
-+ * Old fence (if any) is still valid.
-+ * Put back on free and lru lists.
-+ */
-+
-+ list_for_each_entry_safe_reverse(entry, next_entry,
-+ &manager->unfenced, head) {
-+ list_del(&entry->head);
-+ list_add(&entry->head, (entry->fence) ?
-+ &manager->lru : &manager->free);
-+ }
-+ } else {
-+
-+ /*
-+ * Fence with a new fence and put on lru list.
-+ */
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
-+ head) {
-+ list_del(&entry->head);
-+ if (entry->fence)
-+ drm_fence_usage_deref_unlocked(&entry->fence);
-+ drm_fence_reference_unlocked(&entry->fence, fence);
-+
-+ entry->fence_type = entry->new_fence_type;
-+ BUG_ON((entry->fence_type & fence->type) !=
-+ entry->fence_type);
-+
-+ list_add_tail(&entry->head, &manager->lru);
-+ }
-+ }
-+}
-+EXPORT_SYMBOL(drm_regs_fence);
-+
-+void drm_regs_free(struct drm_reg_manager *manager)
-+{
-+ struct drm_reg *entry;
-+ struct drm_reg *next_entry;
-+
-+ drm_regs_fence(manager, NULL);
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
-+ list_del(&entry->head);
-+ manager->reg_destroy(entry);
-+ }
-+
-+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
-+
-+ (void)drm_fence_object_wait(entry->fence, 1, 1,
-+ entry->fence_type);
-+ list_del(&entry->head);
-+ drm_fence_usage_deref_unlocked(&entry->fence);
-+ manager->reg_destroy(entry);
-+ }
-+}
-+EXPORT_SYMBOL(drm_regs_free);
-+
-+void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
-+{
-+ reg->fence = NULL;
-+ list_add_tail(&reg->head, &manager->free);
-+}
-+EXPORT_SYMBOL(drm_regs_add);
-+
-+void drm_regs_init(struct drm_reg_manager *manager,
-+ int (*reg_reusable) (const struct drm_reg *, const void *),
-+ void (*reg_destroy) (struct drm_reg *))
-+{
-+ INIT_LIST_HEAD(&manager->free);
-+ INIT_LIST_HEAD(&manager->lru);
-+ INIT_LIST_HEAD(&manager->unfenced);
-+ manager->reg_reusable = reg_reusable;
-+ manager->reg_destroy = reg_destroy;
-+}
-+EXPORT_SYMBOL(drm_regs_init);
-Index: linux-2.6.28/drivers/gpu/drm/drm_stub.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/drm_stub.c 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_stub.c 2009-02-20 12:33:16.000000000 +0000
-@@ -201,6 +201,7 @@
- init_timer(&dev->timer);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
-+ mutex_init(&dev->bm.evict_mutex);
-
- idr_init(&dev->drw_idr);
-
-@@ -216,6 +217,18 @@
- return -ENOMEM;
- }
-
-+ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-+ DRM_FILE_PAGE_OFFSET_SIZE)) {
-+ drm_ht_remove(&dev->map_hash);
-+ return -ENOMEM;
-+ }
-+
-+ if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
-+ drm_ht_remove(&dev->map_hash);
-+ drm_mm_takedown(&dev->offset_manager);
-+ return -ENOMEM;
-+ }
-+
- /* the DRM has 6 basic counters */
- dev->counters = 6;
- dev->types[0] = _DRM_STAT_LOCK;
-@@ -261,6 +274,7 @@
- }
- }
-
-+ drm_fence_manager_init(dev);
- return 0;
-
- error_out_unreg:
-@@ -409,6 +423,8 @@
- drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
- return ret;
- }
-+EXPORT_SYMBOL(drm_get_dev);
-+
-
- /**
- * Put a device minor number.
-Index: linux-2.6.28/drivers/gpu/drm/drm_ttm.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_ttm.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,430 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+#include <asm/agp.h>
-+
-+static void drm_ttm_ipi_handler(void *null)
-+{
-+ flush_agp_cache();
-+}
-+
-+void drm_ttm_cache_flush(void)
-+{
-+ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
-+ DRM_ERROR("Timed out waiting for drm cache flush.\n");
-+}
-+EXPORT_SYMBOL(drm_ttm_cache_flush);
-+
-+/*
-+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
-+ */
-+
-+static void ttm_alloc_pages(struct drm_ttm *ttm)
-+{
-+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
-+ ttm->pages = NULL;
-+
-+ if (size <= PAGE_SIZE)
-+ ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
-+
-+ if (!ttm->pages) {
-+ ttm->pages = vmalloc_user(size);
-+ if (ttm->pages)
-+ ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
-+ }
-+}
-+
-+static void ttm_free_pages(struct drm_ttm *ttm)
-+{
-+ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
-+
-+ if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
-+ vfree(ttm->pages);
-+ ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
-+ } else {
-+ drm_free(ttm->pages, size, DRM_MEM_TTM);
-+ }
-+ ttm->pages = NULL;
-+}
-+
-+static struct page *drm_ttm_alloc_page(void)
-+{
-+ struct page *page;
-+
-+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-+ if (!page)
-+ return NULL;
-+ return page;
-+}
-+
-+/*
-+ * Change caching policy for the linear kernel map
-+ * for range of pages in a ttm.
-+ */
-+
-+static int drm_set_caching(struct drm_ttm *ttm, int noncached)
-+{
-+ int i;
-+ struct page **cur_page;
-+ int do_tlbflush = 0;
-+
-+ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
-+ return 0;
-+
-+ if (noncached)
-+ drm_ttm_cache_flush();
-+
-+ for (i = 0; i < ttm->num_pages; ++i) {
-+ cur_page = ttm->pages + i;
-+ if (*cur_page) {
-+ if (!PageHighMem(*cur_page)) {
-+ if (noncached) {
-+ map_page_into_agp(*cur_page);
-+ } else {
-+ unmap_page_from_agp(*cur_page);
-+ }
-+ do_tlbflush = 1;
-+ }
-+ }
-+ }
-+ //if (do_tlbflush)
-+ // flush_agp_mappings();
-+
-+ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
-+
-+ return 0;
-+}
-+
-+
-+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
-+{
-+ int write;
-+ int dirty;
-+ struct page *page;
-+ int i;
-+
-+ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
-+ write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
-+ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
-+
-+ for (i = 0; i < ttm->num_pages; ++i) {
-+ page = ttm->pages[i];
-+ if (page == NULL)
-+ continue;
-+
-+ if (page == ttm->dummy_read_page) {
-+ BUG_ON(write);
-+ continue;
-+ }
-+
-+ if (write && dirty && !PageReserved(page))
-+ set_page_dirty_lock(page);
-+
-+ ttm->pages[i] = NULL;
-+ put_page(page);
-+ }
-+}
-+
-+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
-+{
-+ int i;
-+ struct drm_buffer_manager *bm = &ttm->dev->bm;
-+ struct page **cur_page;
-+
-+ for (i = 0; i < ttm->num_pages; ++i) {
-+ cur_page = ttm->pages + i;
-+ if (*cur_page) {
-+ if (page_count(*cur_page) != 1)
-+ DRM_ERROR("Erroneous page count. Leaking pages.\n");
-+ if (page_mapped(*cur_page))
-+ DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
-+ __free_page(*cur_page);
-+ --bm->cur_pages;
-+ }
-+ }
-+}
-+
-+/*
-+ * Free all resources associated with a ttm.
-+ */
-+
-+int drm_destroy_ttm(struct drm_ttm *ttm)
-+{
-+ struct drm_ttm_backend *be;
-+
-+ if (!ttm)
-+ return 0;
-+
-+ be = ttm->be;
-+ if (be) {
-+ be->func->destroy(be);
-+ ttm->be = NULL;
-+ }
-+
-+ if (ttm->pages) {
-+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
-+ drm_set_caching(ttm, 0);
-+
-+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
-+ drm_ttm_free_user_pages(ttm);
-+ else
-+ drm_ttm_free_alloced_pages(ttm);
-+
-+ ttm_free_pages(ttm);
-+ }
-+
-+ return 0;
-+}
-+
-+struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
-+{
-+ struct page *p;
-+ struct drm_buffer_manager *bm = &ttm->dev->bm;
-+
-+ p = ttm->pages[index];
-+ if (!p) {
-+ p = drm_ttm_alloc_page();
-+ if (!p)
-+ return NULL;
-+ ttm->pages[index] = p;
-+ ++bm->cur_pages;
-+ }
-+ return p;
-+}
-+EXPORT_SYMBOL(drm_ttm_get_page);
-+
-+int drm_ttm_set_user(struct drm_ttm *ttm,
-+ struct task_struct *tsk,
-+ int write,
-+ unsigned long start,
-+ unsigned long num_pages,
-+ struct page *dummy_read_page)
-+{
-+ struct mm_struct *mm = tsk->mm;
-+ int ret;
-+ int i;
-+
-+ BUG_ON(num_pages != ttm->num_pages);
-+
-+ ttm->dummy_read_page = dummy_read_page;
-+ ttm->page_flags |= DRM_TTM_PAGE_USER |
-+ ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
-+
-+
-+ down_read(&mm->mmap_sem);
-+ ret = get_user_pages(tsk, mm, start, num_pages,
-+ write, 0, ttm->pages, NULL);
-+ up_read(&mm->mmap_sem);
-+
-+ if (ret != num_pages && write) {
-+ drm_ttm_free_user_pages(ttm);
-+ return -ENOMEM;
-+ }
-+
-+ for (i = 0; i < num_pages; ++i) {
-+ if (ttm->pages[i] == NULL)
-+ ttm->pages[i] = ttm->dummy_read_page;
-+ }
-+
-+ return 0;
-+}
-+
-+int drm_ttm_populate(struct drm_ttm *ttm)
-+{
-+ struct page *page;
-+ unsigned long i;
-+ struct drm_ttm_backend *be;
-+
-+ if (ttm->state != ttm_unpopulated)
-+ return 0;
-+
-+ be = ttm->be;
-+ for (i = 0; i < ttm->num_pages; ++i) {
-+ page = drm_ttm_get_page(ttm, i);
-+ if (!page)
-+ return -ENOMEM;
-+ }
-+ be->func->populate(be, ttm->num_pages, ttm->pages);
-+ ttm->state = ttm_unbound;
-+ return 0;
-+}
-+
-+static inline size_t drm_size_align(size_t size)
-+{
-+ size_t tmpSize = 4;
-+ if (size > PAGE_SIZE)
-+ return PAGE_ALIGN(size);
-+ while (tmpSize < size)
-+ tmpSize <<= 1;
-+
-+ return (size_t) tmpSize;
-+}
-+
-+/*
-+ * Calculate the estimated pinned memory usage of a ttm.
-+ */
-+
-+unsigned long drm_ttm_size(struct drm_device *dev,
-+ unsigned long num_pages,
-+ int user_bo)
-+{
-+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
-+ unsigned long tmp;
-+
-+ tmp = drm_size_align(sizeof(struct drm_ttm)) +
-+ drm_size_align(num_pages * sizeof(struct page *)) +
-+ ((user_bo) ? 0 : drm_size_align(num_pages * PAGE_SIZE));
-+
-+ if (bo_driver->backend_size)
-+ tmp += bo_driver->backend_size(dev, num_pages);
-+ else
-+ tmp += drm_size_align(num_pages * sizeof(struct page *)) +
-+ 3*drm_size_align(sizeof(struct drm_ttm_backend));
-+ return tmp;
-+}
-+
-+
-+/*
-+ * Initialize a ttm.
-+ */
-+
-+struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
-+{
-+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
-+ struct drm_ttm *ttm;
-+
-+ if (!bo_driver)
-+ return NULL;
-+
-+ ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
-+ if (!ttm)
-+ return NULL;
-+
-+ ttm->dev = dev;
-+ atomic_set(&ttm->vma_count, 0);
-+
-+ ttm->destroy = 0;
-+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+
-+ ttm->page_flags = 0;
-+
-+ /*
-+ * Account also for AGP module memory usage.
-+ */
-+
-+ ttm_alloc_pages(ttm);
-+ if (!ttm->pages) {
-+ drm_destroy_ttm(ttm);
-+ DRM_ERROR("Failed allocating page table\n");
-+ return NULL;
-+ }
-+ ttm->be = bo_driver->create_ttm_backend_entry(dev);
-+ if (!ttm->be) {
-+ drm_destroy_ttm(ttm);
-+ DRM_ERROR("Failed creating ttm backend entry\n");
-+ return NULL;
-+ }
-+ ttm->state = ttm_unpopulated;
-+ return ttm;
-+}
-+
-+/*
-+ * Unbind a ttm region from the aperture.
-+ */
-+
-+void drm_ttm_evict(struct drm_ttm *ttm)
-+{
-+ struct drm_ttm_backend *be = ttm->be;
-+ int ret;
-+
-+ if (ttm->state == ttm_bound) {
-+ ret = be->func->unbind(be);
-+ BUG_ON(ret);
-+ }
-+
-+ ttm->state = ttm_evicted;
-+}
-+
-+void drm_ttm_fixup_caching(struct drm_ttm *ttm)
-+{
-+
-+ if (ttm->state == ttm_evicted) {
-+ struct drm_ttm_backend *be = ttm->be;
-+ if (be->func->needs_ub_cache_adjust(be))
-+ drm_set_caching(ttm, 0);
-+ ttm->state = ttm_unbound;
-+ }
-+}
-+
-+void drm_ttm_unbind(struct drm_ttm *ttm)
-+{
-+ if (ttm->state == ttm_bound)
-+ drm_ttm_evict(ttm);
-+
-+ drm_ttm_fixup_caching(ttm);
-+}
-+
-+int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
-+{
-+ struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
-+ int ret = 0;
-+ struct drm_ttm_backend *be;
-+
-+ if (!ttm)
-+ return -EINVAL;
-+ if (ttm->state == ttm_bound)
-+ return 0;
-+
-+ be = ttm->be;
-+
-+ ret = drm_ttm_populate(ttm);
-+ if (ret)
-+ return ret;
-+
-+ if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
-+ drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
-+ else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
-+ bo_driver->ttm_cache_flush)
-+ bo_driver->ttm_cache_flush(ttm);
-+
-+ ret = be->func->bind(be, bo_mem);
-+ if (ret) {
-+ ttm->state = ttm_evicted;
-+ DRM_ERROR("Couldn't bind backend.\n");
-+ return ret;
-+ }
-+
-+ ttm->state = ttm_bound;
-+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
-+ ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
-+ return 0;
-+}
-+EXPORT_SYMBOL(drm_bind_ttm);
-Index: linux-2.6.28/drivers/gpu/drm/drm_vm.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/drm_vm.c 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_vm.c 2009-02-20 12:23:06.000000000 +0000
-@@ -40,6 +40,10 @@
-
- static void drm_vm_open(struct vm_area_struct *vma);
- static void drm_vm_close(struct vm_area_struct *vma);
-+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
-+ struct file *filp,
-+ drm_local_map_t *map);
-+
-
- static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
- {
-@@ -270,6 +274,9 @@
- case _DRM_GEM:
- DRM_ERROR("tried to rmmap GEM object\n");
- break;
-+ case _DRM_TTM:
-+ BUG_ON(1);
-+ break;
- }
- drm_free(map, sizeof(*map), DRM_MEM_MAPS);
- }
-@@ -650,6 +657,8 @@
- vma->vm_flags |= VM_RESERVED;
- vma->vm_page_prot = drm_dma_prot(map->type, vma);
- break;
-+ case _DRM_TTM:
-+ return drm_bo_mmap_locked(vma, filp, map);
- default:
- return -EINVAL; /* This should never happen. */
- }
-@@ -674,3 +683,213 @@
- return ret;
- }
- EXPORT_SYMBOL(drm_mmap);
-+
-+/**
-+ * buffer object vm functions.
-+ */
-+
-+/**
-+ * \c Pagefault method for buffer objects.
-+ *
-+ * \param vma Virtual memory area.
-+ * \param address File offset.
-+ * \return Error or refault. The pfn is manually inserted.
-+ *
-+ * It's important that pfns are inserted while holding the bo->mutex lock.
-+ * otherwise we might race with unmap_mapping_range() which is always
-+ * called with the bo->mutex lock held.
-+ *
-+ * We're modifying the page attribute bits of the vma->vm_page_prot field,
-+ * without holding the mmap_sem in write mode. Only in read mode.
-+ * These bits are not used by the mm subsystem code, and we consider them
-+ * protected by the bo->mutex lock.
-+ */
-+
-+#define DRM_NOPFN_EXTRA 15 /* Fault 16 pages at a time in */
-+
-+int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-+{
-+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-+ unsigned long page_offset;
-+ struct page *page = NULL;
-+ struct drm_ttm *ttm = NULL;
-+ struct drm_device *dev;
-+ unsigned long pfn;
-+ int err;
-+ unsigned long bus_base;
-+ unsigned long bus_offset;
-+ unsigned long bus_size;
-+ int i;
-+ unsigned long ret = VM_FAULT_NOPAGE;
-+ unsigned long address = (unsigned long)vmf->virtual_address;
-+
-+ if (address > vma->vm_end)
-+ return VM_FAULT_SIGBUS;
-+
-+ dev = bo->dev;
-+ err = drm_bo_read_lock(&dev->bm.bm_lock);
-+ if (err)
-+ return VM_FAULT_NOPAGE;
-+
-+ err = mutex_lock_interruptible(&bo->mutex);
-+ if (err) {
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return VM_FAULT_NOPAGE;
-+ }
-+
-+ err = drm_bo_wait(bo, 0, 0, 0);
-+ if (err) {
-+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-+ goto out_unlock;
-+ }
-+
-+ /*
-+ * If buffer happens to be in a non-mappable location,
-+ * move it to a mappable.
-+ */
-+
-+ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
-+ uint32_t new_mask = bo->mem.mask |
-+ DRM_BO_FLAG_MAPPABLE |
-+ DRM_BO_FLAG_FORCE_MAPPABLE;
-+ err = drm_bo_move_buffer(bo, new_mask, 0, 0);
-+ if (err) {
-+ ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-+ goto out_unlock;
-+ }
-+ }
-+
-+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
-+ &bus_size);
-+
-+ if (err) {
-+ ret = VM_FAULT_SIGBUS;
-+ goto out_unlock;
-+ }
-+
-+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
-+
-+ if (bus_size) {
-+ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
-+
-+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
-+ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
-+ } else {
-+ ttm = bo->ttm;
-+
-+ drm_ttm_fixup_caching(ttm);
-+ page = drm_ttm_get_page(ttm, page_offset);
-+ if (!page) {
-+ ret = VM_FAULT_OOM;
-+ goto out_unlock;
-+ }
-+ pfn = page_to_pfn(page);
-+ vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
-+ vm_get_page_prot(vma->vm_flags) :
-+ drm_io_prot(_DRM_TTM, vma);
-+ }
-+
-+ err = vm_insert_pfn(vma, address, pfn);
-+ if (err) {
-+ ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
-+ goto out_unlock;
-+ }
-+
-+ for (i=0; i<DRM_NOPFN_EXTRA; ++i) {
-+
-+ if (++page_offset == bo->mem.num_pages)
-+ break;
-+ address = vma->vm_start + (page_offset << PAGE_SHIFT);
-+ if (address >= vma->vm_end)
-+ break;
-+ if (bus_size) {
-+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT)
-+ + page_offset;
-+ } else {
-+ page = drm_ttm_get_page(ttm, page_offset);
-+ if (!page)
-+ break;
-+ pfn = page_to_pfn(page);
-+ }
-+ if (vm_insert_pfn(vma, address, pfn))
-+ break;
-+ }
-+out_unlock:
-+ mutex_unlock(&bo->mutex);
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return ret;
-+}
-+EXPORT_SYMBOL(drm_bo_vm_fault);
-+
-+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
-+{
-+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-+
-+ drm_vm_open_locked(vma);
-+ atomic_inc(&bo->usage);
-+}
-+
-+/**
-+ * \c vma open method for buffer objects.
-+ *
-+ * \param vma virtual memory area.
-+ */
-+
-+static void drm_bo_vm_open(struct vm_area_struct *vma)
-+{
-+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-+ struct drm_device *dev = bo->dev;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ drm_bo_vm_open_locked(vma);
-+ mutex_unlock(&dev->struct_mutex);
-+}
-+
-+/**
-+ * \c vma close method for buffer objects.
-+ *
-+ * \param vma virtual memory area.
-+ */
-+
-+static void drm_bo_vm_close(struct vm_area_struct *vma)
-+{
-+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
-+ struct drm_device *dev = bo->dev;
-+
-+ drm_vm_close(vma);
-+ if (bo) {
-+ mutex_lock(&dev->struct_mutex);
-+ drm_bo_usage_deref_locked((struct drm_buffer_object **)
-+ &vma->vm_private_data);
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+ return;
-+}
-+
-+static struct vm_operations_struct drm_bo_vm_ops = {
-+ .fault = drm_bo_vm_fault,
-+ .open = drm_bo_vm_open,
-+ .close = drm_bo_vm_close,
-+};
-+
-+/**
-+ * mmap buffer object memory.
-+ *
-+ * \param vma virtual memory area.
-+ * \param file_priv DRM file private.
-+ * \param map The buffer object drm map.
-+ * \return zero on success or a negative number on failure.
-+ */
-+
-+int drm_bo_mmap_locked(struct vm_area_struct *vma,
-+ struct file *filp,
-+ drm_local_map_t *map)
-+{
-+ vma->vm_ops = &drm_bo_vm_ops;
-+ vma->vm_private_data = map->handle;
-+ vma->vm_file = filp;
-+ vma->vm_flags |= VM_RESERVED | VM_IO;
-+ vma->vm_flags |= VM_PFNMAP;
-+ drm_bo_vm_open_locked(vma);
-+ return 0;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/Makefile
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/Makefile 2009-02-20 14:48:03.000000000 +0000
-@@ -0,0 +1,12 @@
-+#
-+# Makefile for the drm device driver. This driver provides support for the
-+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-+
-+ccflags-y := -Iinclude/drm
-+
-+psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o psb_buffer.o \
-+ psb_gtt.o psb_fb.o psb_msvdx.o \
-+ psb_msvdxinit.o psb_regman.o psb_reset.o psb_scene.o \
-+ psb_schedule.o psb_xhw.o
-+
-+obj-$(CONFIG_DRM_PSB) += psb.o
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_buffer.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_buffer.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,437 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_schedule.h"
-+
-+struct drm_psb_ttm_backend {
-+ struct drm_ttm_backend base;
-+ struct page **pages;
-+ unsigned int desired_tile_stride;
-+ unsigned int hw_tile_stride;
-+ int mem_type;
-+ unsigned long offset;
-+ unsigned long num_pages;
-+};
-+
-+int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
-+ uint32_t * type)
-+{
-+ switch (*class) {
-+ case PSB_ENGINE_TA:
-+ *type = DRM_FENCE_TYPE_EXE |
-+ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
-+ if (bo->mem.mask & PSB_BO_FLAG_TA)
-+ *type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
-+ if (bo->mem.mask & PSB_BO_FLAG_SCENE)
-+ *type |= _PSB_FENCE_TYPE_SCENE_DONE;
-+ if (bo->mem.mask & PSB_BO_FLAG_FEEDBACK)
-+ *type |= _PSB_FENCE_TYPE_FEEDBACK;
-+ break;
-+ default:
-+ *type = DRM_FENCE_TYPE_EXE;
-+ }
-+ return 0;
-+}
-+
-+static inline size_t drm_size_align(size_t size)
-+{
-+ size_t tmpSize = 4;
-+ if (size > PAGE_SIZE)
-+ return PAGE_ALIGN(size);
-+ while (tmpSize < size)
-+ tmpSize <<= 1;
-+
-+ return (size_t) tmpSize;
-+}
-+
-+/*
-+ * Poulsbo GPU virtual space looks like this
-+ * (We currently use only one MMU context).
-+ *
-+ * gatt_start = Start of GATT aperture in bus space.
-+ * stolen_end = End of GATT populated by stolen memory in bus space.
-+ * gatt_end = End of GATT
-+ * twod_end = MIN(gatt_start + 256_MEM, gatt_end)
-+ *
-+ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling- and copy operations.
-+ * This space is not managed and is protected by the
-+ * temp_mem mutex.
-+ *
-+ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
-+ *
-+ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
-+ *
-+ * gatt_start -> stolen_end DRM_BO_MEM_VRAM Pre-populated GATT pages.
-+ *
-+ * stolen_end -> twod_end DRM_BO_MEM_TT GATT memory usable by 2D engine.
-+ *
-+ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not usable by 2D engine.
-+ *
-+ * gatt_end -> 0xffffffff Currently unused.
-+ */
-+
-+int psb_init_mem_type(struct drm_device *dev, uint32_t type,
-+ struct drm_mem_type_manager *man)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_gtt *pg = dev_priv->pg;
-+
-+ switch (type) {
-+ case DRM_BO_MEM_LOCAL:
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CACHED;
-+ man->drm_bus_maptype = 0;
-+ break;
-+ case DRM_PSB_MEM_KERNEL:
-+ man->io_offset = 0x00000000;
-+ man->io_size = 0x00000000;
-+ man->io_addr = NULL;
-+ man->drm_bus_maptype = _DRM_TTM;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->gpu_offset = PSB_MEM_KERNEL_START;
-+ break;
-+ case DRM_PSB_MEM_MMU:
-+ man->io_offset = 0x00000000;
-+ man->io_size = 0x00000000;
-+ man->io_addr = NULL;
-+ man->drm_bus_maptype = _DRM_TTM;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->gpu_offset = PSB_MEM_MMU_START;
-+ break;
-+ case DRM_PSB_MEM_PDS:
-+ man->io_offset = 0x00000000;
-+ man->io_size = 0x00000000;
-+ man->io_addr = NULL;
-+ man->drm_bus_maptype = _DRM_TTM;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->gpu_offset = PSB_MEM_PDS_START;
-+ break;
-+ case DRM_PSB_MEM_RASTGEOM:
-+ man->io_offset = 0x00000000;
-+ man->io_size = 0x00000000;
-+ man->io_addr = NULL;
-+ man->drm_bus_maptype = _DRM_TTM;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->gpu_offset = PSB_MEM_RASTGEOM_START;
-+ break;
-+ case DRM_BO_MEM_VRAM:
-+ man->io_addr = NULL;
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
-+#ifdef PSB_WORKING_HOST_MMU_ACCESS
-+ man->drm_bus_maptype = _DRM_AGP;
-+ man->io_offset = pg->gatt_start;
-+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
-+#else
-+ man->drm_bus_maptype = _DRM_TTM; /* Forces uncached */
-+ man->io_offset = pg->stolen_base;
-+ man->io_size = pg->stolen_size;
-+#endif
-+ man->gpu_offset = pg->gatt_start;
-+ break;
-+ case DRM_BO_MEM_TT: /* Mappable GATT memory */
-+ man->io_offset = pg->gatt_start;
-+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
-+ man->io_addr = NULL;
-+#ifdef PSB_WORKING_HOST_MMU_ACCESS
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
-+ man->drm_bus_maptype = _DRM_AGP;
-+#else
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->drm_bus_maptype = _DRM_TTM;
-+#endif
-+ man->gpu_offset = pg->gatt_start;
-+ break;
-+ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
-+ man->io_offset = pg->gatt_start;
-+ man->io_size = pg->gatt_pages << PAGE_SHIFT;
-+ man->io_addr = NULL;
-+#ifdef PSB_WORKING_HOST_MMU_ACCESS
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
-+ man->drm_bus_maptype = _DRM_AGP;
-+#else
-+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
-+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_CMA;
-+ man->drm_bus_maptype = _DRM_TTM;
-+#endif
-+ man->gpu_offset = pg->gatt_start;
-+ break;
-+ default:
-+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
-+uint32_t psb_evict_mask(struct drm_buffer_object * bo)
-+{
-+ switch (bo->mem.mem_type) {
-+ case DRM_BO_MEM_VRAM:
-+ return DRM_BO_FLAG_MEM_TT;
-+ default:
-+ return DRM_BO_FLAG_MEM_LOCAL;
-+ }
-+}
-+
-+int psb_invalidate_caches(struct drm_device *dev, uint64_t flags)
-+{
-+ return 0;
-+}
-+
-+static int psb_move_blit(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+ int dir = 0;
-+
-+ if ((old_mem->mem_type == new_mem->mem_type) &&
-+ (new_mem->mm_node->start <
-+ old_mem->mm_node->start + old_mem->mm_node->size)) {
-+ dir = 1;
-+ }
-+
-+ psb_emit_2d_copy_blit(bo->dev,
-+ old_mem->mm_node->start << PAGE_SHIFT,
-+ new_mem->mm_node->start << PAGE_SHIFT,
-+ new_mem->num_pages, dir);
-+
-+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
-+ DRM_FENCE_TYPE_EXE, 0, new_mem);
-+}
-+
-+/*
-+ * Flip destination ttm into cached-coherent GATT,
-+ * then blit and subsequently move out again.
-+ */
-+
-+static int psb_move_flip(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_device *dev = bo->dev;
-+ struct drm_bo_mem_reg tmp_mem;
-+ int ret;
-+
-+ tmp_mem = *new_mem;
-+ tmp_mem.mm_node = NULL;
-+ tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
-+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
-+
-+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
-+ if (ret)
-+ return ret;
-+ ret = drm_bind_ttm(bo->ttm, &tmp_mem);
-+ if (ret)
-+ goto out_cleanup;
-+ ret = psb_move_blit(bo, 1, no_wait, &tmp_mem);
-+ if (ret)
-+ goto out_cleanup;
-+
-+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
-+ out_cleanup:
-+ if (tmp_mem.mm_node) {
-+ mutex_lock(&dev->struct_mutex);
-+ if (tmp_mem.mm_node != bo->pinned_node)
-+ drm_mm_put_block(tmp_mem.mm_node);
-+ tmp_mem.mm_node = NULL;
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+ return ret;
-+}
-+
-+int psb_move(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-+{
-+ struct drm_bo_mem_reg *old_mem = &bo->mem;
-+
-+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
-+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-+ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
-+ if (psb_move_flip(bo, evict, no_wait, new_mem))
-+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-+ } else {
-+ if (psb_move_blit(bo, evict, no_wait, new_mem))
-+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-+ }
-+ return 0;
-+}
-+
-+static int drm_psb_tbe_nca(struct drm_ttm_backend *backend)
-+{
-+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
-+}
-+
-+static int drm_psb_tbe_populate(struct drm_ttm_backend *backend,
-+ unsigned long num_pages, struct page **pages)
-+{
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+
-+ psb_be->pages = pages;
-+ return 0;
-+}
-+
-+static int drm_psb_tbe_unbind(struct drm_ttm_backend *backend)
-+{
-+ struct drm_device *dev = backend->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
-+ struct drm_mem_type_manager *man = &dev->bm.man[psb_be->mem_type];
-+
-+ PSB_DEBUG_RENDER("MMU unbind.\n");
-+
-+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
-+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
-+ PAGE_SHIFT;
-+
-+ (void)psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
-+ psb_be->num_pages,
-+ psb_be->desired_tile_stride,
-+ psb_be->hw_tile_stride);
-+ }
-+
-+ psb_mmu_remove_pages(pd, psb_be->offset,
-+ psb_be->num_pages,
-+ psb_be->desired_tile_stride,
-+ psb_be->hw_tile_stride);
-+
-+ return 0;
-+}
-+
-+static int drm_psb_tbe_bind(struct drm_ttm_backend *backend,
-+ struct drm_bo_mem_reg *bo_mem)
-+{
-+ struct drm_device *dev = backend->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
-+ struct drm_mem_type_manager *man = &dev->bm.man[bo_mem->mem_type];
-+ int type;
-+ int ret = 0;
-+
-+ psb_be->mem_type = bo_mem->mem_type;
-+ psb_be->num_pages = bo_mem->num_pages;
-+ psb_be->desired_tile_stride = bo_mem->desired_tile_stride;
-+ psb_be->hw_tile_stride = bo_mem->hw_tile_stride;
-+ psb_be->desired_tile_stride = 0;
-+ psb_be->hw_tile_stride = 0;
-+ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
-+ man->gpu_offset;
-+
-+ type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
-+
-+ PSB_DEBUG_RENDER("MMU bind.\n");
-+ if (psb_be->mem_type == DRM_BO_MEM_TT) {
-+ uint32_t gatt_p_offset = (psb_be->offset - man->gpu_offset) >>
-+ PAGE_SHIFT;
-+
-+ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
-+ gatt_p_offset,
-+ psb_be->num_pages,
-+ psb_be->desired_tile_stride,
-+ psb_be->hw_tile_stride, type);
-+ }
-+
-+ ret = psb_mmu_insert_pages(pd, psb_be->pages,
-+ psb_be->offset, psb_be->num_pages,
-+ psb_be->desired_tile_stride,
-+ psb_be->hw_tile_stride, type);
-+ if (ret)
-+ goto out_err;
-+
-+ DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
-+ DRM_BE_FLAG_BOUND_CACHED : 0, DRM_BE_FLAG_BOUND_CACHED);
-+
-+ return 0;
-+ out_err:
-+ drm_psb_tbe_unbind(backend);
-+ return ret;
-+
-+}
-+
-+static void drm_psb_tbe_clear(struct drm_ttm_backend *backend)
-+{
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+
-+ psb_be->pages = NULL;
-+ return;
-+}
-+
-+static void drm_psb_tbe_destroy(struct drm_ttm_backend *backend)
-+{
-+ struct drm_psb_ttm_backend *psb_be =
-+ container_of(backend, struct drm_psb_ttm_backend, base);
-+
-+ if (backend)
-+ drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
-+}
-+
-+static struct drm_ttm_backend_func psb_ttm_backend = {
-+ .needs_ub_cache_adjust = drm_psb_tbe_nca,
-+ .populate = drm_psb_tbe_populate,
-+ .clear = drm_psb_tbe_clear,
-+ .bind = drm_psb_tbe_bind,
-+ .unbind = drm_psb_tbe_unbind,
-+ .destroy = drm_psb_tbe_destroy,
-+};
-+
-+struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev)
-+{
-+ struct drm_psb_ttm_backend *psb_be;
-+
-+ psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
-+ if (!psb_be)
-+ return NULL;
-+ psb_be->pages = NULL;
-+ psb_be->base.func = &psb_ttm_backend;
-+ psb_be->base.dev = dev;
-+
-+ return &psb_be->base;
-+}
-+
-+int psb_tbe_size(struct drm_device *dev, unsigned long num_pages)
-+{
-+ /*
-+ * Return the size of the structures themselves and the
-+ * estimated size of the pagedir and pagetable entries.
-+ */
-+
-+ return drm_size_align(sizeof(struct drm_psb_ttm_backend)) +
-+ 8*num_pages;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_drm.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_drm.h 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,370 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#ifndef _PSB_DRM_H_
-+#define _PSB_DRM_H_
-+
-+#if defined(__linux__) && !defined(__KERNEL__)
-+#include<stdint.h>
-+#endif
-+
-+/*
-+ * Intel Poulsbo driver package version.
-+ *
-+ */
-+/* #define PSB_PACKAGE_VERSION "ED"__DATE__*/
-+#define PSB_PACKAGE_VERSION "2.1.0.32L.0019"
-+
-+#define DRM_PSB_SAREA_MAJOR 0
-+#define DRM_PSB_SAREA_MINOR 1
-+#define PSB_FIXED_SHIFT 16
-+
-+/*
-+ * Public memory types.
-+ */
-+
-+#define DRM_PSB_MEM_MMU DRM_BO_MEM_PRIV1
-+#define DRM_PSB_FLAG_MEM_MMU DRM_BO_FLAG_MEM_PRIV1
-+#define DRM_PSB_MEM_PDS DRM_BO_MEM_PRIV2
-+#define DRM_PSB_FLAG_MEM_PDS DRM_BO_FLAG_MEM_PRIV2
-+#define DRM_PSB_MEM_APER DRM_BO_MEM_PRIV3
-+#define DRM_PSB_FLAG_MEM_APER DRM_BO_FLAG_MEM_PRIV3
-+#define DRM_PSB_MEM_RASTGEOM DRM_BO_MEM_PRIV4
-+#define DRM_PSB_FLAG_MEM_RASTGEOM DRM_BO_FLAG_MEM_PRIV4
-+#define PSB_MEM_RASTGEOM_START 0x30000000
-+
-+typedef int32_t psb_fixed;
-+typedef uint32_t psb_ufixed;
-+
-+static inline psb_fixed psb_int_to_fixed(int a)
-+{
-+ return a * (1 << PSB_FIXED_SHIFT);
-+}
-+
-+static inline psb_ufixed psb_unsigned_to_ufixed(unsigned int a)
-+{
-+ return a << PSB_FIXED_SHIFT;
-+}
-+
-+/*Status of the command sent to the gfx device.*/
-+typedef enum {
-+ DRM_CMD_SUCCESS,
-+ DRM_CMD_FAILED,
-+ DRM_CMD_HANG
-+} drm_cmd_status_t;
-+
-+struct drm_psb_scanout {
-+ uint32_t buffer_id; /* DRM buffer object ID */
-+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
-+ uint32_t stride; /* Buffer stride in bytes */
-+ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
-+ uint32_t width; /* Buffer width in pixels */
-+ uint32_t height; /* Buffer height in lines */
-+ psb_fixed transform[3][3]; /* Buffer composite transform */
-+ /* (scaling, rot, reflect) */
-+};
-+
-+#define DRM_PSB_SAREA_OWNERS 16
-+#define DRM_PSB_SAREA_OWNER_2D 0
-+#define DRM_PSB_SAREA_OWNER_3D 1
-+
-+#define DRM_PSB_SAREA_SCANOUTS 3
-+
-+struct drm_psb_sarea {
-+ /* Track changes of this data structure */
-+
-+ uint32_t major;
-+ uint32_t minor;
-+
-+ /* Last context to touch part of hw */
-+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
-+
-+ /* Definition of front- and rotated buffers */
-+ uint32_t num_scanouts;
-+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
-+
-+ int pipeA_x;
-+ int pipeA_y;
-+ int pipeA_w;
-+ int pipeA_h;
-+ int pipeB_x;
-+ int pipeB_y;
-+ int pipeB_w;
-+ int pipeB_h;
-+ uint32_t msvdx_state;
-+ uint32_t msvdx_context;
-+};
-+
-+#define PSB_RELOC_MAGIC 0x67676767
-+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
-+#define PSB_RELOC_SHIFT_SHIFT 0
-+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
-+#define PSB_RELOC_ALSHIFT_SHIFT 16
-+
-+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
-+ * buffer
-+ */
-+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
-+ * buffer, relative to 2D
-+ * base address
-+ */
-+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
-+ * relative to PDS base address
-+ */
-+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
-+ * buffer (for tiling)
-+ */
-+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
-+ * relative to base reg
-+ */
-+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
-+
-+struct drm_psb_reloc {
-+ uint32_t reloc_op;
-+ uint32_t where; /* offset in destination buffer */
-+ uint32_t buffer; /* Buffer reloc applies to */
-+ uint32_t mask; /* Destination format: */
-+ uint32_t shift; /* Destination format: */
-+ uint32_t pre_add; /* Destination format: */
-+ uint32_t background; /* Destination add */
-+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
-+ uint32_t arg0; /* Reloc-op dependant */
-+ uint32_t arg1;
-+};
-+
-+#define PSB_BO_FLAG_TA (1ULL << 48)
-+#define PSB_BO_FLAG_SCENE (1ULL << 49)
-+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
-+#define PSB_BO_FLAG_USSE (1ULL << 51)
-+
-+#define PSB_ENGINE_2D 0
-+#define PSB_ENGINE_VIDEO 1
-+#define PSB_ENGINE_RASTERIZER 2
-+#define PSB_ENGINE_TA 3
-+#define PSB_ENGINE_HPRAST 4
-+
-+/*
-+ * For this fence class we have a couple of
-+ * fence types.
-+ */
-+
-+#define _PSB_FENCE_EXE_SHIFT 0
-+#define _PSB_FENCE_TA_DONE_SHIFT 1
-+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
-+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
-+#define _PSB_FENCE_FEEDBACK_SHIFT 4
-+
-+#define _PSB_ENGINE_TA_FENCE_TYPES 5
-+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
-+
-+#define PSB_ENGINE_HPRAST 4
-+#define PSB_NUM_ENGINES 5
-+
-+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
-+#define PSB_TA_FLAG_LASTPASS (1 << 1)
-+
-+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
-+
-+struct drm_psb_scene {
-+ int handle_valid;
-+ uint32_t handle;
-+ uint32_t w;
-+ uint32_t h;
-+ uint32_t num_buffers;
-+};
-+
-+struct drm_psb_hw_info
-+{
-+ uint32_t rev_id;
-+ uint32_t caps;
-+};
-+
-+typedef struct drm_psb_cmdbuf_arg {
-+ uint64_t buffer_list; /* List of buffers to validate */
-+ uint64_t clip_rects; /* See i915 counterpart */
-+ uint64_t scene_arg;
-+ uint64_t fence_arg;
-+
-+ uint32_t ta_flags;
-+
-+ uint32_t ta_handle; /* TA reg-value pairs */
-+ uint32_t ta_offset;
-+ uint32_t ta_size;
-+
-+ uint32_t oom_handle;
-+ uint32_t oom_offset;
-+ uint32_t oom_size;
-+
-+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
-+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
-+ uint32_t cmdbuf_size;
-+
-+ uint32_t reloc_handle; /* Reloc buffer object */
-+ uint32_t reloc_offset;
-+ uint32_t num_relocs;
-+
-+ int32_t damage; /* Damage front buffer with cliprects */
-+ /* Not implemented yet */
-+ uint32_t fence_flags;
-+ uint32_t engine;
-+
-+ /*
-+ * Feedback;
-+ */
-+
-+ uint32_t feedback_ops;
-+ uint32_t feedback_handle;
-+ uint32_t feedback_offset;
-+ uint32_t feedback_breakpoints;
-+ uint32_t feedback_size;
-+} drm_psb_cmdbuf_arg_t;
-+
-+struct drm_psb_xhw_init_arg {
-+ uint32_t operation;
-+ uint32_t buffer_handle;
-+};
-+
-+/*
-+ * Feedback components:
-+ */
-+
-+/*
-+ * Vistest component. The number of these in the feedback buffer
-+ * equals the number of vistest breakpoints + 1.
-+ * This is currently the only feedback component.
-+ */
-+
-+struct drm_psb_vistest {
-+ uint32_t vt[8];
-+};
-+
-+#define PSB_HW_COOKIE_SIZE 16
-+#define PSB_HW_FEEDBACK_SIZE 8
-+#define PSB_HW_OOM_CMD_SIZE 6
-+
-+struct drm_psb_xhw_arg {
-+ uint32_t op;
-+ int ret;
-+ uint32_t irq_op;
-+ uint32_t issue_irq;
-+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
-+ union {
-+ struct {
-+ uint32_t w;
-+ uint32_t h;
-+ uint32_t size;
-+ uint32_t clear_p_start;
-+ uint32_t clear_num_pages;
-+ } si;
-+ struct {
-+ uint32_t fire_flags;
-+ uint32_t hw_context;
-+ uint32_t offset;
-+ uint32_t engine;
-+ uint32_t flags;
-+ uint32_t rca;
-+ uint32_t num_oom_cmds;
-+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
-+ } sb;
-+ struct {
-+ uint32_t pages;
-+ uint32_t size;
-+ } bi;
-+ struct {
-+ uint32_t bca;
-+ uint32_t rca;
-+ uint32_t flags;
-+ } oom;
-+ struct {
-+ uint32_t pt_offset;
-+ uint32_t param_offset;
-+ uint32_t flags;
-+ } bl;
-+ struct {
-+ uint32_t value;
-+ } cl;
-+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
-+ } arg;
-+};
-+
-+#define DRM_PSB_CMDBUF 0x00
-+#define DRM_PSB_XHW_INIT 0x01
-+#define DRM_PSB_XHW 0x02
-+#define DRM_PSB_SCENE_UNREF 0x03
-+/* Controlling the kernel modesetting buffers */
-+#define DRM_PSB_KMS_OFF 0x04
-+#define DRM_PSB_KMS_ON 0x05
-+#define DRM_PSB_HW_INFO 0x06
-+
-+#define PSB_XHW_INIT 0x00
-+#define PSB_XHW_TAKEDOWN 0x01
-+
-+#define PSB_XHW_FIRE_RASTER 0x00
-+#define PSB_XHW_SCENE_INFO 0x01
-+#define PSB_XHW_SCENE_BIND_FIRE 0x02
-+#define PSB_XHW_TA_MEM_INFO 0x03
-+#define PSB_XHW_RESET_DPM 0x04
-+#define PSB_XHW_OOM 0x05
-+#define PSB_XHW_TERMINATE 0x06
-+#define PSB_XHW_VISTEST 0x07
-+#define PSB_XHW_RESUME 0x08
-+#define PSB_XHW_TA_MEM_LOAD 0x09
-+#define PSB_XHW_CHECK_LOCKUP 0x0a
-+
-+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
-+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
-+#define PSB_SCENE_FLAG_SETUP (1 << 2)
-+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
-+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
-+
-+#define PSB_TA_MEM_FLAG_TA (1 << 0)
-+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
-+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
-+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
-+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
-+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
-+
-+/*Raster fire will deallocate memory */
-+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
-+/*Isp reset needed due to change in ZLS format */
-+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
-+/*These are set by Xpsb. */
-+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
-+/*The task has had at least one OOM and Xpsb will
-+ send back messages on each fire. */
-+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
-+
-+#define PSB_SCENE_ENGINE_TA 0
-+#define PSB_SCENE_ENGINE_RASTER 1
-+#define PSB_SCENE_NUM_ENGINES 2
-+
-+struct drm_psb_dev_info_arg {
-+ uint32_t num_use_attribute_registers;
-+};
-+#define DRM_PSB_DEVINFO 0x01
-+
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_drv.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_drv.c 2009-02-20 14:48:16.000000000 +0000
-@@ -0,0 +1,1028 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "psb_drm.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "../i915/i915_reg.h"
-+#include "psb_msvdx.h"
-+#include "drm_pciids.h"
-+#include "psb_scene.h"
-+#include "drm_crtc.h"
-+#include "drm_crtc_helper.h"
-+#include <linux/cpu.h>
-+#include <linux/notifier.h>
-+#include <linux/fb.h>
-+
-+extern int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, uint32_t maxY);
-+
-+int drm_psb_debug = 0;
-+EXPORT_SYMBOL(drm_psb_debug);
-+static int drm_psb_trap_pagefaults = 0;
-+static int drm_psb_clock_gating = 0;
-+static int drm_psb_ta_mem_size = 32 * 1024;
-+int drm_psb_disable_vsync = 1;
-+int drm_psb_no_fb = 0;
-+int drm_psb_force_pipeb = 0;
-+char* psb_init_mode;
-+
-+
-+MODULE_PARM_DESC(debug, "Enable debug output");
-+MODULE_PARM_DESC(clock_gating, "clock gating");
-+MODULE_PARM_DESC(no_fb, "Disable FBdev");
-+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
-+MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
-+MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
-+MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
-+MODULE_PARM_DESC(mode, "initial mode name");
-+MODULE_PARM_DESC(xres, "initial mode width");
-+MODULE_PARM_DESC(yres, "initial mode height");
-+
-+module_param_named(debug, drm_psb_debug, int, 0600);
-+module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
-+module_param_named(no_fb, drm_psb_no_fb, int, 0600);
-+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
-+module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
-+module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
-+module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
-+module_param_named(mode, psb_init_mode, charp, 0600);
-+
-+static struct pci_device_id pciidlist[] = {
-+ psb_PCI_IDS
-+};
-+
-+#define DRM_PSB_CMDBUF_IOCTL DRM_IOW(DRM_PSB_CMDBUF, \
-+ struct drm_psb_cmdbuf_arg)
-+#define DRM_PSB_XHW_INIT_IOCTL DRM_IOR(DRM_PSB_XHW_INIT, \
-+ struct drm_psb_xhw_init_arg)
-+#define DRM_PSB_XHW_IOCTL DRM_IO(DRM_PSB_XHW)
-+
-+#define DRM_PSB_SCENE_UNREF_IOCTL DRM_IOWR(DRM_PSB_SCENE_UNREF, \
-+ struct drm_psb_scene)
-+#define DRM_PSB_HW_INFO_IOCTL DRM_IOR(DRM_PSB_HW_INFO, \
-+ struct drm_psb_hw_info)
-+
-+#define DRM_PSB_KMS_OFF_IOCTL DRM_IO(DRM_PSB_KMS_OFF)
-+#define DRM_PSB_KMS_ON_IOCTL DRM_IO(DRM_PSB_KMS_ON)
-+
-+static struct drm_ioctl_desc psb_ioctls[] = {
-+ DRM_IOCTL_DEF(DRM_PSB_CMDBUF_IOCTL, psb_cmdbuf_ioctl, DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_PSB_XHW_INIT_IOCTL, psb_xhw_init_ioctl,
-+ DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_PSB_XHW_IOCTL, psb_xhw_ioctl, DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_PSB_SCENE_UNREF_IOCTL, drm_psb_scene_unref_ioctl,
-+ DRM_AUTH),
-+ DRM_IOCTL_DEF(DRM_PSB_KMS_OFF_IOCTL, psbfb_kms_off_ioctl,
-+ DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_PSB_KMS_ON_IOCTL, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
-+ DRM_IOCTL_DEF(DRM_PSB_HW_INFO_IOCTL, psb_hw_info_ioctl, DRM_AUTH),
-+};
-+static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
-+
-+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-+
-+static int dri_library_name(struct drm_device *dev, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "psb\n");
-+}
-+
-+static void psb_set_uopt(struct drm_psb_uopt *uopt)
-+{
-+ uopt->clock_gating = drm_psb_clock_gating;
-+}
-+
-+static void psb_lastclose(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if (!dev->dev_private)
-+ return;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (dev_priv->ta_mem)
-+ psb_ta_mem_unref_devlocked(&dev_priv->ta_mem);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_lock(&dev_priv->cmdbuf_mutex);
-+ if (dev_priv->buffers) {
-+ vfree(dev_priv->buffers);
-+ dev_priv->buffers = NULL;
-+ }
-+ mutex_unlock(&dev_priv->cmdbuf_mutex);
-+}
-+
-+static void psb_do_takedown(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (dev->bm.initialized) {
-+ if (dev_priv->have_mem_rastgeom) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_RASTGEOM);
-+ dev_priv->have_mem_rastgeom = 0;
-+ }
-+ if (dev_priv->have_mem_mmu) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_MMU);
-+ dev_priv->have_mem_mmu = 0;
-+ }
-+ if (dev_priv->have_mem_aper) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_APER);
-+ dev_priv->have_mem_aper = 0;
-+ }
-+ if (dev_priv->have_tt) {
-+ drm_bo_clean_mm(dev, DRM_BO_MEM_TT);
-+ dev_priv->have_tt = 0;
-+ }
-+ if (dev_priv->have_vram) {
-+ drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM);
-+ dev_priv->have_vram = 0;
-+ }
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (dev_priv->has_msvdx)
-+ psb_msvdx_uninit(dev);
-+
-+ if (dev_priv->comm) {
-+ kunmap(dev_priv->comm_page);
-+ dev_priv->comm = NULL;
-+ }
-+ if (dev_priv->comm_page) {
-+ __free_page(dev_priv->comm_page);
-+ dev_priv->comm_page = NULL;
-+ }
-+}
-+
-+void psb_clockgating(struct drm_psb_private *dev_priv)
-+{
-+ uint32_t clock_gating;
-+
-+ if (dev_priv->uopt.clock_gating == 1) {
-+ PSB_DEBUG_INIT("Disabling clock gating.\n");
-+
-+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
-+
-+ } else if (dev_priv->uopt.clock_gating == 2) {
-+ PSB_DEBUG_INIT("Enabling clock gating.\n");
-+
-+ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
-+ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
-+ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
-+ } else
-+ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
-+
-+#ifdef FIX_TG_2D_CLOCKGATE
-+ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
-+ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
-+ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
-+#endif
-+ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
-+ (void)PSB_RSGX32(PSB_CR_CLKGATECTL);
-+}
-+
-+static int psb_master_create(struct drm_device *dev, struct drm_master *master)
-+{
-+ struct drm_i915_master_private *master_priv;
-+
-+ master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
-+ if (!master_priv)
-+ return -ENOMEM;
-+
-+ master->driver_priv = master_priv;
-+ return 0;
-+}
-+
-+static void psb_master_destroy(struct drm_device *dev, struct drm_master *master)
-+{
-+ struct drm_i915_master_private *master_priv = master->driver_priv;
-+
-+ if (!master_priv)
-+ return;
-+
-+ drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
-+
-+ master->driver_priv = NULL;
-+}
-+
-+
-+static int psb_do_init(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_gtt *pg = dev_priv->pg;
-+
-+ uint32_t stolen_gtt;
-+ uint32_t tt_start;
-+ uint32_t tt_pages;
-+
-+ int ret = -ENOMEM;
-+
-+ DRM_ERROR("Debug is 0x%08x\n", drm_psb_debug);
-+
-+ dev_priv->ta_mem_pages =
-+ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024, PAGE_SIZE) >> PAGE_SHIFT;
-+ dev_priv->comm_page = alloc_page(GFP_KERNEL);
-+ if (!dev_priv->comm_page)
-+ goto out_err;
-+
-+ dev_priv->comm = kmap(dev_priv->comm_page);
-+ memset((void *)dev_priv->comm, 0, PAGE_SIZE);
-+
-+ dev_priv->has_msvdx = 1;
-+ if (psb_msvdx_init(dev))
-+ dev_priv->has_msvdx = 0;
-+
-+ /*
-+ * Initialize sequence numbers for the different command
-+ * submission mechanisms.
-+ */
-+
-+ dev_priv->sequence[PSB_ENGINE_2D] = 0;
-+ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
-+ dev_priv->sequence[PSB_ENGINE_TA] = 0;
-+ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
-+
-+ if (pg->gatt_start & 0x0FFFFFFF) {
-+ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
-+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+ stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
-+
-+ dev_priv->gatt_free_offset = pg->gatt_start +
-+ (stolen_gtt << PAGE_SHIFT) * 1024;
-+
-+ /*
-+ * Insert a cache-coherent communications page in mmu space
-+ * just after the stolen area. Will be used for fencing etc.
-+ */
-+
-+ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
-+ dev_priv->gatt_free_offset += PAGE_SIZE;
-+
-+ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
-+ &dev_priv->comm_page,
-+ dev_priv->comm_mmu_offset, 1, 0, 0,
-+ PSB_MMU_CACHED_MEMORY);
-+
-+ if (ret)
-+ goto out_err;
-+
-+ if (1 || drm_debug) {
-+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
-+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
-+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
-+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
-+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
-+ _PSB_CC_REVISION_MAJOR_SHIFT,
-+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
-+ _PSB_CC_REVISION_MINOR_SHIFT);
-+ DRM_INFO
-+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
-+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
-+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
-+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
-+ _PSB_CC_REVISION_DESIGNER_SHIFT);
-+ }
-+
-+ dev_priv->irqmask_lock = SPIN_LOCK_UNLOCKED;
-+ dev_priv->fence0_irq_on = 0;
-+
-+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
-+ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
-+ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
-+ tt_pages -= tt_start >> PAGE_SHIFT;
-+
-+ mutex_lock(&dev->struct_mutex);
-+
-+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
-+ pg->stolen_size >> PAGE_SHIFT)) {
-+ dev_priv->have_vram = 1;
-+ }
-+
-+ if (!drm_bo_init_mm(dev, DRM_BO_MEM_TT, tt_start >> PAGE_SHIFT,
-+ tt_pages)) {
-+ dev_priv->have_tt = 1;
-+ }
-+
-+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_MMU, 0x00000000,
-+ (pg->gatt_start -
-+ PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
-+ dev_priv->have_mem_mmu = 1;
-+ }
-+
-+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
-+ (PSB_MEM_MMU_START -
-+ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
-+ dev_priv->have_mem_rastgeom = 1;
-+ }
-+#if 0
-+ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
-+ if (!drm_bo_init_mm(dev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
-+ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT)) {
-+ dev_priv->have_mem_aper = 1;
-+ }
-+ }
-+#endif
-+
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ return 0;
-+ out_err:
-+ psb_do_takedown(dev);
-+ return ret;
-+}
-+
-+static int psb_driver_unload(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ intel_modeset_cleanup(dev);
-+
-+ if (dev_priv) {
-+ psb_watchdog_takedown(dev_priv);
-+ psb_do_takedown(dev);
-+ psb_xhw_takedown(dev_priv);
-+ psb_scheduler_takedown(&dev_priv->scheduler);
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (dev_priv->have_mem_pds) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_PDS);
-+ dev_priv->have_mem_pds = 0;
-+ }
-+ if (dev_priv->have_mem_kernel) {
-+ drm_bo_clean_mm(dev, DRM_PSB_MEM_KERNEL);
-+ dev_priv->have_mem_kernel = 0;
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ (void)drm_bo_driver_finish(dev);
-+
-+ if (dev_priv->pf_pd) {
-+ psb_mmu_free_pagedir(dev_priv->pf_pd);
-+ dev_priv->pf_pd = NULL;
-+ }
-+ if (dev_priv->mmu) {
-+ struct psb_gtt *pg = dev_priv->pg;
-+
-+ down_read(&pg->sem);
-+ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
-+ (dev_priv->mmu),
-+ pg->gatt_start,
-+ pg->
-+ stolen_size >> PAGE_SHIFT);
-+ up_read(&pg->sem);
-+ psb_mmu_driver_takedown(dev_priv->mmu);
-+ dev_priv->mmu = NULL;
-+ }
-+ psb_gtt_takedown(dev_priv->pg, 1);
-+ if (dev_priv->scratch_page) {
-+ __free_page(dev_priv->scratch_page);
-+ dev_priv->scratch_page = NULL;
-+ }
-+ psb_takedown_use_base(dev_priv);
-+ if (dev_priv->common.regs) {
-+ iounmap(dev_priv->common.regs);
-+ dev_priv->common.regs = NULL;
-+ }
-+ if (dev_priv->sgx_reg) {
-+ iounmap(dev_priv->sgx_reg);
-+ dev_priv->sgx_reg = NULL;
-+ }
-+ if (dev_priv->msvdx_reg) {
-+ iounmap(dev_priv->msvdx_reg);
-+ dev_priv->msvdx_reg = NULL;
-+ }
-+
-+ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
-+ dev->dev_private = NULL;
-+ }
-+ return 0;
-+}
-+
-+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
-+{
-+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-+ struct drm_device *dev = fb->dev;
-+
-+ //if (fb->fbdev)
-+ // intelfb_remove(dev, fb);
-+
-+ drm_framebuffer_cleanup(fb);
-+ mutex_lock(&dev->struct_mutex);
-+ drm_gem_object_unreference(intel_fb->obj);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ kfree(intel_fb);
-+}
-+
-+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-+ struct drm_file *file_priv,
-+ unsigned int *handle)
-+{
-+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-+ struct drm_gem_object *object = intel_fb->obj;
-+
-+ return drm_gem_handle_create(file_priv, object, handle);
-+}
-+
-+static const struct drm_framebuffer_funcs psb_fb_funcs = {
-+ .destroy = psb_user_framebuffer_destroy,
-+ .create_handle = psb_user_framebuffer_create_handle,
-+};
-+
-+int psb_framebuffer_create(struct drm_device *dev,
-+ struct drm_mode_fb_cmd *mode_cmd,
-+ struct drm_framebuffer **fb,
-+ struct drm_gem_object *obj)
-+{
-+ struct intel_framebuffer *intel_fb;
-+ int ret;
-+
-+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-+ if (!intel_fb)
-+ return -ENOMEM;
-+
-+ ret = drm_framebuffer_init(dev, &intel_fb->base, &psb_fb_funcs);
-+ if (ret) {
-+ DRM_ERROR("framebuffer init failed %d\n", ret);
-+ return ret;
-+ }
-+
-+ drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
-+
-+ intel_fb->obj = obj;
-+
-+ *fb = &intel_fb->base;
-+
-+ return 0;
-+}
-+
-+
-+static struct drm_framebuffer *
-+psb_user_framebuffer_create(struct drm_device *dev,
-+ struct drm_file *filp,
-+ struct drm_mode_fb_cmd *mode_cmd)
-+{
-+ struct drm_gem_object *obj;
-+ struct drm_framebuffer *fb;
-+ int ret;
-+
-+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
-+ if (!obj)
-+ return NULL;
-+
-+ ret = psb_framebuffer_create(dev, mode_cmd, &fb, obj);
-+ if (ret) {
-+ drm_gem_object_unreference(obj);
-+ return NULL;
-+ }
-+
-+ return fb;
-+}
-+
-+
-+int psbfb_probe2(struct drm_device *dev)
-+{
-+ return 0;
-+}
-+
-+static const struct drm_mode_config_funcs psb_mode_funcs = {
-+ .fb_create = psb_user_framebuffer_create,
-+ .fb_changed = psbfb_probe2,
-+};
-+
-+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
-+{
-+ struct drm_psb_private *dev_priv;
-+ unsigned long resource_start;
-+ struct psb_gtt *pg;
-+ int ret = -ENOMEM;
-+
-+ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
-+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
-+ if (dev_priv == NULL)
-+ return -ENOMEM;
-+
-+ mutex_init(&dev_priv->temp_mem);
-+ mutex_init(&dev_priv->cmdbuf_mutex);
-+ mutex_init(&dev_priv->reset_mutex);
-+ psb_init_disallowed();
-+
-+ atomic_set(&dev_priv->msvdx_mmu_invaldc, 0);
-+
-+#ifdef FIX_TG_16
-+ atomic_set(&dev_priv->lock_2d, 0);
-+ atomic_set(&dev_priv->ta_wait_2d, 0);
-+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
-+ atomic_set(&dev_priv->waiters_2d, 0);;
-+ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
-+#else
-+ mutex_init(&dev_priv->mutex_2d);
-+#endif
-+
-+ spin_lock_init(&dev_priv->reloc_lock);
-+
-+ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
-+ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
-+
-+ dev->dev_private = (void *)dev_priv;
-+ dev_priv->chipset = chipset;
-+ psb_set_uopt(&dev_priv->uopt);
-+
-+ psb_watchdog_init(dev_priv);
-+ psb_scheduler_init(dev, &dev_priv->scheduler);
-+
-+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
-+
-+ dev_priv->msvdx_reg =
-+ ioremap(resource_start + PSB_MSVDX_OFFSET, PSB_MSVDX_SIZE);
-+ if (!dev_priv->msvdx_reg)
-+ goto out_err;
-+
-+ dev_priv->common.regs =
-+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
-+ if (!dev_priv->common.regs)
-+ goto out_err;
-+
-+ dev_priv->sgx_reg =
-+ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
-+ if (!dev_priv->sgx_reg)
-+ goto out_err;
-+
-+ psb_clockgating(dev_priv);
-+ if (psb_init_use_base(dev_priv, 3, 13))
-+ goto out_err;
-+
-+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
-+ if (!dev_priv->scratch_page)
-+ goto out_err;
-+
-+ dev_priv->pg = psb_gtt_alloc(dev);
-+ if (!dev_priv->pg)
-+ goto out_err;
-+
-+ ret = psb_gtt_init(dev_priv->pg, 0);
-+ if (ret)
-+ goto out_err;
-+
-+ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
-+ drm_psb_trap_pagefaults, 0,
-+ &dev_priv->msvdx_mmu_invaldc);
-+ if (!dev_priv->mmu)
-+ goto out_err;
-+
-+ pg = dev_priv->pg;
-+
-+ /*
-+ * Make sgx MMU aware of the stolen memory area we call VRAM.
-+ */
-+
-+ down_read(&pg->sem);
-+ ret =
-+ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
-+ pg->stolen_base >> PAGE_SHIFT,
-+ pg->gatt_start,
-+ pg->stolen_size >> PAGE_SHIFT, 0);
-+ up_read(&pg->sem);
-+ if (ret)
-+ goto out_err;
-+
-+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
-+ if (!dev_priv->pf_pd)
-+ goto out_err;
-+
-+ /*
-+ * Make all presumably unused requestors page-fault by making them
-+ * use context 1 which does not have any valid mappings.
-+ */
-+
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
-+ PSB_RSGX32(PSB_CR_BIF_BANK1);
-+
-+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
-+
-+ psb_init_2d(dev_priv);
-+
-+ ret = drm_bo_driver_init(dev);
-+ if (ret)
-+ goto out_err;
-+
-+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_KERNEL, 0x00000000,
-+ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
-+ >> PAGE_SHIFT);
-+ if (ret)
-+ goto out_err;
-+ dev_priv->have_mem_kernel = 1;
-+
-+ ret = drm_bo_init_mm(dev, DRM_PSB_MEM_PDS, 0x00000000,
-+ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
-+ >> PAGE_SHIFT);
-+ if (ret)
-+ goto out_err;
-+ dev_priv->have_mem_pds = 1;
-+
-+ ret = psb_do_init(dev);
-+ if (ret)
-+ return ret;
-+
-+ ret = psb_xhw_init(dev);
-+ if (ret)
-+ return ret;
-+
-+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
-+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
-+
-+ intel_modeset_init(dev);
-+
-+ dev->mode_config.funcs = (void *)&psb_mode_funcs;
-+
-+ drm_helper_initial_config(dev, false);
-+
-+ return 0;
-+ out_err:
-+ psb_driver_unload(dev);
-+ return ret;
-+}
-+
-+int psb_driver_device_is_agp(struct drm_device *dev)
-+{
-+ return 0;
-+}
-+
-+static int psb_prepare_msvdx_suspend(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[PSB_ENGINE_VIDEO];
-+ struct drm_fence_object *fence;
-+ int ret = 0;
-+ int signaled = 0;
-+ int count = 0;
-+ unsigned long _end = jiffies + 3 * DRM_HZ;
-+
-+ PSB_DEBUG_GENERAL("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
-+
-+ /*set the msvdx-reset flag here.. */
-+ dev_priv->msvdx_needs_reset = 1;
-+
-+ /*Ensure that all pending IRQs are serviced, */
-+ list_for_each_entry(fence, &fc->ring, ring) {
-+ count++;
-+ do {
-+ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
-+ (signaled =
-+ drm_fence_object_signaled(fence,
-+ DRM_FENCE_TYPE_EXE)));
-+ if (signaled)
-+ break;
-+ if (time_after_eq(jiffies, _end))
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXACPI: fence 0x%x didn't get signaled for 3 secs; we will suspend anyways\n",
-+ (unsigned int)fence);
-+ } while (ret == -EINTR);
-+
-+ }
-+
-+ /* Issue software reset */
-+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
-+
-+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0,
-+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
-+
-+ PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
-+ count);
-+ return 0;
-+}
-+
-+static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
-+{
-+ struct drm_device *dev = pci_get_drvdata(pdev);
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_connector *output;
-+
-+ if (drm_psb_no_fb == 0)
-+ psbfb_suspend(dev);
-+ else {
-+ if(num_registered_fb)
-+ {
-+ list_for_each_entry(output, &dev->mode_config.connector_list, head) {
-+ //if(output->encoder->crtc != NULL)
-+ // intel_crtc_mode_save(output->encoder->crtc);
-+ //if(output->funcs->save)
-+ // output->funcs->save(output);
-+ }
-+ }
-+ }
-+
-+ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
-+ (void)psb_idle_3d(dev);
-+ (void)psb_idle_2d(dev);
-+ flush_scheduled_work();
-+
-+ psb_takedown_use_base(dev_priv);
-+
-+ if (dev_priv->has_msvdx)
-+ psb_prepare_msvdx_suspend(dev);
-+
-+ pci_save_state(pdev);
-+ pci_disable_device(pdev);
-+ pci_set_power_state(pdev, PCI_D3hot);
-+
-+ return 0;
-+}
-+
-+static int psb_resume(struct pci_dev *pdev)
-+{
-+ struct drm_device *dev = pci_get_drvdata(pdev);
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_gtt *pg = dev_priv->pg;
-+ struct drm_connector *output;
-+ int ret;
-+
-+ pci_set_power_state(pdev, PCI_D0);
-+ pci_restore_state(pdev);
-+ ret = pci_enable_device(pdev);
-+ if (ret)
-+ return ret;
-+
-+ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
-+ dev_priv->msvdx_needs_reset = 1;
-+
-+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
-+ pci_write_config_word(pdev, PSB_GMCH_CTRL,
-+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
-+
-+ /*
-+ * The GTT page tables are probably not saved.
-+ * However, TT and VRAM is empty at this point.
-+ */
-+
-+ psb_gtt_init(dev_priv->pg, 1);
-+
-+ /*
-+ * The SGX loses it's register contents.
-+ * Restore BIF registers. The MMU page tables are
-+ * "normal" pages, so their contents should be kept.
-+ */
-+
-+ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
-+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
-+ PSB_RSGX32(PSB_CR_BIF_BANK1);
-+
-+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-+ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
-+
-+ /*
-+ * 2D Base registers..
-+ */
-+ psb_init_2d(dev_priv);
-+
-+ if (drm_psb_no_fb == 0) {
-+ list_for_each_entry(output, &dev->mode_config.connector_list, head) {
-+ if(output->encoder->crtc != NULL)
-+ drm_crtc_helper_set_mode(output->encoder->crtc, &output->encoder->crtc->mode,
-+ output->encoder->crtc->x, output->encoder->crtc->y, NULL);
-+ }
-+ }
-+
-+ /*
-+ * Persistant 3D base registers and USSE base registers..
-+ */
-+
-+ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
-+ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
-+ psb_init_use_base(dev_priv, 3, 13);
-+
-+ /*
-+ * Now, re-initialize the 3D engine.
-+ */
-+
-+ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
-+
-+ psb_scheduler_ta_mem_check(dev_priv);
-+ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
-+ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
-+ PSB_TA_MEM_FLAG_TA |
-+ PSB_TA_MEM_FLAG_RASTER |
-+ PSB_TA_MEM_FLAG_HOSTA |
-+ PSB_TA_MEM_FLAG_HOSTD |
-+ PSB_TA_MEM_FLAG_INIT,
-+ dev_priv->ta_mem->ta_memory->offset,
-+ dev_priv->ta_mem->hw_data->offset,
-+ dev_priv->ta_mem->hw_cookie);
-+ }
-+
-+ if (drm_psb_no_fb == 0)
-+ psbfb_resume(dev);
-+
-+ else {
-+ if(num_registered_fb)
-+ {
-+ struct fb_info *fb_info=registered_fb[0];
-+ list_for_each_entry(output, &dev->mode_config.connector_list, head) {
-+ //if(output->encoder->crtc != NULL)
-+ // intel_crtc_mode_restore(output->encoder->crtc);
-+ }
-+ if(fb_info)
-+ {
-+ fb_set_suspend(fb_info, 0);
-+ printk("set the fb_set_suspend resume end\n");
-+ }
-+ }
-+ }
-+
-+
-+ return 0;
-+}
-+
-+/* always available as we are SIGIO'd */
-+static unsigned int psb_poll(struct file *filp, struct poll_table_struct *wait)
-+{
-+ return (POLLIN | POLLRDNORM);
-+}
-+
-+static int psb_release(struct inode *inode, struct file *filp)
-+{
-+ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if (dev_priv && dev_priv->xhw_file) {
-+ psb_xhw_init_takedown(dev_priv, file_priv, 1);
-+ }
-+ return drm_release(inode, filp);
-+}
-+
-+extern struct drm_fence_driver psb_fence_driver;
-+
-+/*
-+ * Use this memory type priority if no eviction is needed.
-+ */
-+static uint32_t psb_mem_prios[] = { DRM_BO_MEM_VRAM,
-+ DRM_BO_MEM_TT,
-+ DRM_PSB_MEM_KERNEL,
-+ DRM_PSB_MEM_MMU,
-+ DRM_PSB_MEM_RASTGEOM,
-+ DRM_PSB_MEM_PDS,
-+ DRM_PSB_MEM_APER,
-+ DRM_BO_MEM_LOCAL
-+};
-+
-+/*
-+ * Use this memory type priority if need to evict.
-+ */
-+static uint32_t psb_busy_prios[] = { DRM_BO_MEM_TT,
-+ DRM_BO_MEM_VRAM,
-+ DRM_PSB_MEM_KERNEL,
-+ DRM_PSB_MEM_MMU,
-+ DRM_PSB_MEM_RASTGEOM,
-+ DRM_PSB_MEM_PDS,
-+ DRM_PSB_MEM_APER,
-+ DRM_BO_MEM_LOCAL
-+};
-+
-+static struct drm_bo_driver psb_bo_driver = {
-+ .mem_type_prio = psb_mem_prios,
-+ .mem_busy_prio = psb_busy_prios,
-+ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
-+ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
-+ .create_ttm_backend_entry = drm_psb_tbe_init,
-+ .fence_type = psb_fence_types,
-+ .invalidate_caches = psb_invalidate_caches,
-+ .init_mem_type = psb_init_mem_type,
-+ .evict_mask = psb_evict_mask,
-+ .move = psb_move,
-+ .backend_size = psb_tbe_size,
-+ .command_stream_barrier = NULL,
-+};
-+
-+static struct drm_driver driver = {
-+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-+ DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
-+ .load = psb_driver_load,
-+ .unload = psb_driver_unload,
-+ .dri_library_name = dri_library_name,
-+ .get_reg_ofs = drm_core_get_reg_ofs,
-+ .ioctls = psb_ioctls,
-+ .device_is_agp = psb_driver_device_is_agp,
-+ .get_vblank_counter = psb_get_vblank_counter,
-+ .enable_vblank = psb_enable_vblank,
-+ .disable_vblank = psb_disable_vblank,
-+ .irq_preinstall = psb_irq_preinstall,
-+ .irq_postinstall = psb_irq_postinstall,
-+ .irq_uninstall = psb_irq_uninstall,
-+ .irq_handler = psb_irq_handler,
-+ .master_create = psb_master_create,
-+ .master_destroy = psb_master_destroy,
-+ .fb_probe = psbfb_probe,
-+ .fb_remove = psbfb_remove,
-+ .firstopen = NULL,
-+ .lastclose = psb_lastclose,
-+ .fops = {
-+ .owner = THIS_MODULE,
-+ .open = drm_open,
-+ .release = psb_release,
-+ .ioctl = drm_ioctl,
-+ .mmap = drm_mmap,
-+ .poll = psb_poll,
-+ .fasync = drm_fasync,
-+ },
-+ .pci_driver = {
-+ .name = DRIVER_NAME,
-+ .id_table = pciidlist,
-+ .probe = probe,
-+ .remove = __devexit_p(drm_cleanup_pci),
-+ .resume = psb_resume,
-+ .suspend = psb_suspend,
-+ },
-+ .fence_driver = &psb_fence_driver,
-+ .bo_driver = &psb_bo_driver,
-+ .name = DRIVER_NAME,
-+ .desc = DRIVER_DESC,
-+ .date = PSB_DRM_DRIVER_DATE,
-+ .major = PSB_DRM_DRIVER_MAJOR,
-+ .minor = PSB_DRM_DRIVER_MINOR,
-+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
-+};
-+
-+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-+{
-+ return drm_get_dev(pdev, ent, &driver);
-+}
-+
-+static int __init psb_init(void)
-+{
-+ driver.num_ioctls = psb_max_ioctl;
-+
-+ return drm_init(&driver);
-+}
-+
-+static void __exit psb_exit(void)
-+{
-+ drm_exit(&driver);
-+}
-+
-+module_init(psb_init);
-+module_exit(psb_exit);
-+
-+MODULE_AUTHOR(DRIVER_AUTHOR);
-+MODULE_DESCRIPTION(DRIVER_DESC);
-+MODULE_LICENSE("GPL");
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_drv.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_drv.h 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,549 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+#ifndef _PSB_DRV_H_
-+#define _PSB_DRV_H_
-+
-+#include "drmP.h"
-+#include "psb_drm.h"
-+#include "psb_reg.h"
-+#include "psb_schedule.h"
-+#include "psb_priv.h"
-+#include "../i915/intel_drv.h"
-+
-+
-+enum {
-+ CHIP_PSB_8108 = 0,
-+ CHIP_PSB_8109 = 1
-+};
-+
-+/*
-+ * Hardware bugfixes
-+ */
-+
-+#define FIX_TG_16
-+#define FIX_TG_2D_CLOCKGATE
-+
-+#define DRIVER_NAME "psb"
-+#define DRIVER_DESC "drm driver for the Intel GMA500"
-+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
-+
-+#define PSB_DRM_DRIVER_DATE "20080613"
-+#define PSB_DRM_DRIVER_MAJOR 4
-+#define PSB_DRM_DRIVER_MINOR 12
-+#define PSB_DRM_DRIVER_PATCHLEVEL 0
-+
-+#define PSB_VDC_OFFSET 0x00000000
-+#define PSB_VDC_SIZE 0x000080000
-+#define PSB_SGX_SIZE 0x8000
-+#define PSB_SGX_OFFSET 0x00040000
-+#define PSB_MMIO_RESOURCE 0
-+#define PSB_GATT_RESOURCE 2
-+#define PSB_GTT_RESOURCE 3
-+#define PSB_GMCH_CTRL 0x52
-+#define PSB_BSM 0x5C
-+#define _PSB_GMCH_ENABLED 0x4
-+#define PSB_PGETBL_CTL 0x2020
-+#define _PSB_PGETBL_ENABLED 0x00000001
-+#define PSB_SGX_2D_SLAVE_PORT 0x4000
-+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
-+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
-+#define PSB_NUM_VALIDATE_BUFFERS 1024
-+#define PSB_MEM_KERNEL_START 0x10000000
-+#define PSB_MEM_PDS_START 0x20000000
-+#define PSB_MEM_MMU_START 0x40000000
-+
-+#define DRM_PSB_MEM_KERNEL DRM_BO_MEM_PRIV0
-+#define DRM_PSB_FLAG_MEM_KERNEL DRM_BO_FLAG_MEM_PRIV0
-+
-+/*
-+ * Flags for external memory type field.
-+ */
-+
-+#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
-+#define PSB_MSVDX_SIZE 0x8000 /*MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
-+
-+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
-+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
-+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
-+
-+/*
-+ * PTE's and PDE's
-+ */
-+
-+#define PSB_PDE_MASK 0x003FFFFF
-+#define PSB_PDE_SHIFT 22
-+#define PSB_PTE_SHIFT 12
-+
-+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
-+#define PSB_PTE_WO 0x0002 /* Write only */
-+#define PSB_PTE_RO 0x0004 /* Read only */
-+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
-+
-+/*
-+ * VDC registers and bits
-+ */
-+#define PSB_HWSTAM 0x2098
-+#define PSB_INSTPM 0x20C0
-+#define PSB_INT_IDENTITY_R 0x20A4
-+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
-+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
-+#define _PSB_IRQ_SGX_FLAG (1<<18)
-+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
-+#define PSB_INT_MASK_R 0x20A8
-+#define PSB_INT_ENABLE_R 0x20A0
-+#define PSB_PIPEASTAT 0x70024
-+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
-+#define _PSB_VBLANK_CLEAR (1 << 1)
-+#define PSB_PIPEBSTAT 0x71024
-+
-+#define _PSB_MMU_ER_MASK 0x0001FF00
-+#define _PSB_MMU_ER_HOST (1 << 16)
-+#define GPIOA 0x5010
-+#define GPIOB 0x5014
-+#define GPIOC 0x5018
-+#define GPIOD 0x501c
-+#define GPIOE 0x5020
-+#define GPIOF 0x5024
-+#define GPIOG 0x5028
-+#define GPIOH 0x502c
-+#define GPIO_CLOCK_DIR_MASK (1 << 0)
-+#define GPIO_CLOCK_DIR_IN (0 << 1)
-+#define GPIO_CLOCK_DIR_OUT (1 << 1)
-+#define GPIO_CLOCK_VAL_MASK (1 << 2)
-+#define GPIO_CLOCK_VAL_OUT (1 << 3)
-+#define GPIO_CLOCK_VAL_IN (1 << 4)
-+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
-+#define GPIO_DATA_DIR_MASK (1 << 8)
-+#define GPIO_DATA_DIR_IN (0 << 9)
-+#define GPIO_DATA_DIR_OUT (1 << 9)
-+#define GPIO_DATA_VAL_MASK (1 << 10)
-+#define GPIO_DATA_VAL_OUT (1 << 11)
-+#define GPIO_DATA_VAL_IN (1 << 12)
-+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-+
-+#define VCLK_DIVISOR_VGA0 0x6000
-+#define VCLK_DIVISOR_VGA1 0x6004
-+#define VCLK_POST_DIV 0x6010
-+
-+#define I915_READ(reg) readl(dev_priv->common.regs + (reg))
-+#define I915_WRITE(reg, val) writel(val, dev_priv->common.regs + (reg))
-+
-+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
-+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
-+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
-+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
-+#define PSB_COMM_USER_IRQ (1024 >> 2)
-+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
-+#define PSB_COMM_FW (2048 >> 2)
-+
-+#define PSB_UIRQ_VISTEST 1
-+#define PSB_UIRQ_OOM_REPLY 2
-+#define PSB_UIRQ_FIRE_TA_REPLY 3
-+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
-+
-+#define PSB_2D_SIZE (256*1024*1024)
-+#define PSB_MAX_RELOC_PAGES 1024
-+
-+#define PSB_LOW_REG_OFFS 0x0204
-+#define PSB_HIGH_REG_OFFS 0x0600
-+
-+#define PSB_NUM_VBLANKS 2
-+
-+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
-+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
-+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
-+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
-+#define PSB_COMM_FW (2048 >> 2)
-+
-+#define PSB_2D_SIZE (256*1024*1024)
-+#define PSB_MAX_RELOC_PAGES 1024
-+
-+#define PSB_LOW_REG_OFFS 0x0204
-+#define PSB_HIGH_REG_OFFS 0x0600
-+
-+#define PSB_NUM_VBLANKS 2
-+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
-+
-+/*
-+ * User options.
-+ */
-+
-+
-+struct psb_gtt {
-+ struct drm_device *dev;
-+ int initialized;
-+ uint32_t gatt_start;
-+ uint32_t gtt_start;
-+ uint32_t gtt_phys_start;
-+ unsigned gtt_pages;
-+ unsigned gatt_pages;
-+ uint32_t stolen_base;
-+ uint32_t pge_ctl;
-+ u16 gmch_ctrl;
-+ unsigned long stolen_size;
-+ uint32_t *gtt_map;
-+ struct rw_semaphore sem;
-+};
-+
-+struct psb_use_base {
-+ struct list_head head;
-+ struct drm_fence_object *fence;
-+ unsigned int reg;
-+ unsigned long offset;
-+ unsigned int dm;
-+};
-+
-+struct psb_buflist_item;
-+
-+struct psb_msvdx_cmd_queue {
-+ struct list_head head;
-+ void *cmd;
-+ unsigned long cmd_size;
-+ uint32_t sequence;
-+};
-+
-+
-+struct psb_mmu_driver;
-+
-+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
-+ int trap_pagefaults,
-+ int invalid_type,
-+ atomic_t *msvdx_mmu_invaldc);
-+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
-+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver);
-+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
-+ uint32_t gtt_start, uint32_t gtt_pages);
-+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
-+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
-+ int trap_pagefaults,
-+ int invalid_type);
-+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
-+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
-+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
-+ unsigned long address,
-+ uint32_t num_pages);
-+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
-+ uint32_t start_pfn,
-+ unsigned long address,
-+ uint32_t num_pages, int type);
-+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
-+ unsigned long *pfn);
-+
-+/*
-+ * Enable / disable MMU for different requestors.
-+ */
-+
-+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
-+ uint32_t mask);
-+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
-+ uint32_t mask);
-+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
-+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
-+ unsigned long address, uint32_t num_pages,
-+ uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride, int type);
-+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages,
-+ uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride);
-+/*
-+ * psb_sgx.c
-+ */
-+
-+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
-+ uint32_t sequence);
-+extern void psb_init_2d(struct drm_psb_private *dev_priv);
-+extern int psb_idle_2d(struct drm_device *dev);
-+extern int psb_idle_3d(struct drm_device *dev);
-+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
-+ uint32_t src_offset,
-+ uint32_t dst_offset, uint32_t pages,
-+ int direction);
-+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
-+ unsigned int cmds);
-+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset,
-+ unsigned long cmd_size, int engine,
-+ uint32_t * copy_buffer);
-+extern void psb_fence_or_sync(struct drm_file *priv,
-+ int engine,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_fence_arg *fence_arg,
-+ struct drm_fence_object **fence_p);
-+extern void psb_init_disallowed(void);
-+
-+/*
-+ * psb_irq.c
-+ */
-+
-+extern u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
-+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
-+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
-+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
-+extern void psb_irq_preinstall(struct drm_device *dev);
-+extern int psb_irq_postinstall(struct drm_device *dev);
-+extern void psb_irq_uninstall(struct drm_device *dev);
-+
-+/*
-+ * psb_fence.c
-+ */
-+
-+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
-+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
-+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
-+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
-+ uint32_t class);
-+extern int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t flags, uint32_t * sequence,
-+ uint32_t * native_type);
-+extern void psb_fence_error(struct drm_device *dev,
-+ uint32_t class,
-+ uint32_t sequence, uint32_t type, int error);
-+
-+/*MSVDX stuff*/
-+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
-+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
-+extern int psb_hw_info_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+
-+/*
-+ * psb_buffer.c
-+ */
-+extern struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev);
-+extern int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
-+ uint32_t * type);
-+extern uint32_t psb_evict_mask(struct drm_buffer_object *bo);
-+extern int psb_invalidate_caches(struct drm_device *dev, uint64_t flags);
-+extern int psb_init_mem_type(struct drm_device *dev, uint32_t type,
-+ struct drm_mem_type_manager *man);
-+extern int psb_move(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
-+extern int psb_tbe_size(struct drm_device *dev, unsigned long num_pages);
-+
-+/*
-+ * psb_gtt.c
-+ */
-+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
-+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
-+ unsigned offset_pages, unsigned num_pages,
-+ unsigned desired_tile_stride,
-+ unsigned hw_tile_stride, int type);
-+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
-+ unsigned num_pages,
-+ unsigned desired_tile_stride,
-+ unsigned hw_tile_stride);
-+
-+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
-+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
-+
-+/*
-+ * psb_fb.c
-+ */
-+extern int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
-+extern int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
-+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern void psbfb_suspend(struct drm_device *dev);
-+extern void psbfb_resume(struct drm_device *dev);
-+
-+/*
-+ * psb_reset.c
-+ */
-+
-+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
-+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
-+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
-+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
-+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
-+
-+/*
-+ * psb_regman.c
-+ */
-+
-+extern void psb_takedown_use_base(struct drm_psb_private *dev_priv);
-+extern int psb_grab_use_base(struct drm_psb_private *dev_priv,
-+ unsigned long dev_virtual,
-+ unsigned long size,
-+ unsigned int data_master,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int no_wait,
-+ int ignore_signals,
-+ int *r_reg, uint32_t * r_offset);
-+extern int psb_init_use_base(struct drm_psb_private *dev_priv,
-+ unsigned int reg_start, unsigned int reg_num);
-+
-+/*
-+ * psb_xhw.c
-+ */
-+
-+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_xhw_init(struct drm_device *dev);
-+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
-+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
-+ struct drm_file *file_priv, int closing);
-+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t fire_flags,
-+ uint32_t hw_context,
-+ uint32_t * cookie,
-+ uint32_t * oom_cmds,
-+ uint32_t num_oom_cmds,
-+ uint32_t offset,
-+ uint32_t engine, uint32_t flags);
-+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t fire_flags);
-+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t w,
-+ uint32_t h,
-+ uint32_t * hw_cookie,
-+ uint32_t * bo_size,
-+ uint32_t * clear_p_start,
-+ uint32_t * clear_num_pages);
-+
-+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * value);
-+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t pages,
-+ uint32_t * hw_cookie, uint32_t * size);
-+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie);
-+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t * cookie,
-+ uint32_t * bca,
-+ uint32_t * rca, uint32_t * flags);
-+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
-+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie);
-+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t flags,
-+ uint32_t param_offset,
-+ uint32_t pt_offset, uint32_t * hw_cookie);
-+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+
-+extern void psb_i2c_init(struct drm_psb_private *dev_priv);
-+
-+/*
-+ * psb_schedule.c: HW bug fixing.
-+ */
-+
-+#ifdef FIX_TG_16
-+
-+extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
-+extern void psb_2d_lock(struct drm_psb_private *dev_priv);
-+extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
-+
-+#else
-+
-+#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
-+#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
-+
-+#endif
-+
-+/*
-+ * Utilities
-+ */
-+
-+#define PSB_WVDC32(_val, _offs) I915_WRITE(_offs, _val)
-+#define PSB_RVDC32(_offs) I915_READ(_offs)
-+
-+#define PSB_ALIGN_TO(_val, _align) \
-+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
-+#define PSB_WSGX32(_val, _offs) \
-+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
-+#define PSB_RSGX32(_offs) \
-+ ioread32(dev_priv->sgx_reg + (_offs))
-+#define PSB_WMSVDX32(_val, _offs) \
-+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
-+#define PSB_RMSVDX32(_offs) \
-+ ioread32(dev_priv->msvdx_reg + (_offs))
-+
-+#define PSB_ALPL(_val, _base) \
-+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
-+#define PSB_ALPLM(_val, _base) \
-+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
-+
-+#define PSB_D_RENDER (1 << 16)
-+
-+#define PSB_D_GENERAL (1 << 0)
-+#define PSB_D_INIT (1 << 1)
-+#define PSB_D_IRQ (1 << 2)
-+#define PSB_D_FW (1 << 3)
-+#define PSB_D_PERF (1 << 4)
-+#define PSB_D_TMP (1 << 5)
-+#define PSB_D_RELOC (1 << 6)
-+
-+extern int drm_psb_debug;
-+extern int drm_psb_no_fb;
-+extern int drm_psb_disable_vsync;
-+
-+#define PSB_DEBUG_FW(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
-+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
-+#define PSB_DEBUG_INIT(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
-+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
-+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
-+#define PSB_DEBUG_PERF(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
-+#define PSB_DEBUG_TMP(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
-+#define PSB_DEBUG_RELOC(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_RELOC, _fmt, ##_arg)
-+
-+#if DRM_DEBUG_CODE
-+#define PSB_DEBUG(_flag, _fmt, _arg...) \
-+ do { \
-+ if (unlikely((_flag) & drm_psb_debug)) \
-+ printk(KERN_DEBUG \
-+ "[psb:0x%02x:%s] " _fmt , _flag, \
-+ __FUNCTION__ , ##_arg); \
-+ } while (0)
-+#else
-+#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
-+#endif
-+
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_fb.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_fb.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,1219 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/tty.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/fb.h>
-+#include <linux/init.h>
-+#include <linux/console.h>
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "drm_crtc.h"
-+#include "psb_drv.h"
-+
-+struct psbfb_vm_info {
-+ struct drm_buffer_object *bo;
-+ struct address_space *f_mapping;
-+ struct mutex vm_mutex;
-+ atomic_t refcount;
-+};
-+
-+struct psbfb_par {
-+ struct drm_device *dev;
-+ struct drm_crtc *crtc;
-+ struct drm_connector *output;
-+ struct psbfb_vm_info *vi;
-+ int dpms_state;
-+};
-+
-+static void psbfb_vm_info_deref(struct psbfb_vm_info **vi)
-+{
-+ struct psbfb_vm_info *tmp = *vi;
-+ *vi = NULL;
-+ if (atomic_dec_and_test(&tmp->refcount)) {
-+ drm_bo_usage_deref_unlocked(&tmp->bo);
-+ drm_free(tmp, sizeof(*tmp), DRM_MEM_MAPS);
-+ }
-+}
-+
-+static struct psbfb_vm_info *psbfb_vm_info_ref(struct psbfb_vm_info *vi)
-+{
-+ atomic_inc(&vi->refcount);
-+ return vi;
-+}
-+
-+static struct psbfb_vm_info *psbfb_vm_info_create(void)
-+{
-+ struct psbfb_vm_info *vi;
-+
-+ vi = drm_calloc(1, sizeof(*vi), DRM_MEM_MAPS);
-+ if (!vi)
-+ return NULL;
-+
-+ mutex_init(&vi->vm_mutex);
-+ atomic_set(&vi->refcount, 1);
-+ return vi;
-+}
-+
-+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-+
-+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
-+ unsigned blue, unsigned transp, struct fb_info *info)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_crtc *crtc = par->crtc;
-+ uint32_t v;
-+
-+ if (!crtc->fb)
-+ return -ENOMEM;
-+
-+ if (regno > 255)
-+ return 1;
-+
-+ if (crtc->fb->depth == 8) {
-+ intel_crtc_fb_gamma_set(crtc, red, green, blue, regno);
-+ return 0;
-+ }
-+
-+ red = CMAP_TOHW(red, info->var.red.length);
-+ blue = CMAP_TOHW(blue, info->var.blue.length);
-+ green = CMAP_TOHW(green, info->var.green.length);
-+ transp = CMAP_TOHW(transp, info->var.transp.length);
-+
-+ v = (red << info->var.red.offset) |
-+ (green << info->var.green.offset) |
-+ (blue << info->var.blue.offset) |
-+ (transp << info->var.transp.offset);
-+
-+ switch (crtc->fb->bits_per_pixel) {
-+ case 16:
-+ ((uint32_t *) info->pseudo_palette)[regno] = v;
-+ break;
-+ case 24:
-+ case 32:
-+ ((uint32_t *) info->pseudo_palette)[regno] = v;
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int psbfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-+{
-+ struct psbfb_par *par = info->par;
-+ //struct drm_device *dev = par->dev;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ //struct drm_display_mode *drm_mode;
-+ //struct drm_connector *output;
-+ int depth;
-+ int pitch;
-+ int bpp = var->bits_per_pixel;
-+
-+ if (!fb)
-+ return -ENOMEM;
-+
-+ if (!var->pixclock)
-+ return -EINVAL;
-+
-+ /* don't support virtuals for now */
-+ if (var->xres_virtual > var->xres)
-+ return -EINVAL;
-+
-+ if (var->yres_virtual > var->yres)
-+ return -EINVAL;
-+
-+ switch (bpp) {
-+ case 8:
-+ depth = 8;
-+ break;
-+ case 16:
-+ depth = (var->green.length == 6) ? 16 : 15;
-+ break;
-+ case 24: /* assume this is 32bpp / depth 24 */
-+ bpp = 32;
-+ /* fallthrough */
-+ case 32:
-+ depth = (var->transp.length > 0) ? 32 : 24;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
-+
-+ /* Check that we can resize */
-+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
-+ /* Need to resize the fb object.
-+ * But the generic fbdev code doesn't really understand
-+ * that we can do this. So disable for now.
-+ */
-+ DRM_INFO("Can't support requested size, too big!\n");
-+ return -EINVAL;
-+ }
-+
-+ switch (depth) {
-+ case 8:
-+ var->red.offset = 0;
-+ var->green.offset = 0;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 15:
-+ var->red.offset = 10;
-+ var->green.offset = 5;
-+ var->blue.offset = 0;
-+ var->red.length = 5;
-+ var->green.length = 5;
-+ var->blue.length = 5;
-+ var->transp.length = 1;
-+ var->transp.offset = 15;
-+ break;
-+ case 16:
-+ var->red.offset = 11;
-+ var->green.offset = 5;
-+ var->blue.offset = 0;
-+ var->red.length = 5;
-+ var->green.length = 6;
-+ var->blue.length = 5;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 24:
-+ var->red.offset = 16;
-+ var->green.offset = 8;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 0;
-+ var->transp.offset = 0;
-+ break;
-+ case 32:
-+ var->red.offset = 16;
-+ var->green.offset = 8;
-+ var->blue.offset = 0;
-+ var->red.length = 8;
-+ var->green.length = 8;
-+ var->blue.length = 8;
-+ var->transp.length = 8;
-+ var->transp.offset = 24;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return 0;
-+}
-+
-+static int psbfb_move_fb_bo(struct fb_info *info, struct drm_buffer_object *bo,
-+ uint64_t mem_type_flags)
-+{
-+ struct psbfb_par *par;
-+ loff_t holelen;
-+ int ret;
-+
-+ /*
-+ * Kill all user-space mappings of this device. They will be
-+ * faulted back using nopfn when accessed.
-+ */
-+
-+ par = info->par;
-+ holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
-+ mutex_lock(&par->vi->vm_mutex);
-+ if (par->vi->f_mapping) {
-+ unmap_mapping_range(par->vi->f_mapping, 0, holelen, 1);
-+ }
-+
-+ ret = drm_bo_do_validate(bo,
-+ mem_type_flags,
-+ DRM_BO_MASK_MEM |
-+ DRM_BO_FLAG_NO_EVICT,
-+ DRM_BO_HINT_DONT_FENCE, 0, 1, NULL);
-+
-+ mutex_unlock(&par->vi->vm_mutex);
-+ return ret;
-+}
-+
-+/* this will let fbcon do the mode init */
-+static int psbfb_set_par(struct fb_info *info)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_device *dev = par->dev;
-+ struct drm_display_mode *drm_mode;
-+ struct fb_var_screeninfo *var = &info->var;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ //struct drm_connector *output;
-+ int pitch;
-+ int depth;
-+ int bpp = var->bits_per_pixel;
-+
-+ if (!fb)
-+ return -ENOMEM;
-+
-+ switch (bpp) {
-+ case 8:
-+ depth = 8;
-+ break;
-+ case 16:
-+ depth = (var->green.length == 6) ? 16 : 15;
-+ break;
-+ case 24: /* assume this is 32bpp / depth 24 */
-+ bpp = 32;
-+ /* fallthrough */
-+ case 32:
-+ depth = (var->transp.length > 0) ? 32 : 24;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
-+
-+ if ((pitch * var->yres) > (fb->bo->num_pages << PAGE_SHIFT)) {
-+ /* Need to resize the fb object.
-+ * But the generic fbdev code doesn't really understand
-+ * that we can do this. So disable for now.
-+ */
-+ DRM_INFO("Can't support requested size, too big!\n");
-+ return -EINVAL;
-+ }
-+
-+ fb->offset = fb->bo->offset - dev_priv->pg->gatt_start;
-+ fb->width = var->xres;
-+ fb->height = var->yres;
-+ fb->bits_per_pixel = bpp;
-+ fb->pitch = pitch;
-+ fb->depth = depth;
-+
-+ info->fix.line_length = fb->pitch;
-+ info->fix.visual =
-+ (fb->depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-+
-+ /* some fbdev's apps don't want these to change */
-+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
-+
-+ /* we have to align the output base address because the fb->bo
-+ may be moved in the previous drm_bo_do_validate().
-+ Otherwise the output screens may go black when exit the X
-+ window and re-enter the console */
-+ info->screen_base = fb->kmap.virtual;
-+
-+ /* Should we walk the output's modelist or just create our own ???
-+ * For now, we create and destroy a mode based on the incoming
-+ * parameters. But there's commented out code below which scans
-+ * the output list too.
-+ */
-+
-+ drm_mode = drm_mode_create(dev);
-+ drm_mode->hdisplay = var->xres;
-+ drm_mode->hsync_start = drm_mode->hdisplay + var->right_margin;
-+ drm_mode->hsync_end = drm_mode->hsync_start + var->hsync_len;
-+ drm_mode->htotal = drm_mode->hsync_end + var->left_margin;
-+ drm_mode->vdisplay = var->yres;
-+ drm_mode->vsync_start = drm_mode->vdisplay + var->lower_margin;
-+ drm_mode->vsync_end = drm_mode->vsync_start + var->vsync_len;
-+ drm_mode->vtotal = drm_mode->vsync_end + var->upper_margin;
-+ drm_mode->clock = PICOS2KHZ(var->pixclock);
-+ drm_mode->vrefresh = drm_mode_vrefresh(drm_mode);
-+ drm_mode_set_name(drm_mode);
-+ drm_mode_set_crtcinfo(drm_mode, CRTC_INTERLACE_HALVE_V);
-+
-+
-+ if (!drm_crtc_helper_set_mode(par->crtc, drm_mode, 0, 0, NULL))
-+ return -EINVAL;
-+
-+ /* Have to destroy our created mode if we're not searching the mode
-+ * list for it.
-+ */
-+ drm_mode_destroy(dev, drm_mode);
-+
-+ return 0;
-+}
-+
-+extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);;
-+
-+static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
-+ uint32_t dst_offset, uint32_t dst_stride,
-+ uint32_t dst_format, uint16_t dst_x,
-+ uint16_t dst_y, uint16_t size_x,
-+ uint16_t size_y, uint32_t fill)
-+{
-+ uint32_t buffer[10];
-+ uint32_t *buf;
-+ int ret;
-+
-+ buf = buffer;
-+
-+ *buf++ = PSB_2D_FENCE_BH;
-+
-+ *buf++ =
-+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
-+ PSB_2D_DST_STRIDE_SHIFT);
-+ *buf++ = dst_offset;
-+
-+ *buf++ =
-+ PSB_2D_BLIT_BH |
-+ PSB_2D_ROT_NONE |
-+ PSB_2D_COPYORDER_TL2BR |
-+ PSB_2D_DSTCK_DISABLE |
-+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
-+
-+ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
-+ *buf++ =
-+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
-+ PSB_2D_DST_YSTART_SHIFT);
-+ *buf++ =
-+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
-+ PSB_2D_DST_YSIZE_SHIFT);
-+ *buf++ = PSB_2D_FLUSH_BH;
-+
-+ psb_2d_lock(dev_priv);
-+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
-+ psb_2d_unlock(dev_priv);
-+
-+ return ret;
-+}
-+
-+static void psbfb_fillrect_accel(struct fb_info *info,
-+ const struct fb_fillrect *r)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_psb_private *dev_priv = par->dev->dev_private;
-+ uint32_t offset;
-+ uint32_t stride;
-+ uint32_t format;
-+
-+ if (!fb)
-+ return;
-+
-+ offset = fb->offset;
-+ stride = fb->pitch;
-+
-+ switch (fb->depth) {
-+ case 8:
-+ format = PSB_2D_DST_332RGB;
-+ break;
-+ case 15:
-+ format = PSB_2D_DST_555RGB;
-+ break;
-+ case 16:
-+ format = PSB_2D_DST_565RGB;
-+ break;
-+ case 24:
-+ case 32:
-+ /* this is wrong but since we don't do blending its okay */
-+ format = PSB_2D_DST_8888ARGB;
-+ break;
-+ default:
-+ /* software fallback */
-+ cfb_fillrect(info, r);
-+ return;
-+ }
-+
-+ psb_accel_2d_fillrect(dev_priv,
-+ offset, stride, format,
-+ r->dx, r->dy, r->width, r->height, r->color);
-+}
-+
-+static void psbfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-+{
-+ if (info->state != FBINFO_STATE_RUNNING)
-+ return;
-+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
-+ cfb_fillrect(info, rect);
-+ return;
-+ }
-+ if (in_interrupt() || in_atomic()) {
-+ /*
-+ * Catch case when we're shutting down.
-+ */
-+ cfb_fillrect(info, rect);
-+ return;
-+ }
-+ psbfb_fillrect_accel(info, rect);
-+}
-+
-+uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
-+{
-+ if (xdir < 0)
-+ return ((ydir <
-+ 0) ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TR2BL);
-+ else
-+ return ((ydir <
-+ 0) ? PSB_2D_COPYORDER_BL2TR : PSB_2D_COPYORDER_TL2BR);
-+}
-+
-+/*
-+ * @srcOffset in bytes
-+ * @srcStride in bytes
-+ * @srcFormat psb 2D format defines
-+ * @dstOffset in bytes
-+ * @dstStride in bytes
-+ * @dstFormat psb 2D format defines
-+ * @srcX offset in pixels
-+ * @srcY offset in pixels
-+ * @dstX offset in pixels
-+ * @dstY offset in pixels
-+ * @sizeX of the copied area
-+ * @sizeY of the copied area
-+ */
-+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
-+ uint32_t src_offset, uint32_t src_stride,
-+ uint32_t src_format, uint32_t dst_offset,
-+ uint32_t dst_stride, uint32_t dst_format,
-+ uint16_t src_x, uint16_t src_y, uint16_t dst_x,
-+ uint16_t dst_y, uint16_t size_x, uint16_t size_y)
-+{
-+ uint32_t blit_cmd;
-+ uint32_t buffer[10];
-+ uint32_t *buf;
-+ uint32_t direction;
-+ int ret;
-+
-+ buf = buffer;
-+
-+ direction = psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
-+
-+ if (direction == PSB_2D_COPYORDER_BR2TL ||
-+ direction == PSB_2D_COPYORDER_TR2BL) {
-+ src_x += size_x - 1;
-+ dst_x += size_x - 1;
-+ }
-+ if (direction == PSB_2D_COPYORDER_BR2TL ||
-+ direction == PSB_2D_COPYORDER_BL2TR) {
-+ src_y += size_y - 1;
-+ dst_y += size_y - 1;
-+ }
-+
-+ blit_cmd =
-+ PSB_2D_BLIT_BH |
-+ PSB_2D_ROT_NONE |
-+ PSB_2D_DSTCK_DISABLE |
-+ PSB_2D_SRCCK_DISABLE |
-+ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
-+
-+ *buf++ = PSB_2D_FENCE_BH;
-+ *buf++ =
-+ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
-+ PSB_2D_DST_STRIDE_SHIFT);
-+ *buf++ = dst_offset;
-+ *buf++ =
-+ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
-+ PSB_2D_SRC_STRIDE_SHIFT);
-+ *buf++ = src_offset;
-+ *buf++ =
-+ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) | (src_y
-+ <<
-+ PSB_2D_SRCOFF_YSTART_SHIFT);
-+ *buf++ = blit_cmd;
-+ *buf++ =
-+ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
-+ PSB_2D_DST_YSTART_SHIFT);
-+ *buf++ =
-+ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
-+ PSB_2D_DST_YSIZE_SHIFT);
-+ *buf++ = PSB_2D_FLUSH_BH;
-+
-+ psb_2d_lock(dev_priv);
-+ ret = psb_2d_submit(dev_priv, buffer, buf - buffer);
-+ psb_2d_unlock(dev_priv);
-+ return ret;
-+}
-+
-+static void psbfb_copyarea_accel(struct fb_info *info,
-+ const struct fb_copyarea *a)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_psb_private *dev_priv = par->dev->dev_private;
-+ uint32_t offset;
-+ uint32_t stride;
-+ uint32_t src_format;
-+ uint32_t dst_format;
-+
-+ if (!fb)
-+ return;
-+
-+ offset = fb->offset;
-+ stride = fb->pitch;
-+
-+ if (a->width == 8 || a->height == 8) {
-+ psb_2d_lock(dev_priv);
-+ psb_idle_2d(par->dev);
-+ psb_2d_unlock(dev_priv);
-+ cfb_copyarea(info, a);
-+ return;
-+ }
-+
-+ switch (fb->depth) {
-+ case 8:
-+ src_format = PSB_2D_SRC_332RGB;
-+ dst_format = PSB_2D_DST_332RGB;
-+ break;
-+ case 15:
-+ src_format = PSB_2D_SRC_555RGB;
-+ dst_format = PSB_2D_DST_555RGB;
-+ break;
-+ case 16:
-+ src_format = PSB_2D_SRC_565RGB;
-+ dst_format = PSB_2D_DST_565RGB;
-+ break;
-+ case 24:
-+ case 32:
-+ /* this is wrong but since we don't do blending its okay */
-+ src_format = PSB_2D_SRC_8888ARGB;
-+ dst_format = PSB_2D_DST_8888ARGB;
-+ break;
-+ default:
-+ /* software fallback */
-+ cfb_copyarea(info, a);
-+ return;
-+ }
-+
-+ psb_accel_2d_copy(dev_priv,
-+ offset, stride, src_format,
-+ offset, stride, dst_format,
-+ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
-+}
-+
-+static void psbfb_copyarea(struct fb_info *info,
-+ const struct fb_copyarea *region)
-+{
-+ if (info->state != FBINFO_STATE_RUNNING)
-+ return;
-+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
-+ cfb_copyarea(info, region);
-+ return;
-+ }
-+ if (in_interrupt() || in_atomic()) {
-+ /*
-+ * Catch case when we're shutting down.
-+ */
-+ cfb_copyarea(info, region);
-+ return;
-+ }
-+
-+ psbfb_copyarea_accel(info, region);
-+}
-+
-+void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
-+{
-+ if (info->state != FBINFO_STATE_RUNNING)
-+ return;
-+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
-+ cfb_imageblit(info, image);
-+ return;
-+ }
-+ if (in_interrupt() || in_atomic()) {
-+ cfb_imageblit(info, image);
-+ return;
-+ }
-+
-+ cfb_imageblit(info, image);
-+}
-+
-+static int psbfb_blank(int blank_mode, struct fb_info *info)
-+{
-+ int dpms_mode;
-+ struct psbfb_par *par = info->par;
-+ struct drm_connector *output;
-+ struct drm_crtc_helper_funcs *crtc_funcs;
-+
-+ par->dpms_state = blank_mode;
-+
-+ switch(blank_mode) {
-+ case FB_BLANK_UNBLANK:
-+ dpms_mode = DRM_MODE_DPMS_ON;
-+ break;
-+ case FB_BLANK_NORMAL:
-+ if (!par->crtc)
-+ return 0;
-+ crtc_funcs = par->crtc->helper_private;
-+
-+ (*crtc_funcs->dpms)(par->crtc, DRM_MODE_DPMS_STANDBY);
-+ return 0;
-+ case FB_BLANK_HSYNC_SUSPEND:
-+ default:
-+ dpms_mode = DRM_MODE_DPMS_STANDBY;
-+ break;
-+ case FB_BLANK_VSYNC_SUSPEND:
-+ dpms_mode = DRM_MODE_DPMS_SUSPEND;
-+ break;
-+ case FB_BLANK_POWERDOWN:
-+ dpms_mode = DRM_MODE_DPMS_OFF;
-+ break;
-+ }
-+
-+ if (!par->crtc)
-+ return 0;
-+
-+ crtc_funcs = par->crtc->helper_private;
-+
-+ list_for_each_entry(output, &par->dev->mode_config.connector_list, head) {
-+ if (output->encoder->crtc == par->crtc)
-+ (*output->funcs->dpms)(output, dpms_mode);
-+ }
-+
-+ return 0;
-+}
-+
-+
-+static int psbfb_kms_off(struct drm_device *dev, int suspend)
-+{
-+ struct drm_framebuffer *fb = 0;
-+ struct drm_buffer_object *bo = 0;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+
-+ DRM_DEBUG("psbfb_kms_off_ioctl\n");
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
-+ struct fb_info *info = fb->fbdev;
-+ struct psbfb_par *par = info->par;
-+ int save_dpms_state;
-+
-+ if (suspend)
-+ fb_set_suspend(info, 1);
-+ else
-+ info->state &= ~FBINFO_STATE_RUNNING;
-+
-+ info->screen_base = NULL;
-+
-+ bo = fb->bo;
-+
-+ if (!bo)
-+ continue;
-+
-+ drm_bo_kunmap(&fb->kmap);
-+
-+ /*
-+ * We don't take the 2D lock here as we assume that the
-+ * 2D engine will eventually idle anyway.
-+ */
-+
-+ if (!suspend) {
-+ uint32_t dummy2 = 0;
-+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
-+ &dummy2, &dummy2);
-+ psb_2d_lock(dev_priv);
-+ (void)psb_idle_2d(dev);
-+ psb_2d_unlock(dev_priv);
-+ } else
-+ psb_idle_2d(dev);
-+
-+ save_dpms_state = par->dpms_state;
-+ psbfb_blank(FB_BLANK_NORMAL, info);
-+ par->dpms_state = save_dpms_state;
-+
-+ ret = psbfb_move_fb_bo(info, bo, DRM_BO_FLAG_MEM_LOCAL);
-+
-+ if (ret)
-+ goto out_err;
-+ }
-+ out_err:
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ return ret;
-+}
-+
-+int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ int ret;
-+
-+ acquire_console_sem();
-+ ret = psbfb_kms_off(dev, 0);
-+ release_console_sem();
-+
-+ return ret;
-+}
-+
-+static int psbfb_kms_on(struct drm_device *dev, int resume)
-+{
-+ struct drm_framebuffer *fb = 0;
-+ struct drm_buffer_object *bo = 0;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+ int dummy;
-+
-+ DRM_DEBUG("psbfb_kms_on_ioctl\n");
-+
-+ if (!resume) {
-+ uint32_t dummy2 = 0;
-+ (void) psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
-+ &dummy2, &dummy2);
-+ psb_2d_lock(dev_priv);
-+ (void)psb_idle_2d(dev);
-+ psb_2d_unlock(dev_priv);
-+ } else
-+ psb_idle_2d(dev);
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
-+ struct fb_info *info = fb->fbdev;
-+ struct psbfb_par *par = info->par;
-+
-+ bo = fb->bo;
-+ if (!bo)
-+ continue;
-+
-+ ret = psbfb_move_fb_bo(info, bo,
-+ DRM_BO_FLAG_MEM_TT |
-+ DRM_BO_FLAG_MEM_VRAM |
-+ DRM_BO_FLAG_NO_EVICT);
-+ if (ret)
-+ goto out_err;
-+
-+ ret = drm_bo_kmap(bo, 0, bo->num_pages, &fb->kmap);
-+ if (ret)
-+ goto out_err;
-+
-+ info->screen_base = drm_bmo_virtual(&fb->kmap, &dummy);
-+ fb->offset = bo->offset - dev_priv->pg->gatt_start;
-+
-+ if (ret)
-+ goto out_err;
-+
-+ if (resume)
-+ fb_set_suspend(info, 0);
-+ else
-+ info->state |= FBINFO_STATE_RUNNING;
-+
-+ /*
-+ * Re-run modesetting here, since the VDS scanout offset may
-+ * have changed.
-+ */
-+
-+ if (par->crtc->enabled) {
-+ psbfb_set_par(info);
-+ psbfb_blank(par->dpms_state, info);
-+ }
-+ }
-+ out_err:
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ return ret;
-+}
-+
-+int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ int ret;
-+
-+ acquire_console_sem();
-+ ret = psbfb_kms_on(dev, 0);
-+ release_console_sem();
-+
-+ drm_helper_disable_unused_functions(dev);
-+
-+ return ret;
-+}
-+
-+void psbfb_suspend(struct drm_device *dev)
-+{
-+ acquire_console_sem();
-+ psbfb_kms_off(dev, 1);
-+ release_console_sem();
-+}
-+
-+void psbfb_resume(struct drm_device *dev)
-+{
-+ acquire_console_sem();
-+ psbfb_kms_on(dev, 1);
-+ release_console_sem();
-+
-+ drm_helper_disable_unused_functions(dev);
-+}
-+
-+/*
-+ * FIXME: Before kernel inclusion, migrate nopfn to fault.
-+ * Also, these should be the default vm ops for buffer object type fbs.
-+ */
-+
-+extern unsigned long drm_bo_vm_fault(struct vm_area_struct *vma,
-+ struct vm_fault *vmf);
-+
-+/*
-+ * This wrapper is a bit ugly and is here because we need access to a mutex
-+ * that we can lock both around nopfn and around unmap_mapping_range + move.
-+ * Normally, this would've been done using the bo mutex, but unfortunately
-+ * we cannot lock it around drm_bo_do_validate(), since that would imply
-+ * recursive locking.
-+ */
-+
-+static int psbfb_fault(struct vm_area_struct *vma,
-+ struct vm_fault *vmf)
-+{
-+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
-+ struct vm_area_struct tmp_vma;
-+ int ret;
-+
-+ mutex_lock(&vi->vm_mutex);
-+ tmp_vma = *vma;
-+ tmp_vma.vm_private_data = vi->bo;
-+ ret = drm_bo_vm_fault(&tmp_vma, vmf);
-+ mutex_unlock(&vi->vm_mutex);
-+ return ret;
-+}
-+
-+static void psbfb_vm_open(struct vm_area_struct *vma)
-+{
-+ struct psbfb_vm_info *vi = (struct psbfb_vm_info *)vma->vm_private_data;
-+
-+ atomic_inc(&vi->refcount);
-+}
-+
-+static void psbfb_vm_close(struct vm_area_struct *vma)
-+{
-+ psbfb_vm_info_deref((struct psbfb_vm_info **)&vma->vm_private_data);
-+}
-+
-+static struct vm_operations_struct psbfb_vm_ops = {
-+ .fault = psbfb_fault,
-+ .open = psbfb_vm_open,
-+ .close = psbfb_vm_close,
-+};
-+
-+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_framebuffer *fb = par->crtc->fb;
-+ struct drm_buffer_object *bo = fb->bo;
-+ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-+ unsigned long offset = vma->vm_pgoff;
-+
-+ if (vma->vm_pgoff != 0)
-+ return -EINVAL;
-+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
-+ return -EINVAL;
-+ if (offset + size > bo->num_pages)
-+ return -EINVAL;
-+
-+ mutex_lock(&par->vi->vm_mutex);
-+ if (!par->vi->f_mapping)
-+ par->vi->f_mapping = vma->vm_file->f_mapping;
-+ mutex_unlock(&par->vi->vm_mutex);
-+
-+ vma->vm_private_data = psbfb_vm_info_ref(par->vi);
-+
-+ vma->vm_ops = &psbfb_vm_ops;
-+ vma->vm_flags |= VM_PFNMAP;
-+
-+ return 0;
-+}
-+
-+int psbfb_sync(struct fb_info *info)
-+{
-+ struct psbfb_par *par = info->par;
-+ struct drm_psb_private *dev_priv = par->dev->dev_private;
-+
-+ psb_2d_lock(dev_priv);
-+ psb_idle_2d(par->dev);
-+ psb_2d_unlock(dev_priv);
-+
-+ return 0;
-+}
-+
-+static struct fb_ops psbfb_ops = {
-+ .owner = THIS_MODULE,
-+ .fb_check_var = psbfb_check_var,
-+ .fb_set_par = psbfb_set_par,
-+ .fb_setcolreg = psbfb_setcolreg,
-+ .fb_fillrect = psbfb_fillrect,
-+ .fb_copyarea = psbfb_copyarea,
-+ .fb_imageblit = psbfb_imageblit,
-+ .fb_mmap = psbfb_mmap,
-+ .fb_sync = psbfb_sync,
-+ .fb_blank = psbfb_blank,
-+};
-+
-+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
-+{
-+ drm_framebuffer_cleanup(fb);
-+ kfree(fb);
-+}
-+
-+static const struct drm_framebuffer_funcs psb_fb_funcs = {
-+ .destroy = psb_user_framebuffer_destroy,
-+};
-+
-+int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
-+{
-+ struct fb_info *info;
-+ struct psbfb_par *par;
-+ struct device *device = &dev->pdev->dev;
-+ struct drm_framebuffer *fb;
-+ struct drm_display_mode *mode = crtc->desired_mode;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_buffer_object *fbo = NULL;
-+ int ret;
-+ int is_iomem;
-+
-+ if (drm_psb_no_fb) {
-+ /* need to do this as the DRM will disable the output */
-+ crtc->enabled = 1;
-+ return 0;
-+ }
-+
-+ fb = kzalloc(sizeof(struct drm_framebuffer), GFP_KERNEL);
-+ if (!fb)
-+ return -ENOMEM;
-+
-+
-+ ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
-+ if (!fb) {
-+ DRM_ERROR("failed to allocate fb.\n");
-+ return -ENOMEM;
-+ }
-+ crtc->fb = fb;
-+
-+ fb->width = mode->hdisplay;
-+ fb->height = mode->vdisplay;
-+
-+ fb->bits_per_pixel = 32;
-+ fb->depth = 24;
-+ fb->pitch =
-+ ((fb->width * ((fb->bits_per_pixel + 1) / 8)) + 0x3f) & ~0x3f;
-+
-+ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
-+ if (!info) {
-+ kfree(fb);
-+ return -ENOMEM;
-+ }
-+
-+ ret = drm_buffer_object_create(dev,
-+ fb->pitch * fb->height,
-+ drm_bo_type_kernel,
-+ DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_WRITE |
-+ DRM_BO_FLAG_MEM_TT |
-+ DRM_BO_FLAG_MEM_VRAM |
-+ DRM_BO_FLAG_NO_EVICT,
-+ DRM_BO_HINT_DONT_FENCE, 0, 0, &fbo);
-+ if (ret || !fbo) {
-+ DRM_ERROR("failed to allocate framebuffer\n");
-+ goto out_err0;
-+ }
-+
-+ fb->offset = fbo->offset - dev_priv->pg->gatt_start;
-+ fb->bo = fbo;
-+ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
-+ fb->height, fb->offset, fbo);
-+
-+ fb->fbdev = info;
-+
-+ par = info->par;
-+
-+ par->dev = dev;
-+ par->crtc = crtc;
-+ par->vi = psbfb_vm_info_create();
-+ if (!par->vi)
-+ goto out_err1;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ par->vi->bo = fbo;
-+ atomic_inc(&fbo->usage);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ par->vi->f_mapping = NULL;
-+ info->fbops = &psbfb_ops;
-+
-+ strcpy(info->fix.id, "psbfb");
-+ info->fix.type = FB_TYPE_PACKED_PIXELS;
-+ info->fix.visual = FB_VISUAL_DIRECTCOLOR;
-+ info->fix.type_aux = 0;
-+ info->fix.xpanstep = 1;
-+ info->fix.ypanstep = 1;
-+ info->fix.ywrapstep = 0;
-+ info->fix.accel = FB_ACCEL_NONE; /* ??? */
-+ info->fix.type_aux = 0;
-+ info->fix.mmio_start = 0;
-+ info->fix.mmio_len = 0;
-+ info->fix.line_length = fb->pitch;
-+ info->fix.smem_start = dev->mode_config.fb_base + fb->offset;
-+ info->fix.smem_len = info->fix.line_length * fb->height;
-+
-+ info->flags = FBINFO_DEFAULT |
-+ FBINFO_PARTIAL_PAN_OK /*| FBINFO_MISC_ALWAYS_SETPAR */ ;
-+
-+ ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
-+ if (ret) {
-+ DRM_ERROR("error mapping fb: %d\n", ret);
-+ goto out_err2;
-+ }
-+
-+ info->screen_base = drm_bmo_virtual(&fb->kmap, &is_iomem);
-+ memset(info->screen_base, 0x00, fb->pitch*fb->height);
-+ info->screen_size = info->fix.smem_len; /* FIXME */
-+ info->pseudo_palette = fb->pseudo_palette;
-+ info->var.xres_virtual = fb->width;
-+ info->var.yres_virtual = fb->height;
-+ info->var.bits_per_pixel = fb->bits_per_pixel;
-+ info->var.xoffset = 0;
-+ info->var.yoffset = 0;
-+ info->var.activate = FB_ACTIVATE_NOW;
-+ info->var.height = -1;
-+ info->var.width = -1;
-+ info->var.vmode = FB_VMODE_NONINTERLACED;
-+
-+ info->var.xres = mode->hdisplay;
-+ info->var.right_margin = mode->hsync_start - mode->hdisplay;
-+ info->var.hsync_len = mode->hsync_end - mode->hsync_start;
-+ info->var.left_margin = mode->htotal - mode->hsync_end;
-+ info->var.yres = mode->vdisplay;
-+ info->var.lower_margin = mode->vsync_start - mode->vdisplay;
-+ info->var.vsync_len = mode->vsync_end - mode->vsync_start;
-+ info->var.upper_margin = mode->vtotal - mode->vsync_end;
-+ info->var.pixclock = 10000000 / mode->htotal * 1000 /
-+ mode->vtotal * 100;
-+ /* avoid overflow */
-+ info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
-+
-+ info->pixmap.size = 64 * 1024;
-+ info->pixmap.buf_align = 8;
-+ info->pixmap.access_align = 32;
-+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
-+ info->pixmap.scan_align = 1;
-+
-+ DRM_DEBUG("fb depth is %d\n", fb->depth);
-+ DRM_DEBUG(" pitch is %d\n", fb->pitch);
-+ switch (fb->depth) {
-+ case 8:
-+ info->var.red.offset = 0;
-+ info->var.green.offset = 0;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 8; /* 8bit DAC */
-+ info->var.green.length = 8;
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 0;
-+ info->var.transp.length = 0;
-+ break;
-+ case 15:
-+ info->var.red.offset = 10;
-+ info->var.green.offset = 5;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = info->var.green.length =
-+ info->var.blue.length = 5;
-+ info->var.transp.offset = 15;
-+ info->var.transp.length = 1;
-+ break;
-+ case 16:
-+ info->var.red.offset = 11;
-+ info->var.green.offset = 5;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = 5;
-+ info->var.green.length = 6;
-+ info->var.blue.length = 5;
-+ info->var.transp.offset = 0;
-+ break;
-+ case 24:
-+ info->var.red.offset = 16;
-+ info->var.green.offset = 8;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = info->var.green.length =
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 0;
-+ info->var.transp.length = 0;
-+ break;
-+ case 32:
-+ info->var.red.offset = 16;
-+ info->var.green.offset = 8;
-+ info->var.blue.offset = 0;
-+ info->var.red.length = info->var.green.length =
-+ info->var.blue.length = 8;
-+ info->var.transp.offset = 24;
-+ info->var.transp.length = 8;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ if (register_framebuffer(info) < 0)
-+ goto out_err3;
-+
-+ if (psbfb_check_var(&info->var, info) < 0)
-+ goto out_err4;
-+
-+ psbfb_set_par(info);
-+
-+ DRM_INFO("fb%d: %s frame buffer device\n", info->node, info->fix.id);
-+
-+ return 0;
-+ out_err4:
-+ unregister_framebuffer(info);
-+ out_err3:
-+ drm_bo_kunmap(&fb->kmap);
-+ out_err2:
-+ psbfb_vm_info_deref(&par->vi);
-+ out_err1:
-+ drm_bo_usage_deref_unlocked(&fb->bo);
-+ out_err0:
-+ drm_framebuffer_cleanup(fb);
-+ framebuffer_release(info);
-+ crtc->fb = NULL;
-+ return -EINVAL;
-+}
-+
-+EXPORT_SYMBOL(psbfb_probe);
-+
-+int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
-+{
-+ struct drm_framebuffer *fb;
-+ struct fb_info *info;
-+ struct psbfb_par *par;
-+
-+ if (drm_psb_no_fb)
-+ return 0;
-+
-+ fb = crtc->fb;
-+ info = fb->fbdev;
-+
-+ if (info) {
-+ unregister_framebuffer(info);
-+ drm_bo_kunmap(&fb->kmap);
-+ par = info->par;
-+ if (par)
-+ psbfb_vm_info_deref(&par->vi);
-+ drm_bo_usage_deref_unlocked(&fb->bo);
-+ drm_framebuffer_cleanup(fb);
-+ framebuffer_release(info);
-+ }
-+ return 0;
-+}
-+
-+EXPORT_SYMBOL(psbfb_remove);
-+
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_fence.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_fence.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,285 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+
-+static void psb_poll_ta(struct drm_device *dev, uint32_t waiting_types)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_fence_driver *driver = dev->driver->fence_driver;
-+ uint32_t cur_flag = 1;
-+ uint32_t flags = 0;
-+ uint32_t sequence = 0;
-+ uint32_t remaining = 0xFFFFFFFF;
-+ uint32_t diff;
-+
-+ struct psb_scheduler *scheduler;
-+ struct psb_scheduler_seq *seq;
-+ struct drm_fence_class_manager *fc =
-+ &dev->fm.fence_class[PSB_ENGINE_TA];
-+
-+ if (unlikely(!dev_priv))
-+ return;
-+
-+ scheduler = &dev_priv->scheduler;
-+ seq = scheduler->seq;
-+
-+ while (likely(waiting_types & remaining)) {
-+ if (!(waiting_types & cur_flag))
-+ goto skip;
-+ if (seq->reported)
-+ goto skip;
-+ if (flags == 0)
-+ sequence = seq->sequence;
-+ else if (sequence != seq->sequence) {
-+ drm_fence_handler(dev, PSB_ENGINE_TA,
-+ sequence, flags, 0);
-+ sequence = seq->sequence;
-+ flags = 0;
-+ }
-+ flags |= cur_flag;
-+
-+ /*
-+ * Sequence may not have ended up on the ring yet.
-+ * In that case, report it but don't mark it as
-+ * reported. A subsequent poll will report it again.
-+ */
-+
-+ diff = (fc->latest_queued_sequence - sequence) &
-+ driver->sequence_mask;
-+ if (diff < driver->wrap_diff)
-+ seq->reported = 1;
-+
-+ skip:
-+ cur_flag <<= 1;
-+ remaining <<= 1;
-+ seq++;
-+ }
-+
-+ if (flags) {
-+ drm_fence_handler(dev, PSB_ENGINE_TA, sequence, flags, 0);
-+ }
-+}
-+
-+static void psb_poll_other(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t waiting_types)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-+ uint32_t sequence;
-+
-+ if (unlikely(!dev_priv))
-+ return;
-+
-+ if (waiting_types) {
-+ if (fence_class == PSB_ENGINE_VIDEO)
-+ sequence = dev_priv->msvdx_current_sequence;
-+ else
-+ sequence = dev_priv->comm[fence_class << 4];
-+
-+ drm_fence_handler(dev, fence_class, sequence,
-+ DRM_FENCE_TYPE_EXE, 0);
-+
-+ switch (fence_class) {
-+ case PSB_ENGINE_2D:
-+ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
-+ psb_2D_irq_off(dev_priv);
-+ dev_priv->fence0_irq_on = 0;
-+ } else if (!dev_priv->fence0_irq_on
-+ && fc->waiting_types) {
-+ psb_2D_irq_on(dev_priv);
-+ dev_priv->fence0_irq_on = 1;
-+ }
-+ break;
-+#if 0
-+ /*
-+ * FIXME: MSVDX irq switching
-+ */
-+
-+ case PSB_ENGINE_VIDEO:
-+ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
-+ psb_msvdx_irq_off(dev_priv);
-+ dev_priv->fence2_irq_on = 0;
-+ } else if (!dev_priv->fence2_irq_on
-+ && fc->pending_exe_flush) {
-+ psb_msvdx_irq_on(dev_priv);
-+ dev_priv->fence2_irq_on = 1;
-+ }
-+ break;
-+#endif
-+ default:
-+ return;
-+ }
-+ }
-+}
-+
-+static void psb_fence_poll(struct drm_device *dev,
-+ uint32_t fence_class, uint32_t waiting_types)
-+{
-+ switch (fence_class) {
-+ case PSB_ENGINE_TA:
-+ psb_poll_ta(dev, waiting_types);
-+ break;
-+ default:
-+ psb_poll_other(dev, fence_class, waiting_types);
-+ break;
-+ }
-+}
-+
-+void psb_fence_error(struct drm_device *dev,
-+ uint32_t fence_class,
-+ uint32_t sequence, uint32_t type, int error)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ unsigned long irq_flags;
-+
-+ BUG_ON(fence_class >= PSB_NUM_ENGINES);
-+ write_lock_irqsave(&fm->lock, irq_flags);
-+ drm_fence_handler(dev, fence_class, sequence, type, error);
-+ write_unlock_irqrestore(&fm->lock, irq_flags);
-+}
-+
-+int psb_fence_emit_sequence(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t flags, uint32_t * sequence,
-+ uint32_t * native_type)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ uint32_t seq = 0;
-+ int ret;
-+
-+ if (!dev_priv)
-+ return -EINVAL;
-+
-+ if (fence_class >= PSB_NUM_ENGINES)
-+ return -EINVAL;
-+
-+ switch (fence_class) {
-+ case PSB_ENGINE_2D:
-+ spin_lock(&dev_priv->sequence_lock);
-+ seq = ++dev_priv->sequence[fence_class];
-+ spin_unlock(&dev_priv->sequence_lock);
-+ ret = psb_blit_sequence(dev_priv, seq);
-+ if (ret)
-+ return ret;
-+ break;
-+ case PSB_ENGINE_VIDEO:
-+ spin_lock(&dev_priv->sequence_lock);
-+ seq = ++dev_priv->sequence[fence_class];
-+ spin_unlock(&dev_priv->sequence_lock);
-+ break;
-+ default:
-+ spin_lock(&dev_priv->sequence_lock);
-+ seq = dev_priv->sequence[fence_class];
-+ spin_unlock(&dev_priv->sequence_lock);
-+ }
-+
-+ *sequence = seq;
-+ *native_type = DRM_FENCE_TYPE_EXE;
-+
-+ return 0;
-+}
-+
-+uint32_t psb_fence_advance_sequence(struct drm_device * dev,
-+ uint32_t fence_class)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ uint32_t sequence;
-+
-+ spin_lock(&dev_priv->sequence_lock);
-+ sequence = ++dev_priv->sequence[fence_class];
-+ spin_unlock(&dev_priv->sequence_lock);
-+
-+ return sequence;
-+}
-+
-+void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
-+{
-+ struct drm_fence_manager *fm = &dev->fm;
-+ struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
-+
-+#ifdef FIX_TG_16
-+ if (fence_class == 0) {
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
-+ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
-+ _PSB_C2B_STATUS_BUSY) == 0))
-+ psb_resume_ta_2d_idle(dev_priv);
-+ }
-+#endif
-+ write_lock(&fm->lock);
-+ psb_fence_poll(dev, fence_class, fc->waiting_types);
-+ write_unlock(&fm->lock);
-+}
-+
-+static int psb_fence_wait(struct drm_fence_object *fence,
-+ int lazy, int interruptible, uint32_t mask)
-+{
-+ struct drm_device *dev = fence->dev;
-+ struct drm_fence_class_manager *fc =
-+ &dev->fm.fence_class[fence->fence_class];
-+ int ret = 0;
-+ unsigned long timeout = DRM_HZ *
-+ ((fence->fence_class == PSB_ENGINE_TA) ? 30 : 3);
-+
-+ drm_fence_object_flush(fence, mask);
-+ if (interruptible)
-+ ret = wait_event_interruptible_timeout
-+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
-+ timeout);
-+ else
-+ ret = wait_event_timeout
-+ (fc->fence_queue, drm_fence_object_signaled(fence, mask),
-+ timeout);
-+
-+ if (unlikely(ret == -ERESTARTSYS))
-+ return -EAGAIN;
-+
-+ if (unlikely(ret == 0))
-+ return -EBUSY;
-+
-+ return 0;
-+}
-+
-+struct drm_fence_driver psb_fence_driver = {
-+ .num_classes = PSB_NUM_ENGINES,
-+ .wrap_diff = (1 << 30),
-+ .flush_diff = (1 << 29),
-+ .sequence_mask = 0xFFFFFFFFU,
-+ .has_irq = NULL,
-+ .emit = psb_fence_emit_sequence,
-+ .flush = NULL,
-+ .poll = psb_fence_poll,
-+ .needed_flush = NULL,
-+ .wait = psb_fence_wait
-+};
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_gtt.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_gtt.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,253 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+#include "drmP.h"
-+#include "psb_drv.h"
-+
-+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
-+{
-+ uint32_t mask = PSB_PTE_VALID;
-+
-+ if (type & PSB_MMU_CACHED_MEMORY)
-+ mask |= PSB_PTE_CACHED;
-+ if (type & PSB_MMU_RO_MEMORY)
-+ mask |= PSB_PTE_RO;
-+ if (type & PSB_MMU_WO_MEMORY)
-+ mask |= PSB_PTE_WO;
-+
-+ return (pfn << PAGE_SHIFT) | mask;
-+}
-+
-+struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
-+{
-+ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
-+
-+ if (!tmp)
-+ return NULL;
-+
-+ init_rwsem(&tmp->sem);
-+ tmp->dev = dev;
-+
-+ return tmp;
-+}
-+
-+void psb_gtt_takedown(struct psb_gtt *pg, int free)
-+{
-+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
-+
-+ if (!pg)
-+ return;
-+
-+ if (pg->gtt_map) {
-+ iounmap(pg->gtt_map);
-+ pg->gtt_map = NULL;
-+ }
-+ if (pg->initialized) {
-+ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
-+ pg->gmch_ctrl);
-+ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
-+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
-+ }
-+ if (free)
-+ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
-+}
-+
-+int psb_gtt_init(struct psb_gtt *pg, int resume)
-+{
-+ struct drm_device *dev = pg->dev;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned gtt_pages;
-+ unsigned long stolen_size;
-+ unsigned i, num_pages;
-+ unsigned pfn_base;
-+
-+ int ret = 0;
-+ uint32_t pte;
-+
-+ printk(KERN_ERR "Bar A1\n");
-+
-+ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
-+ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
-+ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
-+
-+ printk(KERN_ERR "Bar A2\n");
-+
-+ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
-+ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
-+ (void)PSB_RVDC32(PSB_PGETBL_CTL);
-+
-+ printk(KERN_ERR "Bar A3\n");
-+
-+ pg->initialized = 1;
-+
-+ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
-+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
-+ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
-+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
-+ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
-+ >> PAGE_SHIFT;
-+
-+ printk(KERN_ERR "Bar A4\n");
-+ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
-+ stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
-+
-+ printk(KERN_ERR "Bar A5\n");
-+
-+ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
-+ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
-+ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
-+ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
-+ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
-+
-+ if (resume && (gtt_pages != pg->gtt_pages) &&
-+ (stolen_size != pg->stolen_size)) {
-+ DRM_ERROR("GTT resume error.\n");
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ printk(KERN_ERR "Bar A6\n");
-+
-+ pg->gtt_pages = gtt_pages;
-+ pg->stolen_size = stolen_size;
-+ pg->gtt_map =
-+ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
-+ if (!pg->gtt_map) {
-+ DRM_ERROR("Failure to map gtt.\n");
-+ ret = -ENOMEM;
-+ goto out_err;
-+ }
-+
-+ printk(KERN_ERR "Bar A7\n");
-+
-+ /*
-+ * insert stolen pages.
-+ */
-+
-+ pfn_base = pg->stolen_base >> PAGE_SHIFT;
-+ num_pages = stolen_size >> PAGE_SHIFT;
-+ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
-+ num_pages, pfn_base);
-+ for (i = 0; i < num_pages; ++i) {
-+ pte = psb_gtt_mask_pte(pfn_base + i, 0);
-+ iowrite32(pte, pg->gtt_map + i);
-+ }
-+
-+ printk(KERN_ERR "Bar A8\n");
-+
-+ /*
-+ * Init rest of gtt.
-+ */
-+
-+ pfn_base = page_to_pfn(dev_priv->scratch_page);
-+ pte = psb_gtt_mask_pte(pfn_base, 0);
-+ PSB_DEBUG_INIT("Initializing the rest of a total "
-+ "of %d gtt pages.\n", pg->gatt_pages);
-+
-+ printk(KERN_ERR "Bar A10\n");
-+
-+ for (; i < pg->gatt_pages; ++i)
-+ iowrite32(pte, pg->gtt_map + i);
-+ (void)ioread32(pg->gtt_map + i - 1);
-+
-+ printk(KERN_ERR "Bar A11\n");
-+
-+ return 0;
-+
-+ out_err:
-+ psb_gtt_takedown(pg, 0);
-+ return ret;
-+}
-+
-+int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
-+ unsigned offset_pages, unsigned num_pages,
-+ unsigned desired_tile_stride, unsigned hw_tile_stride,
-+ int type)
-+{
-+ unsigned rows = 1;
-+ unsigned add;
-+ unsigned row_add;
-+ unsigned i;
-+ unsigned j;
-+ uint32_t *cur_page = NULL;
-+ uint32_t pte;
-+
-+ if (hw_tile_stride)
-+ rows = num_pages / desired_tile_stride;
-+ else
-+ desired_tile_stride = num_pages;
-+
-+ add = desired_tile_stride;
-+ row_add = hw_tile_stride;
-+
-+ down_read(&pg->sem);
-+ for (i = 0; i < rows; ++i) {
-+ cur_page = pg->gtt_map + offset_pages;
-+ for (j = 0; j < desired_tile_stride; ++j) {
-+ pte = psb_gtt_mask_pte(page_to_pfn(*pages++), type);
-+ iowrite32(pte, cur_page++);
-+ }
-+ offset_pages += add;
-+ }
-+ (void)ioread32(cur_page - 1);
-+ up_read(&pg->sem);
-+
-+ return 0;
-+}
-+
-+int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
-+ unsigned num_pages, unsigned desired_tile_stride,
-+ unsigned hw_tile_stride)
-+{
-+ struct drm_psb_private *dev_priv = pg->dev->dev_private;
-+ unsigned rows = 1;
-+ unsigned add;
-+ unsigned row_add;
-+ unsigned i;
-+ unsigned j;
-+ uint32_t *cur_page = NULL;
-+ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
-+ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
-+
-+ if (hw_tile_stride)
-+ rows = num_pages / desired_tile_stride;
-+ else
-+ desired_tile_stride = num_pages;
-+
-+ add = desired_tile_stride;
-+ row_add = hw_tile_stride;
-+
-+ down_read(&pg->sem);
-+ for (i = 0; i < rows; ++i) {
-+ cur_page = pg->gtt_map + offset_pages;
-+ for (j = 0; j < desired_tile_stride; ++j) {
-+ iowrite32(pte, cur_page++);
-+ }
-+ offset_pages += add;
-+ }
-+ (void)ioread32(cur_page - 1);
-+ up_read(&pg->sem);
-+
-+ return 0;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_irq.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_irq.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,519 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "psb_msvdx.h"
-+#include "../i915/i915_reg.h"
-+
-+/*
-+ * Video display controller interrupt.
-+ */
-+
-+static inline u32
-+psb_pipestat(int pipe)
-+{
-+ if (pipe == 0)
-+ return PIPEASTAT;
-+ if (pipe == 1)
-+ return PIPEBSTAT;
-+ BUG();
-+}
-+
-+void
-+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
-+{
-+ //struct drm_i915_common_private *dev_priv_common = dev_priv;
-+
-+ if ((dev_priv->pipestat[pipe] & mask) != mask) {
-+ u32 reg = psb_pipestat(pipe);
-+
-+ dev_priv->pipestat[pipe] |= mask;
-+ /* Enable the interrupt, clear any pending status */
-+ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
-+ (void) I915_READ(reg);
-+ }
-+}
-+
-+void
-+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
-+{
-+ //struct drm_i915_common_private *dev_priv_common = (struct drm_i915_common_private *) dev_priv;
-+
-+ if ((dev_priv->pipestat[pipe] & mask) != 0) {
-+ u32 reg = psb_pipestat(pipe);
-+
-+ dev_priv->pipestat[pipe] &= ~mask;
-+ I915_WRITE(reg, dev_priv->pipestat[pipe]);
-+ (void) I915_READ(reg);
-+ }
-+}
-+
-+
-+/**
-+ * i915_pipe_enabled - check if a pipe is enabled
-+ * @dev: DRM device
-+ * @pipe: pipe to check
-+ *
-+ * Reading certain registers when the pipe is disabled can hang the chip.
-+ * Use this routine to make sure the PLL is running and the pipe is active
-+ * before reading such registers if unsure.
-+ */
-+static int
-+i915_pipe_enabled(struct drm_device *dev, int pipe)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-+
-+ if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/* Called from drm generic code, passed a 'crtc', which
-+ * we use as a pipe index
-+ */
-+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned long high_frame;
-+ unsigned long low_frame;
-+ u32 high1, high2, low, count;
-+
-+ high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
-+ low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
-+
-+ if (!i915_pipe_enabled(dev, pipe)) {
-+ DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
-+ return 0;
-+ }
-+
-+ /*
-+ * High & low register fields aren't synchronized, so make sure
-+ * we get a low value that's stable across two reads of the high
-+ * register.
-+ */
-+ do {
-+ high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-+ PIPE_FRAME_HIGH_SHIFT);
-+ low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
-+ PIPE_FRAME_LOW_SHIFT);
-+ high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-+ PIPE_FRAME_HIGH_SHIFT);
-+ } while (high1 != high2);
-+
-+ count = (high1 << 8) | low;
-+
-+ return count;
-+}
-+
-+/* Called from drm generic code, passed 'crtc' which
-+ * we use as a pipe index
-+ */
-+int psb_enable_vblank(struct drm_device *dev, int pipe)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned long irqflags;
-+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-+ u32 pipeconf;
-+
-+ pipeconf = I915_READ(pipeconf_reg);
-+ if (!(pipeconf & PIPEACONF_ENABLE))
-+ return -EINVAL;
-+
-+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-+ psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
-+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-+ return 0;
-+}
-+
-+/* Called from drm generic code, passed 'crtc' which
-+ * we use as a pipe index
-+ */
-+void psb_disable_vblank(struct drm_device *dev, int pipe)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-+ psb_disable_pipestat(dev_priv, pipe,
-+ PIPE_VBLANK_INTERRUPT_ENABLE |
-+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
-+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-+}
-+
-+
-+
-+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private; uint32_t pipe_stats;
-+ int wake = 0;
-+
-+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
-+ pipe_stats = PSB_RVDC32(PSB_PIPEASTAT);
-+ atomic_inc(&dev->_vblank_count[0]);
-+ wake = 1;
-+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
-+ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
-+ }
-+
-+ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
-+ pipe_stats = PSB_RVDC32(PSB_PIPEBSTAT);
-+ atomic_inc(&dev->_vblank_count[1]);
-+ wake = 1;
-+ PSB_WVDC32(pipe_stats | _PSB_VBLANK_INTERRUPT_ENABLE |
-+ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
-+ }
-+
-+ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
-+ (void)PSB_RVDC32(PSB_INT_IDENTITY_R);
-+ DRM_READMEMORYBARRIER();
-+
-+ if (wake) {
-+ int i;
-+ DRM_WAKEUP(dev->vbl_queue);
-+
-+ for (i = 0; i < 2; i++)
-+ drm_vbl_send_signals(dev, i);
-+ }
-+}
-+
-+/*
-+ * SGX interrupt source 1.
-+ */
-+
-+static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
-+ uint32_t sgx_stat2)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+
-+ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
-+ DRM_WAKEUP(&dev_priv->event_2d_queue);
-+ psb_fence_handler(dev, 0);
-+ }
-+
-+ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
-+ psb_print_pagefault(dev_priv);
-+
-+ psb_scheduler_handler(dev_priv, sgx_stat);
-+}
-+
-+/*
-+ * MSVDX interrupt.
-+ */
-+static void psb_msvdx_interrupt(struct drm_device *dev, uint32_t msvdx_stat)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
-+ /*Ideally we should we should never get to this */
-+ PSB_DEBUG_GENERAL
-+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MMU FAULT)\n",
-+ msvdx_stat, dev_priv->fence2_irq_on);
-+
-+ /* Pause MMU */
-+ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
-+ MSVDX_MMU_CONTROL0);
-+ DRM_WRITEMEMORYBARRIER();
-+
-+ /* Clear this interupt bit only */
-+ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
-+ MSVDX_INTERRUPT_CLEAR);
-+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
-+ DRM_READMEMORYBARRIER();
-+
-+ dev_priv->msvdx_needs_reset = 1;
-+ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
-+ PSB_DEBUG_GENERAL
-+ ("******MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d ***** (MTX)\n",
-+ msvdx_stat, dev_priv->fence2_irq_on);
-+
-+ /* Clear all interupt bits */
-+ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
-+ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
-+ DRM_READMEMORYBARRIER();
-+
-+ psb_msvdx_mtx_interrupt(dev);
-+ }
-+}
-+
-+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
-+{
-+ struct drm_device *dev = (struct drm_device *)arg;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ uint32_t vdc_stat;
-+ uint32_t sgx_stat;
-+ uint32_t sgx_stat2;
-+ uint32_t msvdx_stat;
-+ int handled = 0;
-+
-+ spin_lock(&dev_priv->irqmask_lock);
-+
-+ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
-+ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
-+ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
-+ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
-+
-+ sgx_stat2 &= dev_priv->sgx2_irq_mask;
-+ sgx_stat &= dev_priv->sgx_irq_mask;
-+ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
-+ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
-+
-+ vdc_stat &= dev_priv->vdc_irq_mask;
-+ spin_unlock(&dev_priv->irqmask_lock);
-+
-+ if (msvdx_stat) {
-+ psb_msvdx_interrupt(dev, msvdx_stat);
-+ handled = 1;
-+ }
-+
-+ if (vdc_stat) {
-+ /* MSVDX IRQ status is part of vdc_irq_mask */
-+ psb_vdc_interrupt(dev, vdc_stat);
-+ handled = 1;
-+ }
-+
-+ if (sgx_stat || sgx_stat2) {
-+ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
-+ handled = 1;
-+ }
-+
-+ if (!handled) {
-+ return IRQ_NONE;
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-+void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long mtx_int = 0;
-+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
-+
-+ /*Clear MTX interrupt */
-+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
-+}
-+
-+void psb_irq_preinstall(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ spin_lock(&dev_priv->irqmask_lock);
-+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
-+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
-+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+
-+ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
-+ _PSB_CE_DPM_3D_MEM_FREE |
-+ _PSB_CE_TA_FINISHED |
-+ _PSB_CE_DPM_REACHED_MEM_THRESH |
-+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
-+ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
-+ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
-+
-+ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
-+
-+ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG;
-+
-+ if (!drm_psb_disable_vsync)
-+ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
-+ _PSB_VSYNC_PIPEB_FLAG;
-+
-+ /*Clear MTX interrupt */
-+ {
-+ unsigned long mtx_int = 0;
-+ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
-+ CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
-+ }
-+ spin_unlock(&dev_priv->irqmask_lock);
-+}
-+
-+void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
-+{
-+ /* Enable Mtx Interupt to host */
-+ unsigned long enables = 0;
-+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
-+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
-+}
-+
-+int psb_irq_postinstall(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+ /****MSVDX IRQ Setup...*****/
-+ /* Enable Mtx Interupt to host */
-+ {
-+ unsigned long enables = 0;
-+ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
-+ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
-+ CR_MTX_IRQ, 1);
-+ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
-+ }
-+ dev_priv->irq_enabled = 1;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+ return 0;
-+}
-+
-+void psb_irq_uninstall(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+
-+ dev_priv->sgx_irq_mask = 0x00000000;
-+ dev_priv->sgx2_irq_mask = 0x00000000;
-+ dev_priv->vdc_irq_mask = 0x00000000;
-+
-+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
-+ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
-+ wmb();
-+ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS), PSB_CR_EVENT_HOST_CLEAR);
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2), PSB_CR_EVENT_HOST_CLEAR2);
-+
-+ /****MSVDX IRQ Setup...*****/
-+ /* Clear interrupt enabled flag */
-+ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
-+
-+ dev_priv->irq_enabled = 0;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+
-+}
-+
-+void psb_2D_irq_off(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+ uint32_t old_mask;
-+ uint32_t cleared_mask;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ --dev_priv->irqen_count_2d;
-+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
-+
-+ old_mask = dev_priv->sgx_irq_mask;
-+ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+
-+ cleared_mask = (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
-+ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+
-+void psb_2D_irq_on(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
-+ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
-+ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
-+ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
-+ }
-+ ++dev_priv->irqen_count_2d;
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+#if 0
-+static int psb_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
-+ atomic_t * counter, int crtc)
-+{
-+ unsigned int cur_vblank;
-+ int ret = 0;
-+
-+ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
-+ (((cur_vblank = atomic_read(counter))
-+ - *sequence) <= (1 << 23)));
-+
-+ *sequence = cur_vblank;
-+
-+ return ret;
-+}
-+
-+int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-+{
-+ int ret;
-+
-+ ret = psb_vblank_do_wait(dev, sequence, &dev->_vblank_count[0], 0);
-+ return ret;
-+}
-+
-+int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-+{
-+ int ret;
-+
-+ ret = psb_vblank_do_wait(dev, sequence, &dev->_vblank_count[1], 1);
-+ return ret;
-+}
-+#endif
-+
-+void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled) {
-+ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
-+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ (void)PSB_RSGX32(PSB_INT_ENABLE_R);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-+
-+void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irqflags;
-+
-+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-+ if (dev_priv->irq_enabled) {
-+ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
-+ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
-+ (void)PSB_RSGX32(PSB_INT_ENABLE_R);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_mmu.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_mmu.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,1037 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+
-+/*
-+ * Code for the SGX MMU:
-+ */
-+
-+/*
-+ * clflush on one processor only:
-+ * clflush should apparently flush the cache line on all processors in an
-+ * SMP system.
-+ */
-+
-+/*
-+ * kmap atomic:
-+ * The usage of the slots must be completely encapsulated within a spinlock, and
-+ * no other functions that may be using the locks for other purposed may be
-+ * called from within the locked region.
-+ * Since the slots are per processor, this will guarantee that we are the only
-+ * user.
-+ */
-+
-+/*
-+ * TODO: Inserting ptes from an interrupt handler:
-+ * This may be desirable for some SGX functionality where the GPU can fault in
-+ * needed pages. For that, we need to make an atomic insert_pages function, that
-+ * may fail.
-+ * If it fails, the caller need to insert the page using a workqueue function,
-+ * but on average it should be fast.
-+ */
-+
-+struct psb_mmu_driver {
-+ /* protects driver- and pd structures. Always take in read mode
-+ * before taking the page table spinlock.
-+ */
-+ struct rw_semaphore sem;
-+
-+ /* protects page tables, directory tables and pt tables.
-+ * and pt structures.
-+ */
-+ spinlock_t lock;
-+
-+ atomic_t needs_tlbflush;
-+ atomic_t *msvdx_mmu_invaldc;
-+ uint8_t __iomem *register_map;
-+ struct psb_mmu_pd *default_pd;
-+ uint32_t bif_ctrl;
-+ int has_clflush;
-+ int clflush_add;
-+ unsigned long clflush_mask;
-+};
-+
-+struct psb_mmu_pd;
-+
-+struct psb_mmu_pt {
-+ struct psb_mmu_pd *pd;
-+ uint32_t index;
-+ uint32_t count;
-+ struct page *p;
-+ uint32_t *v;
-+};
-+
-+struct psb_mmu_pd {
-+ struct psb_mmu_driver *driver;
-+ int hw_context;
-+ struct psb_mmu_pt **tables;
-+ struct page *p;
-+ struct page *dummy_pt;
-+ struct page *dummy_page;
-+ uint32_t pd_mask;
-+ uint32_t invalid_pde;
-+ uint32_t invalid_pte;
-+};
-+
-+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
-+{
-+ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
-+}
-+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
-+{
-+ return (offset >> PSB_PDE_SHIFT);
-+}
-+
-+#if defined(CONFIG_X86)
-+static inline void psb_clflush(void *addr)
-+{
-+ __asm__ __volatile__("clflush (%0)\n"::"r"(addr):"memory");
-+}
-+
-+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
-+{
-+ if (!driver->has_clflush)
-+ return;
-+
-+ mb();
-+ psb_clflush(addr);
-+ mb();
-+}
-+#else
-+
-+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
-+{;
-+}
-+
-+#endif
-+
-+static inline void psb_iowrite32(const struct psb_mmu_driver *d,
-+ uint32_t val, uint32_t offset)
-+{
-+ iowrite32(val, d->register_map + offset);
-+}
-+
-+static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
-+ uint32_t offset)
-+{
-+ return ioread32(d->register_map + offset);
-+}
-+
-+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
-+{
-+ if (atomic_read(&driver->needs_tlbflush) || force) {
-+ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
-+ PSB_CR_BIF_CTRL);
-+ wmb();
-+ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
-+ PSB_CR_BIF_CTRL);
-+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ if (driver->msvdx_mmu_invaldc)
-+ atomic_set(driver->msvdx_mmu_invaldc, 1);
-+ }
-+ atomic_set(&driver->needs_tlbflush, 0);
-+}
-+
-+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
-+{
-+ down_write(&driver->sem);
-+ psb_mmu_flush_pd_locked(driver, force);
-+ up_write(&driver->sem);
-+}
-+
-+void psb_mmu_flush(struct psb_mmu_driver *driver)
-+{
-+ uint32_t val;
-+
-+ down_write(&driver->sem);
-+ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ if (atomic_read(&driver->needs_tlbflush))
-+ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
-+ PSB_CR_BIF_CTRL);
-+ else
-+ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
-+ PSB_CR_BIF_CTRL);
-+ wmb();
-+ psb_iowrite32(driver,
-+ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
-+ PSB_CR_BIF_CTRL);
-+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ atomic_set(&driver->needs_tlbflush, 0);
-+ if (driver->msvdx_mmu_invaldc)
-+ atomic_set(driver->msvdx_mmu_invaldc, 1);
-+ up_write(&driver->sem);
-+}
-+
-+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
-+{
-+ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
-+ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
-+
-+ drm_ttm_cache_flush();
-+ down_write(&pd->driver->sem);
-+ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT), offset);
-+ wmb();
-+ psb_mmu_flush_pd_locked(pd->driver, 1);
-+ pd->hw_context = hw_context;
-+ up_write(&pd->driver->sem);
-+
-+}
-+
-+static inline unsigned long psb_pd_addr_end(unsigned long addr,
-+ unsigned long end)
-+{
-+
-+ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
-+ return (addr < end) ? addr : end;
-+}
-+
-+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
-+{
-+ uint32_t mask = PSB_PTE_VALID;
-+
-+ if (type & PSB_MMU_CACHED_MEMORY)
-+ mask |= PSB_PTE_CACHED;
-+ if (type & PSB_MMU_RO_MEMORY)
-+ mask |= PSB_PTE_RO;
-+ if (type & PSB_MMU_WO_MEMORY)
-+ mask |= PSB_PTE_WO;
-+
-+ return (pfn << PAGE_SHIFT) | mask;
-+}
-+
-+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
-+ int trap_pagefaults, int invalid_type)
-+{
-+ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
-+ uint32_t *v;
-+ int i;
-+
-+ if (!pd)
-+ return NULL;
-+
-+ pd->p = alloc_page(GFP_DMA32);
-+ if (!pd->p)
-+ goto out_err1;
-+ pd->dummy_pt = alloc_page(GFP_DMA32);
-+ if (!pd->dummy_pt)
-+ goto out_err2;
-+ pd->dummy_page = alloc_page(GFP_DMA32);
-+ if (!pd->dummy_page)
-+ goto out_err3;
-+
-+ if (!trap_pagefaults) {
-+ pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
-+ invalid_type |
-+ PSB_MMU_CACHED_MEMORY);
-+ pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
-+ invalid_type |
-+ PSB_MMU_CACHED_MEMORY);
-+ } else {
-+ pd->invalid_pde = 0;
-+ pd->invalid_pte = 0;
-+ }
-+
-+ v = kmap(pd->dummy_pt);
-+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
-+ v[i] = pd->invalid_pte;
-+ }
-+ kunmap(pd->dummy_pt);
-+
-+ v = kmap(pd->p);
-+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
-+ v[i] = pd->invalid_pde;
-+ }
-+ kunmap(pd->p);
-+
-+ clear_page(kmap(pd->dummy_page));
-+ kunmap(pd->dummy_page);
-+
-+ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
-+ if (!pd->tables)
-+ goto out_err4;
-+
-+ pd->hw_context = -1;
-+ pd->pd_mask = PSB_PTE_VALID;
-+ pd->driver = driver;
-+
-+ return pd;
-+
-+ out_err4:
-+ __free_page(pd->dummy_page);
-+ out_err3:
-+ __free_page(pd->dummy_pt);
-+ out_err2:
-+ __free_page(pd->p);
-+ out_err1:
-+ kfree(pd);
-+ return NULL;
-+}
-+
-+void psb_mmu_free_pt(struct psb_mmu_pt *pt)
-+{
-+ __free_page(pt->p);
-+ kfree(pt);
-+}
-+
-+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
-+{
-+ struct psb_mmu_driver *driver = pd->driver;
-+ struct psb_mmu_pt *pt;
-+ int i;
-+
-+ down_write(&driver->sem);
-+ if (pd->hw_context != -1) {
-+ psb_iowrite32(driver, 0,
-+ PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
-+ psb_mmu_flush_pd_locked(driver, 1);
-+ }
-+
-+ /* Should take the spinlock here, but we don't need to do that
-+ since we have the semaphore in write mode. */
-+
-+ for (i = 0; i < 1024; ++i) {
-+ pt = pd->tables[i];
-+ if (pt)
-+ psb_mmu_free_pt(pt);
-+ }
-+
-+ vfree(pd->tables);
-+ __free_page(pd->dummy_page);
-+ __free_page(pd->dummy_pt);
-+ __free_page(pd->p);
-+ kfree(pd);
-+ up_write(&driver->sem);
-+}
-+
-+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
-+{
-+ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
-+ void *v;
-+ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
-+ uint32_t clflush_count = PAGE_SIZE / clflush_add;
-+ spinlock_t *lock = &pd->driver->lock;
-+ uint8_t *clf;
-+ uint32_t *ptes;
-+ int i;
-+
-+ if (!pt)
-+ return NULL;
-+
-+ pt->p = alloc_page(GFP_DMA32);
-+ if (!pt->p) {
-+ kfree(pt);
-+ return NULL;
-+ }
-+
-+ spin_lock(lock);
-+
-+ v = kmap_atomic(pt->p, KM_USER0);
-+ clf = (uint8_t *) v;
-+ ptes = (uint32_t *) v;
-+ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) {
-+ *ptes++ = pd->invalid_pte;
-+ }
-+
-+#if defined(CONFIG_X86)
-+ if (pd->driver->has_clflush && pd->hw_context != -1) {
-+ mb();
-+ for (i = 0; i < clflush_count; ++i) {
-+ psb_clflush(clf);
-+ clf += clflush_add;
-+ }
-+ mb();
-+ }
-+#endif
-+ kunmap_atomic(v, KM_USER0);
-+ spin_unlock(lock);
-+
-+ pt->count = 0;
-+ pt->pd = pd;
-+ pt->index = 0;
-+
-+ return pt;
-+}
-+
-+struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
-+ unsigned long addr)
-+{
-+ uint32_t index = psb_mmu_pd_index(addr);
-+ struct psb_mmu_pt *pt;
-+ volatile uint32_t *v;
-+ spinlock_t *lock = &pd->driver->lock;
-+
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ while (!pt) {
-+ spin_unlock(lock);
-+ pt = psb_mmu_alloc_pt(pd);
-+ if (!pt)
-+ return NULL;
-+ spin_lock(lock);
-+
-+ if (pd->tables[index]) {
-+ spin_unlock(lock);
-+ psb_mmu_free_pt(pt);
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ continue;
-+ }
-+
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ pd->tables[index] = pt;
-+ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
-+ pt->index = index;
-+ kunmap_atomic((void *)v, KM_USER0);
-+
-+ if (pd->hw_context != -1) {
-+ psb_mmu_clflush(pd->driver, (void *)&v[index]);
-+ atomic_set(&pd->driver->needs_tlbflush, 1);
-+ }
-+ }
-+ pt->v = kmap_atomic(pt->p, KM_USER0);
-+ return pt;
-+}
-+
-+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
-+ unsigned long addr)
-+{
-+ uint32_t index = psb_mmu_pd_index(addr);
-+ struct psb_mmu_pt *pt;
-+ spinlock_t *lock = &pd->driver->lock;
-+
-+ spin_lock(lock);
-+ pt = pd->tables[index];
-+ if (!pt) {
-+ spin_unlock(lock);
-+ return NULL;
-+ }
-+ pt->v = kmap_atomic(pt->p, KM_USER0);
-+ return pt;
-+}
-+
-+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
-+{
-+ struct psb_mmu_pd *pd = pt->pd;
-+ volatile uint32_t *v;
-+
-+ kunmap_atomic(pt->v, KM_USER0);
-+ if (pt->count == 0) {
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ v[pt->index] = pd->invalid_pde;
-+ pd->tables[pt->index] = NULL;
-+
-+ if (pd->hw_context != -1) {
-+ psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
-+ atomic_set(&pd->driver->needs_tlbflush, 1);
-+ }
-+ kunmap_atomic(pt->v, KM_USER0);
-+ spin_unlock(&pd->driver->lock);
-+ psb_mmu_free_pt(pt);
-+ return;
-+ }
-+ spin_unlock(&pd->driver->lock);
-+}
-+
-+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
-+ uint32_t pte)
-+{
-+ pt->v[psb_mmu_pt_index(addr)] = pte;
-+}
-+
-+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
-+ unsigned long addr)
-+{
-+ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
-+}
-+
-+#if 0
-+static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
-+ uint32_t mmu_offset)
-+{
-+ uint32_t *v;
-+ uint32_t pfn;
-+
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ if (!v) {
-+ printk(KERN_INFO "Could not kmap pde page.\n");
-+ return 0;
-+ }
-+ pfn = v[psb_mmu_pd_index(mmu_offset)];
-+ // printk(KERN_INFO "pde is 0x%08x\n",pfn);
-+ kunmap_atomic(v, KM_USER0);
-+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
-+ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
-+ mmu_offset, pfn);
-+ }
-+ v = ioremap(pfn & 0xFFFFF000, 4096);
-+ if (!v) {
-+ printk(KERN_INFO "Could not kmap pte page.\n");
-+ return 0;
-+ }
-+ pfn = v[psb_mmu_pt_index(mmu_offset)];
-+ // printk(KERN_INFO "pte is 0x%08x\n",pfn);
-+ iounmap(v);
-+ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
-+ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
-+ mmu_offset, pfn);
-+ }
-+ return pfn >> PAGE_SHIFT;
-+}
-+
-+static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
-+ uint32_t mmu_offset, uint32_t gtt_pages)
-+{
-+ uint32_t start;
-+ uint32_t next;
-+
-+ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
-+ mmu_offset, gtt_pages);
-+ down_read(&pd->driver->sem);
-+ start = psb_mmu_check_pte_locked(pd, mmu_offset);
-+ mmu_offset += PAGE_SIZE;
-+ gtt_pages -= 1;
-+ while (gtt_pages--) {
-+ next = psb_mmu_check_pte_locked(pd, mmu_offset);
-+ if (next != start + 1) {
-+ printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n",
-+ start, next);
-+ }
-+ start = next;
-+ mmu_offset += PAGE_SIZE;
-+ }
-+ up_read(&pd->driver->sem);
-+}
-+
-+#endif
-+
-+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
-+ uint32_t mmu_offset, uint32_t gtt_start,
-+ uint32_t gtt_pages)
-+{
-+ uint32_t *v;
-+ uint32_t start = psb_mmu_pd_index(mmu_offset);
-+ struct psb_mmu_driver *driver = pd->driver;
-+
-+ down_read(&driver->sem);
-+ spin_lock(&driver->lock);
-+
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ v += start;
-+
-+ while (gtt_pages--) {
-+ *v++ = gtt_start | pd->pd_mask;
-+ gtt_start += PAGE_SIZE;
-+ }
-+
-+ drm_ttm_cache_flush();
-+ kunmap_atomic(v, KM_USER0);
-+ spin_unlock(&driver->lock);
-+
-+ if (pd->hw_context != -1)
-+ atomic_set(&pd->driver->needs_tlbflush, 1);
-+
-+ up_read(&pd->driver->sem);
-+ psb_mmu_flush_pd(pd->driver, 0);
-+}
-+
-+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
-+{
-+ struct psb_mmu_pd *pd;
-+
-+ down_read(&driver->sem);
-+ pd = driver->default_pd;
-+ up_read(&driver->sem);
-+
-+ return pd;
-+}
-+
-+/* Returns the physical address of the PD shared by sgx/msvdx */
-+uint32_t psb_get_default_pd_addr(struct psb_mmu_driver * driver)
-+{
-+ struct psb_mmu_pd *pd;
-+
-+ pd = psb_mmu_get_default_pd(driver);
-+ return ((page_to_pfn(pd->p) << PAGE_SHIFT));
-+}
-+
-+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
-+{
-+ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
-+ psb_mmu_free_pagedir(driver->default_pd);
-+ kfree(driver);
-+}
-+
-+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
-+ int trap_pagefaults,
-+ int invalid_type,
-+ atomic_t *msvdx_mmu_invaldc)
-+{
-+ struct psb_mmu_driver *driver;
-+
-+ driver = (struct psb_mmu_driver *)kmalloc(sizeof(*driver), GFP_KERNEL);
-+
-+ if (!driver)
-+ return NULL;
-+
-+ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
-+ invalid_type);
-+ if (!driver->default_pd)
-+ goto out_err1;
-+
-+ spin_lock_init(&driver->lock);
-+ init_rwsem(&driver->sem);
-+ down_write(&driver->sem);
-+ driver->register_map = registers;
-+ atomic_set(&driver->needs_tlbflush, 1);
-+ driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
-+
-+ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
-+ PSB_CR_BIF_CTRL);
-+ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
-+ PSB_CR_BIF_CTRL);
-+
-+ driver->has_clflush = 0;
-+
-+#if defined(CONFIG_X86)
-+ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
-+ uint32_t tfms, misc, cap0, cap4, clflush_size;
-+
-+ /*
-+ * clflush size is determined at kernel setup for x86_64 but not for
-+ * i386. We have to do it here.
-+ */
-+
-+ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
-+ clflush_size = ((misc >> 8) & 0xff) * 8;
-+ driver->has_clflush = 1;
-+ driver->clflush_add =
-+ PAGE_SIZE * clflush_size / sizeof(uint32_t);
-+ driver->clflush_mask = driver->clflush_add - 1;
-+ driver->clflush_mask = ~driver->clflush_mask;
-+ }
-+#endif
-+
-+ up_write(&driver->sem);
-+ return driver;
-+
-+ out_err1:
-+ kfree(driver);
-+ return NULL;
-+}
-+
-+#if defined(CONFIG_X86)
-+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages, uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride)
-+{
-+ struct psb_mmu_pt *pt;
-+ uint32_t rows = 1;
-+ uint32_t i;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long add;
-+ unsigned long row_add;
-+ unsigned long clflush_add = pd->driver->clflush_add;
-+ unsigned long clflush_mask = pd->driver->clflush_mask;
-+
-+ if (!pd->driver->has_clflush) {
-+ drm_ttm_cache_flush();
-+ return;
-+ }
-+
-+ if (hw_tile_stride)
-+ rows = num_pages / desired_tile_stride;
-+ else
-+ desired_tile_stride = num_pages;
-+
-+ add = desired_tile_stride << PAGE_SHIFT;
-+ row_add = hw_tile_stride << PAGE_SHIFT;
-+ mb();
-+ for (i = 0; i < rows; ++i) {
-+
-+ addr = address;
-+ end = addr + add;
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_map_lock(pd, addr);
-+ if (!pt)
-+ continue;
-+ do {
-+ psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
-+ } while (addr += clflush_add,
-+ (addr & clflush_mask) < next);
-+
-+ psb_mmu_pt_unmap_unlock(pt);
-+ } while (addr = next, next != end);
-+ address += row_add;
-+ }
-+ mb();
-+}
-+#else
-+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages, uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride)
-+{
-+ drm_ttm_cache_flush();
-+}
-+#endif
-+
-+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
-+ unsigned long address, uint32_t num_pages)
-+{
-+ struct psb_mmu_pt *pt;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long f_address = address;
-+
-+ down_read(&pd->driver->sem);
-+
-+ addr = address;
-+ end = addr + (num_pages << PAGE_SHIFT);
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-+ if (!pt)
-+ goto out;
-+ do {
-+ psb_mmu_invalidate_pte(pt, addr);
-+ --pt->count;
-+ } while (addr += PAGE_SIZE, addr < next);
-+ psb_mmu_pt_unmap_unlock(pt);
-+
-+ } while (addr = next, next != end);
-+
-+ out:
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
-+
-+ up_read(&pd->driver->sem);
-+
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush(pd->driver);
-+
-+ return;
-+}
-+
-+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages, uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride)
-+{
-+ struct psb_mmu_pt *pt;
-+ uint32_t rows = 1;
-+ uint32_t i;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long add;
-+ unsigned long row_add;
-+ unsigned long f_address = address;
-+
-+ if (hw_tile_stride)
-+ rows = num_pages / desired_tile_stride;
-+ else
-+ desired_tile_stride = num_pages;
-+
-+ add = desired_tile_stride << PAGE_SHIFT;
-+ row_add = hw_tile_stride << PAGE_SHIFT;
-+
-+ down_read(&pd->driver->sem);
-+
-+ /* Make sure we only need to flush this processor's cache */
-+
-+ for (i = 0; i < rows; ++i) {
-+
-+ addr = address;
-+ end = addr + add;
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_map_lock(pd, addr);
-+ if (!pt)
-+ continue;
-+ do {
-+ psb_mmu_invalidate_pte(pt, addr);
-+ --pt->count;
-+
-+ } while (addr += PAGE_SIZE, addr < next);
-+ psb_mmu_pt_unmap_unlock(pt);
-+
-+ } while (addr = next, next != end);
-+ address += row_add;
-+ }
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush_ptes(pd, f_address, num_pages,
-+ desired_tile_stride, hw_tile_stride);
-+
-+ up_read(&pd->driver->sem);
-+
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush(pd->driver);
-+}
-+
-+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
-+ unsigned long address, uint32_t num_pages,
-+ int type)
-+{
-+ struct psb_mmu_pt *pt;
-+ uint32_t pte;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long f_address = address;
-+ int ret = -ENOMEM;
-+
-+ down_read(&pd->driver->sem);
-+
-+ addr = address;
-+ end = addr + (num_pages << PAGE_SHIFT);
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-+ if (!pt) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ do {
-+ pte = psb_mmu_mask_pte(start_pfn++, type);
-+ psb_mmu_set_pte(pt, addr, pte);
-+ pt->count++;
-+ } while (addr += PAGE_SIZE, addr < next);
-+ psb_mmu_pt_unmap_unlock(pt);
-+
-+ } while (addr = next, next != end);
-+ ret = 0;
-+
-+ out:
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
-+
-+ up_read(&pd->driver->sem);
-+
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush(pd->driver);
-+
-+ return 0;
-+}
-+
-+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
-+ unsigned long address, uint32_t num_pages,
-+ uint32_t desired_tile_stride, uint32_t hw_tile_stride,
-+ int type)
-+{
-+ struct psb_mmu_pt *pt;
-+ uint32_t rows = 1;
-+ uint32_t i;
-+ uint32_t pte;
-+ unsigned long addr;
-+ unsigned long end;
-+ unsigned long next;
-+ unsigned long add;
-+ unsigned long row_add;
-+ unsigned long f_address = address;
-+ int ret = -ENOMEM;
-+
-+ if (hw_tile_stride) {
-+ if (num_pages % desired_tile_stride != 0)
-+ return -EINVAL;
-+ rows = num_pages / desired_tile_stride;
-+ } else {
-+ desired_tile_stride = num_pages;
-+ }
-+
-+ add = desired_tile_stride << PAGE_SHIFT;
-+ row_add = hw_tile_stride << PAGE_SHIFT;
-+
-+ down_read(&pd->driver->sem);
-+
-+ for (i = 0; i < rows; ++i) {
-+
-+ addr = address;
-+ end = addr + add;
-+
-+ do {
-+ next = psb_pd_addr_end(addr, end);
-+ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
-+ if (!pt)
-+ goto out;
-+ do {
-+ pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
-+ type);
-+ psb_mmu_set_pte(pt, addr, pte);
-+ pt->count++;
-+ } while (addr += PAGE_SIZE, addr < next);
-+ psb_mmu_pt_unmap_unlock(pt);
-+
-+ } while (addr = next, next != end);
-+
-+ address += row_add;
-+ }
-+ ret = 0;
-+ out:
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush_ptes(pd, f_address, num_pages,
-+ desired_tile_stride, hw_tile_stride);
-+
-+ up_read(&pd->driver->sem);
-+
-+ if (pd->hw_context != -1)
-+ psb_mmu_flush(pd->driver);
-+
-+ return 0;
-+}
-+
-+void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
-+{
-+ mask &= _PSB_MMU_ER_MASK;
-+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
-+ PSB_CR_BIF_CTRL);
-+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+}
-+
-+void psb_mmu_disable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
-+{
-+ mask &= _PSB_MMU_ER_MASK;
-+ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
-+ PSB_CR_BIF_CTRL);
-+ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
-+}
-+
-+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
-+ unsigned long *pfn)
-+{
-+ int ret;
-+ struct psb_mmu_pt *pt;
-+ uint32_t tmp;
-+ spinlock_t *lock = &pd->driver->lock;
-+
-+ down_read(&pd->driver->sem);
-+ pt = psb_mmu_pt_map_lock(pd, virtual);
-+ if (!pt) {
-+ uint32_t *v;
-+
-+ spin_lock(lock);
-+ v = kmap_atomic(pd->p, KM_USER0);
-+ tmp = v[psb_mmu_pd_index(virtual)];
-+ kunmap_atomic(v, KM_USER0);
-+ spin_unlock(lock);
-+
-+ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
-+ !(pd->invalid_pte & PSB_PTE_VALID)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+ ret = 0;
-+ *pfn = pd->invalid_pte >> PAGE_SHIFT;
-+ goto out;
-+ }
-+ tmp = pt->v[psb_mmu_pt_index(virtual)];
-+ if (!(tmp & PSB_PTE_VALID)) {
-+ ret = -EINVAL;
-+ } else {
-+ ret = 0;
-+ *pfn = tmp >> PAGE_SHIFT;
-+ }
-+ psb_mmu_pt_unmap_unlock(pt);
-+ out:
-+ up_read(&pd->driver->sem);
-+ return ret;
-+}
-+
-+void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
-+{
-+ struct page *p;
-+ unsigned long pfn;
-+ int ret = 0;
-+ struct psb_mmu_pd *pd;
-+ uint32_t *v;
-+ uint32_t *vmmu;
-+
-+ pd = driver->default_pd;
-+ if (!pd) {
-+ printk(KERN_WARNING "Could not get default pd\n");
-+ }
-+
-+ p = alloc_page(GFP_DMA32);
-+
-+ if (!p) {
-+ printk(KERN_WARNING "Failed allocating page\n");
-+ return;
-+ }
-+
-+ v = kmap(p);
-+ memset(v, 0x67, PAGE_SIZE);
-+
-+ pfn = (offset >> PAGE_SHIFT);
-+
-+ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0,
-+ PSB_MMU_CACHED_MEMORY);
-+ if (ret) {
-+ printk(KERN_WARNING "Failed inserting mmu page\n");
-+ goto out_err1;
-+ }
-+
-+ /* Ioremap the page through the GART aperture */
-+
-+ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-+ if (!vmmu) {
-+ printk(KERN_WARNING "Failed ioremapping page\n");
-+ goto out_err2;
-+ }
-+
-+ /* Read from the page with mmu disabled. */
-+ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
-+
-+ /* Enable the mmu for host accesses and read again. */
-+ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
-+
-+ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
-+ ioread32(vmmu));
-+ *v = 0x15243705;
-+ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
-+ ioread32(vmmu));
-+ iowrite32(0x16243355, vmmu);
-+ (void)ioread32(vmmu);
-+ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
-+
-+ printk(KERN_INFO "Int stat is 0x%08x\n",
-+ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
-+ printk(KERN_INFO "Fault is 0x%08x\n",
-+ psb_ioread32(driver, PSB_CR_BIF_FAULT));
-+
-+ /* Disable MMU for host accesses and clear page fault register */
-+ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
-+ iounmap(vmmu);
-+ out_err2:
-+ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
-+ out_err1:
-+ kunmap(p);
-+ __free_page(p);
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_msvdx.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_msvdx.c 2009-02-20 12:47:58.000000000 +0000
-@@ -0,0 +1,671 @@
-+/**
-+ * file psb_msvdx.c
-+ * MSVDX I/O operations and IRQ handling
-+ *
-+ */
-+
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
-+ * Copyright (c) Imagination Technologies Limited, UK
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+
-+#include "drmP.h"
-+#include "drm_os_linux.h"
-+#include "psb_drv.h"
-+#include "psb_drm.h"
-+#include "psb_msvdx.h"
-+
-+#include <asm/io.h>
-+#include <linux/delay.h>
-+
-+#ifndef list_first_entry
-+#define list_first_entry(ptr, type, member) \
-+ list_entry((ptr)->next, type, member)
-+#endif
-+
-+static int psb_msvdx_send (struct drm_device *dev, void *cmd,
-+ unsigned long cmd_size);
-+
-+int
-+psb_msvdx_dequeue_send (struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
-+ int ret = 0;
-+
-+ if (list_empty (&dev_priv->msvdx_queue))
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: msvdx list empty.\n");
-+ dev_priv->msvdx_busy = 0;
-+ return -EINVAL;
-+ }
-+ msvdx_cmd =
-+ list_first_entry (&dev_priv->msvdx_queue, struct psb_msvdx_cmd_queue,
-+ head);
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
-+ ret = psb_msvdx_send (dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
-+ ret = -EINVAL;
-+ }
-+ list_del (&msvdx_cmd->head);
-+ kfree (msvdx_cmd->cmd);
-+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
-+ return ret;
-+}
-+
-+int
-+psb_msvdx_map_command (struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset, unsigned long cmd_size,
-+ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
-+ unsigned long cmd_size_remaining;
-+ struct drm_bo_kmap_obj cmd_kmap;
-+ void *cmd, *tmp, *cmd_start;
-+ int is_iomem;
-+
-+ /* command buffers may not exceed page boundary */
-+ if (cmd_size + cmd_page_offset > PAGE_SIZE)
-+ return -EINVAL;
-+
-+ ret = drm_bo_kmap (cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
-+
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXQUE:ret:%d\n", ret);
-+ return ret;
-+ }
-+
-+ cmd_start =
-+ (void *) drm_bmo_virtual (&cmd_kmap, &is_iomem) + cmd_page_offset;
-+ cmd = cmd_start;
-+ cmd_size_remaining = cmd_size;
-+
-+ while (cmd_size_remaining > 0)
-+ {
-+ uint32_t mmu_ptd;
-+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
-+ uint32_t cur_cmd_id = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_ID);
-+ PSB_DEBUG_GENERAL
-+ ("cmd start at %08x cur_cmd_size = %d cur_cmd_id = %02x fence = %08x\n",
-+ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
-+ if ((cur_cmd_size % sizeof (uint32_t))
-+ || (cur_cmd_size > cmd_size_remaining))
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ switch (cur_cmd_id)
-+ {
-+ case VA_MSGID_RENDER:
-+ /* Fence ID */
-+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_FENCE_VALUE, sequence);
-+
-+ mmu_ptd = psb_get_default_pd_addr (dev_priv->mmu);
-+ if (atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc, 1, 0) == 1)
-+ {
-+ mmu_ptd |= 1;
-+ PSB_DEBUG_GENERAL ("MSVDX: Setting MMU invalidate flag\n");
-+ }
-+ /* PTD */
-+ MEMIO_WRITE_FIELD (cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
-+ break;
-+
-+ default:
-+ /* Msg not supported */
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ cmd += cur_cmd_size;
-+ cmd_size_remaining -= cur_cmd_size;
-+ }
-+
-+ if (copy_cmd)
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXQUE: psb_msvdx_map_command copying command...\n");
-+ tmp = drm_calloc (1, cmd_size, DRM_MEM_DRIVER);
-+ if (tmp == NULL)
-+ {
-+ ret = -ENOMEM;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+ memcpy (tmp, cmd_start, cmd_size);
-+ *msvdx_cmd = tmp;
-+ }
-+ else
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXQUE: psb_msvdx_map_command did NOT copy command...\n");
-+ ret = psb_msvdx_send (dev, cmd_start, cmd_size);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: psb_msvdx_send failed\n");
-+ ret = -EINVAL;
-+ }
-+ }
-+
-+out:
-+ drm_bo_kunmap (&cmd_kmap);
-+
-+ return ret;
-+}
-+
-+int
-+psb_submit_video_cmdbuf (struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset, unsigned long cmd_size,
-+ struct drm_fence_object *fence)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ uint32_t sequence = fence->sequence;
-+ unsigned long irq_flags;
-+ int ret = 0;
-+
-+ mutex_lock (&dev_priv->msvdx_mutex);
-+ psb_schedule_watchdog (dev_priv);
-+
-+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
-+ if (dev_priv->msvdx_needs_reset)
-+ {
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ PSB_DEBUG_GENERAL ("MSVDX: Needs reset\n");
-+ if (psb_msvdx_reset (dev_priv))
-+ {
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ ret = -EBUSY;
-+ PSB_DEBUG_GENERAL ("MSVDX: Reset failed\n");
-+ return ret;
-+ }
-+ PSB_DEBUG_GENERAL ("MSVDX: Reset ok\n");
-+ dev_priv->msvdx_needs_reset = 0;
-+ dev_priv->msvdx_busy = 0;
-+ dev_priv->msvdx_start_idle = 0;
-+
-+ psb_msvdx_init (dev);
-+ psb_msvdx_irq_preinstall (dev_priv);
-+ psb_msvdx_irq_postinstall (dev_priv);
-+ PSB_DEBUG_GENERAL ("MSVDX: Init ok\n");
-+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
-+ }
-+
-+ if (!dev_priv->msvdx_busy)
-+ {
-+ dev_priv->msvdx_busy = 1;
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXQUE: nothing in the queue sending sequence:%08x..\n",
-+ sequence);
-+ ret =
-+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
-+ NULL, sequence, 0);
-+ if (ret)
-+ {
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
-+ return ret;
-+ }
-+ }
-+ else
-+ {
-+ struct psb_msvdx_cmd_queue *msvdx_cmd;
-+ void *cmd = NULL;
-+
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ /*queue the command to be sent when the h/w is ready */
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: queueing sequence:%08x..\n", sequence);
-+ msvdx_cmd =
-+ drm_calloc (1, sizeof (struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
-+ if (msvdx_cmd == NULL)
-+ {
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Out of memory...\n");
-+ return -ENOMEM;
-+ }
-+
-+ ret =
-+ psb_msvdx_map_command (dev, cmd_buffer, cmd_offset, cmd_size,
-+ &cmd, sequence, 1);
-+ if (ret)
-+ {
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Failed to extract cmd...\n");
-+ drm_free (msvdx_cmd, sizeof (struct psb_msvdx_cmd_queue),
-+ DRM_MEM_DRIVER);
-+ return ret;
-+ }
-+ msvdx_cmd->cmd = cmd;
-+ msvdx_cmd->cmd_size = cmd_size;
-+ msvdx_cmd->sequence = sequence;
-+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
-+ list_add_tail (&msvdx_cmd->head, &dev_priv->msvdx_queue);
-+ if (!dev_priv->msvdx_busy)
-+ {
-+ dev_priv->msvdx_busy = 1;
-+ PSB_DEBUG_GENERAL ("MSVDXQUE: Need immediate dequeue\n");
-+ psb_msvdx_dequeue_send (dev);
-+ }
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ }
-+ mutex_unlock (&dev_priv->msvdx_mutex);
-+ return ret;
-+}
-+
-+int
-+psb_msvdx_send (struct drm_device *dev, void *cmd, unsigned long cmd_size)
-+{
-+ int ret = 0;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+
-+ while (cmd_size > 0)
-+ {
-+ uint32_t cur_cmd_size = MEMIO_READ_FIELD (cmd, FWRK_GENMSG_SIZE);
-+ if (cur_cmd_size > cmd_size)
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: cmd_size = %d cur_cmd_size = %d\n",
-+ (int) cmd_size, cur_cmd_size);
-+ goto out;
-+ }
-+ /* Send the message to h/w */
-+ ret = psb_mtx_send (dev_priv, cmd);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+ cmd += cur_cmd_size;
-+ cmd_size -= cur_cmd_size;
-+ }
-+
-+out:
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ return ret;
-+}
-+
-+int
-+psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg)
-+{
-+
-+ static uint32_t padMessage[FWRK_PADMSG_SIZE];
-+
-+ const uint32_t *pui32Msg = (uint32_t *) pvMsg;
-+ uint32_t msgNumWords, wordsFree, readIndex, writeIndex;
-+ int ret = 0;
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: psb_mtx_send\n");
-+
-+ /* we need clocks enabled before we touch VEC local ram */
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ msgNumWords = (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_SIZE) + 3) / 4;
-+
-+ if (msgNumWords > NUM_WORDS_MTX_BUF)
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_RD_INDEX);
-+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+
-+ if (writeIndex + msgNumWords > NUM_WORDS_MTX_BUF)
-+ { /* message would wrap, need to send a pad message */
-+ BUG_ON (MEMIO_READ_FIELD (pvMsg, FWRK_GENMSG_ID) == FWRK_MSGID_PADDING); /* Shouldn't happen for a PAD message itself */
-+ /* if the read pointer is at zero then we must wait for it to change otherwise the write
-+ * pointer will equal the read pointer,which should only happen when the buffer is empty
-+ *
-+ * This will only happens if we try to overfill the queue, queue management should make
-+ * sure this never happens in the first place.
-+ */
-+ BUG_ON (0 == readIndex);
-+ if (0 == readIndex)
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+ /* Send a pad message */
-+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_SIZE,
-+ (NUM_WORDS_MTX_BUF - writeIndex) << 2);
-+ MEMIO_WRITE_FIELD (padMessage, FWRK_GENMSG_ID, FWRK_MSGID_PADDING);
-+ psb_mtx_send (dev_priv, padMessage);
-+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+ }
-+
-+ wordsFree =
-+ (writeIndex >=
-+ readIndex) ? NUM_WORDS_MTX_BUF - (writeIndex -
-+ readIndex) : readIndex - writeIndex;
-+
-+ BUG_ON (msgNumWords > wordsFree);
-+ if (msgNumWords > wordsFree)
-+ {
-+ ret = -EINVAL;
-+ PSB_DEBUG_GENERAL ("MSVDX: ret:%d\n", ret);
-+ goto out;
-+ }
-+
-+ while (msgNumWords > 0)
-+ {
-+ PSB_WMSVDX32 (*pui32Msg++, MSVDX_COMMS_TO_MTX_BUF + (writeIndex << 2));
-+ msgNumWords--;
-+ writeIndex++;
-+ if (NUM_WORDS_MTX_BUF == writeIndex)
-+ {
-+ writeIndex = 0;
-+ }
-+ }
-+ PSB_WMSVDX32 (writeIndex, MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+
-+ /* Make sure clocks are enabled before we kick */
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ /* signal an interrupt to let the mtx know there is a new message */
-+ PSB_WMSVDX32 (1, MSVDX_MTX_KICKI);
-+
-+out:
-+ return ret;
-+}
-+
-+/*
-+ * MSVDX MTX interrupt
-+ */
-+void
-+psb_msvdx_mtx_interrupt (struct drm_device *dev)
-+{
-+ static uint32_t msgBuffer[128];
-+ uint32_t readIndex, writeIndex;
-+ uint32_t msgNumWords, msgWordOffset;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *) dev->dev_private;
-+
-+ /* Are clocks enabled - If not enable before attempting to read from VLR */
-+ if (PSB_RMSVDX32 (MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: Warning - Clocks disabled when Interupt set\n");
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+ }
-+
-+ for (;;)
-+ {
-+ readIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_RD_INDEX);
-+ writeIndex = PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_WRT_INDEX);
-+
-+ if (readIndex != writeIndex)
-+ {
-+ msgWordOffset = 0;
-+
-+ msgBuffer[msgWordOffset] =
-+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
-+
-+ msgNumWords = (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_SIZE) + 3) / 4; /* round to nearest word */
-+
-+ /*ASSERT(msgNumWords <= sizeof(msgBuffer) / sizeof(uint32_t)); */
-+
-+ if (++readIndex >= NUM_WORDS_HOST_BUF)
-+ readIndex = 0;
-+
-+ for (msgWordOffset++; msgWordOffset < msgNumWords; msgWordOffset++)
-+ {
-+ msgBuffer[msgWordOffset] =
-+ PSB_RMSVDX32 (MSVDX_COMMS_TO_HOST_BUF + (readIndex << 2));
-+
-+ if (++readIndex >= NUM_WORDS_HOST_BUF)
-+ {
-+ readIndex = 0;
-+ }
-+ }
-+
-+ /* Update the Read index */
-+ PSB_WMSVDX32 (readIndex, MSVDX_COMMS_TO_HOST_RD_INDEX);
-+
-+ if (!dev_priv->msvdx_needs_reset)
-+ switch (MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID))
-+ {
-+ case VA_MSGID_CMD_HW_PANIC:
-+ case VA_MSGID_CMD_FAILED:
-+ {
-+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
-+ FW_VA_CMD_FAILED_FENCE_VALUE);
-+ uint32_t ui32FaultStatus = MEMIO_READ_FIELD (msgBuffer,
-+ FW_VA_CMD_FAILED_IRQSTATUS);
-+
-+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC )
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: VA_MSGID_CMD_HW_PANIC: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
-+ ui32Fence, ui32FaultStatus);
-+ else
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: VA_MSGID_CMD_FAILED: Msvdx fault detected - Fence: %08x, Status: %08x - resetting and ignoring error\n",
-+ ui32Fence, ui32FaultStatus);
-+
-+ dev_priv->msvdx_needs_reset = 1;
-+
-+ if(MEMIO_READ_FIELD (msgBuffer, FWRK_GENMSG_ID) == VA_MSGID_CMD_HW_PANIC)
-+ {
-+ if (dev_priv->
-+ msvdx_current_sequence
-+ - dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
-+ dev_priv->msvdx_current_sequence++;
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: Fence ID missing, assuming %08x\n",
-+ dev_priv->msvdx_current_sequence);
-+ }
-+ else
-+ dev_priv->msvdx_current_sequence = ui32Fence;
-+
-+ psb_fence_error (dev,
-+ PSB_ENGINE_VIDEO,
-+ dev_priv->
-+ msvdx_current_sequence,
-+ DRM_FENCE_TYPE_EXE, DRM_CMD_FAILED);
-+
-+ /* Flush the command queue */
-+ psb_msvdx_flush_cmd_queue (dev);
-+
-+ goto isrExit;
-+ break;
-+ }
-+ case VA_MSGID_CMD_COMPLETED:
-+ {
-+ uint32_t ui32Fence = MEMIO_READ_FIELD (msgBuffer,
-+ FW_VA_CMD_COMPLETED_FENCE_VALUE);
-+ uint32_t ui32Flags =
-+ MEMIO_READ_FIELD (msgBuffer, FW_VA_CMD_COMPLETED_FLAGS);
-+
-+ PSB_DEBUG_GENERAL
-+ ("msvdx VA_MSGID_CMD_COMPLETED: FenceID: %08x, flags: 0x%x\n",
-+ ui32Fence, ui32Flags);
-+ dev_priv->msvdx_current_sequence = ui32Fence;
-+
-+ psb_fence_handler (dev, PSB_ENGINE_VIDEO);
-+
-+
-+ if (ui32Flags & FW_VA_RENDER_HOST_INT)
-+ {
-+ /*Now send the next command from the msvdx cmd queue */
-+ psb_msvdx_dequeue_send (dev);
-+ goto isrExit;
-+ }
-+ break;
-+ }
-+ case VA_MSGID_ACK:
-+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_ACK\n");
-+ break;
-+
-+ case VA_MSGID_TEST1:
-+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST1\n");
-+ break;
-+
-+ case VA_MSGID_TEST2:
-+ PSB_DEBUG_GENERAL ("msvdx VA_MSGID_TEST2\n");
-+ break;
-+ /* Don't need to do anything with these messages */
-+
-+ case VA_MSGID_DEBLOCK_REQUIRED:
-+ {
-+ uint32_t ui32ContextId = MEMIO_READ_FIELD (msgBuffer,
-+ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
-+
-+ /* The BE we now be locked. */
-+
-+ /* Unblock rendec by reading the mtx2mtx end of slice */
-+ (void) PSB_RMSVDX32 (MSVDX_RENDEC_READ_DATA);
-+
-+ PSB_DEBUG_GENERAL
-+ ("msvdx VA_MSGID_DEBLOCK_REQUIRED Context=%08x\n",
-+ ui32ContextId);
-+ goto isrExit;
-+ break;
-+ }
-+
-+ default:
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("ERROR: msvdx Unknown message from MTX \n");
-+ }
-+ break;
-+
-+ }
-+ }
-+ else
-+ {
-+ /* Get out of here if nothing */
-+ break;
-+ }
-+ }
-+isrExit:
-+
-+#if 1
-+ if (!dev_priv->msvdx_busy)
-+ {
-+ /* check that clocks are enabled before reading VLR */
-+ if( PSB_RMSVDX32( MSVDX_MAN_CLK_ENABLE ) != (clk_enable_all) )
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ /* If the firmware says the hardware is idle and the CCB is empty then we can power down */
-+ {
-+ uint32_t ui32FWStatus = PSB_RMSVDX32( MSVDX_COMMS_FW_STATUS );
-+ uint32_t ui32CCBRoff = PSB_RMSVDX32 ( MSVDX_COMMS_TO_MTX_RD_INDEX );
-+ uint32_t ui32CCBWoff = PSB_RMSVDX32 ( MSVDX_COMMS_TO_MTX_WRT_INDEX );
-+
-+ if( (ui32FWStatus & MSVDX_FW_STATUS_HW_IDLE) && (ui32CCBRoff == ui32CCBWoff))
-+ {
-+ PSB_DEBUG_GENERAL("MSVDX_CLOCK: Setting clock to minimal...\n");
-+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
-+ }
-+ }
-+ }
-+#endif
-+ DRM_MEMORYBARRIER ();
-+}
-+
-+void
-+psb_msvdx_lockup (struct drm_psb_private *dev_priv,
-+ int *msvdx_lockup, int *msvdx_idle)
-+{
-+ unsigned long irq_flags;
-+// struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+
-+ spin_lock_irqsave (&dev_priv->msvdx_lock, irq_flags);
-+ *msvdx_lockup = 0;
-+ *msvdx_idle = 1;
-+
-+ if (!dev_priv->has_msvdx)
-+ {
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+ return;
-+ }
-+#if 0
-+ PSB_DEBUG_GENERAL ("MSVDXTimer: current_sequence:%d "
-+ "last_sequence:%d and last_submitted_sequence :%d\n",
-+ dev_priv->msvdx_current_sequence,
-+ dev_priv->msvdx_last_sequence,
-+ dev_priv->sequence[PSB_ENGINE_VIDEO]);
-+#endif
-+ if (dev_priv->msvdx_current_sequence -
-+ dev_priv->sequence[PSB_ENGINE_VIDEO] > 0x0FFFFFFF)
-+ {
-+
-+ if (dev_priv->msvdx_current_sequence == dev_priv->msvdx_last_sequence)
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXTimer: msvdx locked-up for sequence:%d\n",
-+ dev_priv->msvdx_current_sequence);
-+ *msvdx_lockup = 1;
-+ }
-+ else
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDXTimer: msvdx responded fine so far...\n");
-+ dev_priv->msvdx_last_sequence = dev_priv->msvdx_current_sequence;
-+ *msvdx_idle = 0;
-+ }
-+ if (dev_priv->msvdx_start_idle)
-+ dev_priv->msvdx_start_idle = 0;
-+ }
-+ else
-+ {
-+ if (dev_priv->msvdx_needs_reset == 0)
-+ {
-+ if (dev_priv->msvdx_start_idle && (dev_priv->msvdx_finished_sequence == dev_priv->msvdx_current_sequence))
-+ {
-+ //if (dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME >= jiffies)
-+ if (time_after_eq(jiffies, dev_priv->msvdx_idle_start_jiffies + MSVDX_MAX_IDELTIME))
-+ {
-+ printk("set the msvdx clock to 0 in the %s\n", __FUNCTION__);
-+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
-+ dev_priv->msvdx_needs_reset = 1;
-+ }
-+ else
-+ {
-+ *msvdx_idle = 0;
-+ }
-+ }
-+ else
-+ {
-+ dev_priv->msvdx_start_idle = 1;
-+ dev_priv->msvdx_idle_start_jiffies = jiffies;
-+ dev_priv->msvdx_finished_sequence = dev_priv->msvdx_current_sequence;
-+ *msvdx_idle = 0;
-+ }
-+ }
-+ }
-+ spin_unlock_irqrestore (&dev_priv->msvdx_lock, irq_flags);
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_msvdx.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_msvdx.h 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,564 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
-+ * Copyright (c) Imagination Technologies Limited, UK
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+
-+#ifndef _PSB_MSVDX_H_
-+#define _PSB_MSVDX_H_
-+
-+#define assert(expr) \
-+ if(unlikely(!(expr))) { \
-+ printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
-+ #expr,__FILE__,__FUNCTION__,__LINE__); \
-+ }
-+
-+#define PSB_ASSERT(x) assert (x)
-+#define IMG_ASSERT(x) assert (x)
-+
-+#include "psb_drv.h"
-+int
-+psb_wait_for_register (struct drm_psb_private *dev_priv,
-+ uint32_t ui32Offset,
-+ uint32_t ui32Value, uint32_t ui32Enable);
-+
-+void psb_msvdx_mtx_interrupt (struct drm_device *dev);
-+int psb_msvdx_init (struct drm_device *dev);
-+int psb_msvdx_uninit (struct drm_device *dev);
-+int psb_msvdx_reset (struct drm_psb_private *dev_priv);
-+uint32_t psb_get_default_pd_addr (struct psb_mmu_driver *driver);
-+int psb_mtx_send (struct drm_psb_private *dev_priv, const void *pvMsg);
-+void psb_msvdx_irq_preinstall (struct drm_psb_private *dev_priv);
-+void psb_msvdx_irq_postinstall (struct drm_psb_private *dev_priv);
-+void psb_msvdx_flush_cmd_queue (struct drm_device *dev);
-+extern void psb_msvdx_lockup (struct drm_psb_private *dev_priv,
-+ int *msvdx_lockup, int *msvdx_idle);
-+#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2 /* Non-Optimal Invalidation is not default */
-+#define FW_VA_RENDER_HOST_INT 0x00004000
-+#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
-+
-+#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
-+
-+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
-+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
-+#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION \
-+ | MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
-+
-+
-+#define POULSBO_D0 0x5
-+#define POULSBO_D1 0x6
-+#define PSB_REVID_OFFSET 0x8
-+
-+#define MSVDX_FW_STATUS_HW_IDLE 0x00000001 /* There is no work currently underway on the hardware*/
-+
-+#define clk_enable_all MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
-+
-+#define clk_enable_minimal MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
-+
-+#define clk_enable_auto MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
-+ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
-+
-+#define msvdx_sw_reset_all MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
-+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
-+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
-+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
-+ MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK
-+
-+
-+#define PCI_PORT5_REG80_FFUSE 0xD0058000
-+#define MTX_CODE_BASE (0x80900000)
-+#define MTX_DATA_BASE (0x82880000)
-+#define PC_START_ADDRESS (0x80900000)
-+
-+#define MTX_CORE_CODE_MEM (0x10 )
-+#define MTX_CORE_DATA_MEM (0x18 )
-+
-+#define MTX_INTERNAL_REG( R_SPECIFIER , U_SPECIFIER ) ( ((R_SPECIFIER)<<4) | (U_SPECIFIER) )
-+#define MTX_PC MTX_INTERNAL_REG( 0 , 5 )
-+
-+#define RENDEC_A_SIZE ( 2 * 1024* 1024 )
-+#define RENDEC_B_SIZE ( RENDEC_A_SIZE / 4 )
-+
-+#define MEMIO_READ_FIELD(vpMem, field) \
-+ ((uint32_t)(((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & field##_MASK) >> field##_SHIFT))
-+
-+#define MEMIO_WRITE_FIELD(vpMem, field, ui32Value) \
-+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
-+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) & (field##_TYPE)~field##_MASK) | \
-+ (field##_TYPE)(( (uint32_t) (ui32Value) << field##_SHIFT) & field##_MASK);
-+
-+#define MEMIO_WRITE_FIELD_LITE(vpMem, field, ui32Value) \
-+ (*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) = \
-+ ((*((field##_TYPE *)(((uint32_t)vpMem) + field##_OFFSET))) | \
-+ (field##_TYPE) (( (uint32_t) (ui32Value) << field##_SHIFT)) );
-+
-+#define REGIO_READ_FIELD(ui32RegValue, reg, field) \
-+ ((ui32RegValue & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
-+
-+#define REGIO_WRITE_FIELD(ui32RegValue, reg, field, ui32Value) \
-+ (ui32RegValue) = \
-+ ((ui32RegValue) & ~(reg##_##field##_MASK)) | \
-+ (((ui32Value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
-+
-+#define REGIO_WRITE_FIELD_LITE(ui32RegValue, reg, field, ui32Value) \
-+ (ui32RegValue) = \
-+ ( (ui32RegValue) | ( (ui32Value) << (reg##_##field##_SHIFT) ) );
-+
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK (0x00000001)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK (0x00000002)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK (0x00000004)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK (0x00000008)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK (0x00000010)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK (0x00000020)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK (0x00000040)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK (0x00040000)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK (0x00080000)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK (0x00100000)
-+#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK (0x00200000)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK (0x00010000)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK (0x00100000)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK (0x01000000)
-+#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK (0x10000000)
-+
-+/* MTX registers */
-+#define MSVDX_MTX_ENABLE (0x0000)
-+#define MSVDX_MTX_KICKI (0x0088)
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
-+#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
-+#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
-+#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
-+#define MSVDX_MTX_SOFT_RESET (0x0200)
-+
-+/* MSVDX registers */
-+#define MSVDX_CONTROL (0x0600)
-+#define MSVDX_INTERRUPT_CLEAR (0x060C)
-+#define MSVDX_INTERRUPT_STATUS (0x0608)
-+#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
-+#define MSVDX_MMU_CONTROL0 (0x0680)
-+#define MSVDX_MTX_RAM_BANK (0x06F0)
-+#define MSVDX_MAN_CLK_ENABLE (0x0620)
-+
-+/* RENDEC registers */
-+#define MSVDX_RENDEC_CONTROL0 (0x0868)
-+#define MSVDX_RENDEC_CONTROL1 (0x086C)
-+#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
-+#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
-+#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
-+#define MSVDX_RENDEC_READ_DATA (0x0898)
-+#define MSVDX_RENDEC_CONTEXT0 (0x0950)
-+#define MSVDX_RENDEC_CONTEXT1 (0x0954)
-+#define MSVDX_RENDEC_CONTEXT2 (0x0958)
-+#define MSVDX_RENDEC_CONTEXT3 (0x095C)
-+#define MSVDX_RENDEC_CONTEXT4 (0x0960)
-+#define MSVDX_RENDEC_CONTEXT5 (0x0964)
-+
-+/*
-+ * This defines the MSVDX communication buffer
-+ */
-+#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
-+#define NUM_WORDS_HOST_BUF (100) /*!< Host buffer size (in 32-bit words) */
-+#define NUM_WORDS_MTX_BUF (100) /*!< MTX buffer size (in 32-bit words) */
-+
-+#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
-+
-+#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
-+#define MSVDX_COMMS_SCRATCH (MSVDX_COMMS_AREA_ADDR - 0x08)
-+#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
-+#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
-+#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
-+#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
-+#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
-+#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
-+#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
-+#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
-+#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
-+#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
-+#define MSVDX_COMMS_TO_MTX_BUF (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
-+
-+#define MSVDX_COMMS_AREA_END (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
-+
-+#if (MSVDX_COMMS_AREA_END != 0x03000)
-+#error
-+#endif
-+
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
-+
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
-+#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
-+
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
-+
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
-+
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
-+
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
-+#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
-+
-+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
-+#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
-+
-+#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
-+#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
-+
-+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
-+#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
-+
-+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
-+#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
-+
-+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
-+#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
-+
-+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
-+#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
-+
-+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
-+#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
-+
-+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
-+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
-+
-+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
-+#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
-+
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
-+
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
-+
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
-+
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
-+#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
-+
-+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
-+#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
-+
-+#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80) /*!< Start of parser specific Host->MTX messages. */
-+#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0) /*!< Start of parser specific MTX->Host messages. */
-+#define FWRK_MSGID_PADDING ( 0 )
-+
-+#define FWRK_GENMSG_SIZE_TYPE uint8_t
-+#define FWRK_GENMSG_SIZE_MASK (0xFF)
-+#define FWRK_GENMSG_SIZE_SHIFT (0)
-+#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
-+#define FWRK_GENMSG_ID_TYPE uint8_t
-+#define FWRK_GENMSG_ID_MASK (0xFF)
-+#define FWRK_GENMSG_ID_SHIFT (0)
-+#define FWRK_GENMSG_ID_OFFSET (0x0001)
-+#define FWRK_PADMSG_SIZE (2)
-+
-+/*!
-+******************************************************************************
-+ This type defines the framework specified message ids
-+******************************************************************************/
-+enum
-+{
-+ /*! Sent by the DXVA driver on the host to the mtx firmware.
-+ */
-+ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
-+ VA_MSGID_RENDER,
-+ VA_MSGID_DEBLOCK,
-+ VA_MSGID_OOLD,
-+
-+ /* Test Messages */
-+ VA_MSGID_TEST1,
-+ VA_MSGID_TEST2,
-+
-+ /*! Sent by the mtx firmware to itself.
-+ */
-+ VA_MSGID_RENDER_MC_INTERRUPT,
-+
-+ /*! Sent by the DXVA firmware on the MTX to the host.
-+ */
-+ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
-+ VA_MSGID_CMD_COMPLETED_BATCH,
-+ VA_MSGID_DEBLOCK_REQUIRED,
-+ VA_MSGID_TEST_RESPONCE,
-+ VA_MSGID_ACK,
-+
-+ VA_MSGID_CMD_FAILED,
-+ VA_MSGID_CMD_UNSUPPORTED,
-+ VA_MSGID_CMD_HW_PANIC,
-+};
-+
-+/* MSVDX Firmware interface */
-+
-+#define FW_VA_RENDER_SIZE (32)
-+
-+// FW_VA_RENDER MSG_SIZE
-+#define FW_VA_RENDER_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_RENDER_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_RENDER_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_RENDER_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_RENDER_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_RENDER_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_RENDER ID
-+#define FW_VA_RENDER_ID_ALIGNMENT (1)
-+#define FW_VA_RENDER_ID_TYPE uint8_t
-+#define FW_VA_RENDER_ID_MASK (0xFF)
-+#define FW_VA_RENDER_ID_LSBMASK (0xFF)
-+#define FW_VA_RENDER_ID_OFFSET (0x0001)
-+#define FW_VA_RENDER_ID_SHIFT (0)
-+
-+// FW_VA_RENDER BUFFER_SIZE
-+#define FW_VA_RENDER_BUFFER_SIZE_ALIGNMENT (2)
-+#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
-+#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
-+#define FW_VA_RENDER_BUFFER_SIZE_LSBMASK (0x0FFF)
-+#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
-+#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
-+
-+// FW_VA_RENDER MMUPTD
-+#define FW_VA_RENDER_MMUPTD_ALIGNMENT (4)
-+#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
-+#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_MMUPTD_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
-+#define FW_VA_RENDER_MMUPTD_SHIFT (0)
-+
-+// FW_VA_RENDER LLDMA_ADDRESS
-+#define FW_VA_RENDER_LLDMA_ADDRESS_ALIGNMENT (4)
-+#define FW_VA_RENDER_LLDMA_ADDRESS_TYPE uint32_t
-+#define FW_VA_RENDER_LLDMA_ADDRESS_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_LLDMA_ADDRESS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_LLDMA_ADDRESS_OFFSET (0x0008)
-+#define FW_VA_RENDER_LLDMA_ADDRESS_SHIFT (0)
-+
-+// FW_VA_RENDER CONTEXT
-+#define FW_VA_RENDER_CONTEXT_ALIGNMENT (4)
-+#define FW_VA_RENDER_CONTEXT_TYPE uint32_t
-+#define FW_VA_RENDER_CONTEXT_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_CONTEXT_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_CONTEXT_OFFSET (0x000C)
-+#define FW_VA_RENDER_CONTEXT_SHIFT (0)
-+
-+// FW_VA_RENDER FENCE_VALUE
-+#define FW_VA_RENDER_FENCE_VALUE_ALIGNMENT (4)
-+#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
-+#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
-+#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
-+
-+// FW_VA_RENDER OPERATING_MODE
-+#define FW_VA_RENDER_OPERATING_MODE_ALIGNMENT (4)
-+#define FW_VA_RENDER_OPERATING_MODE_TYPE uint32_t
-+#define FW_VA_RENDER_OPERATING_MODE_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_OPERATING_MODE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_OPERATING_MODE_OFFSET (0x0014)
-+#define FW_VA_RENDER_OPERATING_MODE_SHIFT (0)
-+
-+// FW_VA_RENDER FIRST_MB_IN_SLICE
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_ALIGNMENT (2)
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_TYPE uint16_t
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_MASK (0xFFFF)
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_LSBMASK (0xFFFF)
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_OFFSET (0x0018)
-+#define FW_VA_RENDER_FIRST_MB_IN_SLICE_SHIFT (0)
-+
-+// FW_VA_RENDER LAST_MB_IN_FRAME
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_ALIGNMENT (2)
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_TYPE uint16_t
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_MASK (0xFFFF)
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_LSBMASK (0xFFFF)
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_OFFSET (0x001A)
-+#define FW_VA_RENDER_LAST_MB_IN_FRAME_SHIFT (0)
-+
-+// FW_VA_RENDER FLAGS
-+#define FW_VA_RENDER_FLAGS_ALIGNMENT (4)
-+#define FW_VA_RENDER_FLAGS_TYPE uint32_t
-+#define FW_VA_RENDER_FLAGS_MASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_FLAGS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_RENDER_FLAGS_OFFSET (0x001C)
-+#define FW_VA_RENDER_FLAGS_SHIFT (0)
-+
-+#define FW_VA_CMD_COMPLETED_SIZE (12)
-+
-+// FW_VA_CMD_COMPLETED MSG_SIZE
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_CMD_COMPLETED_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_CMD_COMPLETED ID
-+#define FW_VA_CMD_COMPLETED_ID_ALIGNMENT (1)
-+#define FW_VA_CMD_COMPLETED_ID_TYPE uint8_t
-+#define FW_VA_CMD_COMPLETED_ID_MASK (0xFF)
-+#define FW_VA_CMD_COMPLETED_ID_LSBMASK (0xFF)
-+#define FW_VA_CMD_COMPLETED_ID_OFFSET (0x0001)
-+#define FW_VA_CMD_COMPLETED_ID_SHIFT (0)
-+
-+// FW_VA_CMD_COMPLETED FENCE_VALUE
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_ALIGNMENT (4)
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
-+#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
-+
-+// FW_VA_CMD_COMPLETED FLAGS
-+#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
-+#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
-+#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
-+#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
-+#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
-+
-+#define FW_VA_CMD_FAILED_SIZE (12)
-+
-+// FW_VA_CMD_FAILED MSG_SIZE
-+#define FW_VA_CMD_FAILED_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_CMD_FAILED_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_CMD_FAILED_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_CMD_FAILED_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_CMD_FAILED_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_CMD_FAILED_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_CMD_FAILED ID
-+#define FW_VA_CMD_FAILED_ID_ALIGNMENT (1)
-+#define FW_VA_CMD_FAILED_ID_TYPE uint8_t
-+#define FW_VA_CMD_FAILED_ID_MASK (0xFF)
-+#define FW_VA_CMD_FAILED_ID_LSBMASK (0xFF)
-+#define FW_VA_CMD_FAILED_ID_OFFSET (0x0001)
-+#define FW_VA_CMD_FAILED_ID_SHIFT (0)
-+
-+// FW_VA_CMD_FAILED FLAGS
-+#define FW_VA_CMD_FAILED_FLAGS_ALIGNMENT (2)
-+#define FW_VA_CMD_FAILED_FLAGS_TYPE uint16_t
-+#define FW_VA_CMD_FAILED_FLAGS_MASK (0xFFFF)
-+#define FW_VA_CMD_FAILED_FLAGS_LSBMASK (0xFFFF)
-+#define FW_VA_CMD_FAILED_FLAGS_OFFSET (0x0002)
-+#define FW_VA_CMD_FAILED_FLAGS_SHIFT (0)
-+
-+// FW_VA_CMD_FAILED FENCE_VALUE
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_ALIGNMENT (4)
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
-+#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
-+
-+// FW_VA_CMD_FAILED IRQSTATUS
-+#define FW_VA_CMD_FAILED_IRQSTATUS_ALIGNMENT (4)
-+#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
-+#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
-+#define FW_VA_CMD_FAILED_IRQSTATUS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
-+#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
-+
-+#define FW_VA_DEBLOCK_REQUIRED_SIZE (8)
-+
-+// FW_VA_DEBLOCK_REQUIRED MSG_SIZE
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_DEBLOCK_REQUIRED_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_DEBLOCK_REQUIRED ID
-+#define FW_VA_DEBLOCK_REQUIRED_ID_ALIGNMENT (1)
-+#define FW_VA_DEBLOCK_REQUIRED_ID_TYPE uint8_t
-+#define FW_VA_DEBLOCK_REQUIRED_ID_MASK (0xFF)
-+#define FW_VA_DEBLOCK_REQUIRED_ID_LSBMASK (0xFF)
-+#define FW_VA_DEBLOCK_REQUIRED_ID_OFFSET (0x0001)
-+#define FW_VA_DEBLOCK_REQUIRED_ID_SHIFT (0)
-+
-+// FW_VA_DEBLOCK_REQUIRED CONTEXT
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_ALIGNMENT (4)
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
-+#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
-+
-+#define FW_VA_HW_PANIC_SIZE (12)
-+
-+// FW_VA_HW_PANIC FLAGS
-+#define FW_VA_HW_PANIC_FLAGS_ALIGNMENT (2)
-+#define FW_VA_HW_PANIC_FLAGS_TYPE uint16_t
-+#define FW_VA_HW_PANIC_FLAGS_MASK (0xFFFF)
-+#define FW_VA_HW_PANIC_FLAGS_LSBMASK (0xFFFF)
-+#define FW_VA_HW_PANIC_FLAGS_OFFSET (0x0002)
-+#define FW_VA_HW_PANIC_FLAGS_SHIFT (0)
-+
-+// FW_VA_HW_PANIC MSG_SIZE
-+#define FW_VA_HW_PANIC_MSG_SIZE_ALIGNMENT (1)
-+#define FW_VA_HW_PANIC_MSG_SIZE_TYPE uint8_t
-+#define FW_VA_HW_PANIC_MSG_SIZE_MASK (0xFF)
-+#define FW_VA_HW_PANIC_MSG_SIZE_LSBMASK (0xFF)
-+#define FW_VA_HW_PANIC_MSG_SIZE_OFFSET (0x0000)
-+#define FW_VA_HW_PANIC_MSG_SIZE_SHIFT (0)
-+
-+// FW_VA_HW_PANIC ID
-+#define FW_VA_HW_PANIC_ID_ALIGNMENT (1)
-+#define FW_VA_HW_PANIC_ID_TYPE uint8_t
-+#define FW_VA_HW_PANIC_ID_MASK (0xFF)
-+#define FW_VA_HW_PANIC_ID_LSBMASK (0xFF)
-+#define FW_VA_HW_PANIC_ID_OFFSET (0x0001)
-+#define FW_VA_HW_PANIC_ID_SHIFT (0)
-+
-+// FW_VA_HW_PANIC FENCE_VALUE
-+#define FW_VA_HW_PANIC_FENCE_VALUE_ALIGNMENT (4)
-+#define FW_VA_HW_PANIC_FENCE_VALUE_TYPE uint32_t
-+#define FW_VA_HW_PANIC_FENCE_VALUE_MASK (0xFFFFFFFF)
-+#define FW_VA_HW_PANIC_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_HW_PANIC_FENCE_VALUE_OFFSET (0x0004)
-+#define FW_VA_HW_PANIC_FENCE_VALUE_SHIFT (0)
-+
-+// FW_VA_HW_PANIC IRQSTATUS
-+#define FW_VA_HW_PANIC_IRQSTATUS_ALIGNMENT (4)
-+#define FW_VA_HW_PANIC_IRQSTATUS_TYPE uint32_t
-+#define FW_VA_HW_PANIC_IRQSTATUS_MASK (0xFFFFFFFF)
-+#define FW_VA_HW_PANIC_IRQSTATUS_LSBMASK (0xFFFFFFFF)
-+#define FW_VA_HW_PANIC_IRQSTATUS_OFFSET (0x0008)
-+#define FW_VA_HW_PANIC_IRQSTATUS_SHIFT (0)
-+
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_msvdxinit.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,625 @@
-+/**
-+ * file psb_msvdxinit.c
-+ * MSVDX initialization and mtx-firmware upload
-+ *
-+ */
-+
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
-+ * Copyright (c) Imagination Technologies Limited, UK
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+
-+#include "drmP.h"
-+#include "drm.h"
-+#include "psb_drv.h"
-+#include "psb_msvdx.h"
-+#include <linux/firmware.h>
-+
-+/*MSVDX FW header*/
-+struct msvdx_fw
-+{
-+ uint32_t ver;
-+ uint32_t text_size;
-+ uint32_t data_size;
-+ uint32_t data_location;
-+};
-+
-+int
-+psb_wait_for_register (struct drm_psb_private *dev_priv,
-+ uint32_t ui32Offset,
-+ uint32_t ui32Value, uint32_t ui32Enable)
-+{
-+ uint32_t ui32Temp;
-+ uint32_t ui32PollCount = 1000;
-+ while (ui32PollCount)
-+ {
-+ ui32Temp = PSB_RMSVDX32 (ui32Offset);
-+ if (ui32Value == (ui32Temp & ui32Enable)) /* All the bits are reset */
-+ return 0; /* So exit */
-+
-+ /* Wait a bit */
-+ DRM_UDELAY (100);
-+ ui32PollCount--;
-+ }
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: Timeout while waiting for register %08x: expecting %08x (mask %08x), got %08x\n",
-+ ui32Offset, ui32Value, ui32Enable, ui32Temp);
-+ return 1;
-+}
-+
-+int
-+psb_poll_mtx_irq (struct drm_psb_private *dev_priv)
-+{
-+ int ret = 0;
-+ uint32_t MtxInt = 0;
-+ REGIO_WRITE_FIELD_LITE (MtxInt, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ, 1);
-+
-+ ret = psb_wait_for_register (dev_priv, MSVDX_INTERRUPT_STATUS, MtxInt, /* Required value */
-+ MtxInt /* Enabled bits */ );
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL
-+ ("MSVDX: Error Mtx did not return int within a resonable time\n");
-+
-+ return ret;
-+ }
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: Got MTX Int\n");
-+
-+ /* Got it so clear the bit */
-+ PSB_WMSVDX32 (MtxInt, MSVDX_INTERRUPT_CLEAR);
-+
-+ return ret;
-+}
-+
-+void
-+psb_write_mtx_core_reg (struct drm_psb_private *dev_priv,
-+ const uint32_t ui32CoreRegister,
-+ const uint32_t ui32Val)
-+{
-+ uint32_t ui32Reg = 0;
-+
-+ /* Put data in MTX_RW_DATA */
-+ PSB_WMSVDX32 (ui32Val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
-+
-+ /* DREADY is set to 0 and request a write */
-+ ui32Reg = ui32CoreRegister;
-+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
-+ MTX_RNW, 0);
-+ REGIO_WRITE_FIELD_LITE (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
-+ MTX_DREADY, 0);
-+ PSB_WMSVDX32 (ui32Reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
-+
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK, /* Required Value */
-+ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
-+}
-+
-+void
-+psb_upload_fw (struct drm_psb_private *dev_priv, const uint32_t ui32DataMem,
-+ uint32_t ui32RamBankSize, uint32_t ui32Address,
-+ const unsigned int uiWords, const uint32_t * const pui32Data)
-+{
-+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
-+ (uint32_t) ~ 0;
-+ uint32_t ui32AccessControl;
-+
-+ /* Save the access control register... */
-+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ /* Wait for MCMSTAT to become be idle 1 */
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
-+ 0xffffffff /* Enables */ );
-+
-+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
-+ {
-+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
-+
-+ if (ui32RamId != ui32CurrBank)
-+ {
-+ ui32Addr = ui32Address >> 2;
-+
-+ ui32Ctrl = 0;
-+
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL,
-+ MTX_MCMID, ui32RamId);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL,
-+ MTX_MCM_ADDR, ui32Addr);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
-+
-+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ ui32CurrBank = ui32RamId;
-+ }
-+ ui32Address += 4;
-+
-+ PSB_WMSVDX32 (pui32Data[ui32Loop], MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
-+
-+ /* Wait for MCMSTAT to become be idle 1 */
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
-+ 0xffffffff /* Enables */ );
-+ }
-+ PSB_DEBUG_GENERAL ("MSVDX: Upload done\n");
-+
-+ /* Restore the access control register... */
-+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+}
-+
-+static int
-+psb_verify_fw (struct drm_psb_private *dev_priv,
-+ const uint32_t ui32RamBankSize,
-+ const uint32_t ui32DataMem, uint32_t ui32Address,
-+ const uint32_t uiWords, const uint32_t * const pui32Data)
-+{
-+ uint32_t ui32Loop, ui32Ctrl, ui32RamId, ui32Addr, ui32CurrBank =
-+ (uint32_t) ~ 0;
-+ uint32_t ui32AccessControl;
-+ int ret = 0;
-+
-+ /* Save the access control register... */
-+ ui32AccessControl = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ /* Wait for MCMSTAT to become be idle 1 */
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
-+ 0xffffffff /* Enables */ );
-+
-+ for (ui32Loop = 0; ui32Loop < uiWords; ui32Loop++)
-+ {
-+ uint32_t ui32ReadBackVal;
-+ ui32RamId = ui32DataMem + (ui32Address / ui32RamBankSize);
-+
-+ if (ui32RamId != ui32CurrBank)
-+ {
-+ ui32Addr = ui32Address >> 2;
-+ ui32Ctrl = 0;
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL,
-+ MTX_MCMID, ui32RamId);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL,
-+ MTX_MCM_ADDR, ui32Addr);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMAI, 1);
-+ REGIO_WRITE_FIELD_LITE (ui32Ctrl,
-+ MSVDX_MTX_RAM_ACCESS_CONTROL, MTX_MCMR, 1);
-+
-+ PSB_WMSVDX32 (ui32Ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ ui32CurrBank = ui32RamId;
-+ }
-+ ui32Address += 4;
-+
-+ /* Wait for MCMSTAT to become be idle 1 */
-+ psb_wait_for_register (dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS, 1, /* Required Value */
-+ 0xffffffff /* Enables */ );
-+
-+ ui32ReadBackVal = PSB_RMSVDX32 (MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
-+ if (pui32Data[ui32Loop] != ui32ReadBackVal)
-+ {
-+ DRM_ERROR
-+ ("psb: Firmware validation fails at index=%08x\n", ui32Loop);
-+ ret = 1;
-+ break;
-+ }
-+ }
-+
-+ /* Restore the access control register... */
-+ PSB_WMSVDX32 (ui32AccessControl, MSVDX_MTX_RAM_ACCESS_CONTROL);
-+
-+ return ret;
-+}
-+
-+static uint32_t *
-+msvdx_get_fw (struct drm_device *dev,
-+ const struct firmware **raw, uint8_t * name)
-+{
-+ int rc;
-+ int *ptr = NULL;
-+
-+ rc = request_firmware (raw, name, &dev->pdev->dev);
-+ if (rc < 0)
-+ {
-+ DRM_ERROR ("MSVDX: %s request_firmware failed: Reason %d\n", name, rc);
-+ return NULL;
-+ }
-+
-+ if ((*raw)->size < sizeof (struct msvdx_fw))
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX: %s is is not correct size(%zd)\n",
-+ name, (*raw)->size);
-+ return NULL;
-+ }
-+
-+ ptr = (int *) ((*raw))->data;
-+
-+ if (!ptr)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX: Failed to load %s\n", name);
-+ return NULL;
-+ }
-+ /*another sanity check... */
-+ if ((*raw)->size !=
-+ (sizeof (struct msvdx_fw) +
-+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
-+ sizeof (uint32_t) * ((struct msvdx_fw *) ptr)->data_size))
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX: %s is is not correct size(%zd)\n",
-+ name, (*raw)->size);
-+ return NULL;
-+ }
-+ return ptr;
-+}
-+
-+static int
-+psb_setup_fw (struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ int ret = 0;
-+
-+ uint32_t ram_bank_size;
-+ struct msvdx_fw *fw;
-+ uint32_t *fw_ptr = NULL;
-+ uint32_t *text_ptr = NULL;
-+ uint32_t *data_ptr = NULL;
-+ const struct firmware *raw = NULL;
-+ /* todo : Assert the clock is on - if not turn it on to upload code */
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: psb_setup_fw\n");
-+
-+ /* Reset MTX */
-+ PSB_WMSVDX32 (MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK, MSVDX_MTX_SOFT_RESET);
-+
-+ /* Initialses Communication controll area to 0 */
-+ if(dev_priv->psb_rev_id >= POULSBO_D1)
-+ {
-+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1 or later revision.\n");
-+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1, MSVDX_COMMS_OFFSET_FLAGS);
-+ }
-+ else
-+ {
-+ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0 or earlier revision.\n");
-+ PSB_WMSVDX32 (MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0, MSVDX_COMMS_OFFSET_FLAGS);
-+ }
-+
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_MSG_COUNTER);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_SIGNATURE);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_RD_INDEX);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_RD_INDEX);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
-+ PSB_WMSVDX32 (0, MSVDX_COMMS_FW_STATUS);
-+
-+ /* read register bank size */
-+ {
-+ uint32_t ui32BankSize, ui32Reg;
-+ ui32Reg = PSB_RMSVDX32 (MSVDX_MTX_RAM_BANK);
-+ ui32BankSize =
-+ REGIO_READ_FIELD (ui32Reg, MSVDX_MTX_RAM_BANK, CR_MTX_RAM_BANK_SIZE);
-+ ram_bank_size = (uint32_t) (1 << (ui32BankSize + 2));
-+ }
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: RAM bank size = %d bytes\n", ram_bank_size);
-+
-+ fw_ptr = msvdx_get_fw (dev, &raw, "msvdx_fw.bin");
-+
-+ if (!fw_ptr)
-+ {
-+ DRM_ERROR ("psb: No valid msvdx_fw.bin firmware found.\n");
-+ ret = 1;
-+ goto out;
-+ }
-+
-+ fw = (struct msvdx_fw *) fw_ptr;
-+ if (fw->ver != 0x02)
-+ {
-+ DRM_ERROR
-+ ("psb: msvdx_fw.bin firmware version mismatch, got version=%02x expected version=%02x\n",
-+ fw->ver, 0x02);
-+ ret = 1;
-+ goto out;
-+ }
-+
-+ text_ptr = (uint32_t *) ((uint8_t *) fw_ptr + sizeof (struct msvdx_fw));
-+ data_ptr = text_ptr + fw->text_size;
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: Retrieved pointers for firmware\n");
-+ PSB_DEBUG_GENERAL ("MSVDX: text_size: %d\n", fw->text_size);
-+ PSB_DEBUG_GENERAL ("MSVDX: data_size: %d\n", fw->data_size);
-+ PSB_DEBUG_GENERAL ("MSVDX: data_location: 0x%x\n", fw->data_location);
-+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of text: 0x%x\n", *text_ptr);
-+ PSB_DEBUG_GENERAL ("MSVDX: First 4 bytes of data: 0x%x\n", *data_ptr);
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: Uploading firmware\n");
-+ psb_upload_fw (dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
-+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr);
-+ psb_upload_fw (dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
-+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr);
-+
-+ /*todo : Verify code upload possibly only in debug */
-+ if (psb_verify_fw
-+ (dev_priv, ram_bank_size, MTX_CORE_CODE_MEM,
-+ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size, text_ptr))
-+ {
-+ /* Firmware code upload failed */
-+ ret = 1;
-+ goto out;
-+ }
-+ if (psb_verify_fw
-+ (dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
-+ fw->data_location - MTX_DATA_BASE, fw->data_size, data_ptr))
-+ {
-+ /* Firmware data upload failed */
-+ ret = 1;
-+ goto out;
-+ }
-+
-+ /* -- Set starting PC address */
-+ psb_write_mtx_core_reg (dev_priv, MTX_PC, PC_START_ADDRESS);
-+
-+ /* -- Turn on the thread */
-+ PSB_WMSVDX32 (MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
-+
-+ /* Wait for the signature value to be written back */
-+ ret = psb_wait_for_register (dev_priv, MSVDX_COMMS_SIGNATURE, MSVDX_COMMS_SIGNATURE_VALUE, /* Required value */
-+ 0xffffffff /* Enabled bits */ );
-+ if (ret)
-+ {
-+ DRM_ERROR ("psb: MSVDX firmware fails to initialize.\n");
-+ goto out;
-+ }
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: MTX Initial indications OK\n");
-+ PSB_DEBUG_GENERAL ("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
-+ MSVDX_COMMS_AREA_ADDR);
-+out:
-+ if (raw)
-+ {
-+ PSB_DEBUG_GENERAL ("MSVDX releasing firmware resouces....\n");
-+ release_firmware (raw);
-+ }
-+ return ret;
-+}
-+
-+static void
-+psb_free_ccb (struct drm_buffer_object **ccb)
-+{
-+ drm_bo_usage_deref_unlocked (ccb);
-+ *ccb = NULL;
-+}
-+
-+/*******************************************************************************
-+
-+ @Function psb_msvdx_reset
-+
-+ @Description
-+
-+ Reset chip and disable interrupts.
-+
-+ @Input psDeviceNode - device info. structure
-+
-+ @Return 0 - Success
-+ 1 - Failure
-+
-+******************************************************************************/
-+int
-+psb_msvdx_reset (struct drm_psb_private *dev_priv)
-+{
-+ int ret = 0;
-+
-+ /* Issue software reset */
-+ PSB_WMSVDX32 (msvdx_sw_reset_all, MSVDX_CONTROL);
-+
-+ ret = psb_wait_for_register (dev_priv, MSVDX_CONTROL, 0, /* Required value */
-+ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK
-+ /* Enabled bits */ );
-+
-+ if (!ret)
-+ {
-+ /* Clear interrupt enabled flag */
-+ PSB_WMSVDX32 (0, MSVDX_HOST_INTERRUPT_ENABLE);
-+
-+ /* Clear any pending interrupt flags */
-+ PSB_WMSVDX32 (0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
-+ }
-+
-+ mutex_destroy (&dev_priv->msvdx_mutex);
-+
-+ return ret;
-+}
-+
-+static int
-+psb_allocate_ccb (struct drm_device *dev,
-+ struct drm_buffer_object **ccb,
-+ uint32_t * base_addr, int size)
-+{
-+ int ret;
-+ struct drm_bo_kmap_obj tmp_kmap;
-+ int is_iomem;
-+
-+ ret = drm_buffer_object_create (dev, size,
-+ drm_bo_type_kernel,
-+ DRM_BO_FLAG_READ |
-+ DRM_PSB_FLAG_MEM_KERNEL |
-+ DRM_BO_FLAG_NO_EVICT,
-+ DRM_BO_HINT_DONT_FENCE, 0, 0, ccb);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("Failed to allocate CCB.\n");
-+ *ccb = NULL;
-+ return 1;
-+ }
-+
-+ ret = drm_bo_kmap (*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
-+ if (ret)
-+ {
-+ PSB_DEBUG_GENERAL ("drm_bo_kmap failed ret: %d\n", ret);
-+ drm_bo_usage_deref_unlocked (ccb);
-+ *ccb = NULL;
-+ return 1;
-+ }
-+
-+ memset (drm_bmo_virtual (&tmp_kmap, &is_iomem), 0, size);
-+ drm_bo_kunmap (&tmp_kmap);
-+
-+ *base_addr = (*ccb)->offset;
-+ return 0;
-+}
-+
-+int
-+psb_msvdx_init (struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ uint32_t ui32Cmd;
-+ int ret;
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: psb_msvdx_init\n");
-+
-+ /*Initialize command msvdx queueing */
-+ INIT_LIST_HEAD (&dev_priv->msvdx_queue);
-+ mutex_init (&dev_priv->msvdx_mutex);
-+ spin_lock_init (&dev_priv->msvdx_lock);
-+ dev_priv->msvdx_busy = 0;
-+
-+ /*figure out the stepping*/
-+ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &dev_priv->psb_rev_id );
-+
-+ /* Enable Clocks */
-+ PSB_DEBUG_GENERAL ("Enabling clocks\n");
-+ PSB_WMSVDX32 (clk_enable_all, MSVDX_MAN_CLK_ENABLE);
-+
-+ /* Enable MMU by removing all bypass bits */
-+ PSB_WMSVDX32 (0, MSVDX_MMU_CONTROL0);
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: Setting up RENDEC\n");
-+ /* Allocate device virtual memory as required by rendec.... */
-+ if (!dev_priv->ccb0)
-+ {
-+ ret =
-+ psb_allocate_ccb (dev, &dev_priv->ccb0,
-+ &dev_priv->base_addr0, RENDEC_A_SIZE);
-+ if (ret)
-+ goto err_exit;
-+ }
-+
-+ if (!dev_priv->ccb1)
-+ {
-+ ret =
-+ psb_allocate_ccb (dev, &dev_priv->ccb1,
-+ &dev_priv->base_addr1, RENDEC_B_SIZE);
-+ if (ret)
-+ goto err_exit;
-+ }
-+
-+ PSB_DEBUG_GENERAL ("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
-+ dev_priv->base_addr0, dev_priv->base_addr1);
-+
-+ PSB_WMSVDX32 (dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
-+ PSB_WMSVDX32 (dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
-+
-+ ui32Cmd = 0;
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
-+ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE,
-+ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_BUFFER_SIZE);
-+
-+ ui32Cmd = 0;
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
-+ RENDEC_DECODE_START_SIZE, 0);
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_W, 1);
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1, RENDEC_BURST_SIZE_R, 1);
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL1,
-+ RENDEC_EXTERNAL_MEMORY, 1);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL1);
-+
-+ ui32Cmd = 0x00101010;
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT0);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT1);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT2);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT3);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT4);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTEXT5);
-+
-+ ui32Cmd = 0;
-+ REGIO_WRITE_FIELD (ui32Cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE, 1);
-+ PSB_WMSVDX32 (ui32Cmd, MSVDX_RENDEC_CONTROL0);
-+
-+ ret = psb_setup_fw (dev);
-+ if (ret)
-+ goto err_exit;
-+
-+ PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
-+
-+ return 0;
-+
-+err_exit:
-+ if (dev_priv->ccb0)
-+ psb_free_ccb (&dev_priv->ccb0);
-+ if (dev_priv->ccb1)
-+ psb_free_ccb (&dev_priv->ccb1);
-+
-+ return 1;
-+}
-+
-+int
-+psb_msvdx_uninit (struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+
-+ /*Reset MSVDX chip */
-+ psb_msvdx_reset (dev_priv);
-+
-+// PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
-+ printk("set the msvdx clock to 0 in the %s\n", __FUNCTION__);
-+ PSB_WMSVDX32 (0, MSVDX_MAN_CLK_ENABLE);
-+
-+ /*Clean up resources...*/
-+ if (dev_priv->ccb0)
-+ psb_free_ccb (&dev_priv->ccb0);
-+ if (dev_priv->ccb1)
-+ psb_free_ccb (&dev_priv->ccb1);
-+
-+ return 0;
-+}
-+
-+int psb_hw_info_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct drm_psb_hw_info *hw_info = data;
-+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
-+
-+ hw_info->rev_id = dev_priv->psb_rev_id;
-+
-+ /*read the fuse info to determine the caps*/
-+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
-+ pci_read_config_dword(pci_root, 0xD4, &hw_info->caps);
-+
-+ PSB_DEBUG_GENERAL("MSVDX: PSB caps: 0x%x\n", hw_info->caps);
-+ return 0;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_reg.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_reg.h 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,562 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+#ifndef _PSB_REG_H_
-+#define _PSB_REG_H_
-+
-+#define PSB_CR_CLKGATECTL 0x0000
-+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
-+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
-+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
-+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
-+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
-+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
-+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
-+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
-+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
-+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
-+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
-+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
-+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
-+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
-+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
-+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
-+
-+#define PSB_CR_CORE_ID 0x0010
-+#define _PSB_CC_ID_ID_SHIFT (16)
-+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
-+#define _PSB_CC_ID_CONFIG_SHIFT (0)
-+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
-+
-+#define PSB_CR_CORE_REVISION 0x0014
-+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
-+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
-+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
-+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
-+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
-+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
-+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
-+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
-+
-+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
-+
-+#define PSB_CR_SOFT_RESET 0x0080
-+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
-+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
-+#define _PSB_CS_RESET_USE_RESET (1 << 4)
-+#define _PSB_CS_RESET_TA_RESET (1 << 3)
-+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
-+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
-+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
-+
-+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
-+
-+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
-+
-+#define PSB_CR_EVENT_STATUS2 0x0118
-+
-+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
-+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
-+
-+#define PSB_CR_EVENT_STATUS 0x012C
-+
-+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
-+
-+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
-+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
-+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
-+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
-+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
-+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
-+#define _PSB_CE_SW_EVENT (1 << 14)
-+#define _PSB_CE_TA_FINISHED (1 << 13)
-+#define _PSB_CE_TA_TERMINATE (1 << 12)
-+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
-+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
-+
-+
-+#define PSB_USE_OFFSET_MASK 0x0007FFFF
-+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
-+#define PSB_CR_USE_CODE_BASE0 0x0A0C
-+#define PSB_CR_USE_CODE_BASE1 0x0A10
-+#define PSB_CR_USE_CODE_BASE2 0x0A14
-+#define PSB_CR_USE_CODE_BASE3 0x0A18
-+#define PSB_CR_USE_CODE_BASE4 0x0A1C
-+#define PSB_CR_USE_CODE_BASE5 0x0A20
-+#define PSB_CR_USE_CODE_BASE6 0x0A24
-+#define PSB_CR_USE_CODE_BASE7 0x0A28
-+#define PSB_CR_USE_CODE_BASE8 0x0A2C
-+#define PSB_CR_USE_CODE_BASE9 0x0A30
-+#define PSB_CR_USE_CODE_BASE10 0x0A34
-+#define PSB_CR_USE_CODE_BASE11 0x0A38
-+#define PSB_CR_USE_CODE_BASE12 0x0A3C
-+#define PSB_CR_USE_CODE_BASE13 0x0A40
-+#define PSB_CR_USE_CODE_BASE14 0x0A44
-+#define PSB_CR_USE_CODE_BASE15 0x0A48
-+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
-+#define _PSB_CUC_BASE_DM_SHIFT (25)
-+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
-+#define _PSB_CUC_BASE_ADDR_SHIFT (0) // 1024-bit aligned address?
-+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
-+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
-+#define _PSB_CUC_DM_VERTEX (0)
-+#define _PSB_CUC_DM_PIXEL (1)
-+#define _PSB_CUC_DM_RESERVED (2)
-+#define _PSB_CUC_DM_EDM (3)
-+
-+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
-+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) // 1MB aligned address
-+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
-+
-+#define PSB_CR_EVENT_KICKER 0x0AC4
-+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) // 128-bit aligned address
-+
-+#define PSB_CR_EVENT_KICK 0x0AC8
-+#define _PSB_CE_KICK_NOW (1 << 0)
-+
-+
-+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
-+
-+#define PSB_CR_BIF_CTRL 0x0C00
-+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
-+#define _PSB_CB_CTRL_INVALDC (1 << 3)
-+#define _PSB_CB_CTRL_FLUSH (1 << 2)
-+
-+#define PSB_CR_BIF_INT_STAT 0x0C04
-+
-+#define PSB_CR_BIF_FAULT 0x0C08
-+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
-+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
-+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
-+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
-+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
-+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
-+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
-+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
-+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
-+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
-+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
-+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
-+
-+#define PSB_CR_BIF_BANK0 0x0C78
-+
-+#define PSB_CR_BIF_BANK1 0x0C7C
-+
-+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
-+
-+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
-+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
-+
-+#define PSB_CR_2D_SOCIF 0x0E18
-+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
-+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
-+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
-+
-+#define PSB_CR_2D_BLIT_STATUS 0x0E04
-+#define _PSB_C2B_STATUS_BUSY (1 << 24)
-+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
-+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
-+
-+/*
-+ * 2D defs.
-+ */
-+
-+/*
-+ * 2D Slave Port Data : Block Header's Object Type
-+ */
-+
-+#define PSB_2D_CLIP_BH (0x00000000)
-+#define PSB_2D_PAT_BH (0x10000000)
-+#define PSB_2D_CTRL_BH (0x20000000)
-+#define PSB_2D_SRC_OFF_BH (0x30000000)
-+#define PSB_2D_MASK_OFF_BH (0x40000000)
-+#define PSB_2D_RESERVED1_BH (0x50000000)
-+#define PSB_2D_RESERVED2_BH (0x60000000)
-+#define PSB_2D_FENCE_BH (0x70000000)
-+#define PSB_2D_BLIT_BH (0x80000000)
-+#define PSB_2D_SRC_SURF_BH (0x90000000)
-+#define PSB_2D_DST_SURF_BH (0xA0000000)
-+#define PSB_2D_PAT_SURF_BH (0xB0000000)
-+#define PSB_2D_SRC_PAL_BH (0xC0000000)
-+#define PSB_2D_PAT_PAL_BH (0xD0000000)
-+#define PSB_2D_MASK_SURF_BH (0xE0000000)
-+#define PSB_2D_FLUSH_BH (0xF0000000)
-+
-+/*
-+ * Clip Definition block (PSB_2D_CLIP_BH)
-+ */
-+#define PSB_2D_CLIPCOUNT_MAX (1)
-+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
-+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
-+#define PSB_2D_CLIPCOUNT_SHIFT (0)
-+// clip rectangle min & max
-+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
-+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
-+#define PSB_2D_CLIP_XMAX_SHIFT (12)
-+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
-+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
-+#define PSB_2D_CLIP_XMIN_SHIFT (0)
-+// clip rectangle offset
-+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
-+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
-+#define PSB_2D_CLIP_YMAX_SHIFT (12)
-+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
-+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
-+#define PSB_2D_CLIP_YMIN_SHIFT (0)
-+
-+/*
-+ * Pattern Control (PSB_2D_PAT_BH)
-+ */
-+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
-+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
-+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
-+#define PSB_2D_PAT_WIDTH_SHIFT (5)
-+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
-+#define PSB_2D_PAT_YSTART_SHIFT (10)
-+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
-+#define PSB_2D_PAT_XSTART_SHIFT (15)
-+
-+/*
-+ * 2D Control block (PSB_2D_CTRL_BH)
-+ */
-+// Present Flags
-+#define PSB_2D_SRCCK_CTRL (0x00000001)
-+#define PSB_2D_DSTCK_CTRL (0x00000002)
-+#define PSB_2D_ALPHA_CTRL (0x00000004)
-+// Colour Key Colour (SRC/DST)
-+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
-+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
-+#define PSB_2D_CK_COL_SHIFT (0)
-+// Colour Key Mask (SRC/DST)
-+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
-+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
-+#define PSB_2D_CK_MASK_SHIFT (0)
-+// Alpha Control (Alpha/RGB)
-+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
-+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
-+#define PSB_2D_GBLALPHA_SHIFT (12)
-+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
-+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
-+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
-+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
-+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
-+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
-+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
-+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
-+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
-+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
-+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
-+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
-+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
-+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
-+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
-+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
-+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
-+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
-+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
-+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
-+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
-+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
-+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
-+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
-+
-+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
-+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
-+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
-+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
-+
-+/*
-+ *Source Offset (PSB_2D_SRC_OFF_BH)
-+ */
-+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
-+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
-+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
-+
-+/*
-+ * Mask Offset (PSB_2D_MASK_OFF_BH)
-+ */
-+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
-+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
-+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
-+
-+/*
-+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
-+ */
-+
-+/*
-+ *Blit Rectangle (PSB_2D_BLIT_BH)
-+ */
-+
-+#define PSB_2D_ROT_MASK (3<<25)
-+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
-+#define PSB_2D_ROT_NONE (0<<25)
-+#define PSB_2D_ROT_90DEGS (1<<25)
-+#define PSB_2D_ROT_180DEGS (2<<25)
-+#define PSB_2D_ROT_270DEGS (3<<25)
-+
-+#define PSB_2D_COPYORDER_MASK (3<<23)
-+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
-+#define PSB_2D_COPYORDER_TL2BR (0<<23)
-+#define PSB_2D_COPYORDER_BR2TL (1<<23)
-+#define PSB_2D_COPYORDER_TR2BL (2<<23)
-+#define PSB_2D_COPYORDER_BL2TR (3<<23)
-+
-+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
-+#define PSB_2D_DSTCK_DISABLE (0x00000000)
-+#define PSB_2D_DSTCK_PASS (0x00200000)
-+#define PSB_2D_DSTCK_REJECT (0x00400000)
-+
-+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
-+#define PSB_2D_SRCCK_DISABLE (0x00000000)
-+#define PSB_2D_SRCCK_PASS (0x00080000)
-+#define PSB_2D_SRCCK_REJECT (0x00100000)
-+
-+#define PSB_2D_CLIP_ENABLE (0x00040000)
-+
-+#define PSB_2D_ALPHA_ENABLE (0x00020000)
-+
-+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
-+#define PSB_2D_PAT_MASK (0x00010000)
-+#define PSB_2D_USE_PAT (0x00010000)
-+#define PSB_2D_USE_FILL (0x00000000)
-+/*
-+ * Tungsten Graphics note on rop codes: If rop A and rop B are
-+ * identical, the mask surface will not be read and need not be
-+ * set up.
-+ */
-+
-+#define PSB_2D_ROP3B_MASK (0x0000FF00)
-+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
-+#define PSB_2D_ROP3B_SHIFT (8)
-+// rop code A
-+#define PSB_2D_ROP3A_MASK (0x000000FF)
-+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
-+#define PSB_2D_ROP3A_SHIFT (0)
-+
-+#define PSB_2D_ROP4_MASK (0x0000FFFF)
-+/*
-+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
-+ * Fill Colour RGBA8888
-+ */
-+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
-+#define PSB_2D_FILLCOLOUR_SHIFT (0)
-+/*
-+ * DWORD1: (Always Present)
-+ * X Start (Dest)
-+ * Y Start (Dest)
-+ */
-+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
-+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
-+#define PSB_2D_DST_XSTART_SHIFT (12)
-+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
-+#define PSB_2D_DST_YSTART_SHIFT (0)
-+/*
-+ * DWORD2: (Always Present)
-+ * X Size (Dest)
-+ * Y Size (Dest)
-+ */
-+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
-+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
-+#define PSB_2D_DST_XSIZE_SHIFT (12)
-+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
-+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
-+#define PSB_2D_DST_YSIZE_SHIFT (0)
-+
-+/*
-+ * Source Surface (PSB_2D_SRC_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
-+#define PSB_2D_SRC_1_PAL (0x00000000)
-+#define PSB_2D_SRC_2_PAL (0x00008000)
-+#define PSB_2D_SRC_4_PAL (0x00010000)
-+#define PSB_2D_SRC_8_PAL (0x00018000)
-+#define PSB_2D_SRC_8_ALPHA (0x00020000)
-+#define PSB_2D_SRC_4_ALPHA (0x00028000)
-+#define PSB_2D_SRC_332RGB (0x00030000)
-+#define PSB_2D_SRC_4444ARGB (0x00038000)
-+#define PSB_2D_SRC_555RGB (0x00040000)
-+#define PSB_2D_SRC_1555ARGB (0x00048000)
-+#define PSB_2D_SRC_565RGB (0x00050000)
-+#define PSB_2D_SRC_0888ARGB (0x00058000)
-+#define PSB_2D_SRC_8888ARGB (0x00060000)
-+#define PSB_2D_SRC_8888UYVY (0x00068000)
-+#define PSB_2D_SRC_RESERVED (0x00070000)
-+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
-+
-+
-+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_SRC_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_SRC_ADDR_SHIFT (2)
-+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
-+#define PSB_2D_PAT_1_PAL (0x00000000)
-+#define PSB_2D_PAT_2_PAL (0x00008000)
-+#define PSB_2D_PAT_4_PAL (0x00010000)
-+#define PSB_2D_PAT_8_PAL (0x00018000)
-+#define PSB_2D_PAT_8_ALPHA (0x00020000)
-+#define PSB_2D_PAT_4_ALPHA (0x00028000)
-+#define PSB_2D_PAT_332RGB (0x00030000)
-+#define PSB_2D_PAT_4444ARGB (0x00038000)
-+#define PSB_2D_PAT_555RGB (0x00040000)
-+#define PSB_2D_PAT_1555ARGB (0x00048000)
-+#define PSB_2D_PAT_565RGB (0x00050000)
-+#define PSB_2D_PAT_0888ARGB (0x00058000)
-+#define PSB_2D_PAT_8888ARGB (0x00060000)
-+
-+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_PAT_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_PAT_ADDR_SHIFT (2)
-+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Destination Surface (PSB_2D_DST_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
-+#define PSB_2D_DST_332RGB (0x00030000)
-+#define PSB_2D_DST_4444ARGB (0x00038000)
-+#define PSB_2D_DST_555RGB (0x00040000)
-+#define PSB_2D_DST_1555ARGB (0x00048000)
-+#define PSB_2D_DST_565RGB (0x00050000)
-+#define PSB_2D_DST_0888ARGB (0x00058000)
-+#define PSB_2D_DST_8888ARGB (0x00060000)
-+#define PSB_2D_DST_8888AYUV (0x00070000)
-+
-+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_DST_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_DST_ADDR_SHIFT (2)
-+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Mask Surface (PSB_2D_MASK_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_MASK_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_MASK_ADDR_SHIFT (2)
-+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Source Palette (PSB_2D_SRC_PAL_BH)
-+ */
-+
-+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
-+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
-+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
-+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
-+
-+/*
-+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
-+ */
-+
-+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
-+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
-+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
-+#define PSB_2D_PATPAL_BYTEALIGN (1024)
-+
-+/*
-+ * Rop3 Codes (2 LS bytes)
-+ */
-+
-+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
-+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
-+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
-+#define PSB_2D_ROP3_BLACKNESS (0x0000)
-+#define PSB_2D_ROP3_SRC (0xCC)
-+#define PSB_2D_ROP3_PAT (0xF0)
-+#define PSB_2D_ROP3_DST (0xAA)
-+
-+
-+/*
-+ * Sizes.
-+ */
-+
-+#define PSB_SCENE_HW_COOKIE_SIZE 16
-+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
-+
-+/*
-+ * Scene stuff.
-+ */
-+
-+#define PSB_NUM_HW_SCENES 2
-+
-+/*
-+ * Scheduler completion actions.
-+ */
-+
-+#define PSB_RASTER_BLOCK 0
-+#define PSB_RASTER 1
-+#define PSB_RETURN 2
-+#define PSB_TA 3
-+
-+
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_regman.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_regman.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,175 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+
-+struct psb_use_reg {
-+ struct drm_reg reg;
-+ struct drm_psb_private *dev_priv;
-+ uint32_t reg_seq;
-+ uint32_t base;
-+ uint32_t data_master;
-+};
-+
-+struct psb_use_reg_data {
-+ uint32_t base;
-+ uint32_t size;
-+ uint32_t data_master;
-+};
-+
-+static int psb_use_reg_reusable(const struct drm_reg *reg, const void *data)
-+{
-+ struct psb_use_reg *use_reg =
-+ container_of(reg, struct psb_use_reg, reg);
-+ struct psb_use_reg_data *use_data = (struct psb_use_reg_data *)data;
-+
-+ return ((use_reg->base <= use_data->base) &&
-+ (use_reg->base + PSB_USE_OFFSET_SIZE >
-+ use_data->base + use_data->size) &&
-+ use_reg->data_master == use_data->data_master);
-+}
-+
-+static int psb_use_reg_set(struct psb_use_reg *use_reg,
-+ const struct psb_use_reg_data *use_data)
-+{
-+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
-+
-+ if (use_reg->reg.fence == NULL)
-+ use_reg->data_master = use_data->data_master;
-+
-+ if (use_reg->reg.fence == NULL &&
-+ !psb_use_reg_reusable(&use_reg->reg, (const void *)use_data)) {
-+
-+ use_reg->base = use_data->base & ~PSB_USE_OFFSET_MASK;
-+ use_reg->data_master = use_data->data_master;
-+
-+ if (!psb_use_reg_reusable(&use_reg->reg,
-+ (const void *)use_data)) {
-+ DRM_ERROR("USE base mechanism didn't support "
-+ "buffer size or alignment\n");
-+ return -EINVAL;
-+ }
-+
-+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
-+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
-+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
-+ }
-+ return 0;
-+
-+}
-+
-+int psb_grab_use_base(struct drm_psb_private *dev_priv,
-+ unsigned long base,
-+ unsigned long size,
-+ unsigned int data_master,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int no_wait,
-+ int interruptible, int *r_reg, uint32_t * r_offset)
-+{
-+ struct psb_use_reg_data use_data = {
-+ .base = base,
-+ .size = size,
-+ .data_master = data_master
-+ };
-+ int ret;
-+
-+ struct drm_reg *reg;
-+ struct psb_use_reg *use_reg;
-+
-+ ret = drm_regs_alloc(&dev_priv->use_manager,
-+ (const void *)&use_data,
-+ fence_class,
-+ fence_type, interruptible, no_wait, &reg);
-+ if (ret)
-+ return ret;
-+
-+ use_reg = container_of(reg, struct psb_use_reg, reg);
-+ ret = psb_use_reg_set(use_reg, &use_data);
-+
-+ if (ret)
-+ return ret;
-+
-+ *r_reg = use_reg->reg_seq;
-+ *r_offset = base - use_reg->base;
-+
-+ return 0;
-+};
-+
-+static void psb_use_reg_destroy(struct drm_reg *reg)
-+{
-+ struct psb_use_reg *use_reg =
-+ container_of(reg, struct psb_use_reg, reg);
-+ struct drm_psb_private *dev_priv = use_reg->dev_priv;
-+
-+ PSB_WSGX32(PSB_ALPL(0, _PSB_CUC_BASE_ADDR),
-+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
-+
-+ drm_free(use_reg, sizeof(*use_reg), DRM_MEM_DRIVER);
-+}
-+
-+int psb_init_use_base(struct drm_psb_private *dev_priv,
-+ unsigned int reg_start, unsigned int reg_num)
-+{
-+ struct psb_use_reg *use_reg;
-+ int i;
-+ int ret = 0;
-+
-+ mutex_lock(&dev_priv->cmdbuf_mutex);
-+
-+ drm_regs_init(&dev_priv->use_manager,
-+ &psb_use_reg_reusable, &psb_use_reg_destroy);
-+
-+ for (i = reg_start; i < reg_start + reg_num; ++i) {
-+ use_reg = drm_calloc(1, sizeof(*use_reg), DRM_MEM_DRIVER);
-+ if (!use_reg) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ use_reg->dev_priv = dev_priv;
-+ use_reg->reg_seq = i;
-+ use_reg->base = 0;
-+ use_reg->data_master = _PSB_CUC_DM_PIXEL;
-+
-+ PSB_WSGX32(PSB_ALPL(use_reg->base, _PSB_CUC_BASE_ADDR) |
-+ (use_reg->data_master << _PSB_CUC_BASE_DM_SHIFT),
-+ PSB_CR_USE_CODE_BASE(use_reg->reg_seq));
-+
-+ drm_regs_add(&dev_priv->use_manager, &use_reg->reg);
-+ }
-+ out:
-+ mutex_unlock(&dev_priv->cmdbuf_mutex);
-+
-+ return ret;
-+
-+}
-+
-+void psb_takedown_use_base(struct drm_psb_private *dev_priv)
-+{
-+ mutex_lock(&dev_priv->cmdbuf_mutex);
-+ drm_regs_free(&dev_priv->use_manager);
-+ mutex_unlock(&dev_priv->cmdbuf_mutex);
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_reset.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_reset.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,374 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors:
-+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "psb_scene.h"
-+#include "psb_msvdx.h"
-+
-+#define PSB_2D_TIMEOUT_MSEC 100
-+
-+void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
-+{
-+ uint32_t val;
-+
-+ val = _PSB_CS_RESET_BIF_RESET |
-+ _PSB_CS_RESET_DPM_RESET |
-+ _PSB_CS_RESET_TA_RESET |
-+ _PSB_CS_RESET_USE_RESET |
-+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
-+
-+ if (reset_2d)
-+ val |= _PSB_CS_RESET_TWOD_RESET;
-+
-+ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
-+ (void)PSB_RSGX32(PSB_CR_SOFT_RESET);
-+
-+ msleep(1);
-+
-+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
-+ wmb();
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
-+ PSB_CR_BIF_CTRL);
-+ wmb();
-+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-+
-+ msleep(1);
-+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
-+ PSB_CR_BIF_CTRL);
-+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-+}
-+
-+void psb_print_pagefault(struct drm_psb_private *dev_priv)
-+{
-+ uint32_t val;
-+ uint32_t addr;
-+
-+ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
-+ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
-+
-+ if (val) {
-+ if (val & _PSB_CBI_STAT_PF_N_RW)
-+ DRM_ERROR("Poulsbo MMU page fault:\n");
-+ else
-+ DRM_ERROR("Poulsbo MMU read / write "
-+ "protection fault:\n");
-+
-+ if (val & _PSB_CBI_STAT_FAULT_CACHE)
-+ DRM_ERROR("\tCache requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_TA)
-+ DRM_ERROR("\tTA requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_VDM)
-+ DRM_ERROR("\tVDM requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_2D)
-+ DRM_ERROR("\t2D requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_PBE)
-+ DRM_ERROR("\tPBE requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_TSP)
-+ DRM_ERROR("\tTSP requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_ISP)
-+ DRM_ERROR("\tISP requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
-+ DRM_ERROR("\tUSSEPDS requestor.\n");
-+ if (val & _PSB_CBI_STAT_FAULT_HOST)
-+ DRM_ERROR("\tHost requestor.\n");
-+
-+ DRM_ERROR("\tMMU failing address is 0x%08x.\n", (unsigned)addr);
-+ }
-+}
-+
-+void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
-+{
-+ struct timer_list *wt = &dev_priv->watchdog_timer;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ if (dev_priv->timer_available && !timer_pending(wt)) {
-+ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
-+ add_timer(wt);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+}
-+
-+#if 0
-+static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
-+ unsigned int engine, int *lockup, int *idle)
-+{
-+ uint32_t received_seq;
-+
-+ received_seq = dev_priv->comm[engine << 4];
-+ spin_lock(&dev_priv->sequence_lock);
-+ *idle = (received_seq == dev_priv->sequence[engine]);
-+ spin_unlock(&dev_priv->sequence_lock);
-+
-+ if (*idle) {
-+ dev_priv->idle[engine] = 1;
-+ *lockup = 0;
-+ return;
-+ }
-+
-+ if (dev_priv->idle[engine]) {
-+ dev_priv->idle[engine] = 0;
-+ dev_priv->last_sequence[engine] = received_seq;
-+ *lockup = 0;
-+ return;
-+ }
-+
-+ *lockup = (dev_priv->last_sequence[engine] == received_seq);
-+}
-+
-+#endif
-+static void psb_watchdog_func(unsigned long data)
-+{
-+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
-+ int lockup;
-+ int msvdx_lockup;
-+ int msvdx_idle;
-+ int lockup_2d;
-+ int idle_2d;
-+ int idle;
-+ unsigned long irq_flags;
-+
-+ psb_scheduler_lockup(dev_priv, &lockup, &idle);
-+ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
-+#if 0
-+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
-+#else
-+ lockup_2d = 0;
-+ idle_2d = 1;
-+#endif
-+ if (lockup || msvdx_lockup || lockup_2d) {
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 0;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+ if (lockup) {
-+ psb_print_pagefault(dev_priv);
-+ schedule_work(&dev_priv->watchdog_wq);
-+ }
-+ if (msvdx_lockup)
-+ schedule_work(&dev_priv->msvdx_watchdog_wq);
-+ }
-+ if (!idle || !msvdx_idle || !idle_2d)
-+ psb_schedule_watchdog(dev_priv);
-+}
-+
-+void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct psb_msvdx_cmd_queue *msvdx_cmd;
-+ struct list_head *list, *next;
-+ /*Flush the msvdx cmd queue and signal all fences in the queue */
-+ list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
-+ msvdx_cmd = list_entry(list, struct psb_msvdx_cmd_queue, head);
-+ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
-+ msvdx_cmd->sequence);
-+ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
-+ psb_fence_error(dev, PSB_ENGINE_VIDEO,
-+ dev_priv->msvdx_current_sequence,
-+ DRM_FENCE_TYPE_EXE, DRM_CMD_HANG);
-+ list_del(list);
-+ kfree(msvdx_cmd->cmd);
-+ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
-+ DRM_MEM_DRIVER);
-+ }
-+}
-+
-+static void psb_msvdx_reset_wq(struct work_struct *work)
-+{
-+ struct drm_psb_private *dev_priv =
-+ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
-+
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ mutex_lock(&dev_priv->msvdx_mutex);
-+ dev_priv->msvdx_needs_reset = 1;
-+ dev_priv->msvdx_current_sequence++;
-+ PSB_DEBUG_GENERAL
-+ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
-+ dev_priv->msvdx_current_sequence);
-+
-+ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
-+ dev_priv->msvdx_current_sequence, DRM_FENCE_TYPE_EXE,
-+ DRM_CMD_HANG);
-+
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 1;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+
-+ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
-+ psb_msvdx_flush_cmd_queue(scheduler->dev);
-+ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
-+
-+ psb_schedule_watchdog(dev_priv);
-+ mutex_unlock(&dev_priv->msvdx_mutex);
-+}
-+
-+static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_xhw_buf buf;
-+ uint32_t bif_ctrl;
-+
-+ INIT_LIST_HEAD(&buf.head);
-+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
-+ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
-+ PSB_WSGX32(bif_ctrl |
-+ _PSB_CB_CTRL_CLEAR_FAULT |
-+ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
-+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-+ msleep(1);
-+ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
-+ (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
-+ return psb_xhw_reset_dpm(dev_priv, &buf);
-+}
-+
-+/*
-+ * Block command submission and reset hardware and schedulers.
-+ */
-+
-+static void psb_reset_wq(struct work_struct *work)
-+{
-+ struct drm_psb_private *dev_priv =
-+ container_of(work, struct drm_psb_private, watchdog_wq);
-+ int lockup_2d;
-+ int idle_2d;
-+ unsigned long irq_flags;
-+ int ret;
-+ int reset_count = 0;
-+ struct psb_xhw_buf buf;
-+ uint32_t xhw_lockup;
-+
-+ /*
-+ * Block command submission.
-+ */
-+
-+ mutex_lock(&dev_priv->reset_mutex);
-+
-+ INIT_LIST_HEAD(&buf.head);
-+ if (psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup) == 0) {
-+ if (xhw_lockup == 0 && psb_extend_raster_timeout(dev_priv) == 0) {
-+ /*
-+ * no lockup, just re-schedule
-+ */
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 1;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
-+ irq_flags);
-+ psb_schedule_watchdog(dev_priv);
-+ mutex_unlock(&dev_priv->reset_mutex);
-+ return;
-+ }
-+ }
-+#if 0
-+ msleep(PSB_2D_TIMEOUT_MSEC);
-+
-+ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
-+
-+ if (lockup_2d) {
-+ uint32_t seq_2d;
-+ spin_lock(&dev_priv->sequence_lock);
-+ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
-+ spin_unlock(&dev_priv->sequence_lock);
-+ psb_fence_error(dev_priv->scheduler.dev,
-+ PSB_ENGINE_2D,
-+ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
-+ DRM_INFO("Resetting 2D engine.\n");
-+ }
-+
-+ psb_reset(dev_priv, lockup_2d);
-+#else
-+ (void)lockup_2d;
-+ (void)idle_2d;
-+ psb_reset(dev_priv, 0);
-+#endif
-+ (void)psb_xhw_mmu_reset(dev_priv);
-+ DRM_INFO("Resetting scheduler.\n");
-+ psb_scheduler_pause(dev_priv);
-+ psb_scheduler_reset(dev_priv, -EBUSY);
-+ psb_scheduler_ta_mem_check(dev_priv);
-+
-+ while (dev_priv->ta_mem &&
-+ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
-+
-+ /*
-+ * TA memory is currently fenced so offsets
-+ * are valid. Reload offsets into the dpm now.
-+ */
-+
-+ struct psb_xhw_buf buf;
-+ INIT_LIST_HEAD(&buf.head);
-+
-+ msleep(100);
-+ DRM_INFO("Trying to reload TA memory.\n");
-+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
-+ PSB_TA_MEM_FLAG_TA |
-+ PSB_TA_MEM_FLAG_RASTER |
-+ PSB_TA_MEM_FLAG_HOSTA |
-+ PSB_TA_MEM_FLAG_HOSTD |
-+ PSB_TA_MEM_FLAG_INIT,
-+ dev_priv->ta_mem->ta_memory->offset,
-+ dev_priv->ta_mem->hw_data->offset,
-+ dev_priv->ta_mem->hw_cookie);
-+ if (!ret)
-+ break;
-+
-+ psb_reset(dev_priv, 0);
-+ (void)psb_xhw_mmu_reset(dev_priv);
-+ }
-+
-+ psb_scheduler_restart(dev_priv);
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 1;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+ mutex_unlock(&dev_priv->reset_mutex);
-+}
-+
-+void psb_watchdog_init(struct drm_psb_private *dev_priv)
-+{
-+ struct timer_list *wt = &dev_priv->watchdog_timer;
-+ unsigned long irq_flags;
-+
-+ dev_priv->watchdog_lock = SPIN_LOCK_UNLOCKED;
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ init_timer(wt);
-+ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
-+ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
-+ wt->data = (unsigned long)dev_priv;
-+ wt->function = &psb_watchdog_func;
-+ dev_priv->timer_available = 1;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+}
-+
-+void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
-+ dev_priv->timer_available = 0;
-+ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
-+ (void)del_timer_sync(&dev_priv->watchdog_timer);
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_scene.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_scene.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,531 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_scene.h"
-+
-+void psb_clear_scene_atomic(struct psb_scene *scene)
-+{
-+ int i;
-+ struct page *page;
-+ void *v;
-+
-+ for (i = 0; i < scene->clear_num_pages; ++i) {
-+ page = drm_ttm_get_page(scene->hw_data->ttm,
-+ scene->clear_p_start + i);
-+ if (in_irq())
-+ v = kmap_atomic(page, KM_IRQ0);
-+ else
-+ v = kmap_atomic(page, KM_USER0);
-+
-+ memset(v, 0, PAGE_SIZE);
-+
-+ if (in_irq())
-+ kunmap_atomic(v, KM_IRQ0);
-+ else
-+ kunmap_atomic(v, KM_USER0);
-+ }
-+}
-+
-+int psb_clear_scene(struct psb_scene *scene)
-+{
-+ struct drm_bo_kmap_obj bmo;
-+ int is_iomem;
-+ void *addr;
-+
-+ int ret = drm_bo_kmap(scene->hw_data, scene->clear_p_start,
-+ scene->clear_num_pages, &bmo);
-+
-+ PSB_DEBUG_RENDER("Scene clear\n");
-+ if (ret)
-+ return ret;
-+
-+ addr = drm_bmo_virtual(&bmo, &is_iomem);
-+ BUG_ON(is_iomem);
-+ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
-+ drm_bo_kunmap(&bmo);
-+
-+ return 0;
-+}
-+
-+static void psb_destroy_scene_devlocked(struct psb_scene *scene)
-+{
-+ if (!scene)
-+ return;
-+
-+ PSB_DEBUG_RENDER("Scene destroy\n");
-+ drm_bo_usage_deref_locked(&scene->hw_data);
-+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
-+}
-+
-+void psb_scene_unref_devlocked(struct psb_scene **scene)
-+{
-+ struct psb_scene *tmp_scene = *scene;
-+
-+ PSB_DEBUG_RENDER("Scene unref\n");
-+ *scene = NULL;
-+ if (atomic_dec_and_test(&tmp_scene->ref_count)) {
-+ psb_scheduler_remove_scene_refs(tmp_scene);
-+ psb_destroy_scene_devlocked(tmp_scene);
-+ }
-+}
-+
-+struct psb_scene *psb_scene_ref(struct psb_scene *src)
-+{
-+ PSB_DEBUG_RENDER("Scene ref\n");
-+ atomic_inc(&src->ref_count);
-+ return src;
-+}
-+
-+static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
-+ uint32_t w, uint32_t h)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ int ret = -EINVAL;
-+ struct psb_scene *scene;
-+ uint32_t bo_size;
-+ struct psb_xhw_buf buf;
-+
-+ PSB_DEBUG_RENDER("Alloc scene w %u h %u\n", w, h);
-+
-+ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
-+
-+ if (!scene) {
-+ DRM_ERROR("Out of memory allocating scene object.\n");
-+ return NULL;
-+ }
-+
-+ scene->dev = dev;
-+ scene->w = w;
-+ scene->h = h;
-+ scene->hw_scene = NULL;
-+ atomic_set(&scene->ref_count, 1);
-+
-+ INIT_LIST_HEAD(&buf.head);
-+ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
-+ scene->hw_cookie, &bo_size,
-+ &scene->clear_p_start,
-+ &scene->clear_num_pages);
-+ if (ret)
-+ goto out_err;
-+
-+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
-+ DRM_PSB_FLAG_MEM_MMU |
-+ DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_CACHED |
-+ PSB_BO_FLAG_SCENE |
-+ DRM_BO_FLAG_WRITE,
-+ DRM_BO_HINT_DONT_FENCE,
-+ 0, 0, &scene->hw_data);
-+ if (ret)
-+ goto out_err;
-+
-+ return scene;
-+ out_err:
-+ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
-+ return NULL;
-+}
-+
-+int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
-+ uint64_t mask,
-+ uint32_t hint,
-+ uint32_t w,
-+ uint32_t h,
-+ int final_pass, struct psb_scene **scene_p)
-+{
-+ struct drm_device *dev = pool->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct psb_scene *scene = pool->scenes[pool->cur_scene];
-+ int ret;
-+ unsigned long irq_flags;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ uint32_t bin_pt_offset;
-+ uint32_t bin_param_offset;
-+
-+ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n", pool->cur_scene);
-+
-+ if (unlikely(!dev_priv->ta_mem)) {
-+ dev_priv->ta_mem =
-+ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
-+ if (!dev_priv->ta_mem)
-+ return -ENOMEM;
-+
-+ bin_pt_offset = ~0;
-+ bin_param_offset = ~0;
-+ } else {
-+ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
-+ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
-+ }
-+
-+ pool->w = w;
-+ pool->h = h;
-+ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ DRM_ERROR("Trying to resize a dirty scene.\n");
-+ return -EINVAL;
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ mutex_lock(&dev->struct_mutex);
-+ psb_scene_unref_devlocked(&pool->scenes[pool->cur_scene]);
-+ mutex_unlock(&dev->struct_mutex);
-+ scene = NULL;
-+ }
-+
-+ if (!scene) {
-+ pool->scenes[pool->cur_scene] = scene =
-+ psb_alloc_scene(pool->dev, pool->w, pool->h);
-+
-+ if (!scene)
-+ return -ENOMEM;
-+
-+ scene->flags = PSB_SCENE_FLAG_CLEARED;
-+ }
-+
-+ /*
-+ * FIXME: We need atomic bit manipulation here for the
-+ * scheduler. For now use the spinlock.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
-+ mutex_lock(&scene->hw_data->mutex);
-+ ret = drm_bo_wait(scene->hw_data, 0, 0, 0);
-+ mutex_unlock(&scene->hw_data->mutex);
-+ if (ret)
-+ return ret;
-+
-+ ret = psb_clear_scene(scene);
-+
-+ if (ret)
-+ return ret;
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ scene->flags |= PSB_SCENE_FLAG_CLEARED;
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ ret = drm_bo_do_validate(scene->hw_data, flags, mask, hint,
-+ PSB_ENGINE_TA, 0, NULL);
-+ if (ret)
-+ return ret;
-+ ret = drm_bo_do_validate(dev_priv->ta_mem->hw_data, 0, 0, 0,
-+ PSB_ENGINE_TA, 0, NULL);
-+ if (ret)
-+ return ret;
-+ ret = drm_bo_do_validate(dev_priv->ta_mem->ta_memory, 0, 0, 0,
-+ PSB_ENGINE_TA, 0, NULL);
-+ if (ret)
-+ return ret;
-+
-+ if (unlikely(bin_param_offset !=
-+ dev_priv->ta_mem->ta_memory->offset ||
-+ bin_pt_offset !=
-+ dev_priv->ta_mem->hw_data->offset ||
-+ dev_priv->force_ta_mem_load)) {
-+
-+ struct psb_xhw_buf buf;
-+
-+ INIT_LIST_HEAD(&buf.head);
-+ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
-+ PSB_TA_MEM_FLAG_TA |
-+ PSB_TA_MEM_FLAG_RASTER |
-+ PSB_TA_MEM_FLAG_HOSTA |
-+ PSB_TA_MEM_FLAG_HOSTD |
-+ PSB_TA_MEM_FLAG_INIT,
-+ dev_priv->ta_mem->ta_memory->offset,
-+ dev_priv->ta_mem->hw_data->offset,
-+ dev_priv->ta_mem->hw_cookie);
-+ if (ret)
-+ return ret;
-+
-+ dev_priv->force_ta_mem_load = 0;
-+ }
-+
-+ if (final_pass) {
-+
-+ /*
-+ * Clear the scene on next use. Advance the scene counter.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
-+ }
-+
-+ *scene_p = psb_scene_ref(scene);
-+ return 0;
-+}
-+
-+static void psb_scene_pool_destroy_devlocked(struct psb_scene_pool *pool)
-+{
-+ int i;
-+
-+ if (!pool)
-+ return;
-+
-+ PSB_DEBUG_RENDER("Scene pool destroy.\n");
-+ for (i = 0; i < pool->num_scenes; ++i) {
-+ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
-+ (unsigned long)pool->scenes[i]);
-+ if (pool->scenes[i])
-+ psb_scene_unref_devlocked(&pool->scenes[i]);
-+ }
-+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
-+}
-+
-+void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool)
-+{
-+ struct psb_scene_pool *tmp_pool = *pool;
-+ struct drm_device *dev = tmp_pool->dev;
-+
-+ PSB_DEBUG_RENDER("Scene pool unref\n");
-+ (void)dev;
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ *pool = NULL;
-+ if (--tmp_pool->ref_count == 0)
-+ psb_scene_pool_destroy_devlocked(tmp_pool);
-+}
-+
-+struct psb_scene_pool *psb_scene_pool_ref_devlocked(struct psb_scene_pool *src)
-+{
-+ ++src->ref_count;
-+ return src;
-+}
-+
-+/*
-+ * Callback for user object manager.
-+ */
-+
-+static void psb_scene_pool_destroy(struct drm_file *priv,
-+ struct drm_user_object *base)
-+{
-+ struct psb_scene_pool *pool =
-+ drm_user_object_entry(base, struct psb_scene_pool, user);
-+
-+ psb_scene_pool_unref_devlocked(&pool);
-+}
-+
-+struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file *priv,
-+ uint32_t handle,
-+ int check_owner)
-+{
-+ struct drm_user_object *uo;
-+ struct psb_scene_pool *pool;
-+
-+ uo = drm_lookup_user_object(priv, handle);
-+ if (!uo || (uo->type != PSB_USER_OBJECT_SCENE_POOL)) {
-+ DRM_ERROR("Could not find scene pool object 0x%08x\n", handle);
-+ return NULL;
-+ }
-+
-+ if (check_owner && priv != uo->owner) {
-+ if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
-+ return NULL;
-+ }
-+
-+ pool = drm_user_object_entry(uo, struct psb_scene_pool, user);
-+ return psb_scene_pool_ref_devlocked(pool);
-+}
-+
-+struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
-+ int shareable,
-+ uint32_t num_scenes,
-+ uint32_t w, uint32_t h)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct psb_scene_pool *pool;
-+ int ret;
-+
-+ PSB_DEBUG_RENDER("Scene pool alloc\n");
-+ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
-+ if (!pool) {
-+ DRM_ERROR("Out of memory allocating scene pool object.\n");
-+ return NULL;
-+ }
-+ pool->w = w;
-+ pool->h = h;
-+ pool->dev = dev;
-+ pool->num_scenes = num_scenes;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_add_user_object(priv, &pool->user, shareable);
-+ if (ret)
-+ goto out_err;
-+
-+ pool->user.type = PSB_USER_OBJECT_SCENE_POOL;
-+ pool->user.remove = &psb_scene_pool_destroy;
-+ pool->ref_count = 2;
-+ mutex_unlock(&dev->struct_mutex);
-+ return pool;
-+ out_err:
-+ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
-+ return NULL;
-+}
-+
-+/*
-+ * Code to support multiple ta memory buffers.
-+ */
-+
-+static void psb_destroy_ta_mem_devlocked(struct psb_ta_mem *ta_mem)
-+{
-+ if (!ta_mem)
-+ return;
-+
-+ drm_bo_usage_deref_locked(&ta_mem->hw_data);
-+ drm_bo_usage_deref_locked(&ta_mem->ta_memory);
-+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
-+}
-+
-+void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem)
-+{
-+ struct psb_ta_mem *tmp_ta_mem = *ta_mem;
-+ struct drm_device *dev = tmp_ta_mem->dev;
-+
-+ (void)dev;
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ *ta_mem = NULL;
-+ if (--tmp_ta_mem->ref_count == 0)
-+ psb_destroy_ta_mem_devlocked(tmp_ta_mem);
-+}
-+
-+void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst, struct psb_ta_mem *src)
-+{
-+ struct drm_device *dev = src->dev;
-+
-+ (void)dev;
-+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
-+ *dst = src;
-+ ++src->ref_count;
-+}
-+
-+struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ int ret = -EINVAL;
-+ struct psb_ta_mem *ta_mem;
-+ uint32_t bo_size;
-+ struct psb_xhw_buf buf;
-+
-+ INIT_LIST_HEAD(&buf.head);
-+
-+ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
-+
-+ if (!ta_mem) {
-+ DRM_ERROR("Out of memory allocating parameter memory.\n");
-+ return NULL;
-+ }
-+
-+ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
-+ ta_mem->hw_cookie, &bo_size);
-+ if (ret == -ENOMEM) {
-+ DRM_ERROR("Parameter memory size is too small.\n");
-+ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
-+ (unsigned int)(pages * (PAGE_SIZE / 1024)));
-+ DRM_INFO("The Xpsb driver thinks this is too small and\n");
-+ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
-+ (unsigned int)(bo_size / 1024));
-+ DRM_INFO("\"ta_mem_size\" parameter!\n");
-+ }
-+ if (ret)
-+ goto out_err0;
-+
-+ bo_size = pages * PAGE_SIZE;
-+ ta_mem->dev = dev;
-+ ret = drm_buffer_object_create(dev, bo_size, drm_bo_type_kernel,
-+ DRM_PSB_FLAG_MEM_MMU | DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_WRITE |
-+ PSB_BO_FLAG_SCENE,
-+ DRM_BO_HINT_DONT_FENCE, 0, 0,
-+ &ta_mem->hw_data);
-+ if (ret)
-+ goto out_err0;
-+
-+ ret =
-+ drm_buffer_object_create(dev, pages << PAGE_SHIFT,
-+ drm_bo_type_kernel,
-+ DRM_PSB_FLAG_MEM_RASTGEOM |
-+ DRM_BO_FLAG_READ |
-+ DRM_BO_FLAG_WRITE |
-+ PSB_BO_FLAG_SCENE,
-+ DRM_BO_HINT_DONT_FENCE, 0,
-+ 1024 * 1024 >> PAGE_SHIFT,
-+ &ta_mem->ta_memory);
-+ if (ret)
-+ goto out_err1;
-+
-+ ta_mem->ref_count = 1;
-+ return ta_mem;
-+ out_err1:
-+ drm_bo_usage_deref_unlocked(&ta_mem->hw_data);
-+ out_err0:
-+ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
-+ return NULL;
-+}
-+
-+int drm_psb_scene_unref_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv)
-+{
-+ struct drm_psb_scene *scene = (struct drm_psb_scene *)data;
-+ struct drm_user_object *uo;
-+ struct drm_ref_object *ro;
-+ int ret = 0;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ if (!scene->handle_valid)
-+ goto out_unlock;
-+
-+ uo = drm_lookup_user_object(file_priv, scene->handle);
-+ if (!uo) {
-+ ret = -EINVAL;
-+ goto out_unlock;
-+ }
-+ if (uo->type != PSB_USER_OBJECT_SCENE_POOL) {
-+ DRM_ERROR("Not a scene pool object.\n");
-+ ret = -EINVAL;
-+ goto out_unlock;
-+ }
-+ if (uo->owner != file_priv) {
-+ DRM_ERROR("Not owner of scene pool object.\n");
-+ ret = -EPERM;
-+ goto out_unlock;
-+ }
-+
-+ scene->handle_valid = 0;
-+ ro = drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE);
-+ BUG_ON(!ro);
-+ drm_remove_ref_object(file_priv, ro);
-+
-+ out_unlock:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_scene.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_scene.h 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,112 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+
-+#ifndef _PSB_SCENE_H_
-+#define _PSB_SCENE_H_
-+
-+#define PSB_USER_OBJECT_SCENE_POOL drm_driver_type0
-+#define PSB_USER_OBJECT_TA_MEM drm_driver_type1
-+#define PSB_MAX_NUM_SCENES 8
-+
-+struct psb_hw_scene;
-+struct psb_hw_ta_mem;
-+
-+struct psb_scene_pool {
-+ struct drm_device *dev;
-+ struct drm_user_object user;
-+ uint32_t ref_count;
-+ uint32_t w;
-+ uint32_t h;
-+ uint32_t cur_scene;
-+ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
-+ uint32_t num_scenes;
-+};
-+
-+struct psb_scene {
-+ struct drm_device *dev;
-+ atomic_t ref_count;
-+ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
-+ uint32_t bo_size;
-+ uint32_t w;
-+ uint32_t h;
-+ struct psb_ta_mem *ta_mem;
-+ struct psb_hw_scene *hw_scene;
-+ struct drm_buffer_object *hw_data;
-+ uint32_t flags;
-+ uint32_t clear_p_start;
-+ uint32_t clear_num_pages;
-+};
-+
-+struct psb_scene_entry {
-+ struct list_head head;
-+ struct psb_scene *scene;
-+};
-+
-+struct psb_user_scene {
-+ struct drm_device *dev;
-+ struct drm_user_object user;
-+};
-+
-+struct psb_ta_mem {
-+ struct drm_device *dev;
-+ struct drm_user_object user;
-+ uint32_t ref_count;
-+ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
-+ uint32_t bo_size;
-+ struct drm_buffer_object *ta_memory;
-+ struct drm_buffer_object *hw_data;
-+ int is_deallocating;
-+ int deallocating_scheduled;
-+};
-+
-+extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
-+ int shareable,
-+ uint32_t num_scenes,
-+ uint32_t w, uint32_t h);
-+extern void psb_scene_pool_unref_devlocked(struct psb_scene_pool **pool);
-+extern struct psb_scene_pool *psb_scene_pool_lookup_devlocked(struct drm_file
-+ *priv,
-+ uint32_t handle,
-+ int check_owner);
-+extern int psb_validate_scene_pool(struct psb_scene_pool *pool, uint64_t flags,
-+ uint64_t mask, uint32_t hint, uint32_t w,
-+ uint32_t h, int final_pass,
-+ struct psb_scene **scene_p);
-+extern void psb_scene_unref_devlocked(struct psb_scene **scene);
-+extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
-+extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
-+ void *data, struct drm_file *file_priv);
-+
-+static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
-+{
-+ return pool->user.hash.key;
-+}
-+extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
-+ uint32_t pages);
-+extern void psb_ta_mem_ref_devlocked(struct psb_ta_mem **dst,
-+ struct psb_ta_mem *src);
-+extern void psb_ta_mem_unref_devlocked(struct psb_ta_mem **ta_mem);
-+
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_schedule.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_schedule.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,1445 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drm.h"
-+#include "psb_drv.h"
-+#include "psb_reg.h"
-+#include "psb_scene.h"
-+
-+#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 20)
-+#define PSB_RASTER_TIMEOUT (DRM_HZ / 2)
-+#define PSB_TA_TIMEOUT (DRM_HZ / 5)
-+
-+#undef PSB_SOFTWARE_WORKAHEAD
-+
-+#ifdef PSB_STABLE_SETTING
-+
-+/*
-+ * Software blocks completely while the engines are working so there can be no
-+ * overlap.
-+ */
-+
-+#define PSB_WAIT_FOR_RASTER_COMPLETION
-+#define PSB_WAIT_FOR_TA_COMPLETION
-+
-+#elif defined(PSB_PARANOID_SETTING)
-+/*
-+ * Software blocks "almost" while the engines are working so there can be no
-+ * overlap.
-+ */
-+
-+#define PSB_WAIT_FOR_RASTER_COMPLETION
-+#define PSB_WAIT_FOR_TA_COMPLETION
-+#define PSB_BE_PARANOID
-+
-+#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
-+/*
-+ * Software leaps ahead while the rasterizer is running and prepares
-+ * a new ta job that can be scheduled before the rasterizer has
-+ * finished.
-+ */
-+
-+#define PSB_WAIT_FOR_TA_COMPLETION
-+
-+#elif defined(PSB_SOFTWARE_WORKAHEAD)
-+/*
-+ * Don't sync, but allow software to work ahead. and queue a number of jobs.
-+ * But block overlapping in the scheduler.
-+ */
-+
-+#define PSB_BLOCK_OVERLAP
-+#define ONLY_ONE_JOB_IN_RASTER_QUEUE
-+
-+#endif
-+
-+/*
-+ * Avoid pixelbe pagefaults on C0.
-+ */
-+#if 0
-+#define PSB_BLOCK_OVERLAP
-+#endif
-+
-+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler,
-+ uint32_t reply_flag);
-+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler,
-+ uint32_t reply_flag);
-+
-+#ifdef FIX_TG_16
-+
-+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
-+static int psb_2d_trylock(struct drm_psb_private *dev_priv);
-+static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
-+
-+#endif
-+
-+void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
-+ int *lockup, int *idle)
-+{
-+ unsigned long irq_flags;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+
-+ *lockup = 0;
-+ *idle = 1;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
-+ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
-+ *lockup = 1;
-+ }
-+ if (!*lockup
-+ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
-+ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
-+ *lockup = 1;
-+ }
-+ if (!*lockup)
-+ *idle = scheduler->idle;
-+
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+static inline void psb_set_idle(struct psb_scheduler *scheduler)
-+{
-+ scheduler->idle =
-+ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
-+ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
-+ if (scheduler->idle)
-+ wake_up(&scheduler->idle_queue);
-+}
-+
-+/*
-+ * Call with the scheduler spinlock held.
-+ * Assigns a scene context to either the ta or the rasterizer,
-+ * flushing out other scenes to memory if necessary.
-+ */
-+
-+static int psb_set_scene_fire(struct psb_scheduler *scheduler,
-+ struct psb_scene *scene,
-+ int engine, struct psb_task *task)
-+{
-+ uint32_t flags = 0;
-+ struct psb_hw_scene *hw_scene;
-+ struct drm_device *dev = scene->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ hw_scene = scene->hw_scene;
-+ if (hw_scene && hw_scene->last_scene == scene) {
-+
-+ /*
-+ * Reuse the last hw scene context and delete it from the
-+ * free list.
-+ */
-+
-+ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
-+ hw_scene->context_number);
-+ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
-+
-+ /*
-+ * No hw context initialization to be done.
-+ */
-+
-+ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
-+ }
-+
-+ list_del_init(&hw_scene->head);
-+
-+ } else {
-+ struct list_head *list;
-+ hw_scene = NULL;
-+
-+ /*
-+ * Grab a new hw scene context.
-+ */
-+
-+ list_for_each(list, &scheduler->hw_scenes) {
-+ hw_scene = list_entry(list, struct psb_hw_scene, head);
-+ break;
-+ }
-+ BUG_ON(!hw_scene);
-+ PSB_DEBUG_RENDER("New hw scene %d.\n",
-+ hw_scene->context_number);
-+
-+ list_del_init(list);
-+ }
-+ scene->hw_scene = hw_scene;
-+ hw_scene->last_scene = scene;
-+
-+ flags |= PSB_SCENE_FLAG_SETUP;
-+
-+ /*
-+ * Switch context and setup the engine.
-+ */
-+
-+ return psb_xhw_scene_bind_fire(dev_priv,
-+ &task->buf,
-+ task->flags,
-+ hw_scene->context_number,
-+ scene->hw_cookie,
-+ task->oom_cmds,
-+ task->oom_cmd_size,
-+ scene->hw_data->offset,
-+ engine, flags | scene->flags);
-+}
-+
-+static inline void psb_report_fence(struct psb_scheduler *scheduler,
-+ uint32_t class,
-+ uint32_t sequence,
-+ uint32_t type, int call_handler)
-+{
-+ struct psb_scheduler_seq *seq = &scheduler->seq[type];
-+
-+ seq->sequence = sequence;
-+ seq->reported = 0;
-+ if (call_handler)
-+ psb_fence_handler(scheduler->dev, class);
-+}
-+
-+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler);
-+
-+static void psb_schedule_ta(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task = NULL;
-+ struct list_head *list, *next;
-+ int pushed_raster_task = 0;
-+
-+ PSB_DEBUG_RENDER("schedule ta\n");
-+
-+ if (scheduler->idle_count != 0)
-+ return;
-+
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
-+ return;
-+
-+ if (scheduler->ta_state)
-+ return;
-+
-+ /*
-+ * Skip the ta stage for rasterization-only
-+ * tasks. They arrive here to make sure we're rasterizing
-+ * tasks in the correct order.
-+ */
-+
-+ list_for_each_safe(list, next, &scheduler->ta_queue) {
-+ task = list_entry(list, struct psb_task, head);
-+ if (task->task_type != psb_raster_task)
-+ break;
-+
-+ list_del_init(list);
-+ list_add_tail(list, &scheduler->raster_queue);
-+ psb_report_fence(scheduler, task->engine, task->sequence,
-+ _PSB_FENCE_TA_DONE_SHIFT, 1);
-+ task = NULL;
-+ pushed_raster_task = 1;
-+ }
-+
-+ if (pushed_raster_task)
-+ psb_schedule_raster(dev_priv, scheduler);
-+
-+ if (!task)
-+ return;
-+
-+ /*
-+ * Still waiting for a vistest?
-+ */
-+
-+ if (scheduler->feedback_task == task)
-+ return;
-+
-+#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
-+
-+ /*
-+ * Block ta from trying to use both hardware contexts
-+ * without the rasterizer starting to render from one of them.
-+ */
-+
-+ if (!list_empty(&scheduler->raster_queue)) {
-+ return;
-+ }
-+#endif
-+
-+#ifdef PSB_BLOCK_OVERLAP
-+ /*
-+ * Make sure rasterizer isn't doing anything.
-+ */
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
-+ return;
-+#endif
-+ if (list_empty(&scheduler->hw_scenes))
-+ return;
-+
-+#ifdef FIX_TG_16
-+ if (psb_check_2d_idle(dev_priv))
-+ return;
-+#endif
-+
-+ list_del_init(&task->head);
-+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
-+ scheduler->ta_state = 1;
-+
-+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
-+ scheduler->idle = 0;
-+ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
-+
-+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
-+ 0x00000000 : PSB_RF_FIRE_TA;
-+
-+ (void)psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
-+ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA, task);
-+ psb_schedule_watchdog(dev_priv);
-+}
-+
-+static int psb_fire_raster(struct psb_scheduler *scheduler,
-+ struct psb_task *task)
-+{
-+ struct drm_device *dev = scheduler->dev;
-+ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
-+ dev->dev_private;
-+
-+ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
-+
-+ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
-+}
-+
-+/*
-+ * Take the first rasterization task from the hp raster queue or from the
-+ * raster queue and fire the rasterizer.
-+ */
-+
-+static void psb_schedule_raster(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task;
-+ struct list_head *list;
-+
-+ if (scheduler->idle_count != 0)
-+ return;
-+
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
-+ PSB_DEBUG_RENDER("Raster busy.\n");
-+ return;
-+ }
-+#ifdef PSB_BLOCK_OVERLAP
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
-+ PSB_DEBUG_RENDER("TA busy.\n");
-+ return;
-+ }
-+#endif
-+
-+ if (!list_empty(&scheduler->hp_raster_queue))
-+ list = scheduler->hp_raster_queue.next;
-+ else if (!list_empty(&scheduler->raster_queue))
-+ list = scheduler->raster_queue.next;
-+ else {
-+ PSB_DEBUG_RENDER("Nothing in list\n");
-+ return;
-+ }
-+
-+ task = list_entry(list, struct psb_task, head);
-+
-+ /*
-+ * Sometimes changing ZLS format requires an ISP reset.
-+ * Doesn't seem to consume too much time.
-+ */
-+
-+ if (task->scene)
-+ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
-+
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
-+
-+ list_del_init(list);
-+ scheduler->idle = 0;
-+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
-+ scheduler->total_raster_jiffies = 0;
-+
-+ if (task->scene)
-+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
-+
-+ (void)psb_reg_submit(dev_priv, task->raster_cmds,
-+ task->raster_cmd_size);
-+
-+ if (task->scene) {
-+ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
-+ 0x00000000 : PSB_RF_FIRE_RASTER;
-+ psb_set_scene_fire(scheduler,
-+ task->scene, PSB_SCENE_ENGINE_RASTER, task);
-+ } else {
-+ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
-+ psb_fire_raster(scheduler, task);
-+ }
-+ psb_schedule_watchdog(dev_priv);
-+}
-+
-+int psb_extend_raster_timeout(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ int ret;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ scheduler->total_raster_jiffies +=
-+ jiffies - scheduler->raster_end_jiffies + PSB_RASTER_TIMEOUT;
-+ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
-+ ret = (scheduler->total_raster_jiffies > PSB_ALLOWED_RASTER_RUNTIME) ?
-+ -EBUSY : 0;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ return ret;
-+}
-+
-+/*
-+ * TA done handler.
-+ */
-+
-+static void psb_ta_done(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ struct psb_scene *scene = task->scene;
-+
-+ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
-+
-+ switch (task->ta_complete_action) {
-+ case PSB_RASTER_BLOCK:
-+ scheduler->ta_state = 1;
-+ scene->flags |=
-+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
-+ list_add_tail(&task->head, &scheduler->raster_queue);
-+ break;
-+ case PSB_RASTER:
-+ scene->flags |=
-+ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
-+ list_add_tail(&task->head, &scheduler->raster_queue);
-+ break;
-+ case PSB_RETURN:
-+ scheduler->ta_state = 0;
-+ scene->flags |= PSB_SCENE_FLAG_DIRTY;
-+ list_add_tail(&scene->hw_scene->head, &scheduler->hw_scenes);
-+
-+ break;
-+ }
-+
-+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
-+
-+#ifdef FIX_TG_16
-+ psb_2d_atomic_unlock(dev_priv);
-+#endif
-+
-+ if (task->ta_complete_action != PSB_RASTER_BLOCK)
-+ psb_report_fence(scheduler, task->engine, task->sequence,
-+ _PSB_FENCE_TA_DONE_SHIFT, 1);
-+
-+ psb_schedule_raster(dev_priv, scheduler);
-+ psb_schedule_ta(dev_priv, scheduler);
-+ psb_set_idle(scheduler);
-+
-+ if (task->ta_complete_action != PSB_RETURN)
-+ return;
-+
-+ list_add_tail(&task->head, &scheduler->task_done_queue);
-+ schedule_delayed_work(&scheduler->wq, 1);
-+}
-+
-+/*
-+ * Rasterizer done handler.
-+ */
-+
-+static void psb_raster_done(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task =
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
-+ struct psb_scene *scene = task->scene;
-+ uint32_t complete_action = task->raster_complete_action;
-+
-+ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
-+
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
-+
-+ if (complete_action != PSB_RASTER)
-+ psb_schedule_raster(dev_priv, scheduler);
-+
-+ if (scene) {
-+ if (task->feedback.page) {
-+ if (unlikely(scheduler->feedback_task)) {
-+ /*
-+ * This should never happen, since the previous
-+ * feedback query will return before the next
-+ * raster task is fired.
-+ */
-+ DRM_ERROR("Feedback task busy.\n");
-+ }
-+ scheduler->feedback_task = task;
-+ psb_xhw_vistest(dev_priv, &task->buf);
-+ }
-+ switch (complete_action) {
-+ case PSB_RETURN:
-+ scene->flags &=
-+ ~(PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
-+ list_add_tail(&scene->hw_scene->head,
-+ &scheduler->hw_scenes);
-+ psb_report_fence(scheduler, task->engine,
-+ task->sequence,
-+ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
-+ if (task->flags & PSB_FIRE_FLAG_XHW_OOM) {
-+ scheduler->ta_state = 0;
-+ }
-+ break;
-+ case PSB_RASTER:
-+ list_add(&task->head, &scheduler->raster_queue);
-+ task->raster_complete_action = PSB_RETURN;
-+ psb_schedule_raster(dev_priv, scheduler);
-+ break;
-+ case PSB_TA:
-+ list_add(&task->head, &scheduler->ta_queue);
-+ scheduler->ta_state = 0;
-+ task->raster_complete_action = PSB_RETURN;
-+ task->ta_complete_action = PSB_RASTER;
-+ break;
-+
-+ }
-+ }
-+ psb_schedule_ta(dev_priv, scheduler);
-+ psb_set_idle(scheduler);
-+
-+ if (complete_action == PSB_RETURN) {
-+ if (task->scene == NULL) {
-+ psb_report_fence(scheduler, task->engine,
-+ task->sequence,
-+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
-+ }
-+ if (!task->feedback.page) {
-+ list_add_tail(&task->head, &scheduler->task_done_queue);
-+ schedule_delayed_work(&scheduler->wq, 1);
-+ }
-+ }
-+}
-+
-+void psb_scheduler_pause(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ scheduler->idle_count++;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+void psb_scheduler_restart(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (--scheduler->idle_count == 0) {
-+ psb_schedule_ta(dev_priv, scheduler);
-+ psb_schedule_raster(dev_priv, scheduler);
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+int psb_scheduler_idle(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ int ret;
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ ret = scheduler->idle_count != 0 && scheduler->idle;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ return ret;
-+}
-+
-+int psb_scheduler_finished(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ int ret;
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ ret = (scheduler->idle &&
-+ list_empty(&scheduler->raster_queue) &&
-+ list_empty(&scheduler->ta_queue) &&
-+ list_empty(&scheduler->hp_raster_queue));
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ return ret;
-+}
-+
-+static void psb_ta_oom(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ if (!task)
-+ return;
-+
-+ if (task->aborting)
-+ return;
-+ task->aborting = 1;
-+
-+ DRM_INFO("Info: TA out of parameter memory.\n");
-+
-+ (void)psb_xhw_ta_oom(dev_priv, &task->buf, task->scene->hw_cookie);
-+}
-+
-+static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ uint32_t flags;
-+ if (!task)
-+ return;
-+
-+ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
-+ task->scene->hw_cookie,
-+ &task->ta_complete_action,
-+ &task->raster_complete_action, &flags);
-+ task->flags |= flags;
-+ task->aborting = 0;
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
-+}
-+
-+static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ DRM_ERROR("TA hw scene freed.\n");
-+}
-+
-+static void psb_vistest_reply(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task = scheduler->feedback_task;
-+ uint8_t *feedback_map;
-+ uint32_t add;
-+ uint32_t cur;
-+ struct drm_psb_vistest *vistest;
-+ int i;
-+
-+ scheduler->feedback_task = NULL;
-+ if (!task) {
-+ DRM_ERROR("No Poulsbo feedback task.\n");
-+ return;
-+ }
-+ if (!task->feedback.page) {
-+ DRM_ERROR("No Poulsbo feedback page.\n");
-+ goto out;
-+ }
-+
-+ if (in_irq())
-+ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
-+ else
-+ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
-+
-+ /*
-+ * Loop over all requested vistest components here.
-+ * Only one (vistest) currently.
-+ */
-+
-+ vistest = (struct drm_psb_vistest *)
-+ (feedback_map + task->feedback.offset);
-+
-+ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
-+ add = task->buf.arg.arg.feedback[i];
-+ cur = vistest->vt[i];
-+
-+ /*
-+ * Vistest saturates.
-+ */
-+
-+ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
-+ }
-+ if (in_irq())
-+ kunmap_atomic(feedback_map, KM_IRQ0);
-+ else
-+ kunmap_atomic(feedback_map, KM_USER0);
-+ out:
-+ psb_report_fence(scheduler, task->engine, task->sequence,
-+ _PSB_FENCE_FEEDBACK_SHIFT, 1);
-+
-+ if (list_empty(&task->head)) {
-+ list_add_tail(&task->head, &scheduler->task_done_queue);
-+ schedule_delayed_work(&scheduler->wq, 1);
-+ } else
-+ psb_schedule_ta(dev_priv, scheduler);
-+}
-+
-+static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+
-+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
-+
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
-+}
-+
-+static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ struct psb_task *task =
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
-+ uint32_t reply_flags;
-+
-+ if (!task) {
-+ DRM_ERROR("Null task.\n");
-+ return;
-+ }
-+
-+ task->raster_complete_action = task->buf.arg.arg.sb.rca;
-+ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
-+
-+ reply_flags = PSB_RF_FIRE_RASTER;
-+ if (task->raster_complete_action == PSB_RASTER)
-+ reply_flags |= PSB_RF_DEALLOC;
-+
-+ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
-+}
-+
-+static int psb_user_interrupt(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler)
-+{
-+ uint32_t type;
-+ int ret;
-+ unsigned long irq_flags;
-+
-+ /*
-+ * Xhw cannot write directly to the comm page, so
-+ * do it here. Firmware would have written directly.
-+ */
-+
-+ ret = psb_xhw_handler(dev_priv);
-+ if (unlikely(ret))
-+ return ret;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ type = dev_priv->comm[PSB_COMM_USER_IRQ];
-+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
-+ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
-+ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
-+ DRM_ERROR("Lost Poulsbo hardware event.\n");
-+ }
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+
-+ if (type == 0)
-+ return 0;
-+
-+ switch (type) {
-+ case PSB_UIRQ_VISTEST:
-+ psb_vistest_reply(dev_priv, scheduler);
-+ break;
-+ case PSB_UIRQ_OOM_REPLY:
-+ psb_ta_oom_reply(dev_priv, scheduler);
-+ break;
-+ case PSB_UIRQ_FIRE_TA_REPLY:
-+ psb_ta_fire_reply(dev_priv, scheduler);
-+ break;
-+ case PSB_UIRQ_FIRE_RASTER_REPLY:
-+ psb_raster_fire_reply(dev_priv, scheduler);
-+ break;
-+ default:
-+ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
-+ }
-+ return 0;
-+}
-+
-+int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ int ret;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ ret = psb_user_interrupt(dev_priv, scheduler);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ return ret;
-+}
-+
-+static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler,
-+ uint32_t reply_flag)
-+{
-+ struct psb_task *task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ uint32_t flags;
-+ uint32_t mask;
-+
-+ task->reply_flags |= reply_flag;
-+ flags = task->reply_flags;
-+ mask = PSB_RF_FIRE_TA;
-+
-+ if (!(flags & mask))
-+ return;
-+
-+ mask = PSB_RF_TA_DONE;
-+ if ((flags & mask) == mask) {
-+ task->reply_flags &= ~mask;
-+ psb_ta_done(dev_priv, scheduler);
-+ }
-+
-+ mask = PSB_RF_OOM;
-+ if ((flags & mask) == mask) {
-+ task->reply_flags &= ~mask;
-+ psb_ta_oom(dev_priv, scheduler);
-+ }
-+
-+ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
-+ if ((flags & mask) == mask) {
-+ task->reply_flags &= ~mask;
-+ psb_ta_done(dev_priv, scheduler);
-+ }
-+}
-+
-+static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
-+ struct psb_scheduler *scheduler,
-+ uint32_t reply_flag)
-+{
-+ struct psb_task *task =
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
-+ uint32_t flags;
-+ uint32_t mask;
-+
-+ task->reply_flags |= reply_flag;
-+ flags = task->reply_flags;
-+ mask = PSB_RF_FIRE_RASTER;
-+
-+ if (!(flags & mask))
-+ return;
-+
-+ /*
-+ * For rasterizer-only tasks, don't report fence done here,
-+ * as this is time consuming and the rasterizer wants a new
-+ * task immediately. For other tasks, the hardware is probably
-+ * still busy deallocating TA memory, so we can report
-+ * fence done in parallel.
-+ */
-+
-+ if (task->raster_complete_action == PSB_RETURN &&
-+ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
-+ psb_report_fence(scheduler, task->engine, task->sequence,
-+ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
-+ }
-+
-+ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
-+ if ((flags & mask) == mask) {
-+ task->reply_flags &= ~mask;
-+ psb_raster_done(dev_priv, scheduler);
-+ }
-+}
-+
-+void psb_scheduler_handler(struct drm_psb_private *dev_priv, uint32_t status)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+
-+ spin_lock(&scheduler->lock);
-+
-+ if (status & _PSB_CE_PIXELBE_END_RENDER) {
-+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_RASTER_DONE);
-+ }
-+ if (status & _PSB_CE_DPM_3D_MEM_FREE) {
-+ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
-+ }
-+ if (status & _PSB_CE_TA_FINISHED) {
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
-+ }
-+ if (status & _PSB_CE_TA_TERMINATE) {
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
-+ }
-+ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
-+ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
-+ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
-+ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
-+ }
-+ if (status & _PSB_CE_DPM_TA_MEM_FREE) {
-+ psb_ta_hw_scene_freed(dev_priv, scheduler);
-+ }
-+ if (status & _PSB_CE_SW_EVENT) {
-+ psb_user_interrupt(dev_priv, scheduler);
-+ }
-+ spin_unlock(&scheduler->lock);
-+}
-+
-+static void psb_free_task_wq(struct work_struct *work)
-+{
-+ struct psb_scheduler *scheduler =
-+ container_of(work, struct psb_scheduler, wq.work);
-+
-+ struct drm_device *dev = scheduler->dev;
-+ struct list_head *list, *next;
-+ unsigned long irq_flags;
-+ struct psb_task *task;
-+
-+ if (!mutex_trylock(&scheduler->task_wq_mutex))
-+ return;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ list_for_each_safe(list, next, &scheduler->task_done_queue) {
-+ task = list_entry(list, struct psb_task, head);
-+ list_del_init(list);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
-+ "Feedback bo 0x%08lx, done %d\n",
-+ task->sequence, (unsigned long)task->scene,
-+ (unsigned long)task->feedback.bo,
-+ atomic_read(&task->buf.done));
-+
-+ if (task->scene) {
-+ mutex_lock(&dev->struct_mutex);
-+ PSB_DEBUG_RENDER("Unref scene %d\n", task->sequence);
-+ psb_scene_unref_devlocked(&task->scene);
-+ if (task->feedback.bo) {
-+ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
-+ task->sequence);
-+ drm_bo_usage_deref_locked(&task->feedback.bo);
-+ }
-+ mutex_unlock(&dev->struct_mutex);
-+ }
-+
-+ if (atomic_read(&task->buf.done)) {
-+ PSB_DEBUG_RENDER("Deleting task %d\n", task->sequence);
-+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
-+ task = NULL;
-+ }
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (task != NULL)
-+ list_add(list, &scheduler->task_done_queue);
-+ }
-+ if (!list_empty(&scheduler->task_done_queue)) {
-+ PSB_DEBUG_RENDER("Rescheduling wq\n");
-+ schedule_delayed_work(&scheduler->wq, 1);
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ mutex_unlock(&scheduler->task_wq_mutex);
-+}
-+
-+/*
-+ * Check if any of the tasks in the queues is using a scene.
-+ * In that case we know the TA memory buffer objects are
-+ * fenced and will not be evicted until that fence is signaled.
-+ */
-+
-+void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+ struct psb_task *task;
-+ struct psb_task *next_task;
-+
-+ dev_priv->force_ta_mem_load = 1;
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue, head) {
-+ if (task->scene) {
-+ dev_priv->force_ta_mem_load = 0;
-+ break;
-+ }
-+ }
-+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
-+ head) {
-+ if (task->scene) {
-+ dev_priv->force_ta_mem_load = 0;
-+ break;
-+ }
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+void psb_scheduler_reset(struct drm_psb_private *dev_priv, int error_condition)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long wait_jiffies;
-+ unsigned long cur_jiffies;
-+ struct psb_task *task;
-+ struct psb_task *next_task;
-+ unsigned long irq_flags;
-+
-+ psb_scheduler_pause(dev_priv);
-+ if (!psb_scheduler_idle(dev_priv)) {
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+
-+ cur_jiffies = jiffies;
-+ wait_jiffies = cur_jiffies;
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
-+ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
-+ wait_jiffies = scheduler->ta_end_jiffies;
-+ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
-+ time_after_eq(scheduler->raster_end_jiffies, wait_jiffies))
-+ wait_jiffies = scheduler->raster_end_jiffies;
-+
-+ wait_jiffies -= cur_jiffies;
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ (void)wait_event_timeout(scheduler->idle_queue,
-+ psb_scheduler_idle(dev_priv),
-+ wait_jiffies);
-+ }
-+
-+ if (!psb_scheduler_idle(dev_priv)) {
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
-+ if (task) {
-+ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
-+ if (task->engine == PSB_ENGINE_HPRAST) {
-+ psb_fence_error(scheduler->dev,
-+ PSB_ENGINE_HPRAST,
-+ task->sequence,
-+ _PSB_FENCE_TYPE_RASTER_DONE,
-+ error_condition);
-+
-+ list_del(&task->head);
-+ psb_xhw_clean_buf(dev_priv, &task->buf);
-+ list_add_tail(&task->head,
-+ &scheduler->task_done_queue);
-+ } else {
-+ list_add(&task->head, &scheduler->raster_queue);
-+ }
-+ }
-+ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
-+ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
-+ if (task) {
-+ DRM_ERROR("Detected Poulsbo ta lockup.\n");
-+ list_add_tail(&task->head, &scheduler->raster_queue);
-+#ifdef FIX_TG_16
-+ psb_2d_atomic_unlock(dev_priv);
-+#endif
-+ }
-+ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
-+ scheduler->ta_state = 0;
-+
-+#ifdef FIX_TG_16
-+ atomic_set(&dev_priv->ta_wait_2d, 0);
-+ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
-+ wake_up(&dev_priv->queue_2d);
-+#endif
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ }
-+
-+ /*
-+ * Empty raster queue.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
-+ head) {
-+ struct psb_scene *scene = task->scene;
-+
-+ psb_fence_error(scheduler->dev,
-+ task->engine,
-+ task->sequence,
-+ _PSB_FENCE_TYPE_TA_DONE |
-+ _PSB_FENCE_TYPE_RASTER_DONE |
-+ _PSB_FENCE_TYPE_SCENE_DONE |
-+ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
-+ if (scene) {
-+ scene->flags = 0;
-+ if (scene->hw_scene) {
-+ list_add_tail(&scene->hw_scene->head,
-+ &scheduler->hw_scenes);
-+ scene->hw_scene = NULL;
-+ }
-+ }
-+
-+ psb_xhw_clean_buf(dev_priv, &task->buf);
-+ list_del(&task->head);
-+ list_add_tail(&task->head, &scheduler->task_done_queue);
-+ }
-+
-+ schedule_delayed_work(&scheduler->wq, 1);
-+ scheduler->idle = 1;
-+ wake_up(&scheduler->idle_queue);
-+
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+ psb_scheduler_restart(dev_priv);
-+
-+}
-+
-+int psb_scheduler_init(struct drm_device *dev, struct psb_scheduler *scheduler)
-+{
-+ struct psb_hw_scene *hw_scene;
-+ int i;
-+
-+ memset(scheduler, 0, sizeof(*scheduler));
-+ scheduler->dev = dev;
-+ mutex_init(&scheduler->task_wq_mutex);
-+ scheduler->lock = SPIN_LOCK_UNLOCKED;
-+ scheduler->idle = 1;
-+
-+ INIT_LIST_HEAD(&scheduler->ta_queue);
-+ INIT_LIST_HEAD(&scheduler->raster_queue);
-+ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
-+ INIT_LIST_HEAD(&scheduler->hw_scenes);
-+ INIT_LIST_HEAD(&scheduler->task_done_queue);
-+ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
-+ init_waitqueue_head(&scheduler->idle_queue);
-+
-+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
-+ hw_scene = &scheduler->hs[i];
-+ hw_scene->context_number = i;
-+ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
-+ }
-+
-+ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
-+ scheduler->seq[i].reported = 0;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Scene references maintained by the scheduler are not refcounted.
-+ * Remove all references to a particular scene here.
-+ */
-+
-+void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)scene->dev->dev_private;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ struct psb_hw_scene *hw_scene;
-+ unsigned long irq_flags;
-+ unsigned int i;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
-+ hw_scene = &scheduler->hs[i];
-+ if (hw_scene->last_scene == scene) {
-+ BUG_ON(list_empty(&hw_scene->head));
-+ hw_scene->last_scene = NULL;
-+ }
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+void psb_scheduler_takedown(struct psb_scheduler *scheduler)
-+{
-+ flush_scheduled_work();
-+}
-+
-+static int psb_setup_task_devlocked(struct drm_device *dev,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *raster_cmd_buffer,
-+ struct drm_buffer_object *ta_cmd_buffer,
-+ struct drm_buffer_object *oom_cmd_buffer,
-+ struct psb_scene *scene,
-+ enum psb_task_type task_type,
-+ uint32_t engine,
-+ uint32_t flags, struct psb_task **task_p)
-+{
-+ struct psb_task *task;
-+ int ret;
-+
-+ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
-+ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
-+ return -EINVAL;
-+ }
-+ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
-+ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
-+ return -EINVAL;
-+ }
-+ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
-+ DRM_ERROR("Too many raster cmds %d.\n", arg->oom_size);
-+ return -EINVAL;
-+ }
-+
-+ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
-+ if (!task)
-+ return -ENOMEM;
-+
-+ atomic_set(&task->buf.done, 1);
-+ task->engine = engine;
-+ INIT_LIST_HEAD(&task->head);
-+ INIT_LIST_HEAD(&task->buf.head);
-+ if (ta_cmd_buffer && arg->ta_size != 0) {
-+ task->ta_cmd_size = arg->ta_size;
-+ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
-+ arg->ta_offset,
-+ arg->ta_size,
-+ PSB_ENGINE_TA, task->ta_cmds);
-+ if (ret)
-+ goto out_err;
-+ }
-+ if (raster_cmd_buffer) {
-+ task->raster_cmd_size = arg->cmdbuf_size;
-+ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
-+ arg->cmdbuf_offset,
-+ arg->cmdbuf_size,
-+ PSB_ENGINE_TA, task->raster_cmds);
-+ if (ret)
-+ goto out_err;
-+ }
-+ if (oom_cmd_buffer && arg->oom_size != 0) {
-+ task->oom_cmd_size = arg->oom_size;
-+ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
-+ arg->oom_offset,
-+ arg->oom_size,
-+ PSB_ENGINE_TA, task->oom_cmds);
-+ if (ret)
-+ goto out_err;
-+ }
-+ task->task_type = task_type;
-+ task->flags = flags;
-+ if (scene)
-+ task->scene = psb_scene_ref(scene);
-+
-+ *task_p = task;
-+ return 0;
-+ out_err:
-+ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
-+ *task_p = NULL;
-+ return ret;
-+}
-+
-+int psb_cmdbuf_ta(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_buffer_object *ta_buffer,
-+ struct drm_buffer_object *oom_buffer,
-+ struct psb_scene *scene,
-+ struct psb_feedback_info *feedback,
-+ struct drm_fence_arg *fence_arg)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct drm_fence_object *fence = NULL;
-+ struct psb_task *task = NULL;
-+ int ret;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ PSB_DEBUG_RENDER("Cmdbuf ta\n");
-+
-+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
-+ if (ret)
-+ return -EAGAIN;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, ta_buffer,
-+ oom_buffer, scene,
-+ psb_ta_task, PSB_ENGINE_TA,
-+ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (ret)
-+ goto out_err;
-+
-+ task->feedback = *feedback;
-+
-+ /*
-+ * Hand the task over to the scheduler.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
-+
-+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
-+
-+ task->ta_complete_action = PSB_RASTER;
-+ task->raster_complete_action = PSB_RETURN;
-+
-+ list_add_tail(&task->head, &scheduler->ta_queue);
-+ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
-+
-+ psb_schedule_ta(dev_priv, scheduler);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
-+ drm_regs_fence(&dev_priv->use_manager, fence);
-+ if (fence)
-+ fence_arg->signaled |= 0x1;
-+
-+ out_err:
-+ if (ret && ret != -EAGAIN)
-+ DRM_ERROR("TA task queue job failed.\n");
-+
-+ if (fence) {
-+#ifdef PSB_WAIT_FOR_TA_COMPLETION
-+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
-+ _PSB_FENCE_TYPE_TA_DONE);
-+#ifdef PSB_BE_PARANOID
-+ drm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
-+ _PSB_FENCE_TYPE_SCENE_DONE);
-+#endif
-+#endif
-+ drm_fence_usage_deref_unlocked(&fence);
-+ }
-+ mutex_unlock(&dev_priv->reset_mutex);
-+
-+ return ret;
-+}
-+
-+int psb_cmdbuf_raster(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_fence_arg *fence_arg)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct drm_fence_object *fence = NULL;
-+ struct psb_task *task = NULL;
-+ int ret;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
-+
-+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
-+ if (ret)
-+ return -EAGAIN;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = psb_setup_task_devlocked(dev, arg, cmd_buffer, NULL, NULL,
-+ NULL, psb_raster_task,
-+ PSB_ENGINE_TA, 0, &task);
-+ mutex_unlock(&dev->struct_mutex);
-+
-+ if (ret)
-+ goto out_err;
-+
-+ /*
-+ * Hand the task over to the scheduler.
-+ */
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
-+ psb_report_fence(scheduler, PSB_ENGINE_TA, task->sequence, 0, 1);
-+ task->ta_complete_action = PSB_RASTER;
-+ task->raster_complete_action = PSB_RETURN;
-+
-+ list_add_tail(&task->head, &scheduler->ta_queue);
-+ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
-+ psb_schedule_ta(dev_priv, scheduler);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+
-+ psb_fence_or_sync(priv, PSB_ENGINE_TA, arg, fence_arg, &fence);
-+ drm_regs_fence(&dev_priv->use_manager, fence);
-+ if (fence)
-+ fence_arg->signaled |= 0x1;
-+ out_err:
-+ if (ret && ret != -EAGAIN)
-+ DRM_ERROR("Raster task queue job failed.\n");
-+
-+ if (fence) {
-+#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
-+ drm_fence_object_wait(fence, 1, 1, fence->type);
-+#endif
-+ drm_fence_usage_deref_unlocked(&fence);
-+ }
-+
-+ mutex_unlock(&dev_priv->reset_mutex);
-+
-+ return ret;
-+}
-+
-+#ifdef FIX_TG_16
-+
-+static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
-+{
-+ if (psb_2d_trylock(dev_priv)) {
-+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
-+ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
-+ _PSB_C2B_STATUS_BUSY))) {
-+ return 0;
-+ }
-+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
-+ psb_2D_irq_on(dev_priv);
-+
-+ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
-+ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
-+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
-+
-+ psb_2d_atomic_unlock(dev_priv);
-+ }
-+
-+ atomic_set(&dev_priv->ta_wait_2d, 1);
-+ return -EBUSY;
-+}
-+
-+static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+
-+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
-+ psb_schedule_ta(dev_priv, scheduler);
-+ if (atomic_read(&dev_priv->waiters_2d) != 0)
-+ wake_up(&dev_priv->queue_2d);
-+ }
-+}
-+
-+void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
-+ atomic_set(&dev_priv->ta_wait_2d, 0);
-+ psb_2D_irq_off(dev_priv);
-+ psb_schedule_ta(dev_priv, scheduler);
-+ if (atomic_read(&dev_priv->waiters_2d) != 0)
-+ wake_up(&dev_priv->queue_2d);
-+ }
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+/*
-+ * 2D locking functions. Can't use a mutex since the trylock() and
-+ * unlock() methods need to be accessible from interrupt context.
-+ */
-+
-+static int psb_2d_trylock(struct drm_psb_private *dev_priv)
-+{
-+ return (atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0);
-+}
-+
-+static void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
-+{
-+ atomic_set(&dev_priv->lock_2d, 0);
-+ if (atomic_read(&dev_priv->waiters_2d) != 0)
-+ wake_up(&dev_priv->queue_2d);
-+}
-+
-+void psb_2d_unlock(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&scheduler->lock, irq_flags);
-+ psb_2d_atomic_unlock(dev_priv);
-+ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
-+ psb_atomic_resume_ta_2d_idle(dev_priv);
-+ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
-+}
-+
-+void psb_2d_lock(struct drm_psb_private *dev_priv)
-+{
-+ atomic_inc(&dev_priv->waiters_2d);
-+ wait_event(dev_priv->queue_2d, atomic_read(&dev_priv->ta_wait_2d) == 0);
-+ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
-+ atomic_dec(&dev_priv->waiters_2d);
-+}
-+
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_schedule.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_schedule.h 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,170 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
-+ */
-+
-+#ifndef _PSB_SCHEDULE_H_
-+#define _PSB_SCHEDULE_H_
-+
-+#include "drmP.h"
-+
-+enum psb_task_type {
-+ psb_ta_midscene_task,
-+ psb_ta_task,
-+ psb_raster_task,
-+ psb_freescene_task
-+};
-+
-+#define PSB_MAX_TA_CMDS 60
-+#define PSB_MAX_RASTER_CMDS 60
-+#define PSB_MAX_OOM_CMDS 6
-+
-+struct psb_xhw_buf {
-+ struct list_head head;
-+ int copy_back;
-+ atomic_t done;
-+ struct drm_psb_xhw_arg arg;
-+
-+};
-+
-+struct psb_feedback_info {
-+ struct drm_buffer_object *bo;
-+ struct page *page;
-+ uint32_t offset;
-+};
-+
-+struct psb_task {
-+ struct list_head head;
-+ struct psb_scene *scene;
-+ struct psb_feedback_info feedback;
-+ enum psb_task_type task_type;
-+ uint32_t engine;
-+ uint32_t sequence;
-+ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
-+ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
-+ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
-+ uint32_t ta_cmd_size;
-+ uint32_t raster_cmd_size;
-+ uint32_t oom_cmd_size;
-+ uint32_t feedback_offset;
-+ uint32_t ta_complete_action;
-+ uint32_t raster_complete_action;
-+ uint32_t hw_cookie;
-+ uint32_t flags;
-+ uint32_t reply_flags;
-+ uint32_t aborting;
-+ struct psb_xhw_buf buf;
-+};
-+
-+struct psb_hw_scene {
-+ struct list_head head;
-+ uint32_t context_number;
-+
-+ /*
-+ * This pointer does not refcount the last_scene_buffer,
-+ * so we must make sure it is set to NULL before destroying
-+ * the corresponding task.
-+ */
-+
-+ struct psb_scene *last_scene;
-+};
-+
-+struct psb_scene;
-+struct drm_psb_private;
-+
-+struct psb_scheduler_seq {
-+ uint32_t sequence;
-+ int reported;
-+};
-+
-+struct psb_scheduler {
-+ struct drm_device *dev;
-+ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
-+ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
-+ struct mutex task_wq_mutex;
-+ spinlock_t lock;
-+ struct list_head hw_scenes;
-+ struct list_head ta_queue;
-+ struct list_head raster_queue;
-+ struct list_head hp_raster_queue;
-+ struct list_head task_done_queue;
-+ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
-+ struct psb_task *feedback_task;
-+ int ta_state;
-+ struct psb_hw_scene *pending_hw_scene;
-+ uint32_t pending_hw_scene_seq;
-+ struct delayed_work wq;
-+ struct psb_scene_pool *pool;
-+ uint32_t idle_count;
-+ int idle;
-+ wait_queue_head_t idle_queue;
-+ unsigned long ta_end_jiffies;
-+ unsigned long raster_end_jiffies;
-+ unsigned long total_raster_jiffies;
-+};
-+
-+#define PSB_RF_FIRE_TA (1 << 0)
-+#define PSB_RF_OOM (1 << 1)
-+#define PSB_RF_OOM_REPLY (1 << 2)
-+#define PSB_RF_TERMINATE (1 << 3)
-+#define PSB_RF_TA_DONE (1 << 4)
-+#define PSB_RF_FIRE_RASTER (1 << 5)
-+#define PSB_RF_RASTER_DONE (1 << 6)
-+#define PSB_RF_DEALLOC (1 << 7)
-+
-+extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
-+ int shareable, uint32_t w,
-+ uint32_t h);
-+extern uint32_t psb_scene_handle(struct psb_scene *scene);
-+extern int psb_scheduler_init(struct drm_device *dev,
-+ struct psb_scheduler *scheduler);
-+extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
-+extern int psb_cmdbuf_ta(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_buffer_object *ta_buffer,
-+ struct drm_buffer_object *oom_buffer,
-+ struct psb_scene *scene,
-+ struct psb_feedback_info *feedback,
-+ struct drm_fence_arg *fence_arg);
-+extern int psb_cmdbuf_raster(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_fence_arg *fence_arg);
-+extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
-+ uint32_t status);
-+extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
-+extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
-+extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
-+extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
-+
-+extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
-+ int *lockup, int *idle);
-+extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
-+ int error_condition);
-+extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
-+extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
-+extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
-+extern int psb_extend_raster_timeout(struct drm_psb_private *dev_priv);
-+
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_sgx.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_sgx.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,1422 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+#include "psb_drm.h"
-+#include "psb_reg.h"
-+#include "psb_scene.h"
-+
-+#include "psb_msvdx.h"
-+
-+int psb_submit_video_cmdbuf(struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset, unsigned long cmd_size,
-+ struct drm_fence_object *fence);
-+
-+struct psb_dstbuf_cache {
-+ unsigned int dst;
-+ uint32_t *use_page;
-+ unsigned int use_index;
-+ uint32_t use_background;
-+ struct drm_buffer_object *dst_buf;
-+ unsigned long dst_offset;
-+ uint32_t *dst_page;
-+ unsigned int dst_page_offset;
-+ struct drm_bo_kmap_obj dst_kmap;
-+ int dst_is_iomem;
-+};
-+
-+struct psb_buflist_item {
-+ struct drm_buffer_object *bo;
-+ void __user *data;
-+ int ret;
-+ int presumed_offset_correct;
-+};
-+
-+
-+#define PSB_REG_GRAN_SHIFT 2
-+#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
-+#define PSB_MAX_REG 0x1000
-+
-+static const uint32_t disallowed_ranges[][2] = {
-+ {0x0000, 0x0200},
-+ {0x0208, 0x0214},
-+ {0x021C, 0x0224},
-+ {0x0230, 0x0234},
-+ {0x0248, 0x024C},
-+ {0x0254, 0x0358},
-+ {0x0428, 0x0428},
-+ {0x0430, 0x043C},
-+ {0x0498, 0x04B4},
-+ {0x04CC, 0x04D8},
-+ {0x04E0, 0x07FC},
-+ {0x0804, 0x0A58},
-+ {0x0A68, 0x0A80},
-+ {0x0AA0, 0x0B1C},
-+ {0x0B2C, 0x0CAC},
-+ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
-+};
-+
-+static uint32_t psb_disallowed_regs[PSB_MAX_REG /
-+ (PSB_REG_GRANULARITY *
-+ (sizeof(uint32_t) << 3))];
-+
-+static inline int psb_disallowed(uint32_t reg)
-+{
-+ reg >>= PSB_REG_GRAN_SHIFT;
-+ return ((psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0);
-+}
-+
-+void psb_init_disallowed(void)
-+{
-+ int i;
-+ uint32_t reg, tmp;
-+ static int initialized = 0;
-+
-+ if (initialized)
-+ return;
-+
-+ initialized = 1;
-+ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
-+
-+ for (i = 0; i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
-+ ++i) {
-+ for (reg = disallowed_ranges[i][0];
-+ reg <= disallowed_ranges[i][1]; reg += 4) {
-+ tmp = reg >> 2;
-+ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
-+ }
-+ }
-+}
-+
-+static int psb_memcpy_check(uint32_t * dst, const uint32_t * src, uint32_t size)
-+{
-+ size >>= 3;
-+ while (size--) {
-+ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
-+ DRM_ERROR("Forbidden SGX register access: "
-+ "0x%04x.\n", *src);
-+ return -EPERM;
-+ }
-+ *dst++ = *src++;
-+ *dst++ = *src++;
-+ }
-+ return 0;
-+}
-+
-+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
-+ unsigned size)
-+{
-+ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
-+ int ret = 0;
-+
-+ retry:
-+ if (avail < size) {
-+#if 0
-+ /* We'd ideally
-+ * like to have an IRQ-driven event here.
-+ */
-+
-+ psb_2D_irq_on(dev_priv);
-+ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
-+ ((avail = PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
-+ psb_2D_irq_off(dev_priv);
-+ if (ret == 0)
-+ return 0;
-+ if (ret == -EINTR) {
-+ ret = 0;
-+ goto retry;
-+ }
-+#else
-+ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
-+ goto retry;
-+#endif
-+ }
-+ return ret;
-+}
-+
-+int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t * cmdbuf,
-+ unsigned size)
-+{
-+ int ret = 0;
-+ int i;
-+ unsigned submit_size;
-+
-+ while (size > 0) {
-+ submit_size = (size < 0x60) ? size : 0x60;
-+ size -= submit_size;
-+ ret = psb_2d_wait_available(dev_priv, submit_size);
-+ if (ret)
-+ return ret;
-+
-+ submit_size <<= 2;
-+
-+ for (i = 0; i < submit_size; i += 4) {
-+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
-+ }
-+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
-+ }
-+ return 0;
-+}
-+
-+int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
-+{
-+ uint32_t buffer[8];
-+ uint32_t *bufp = buffer;
-+ int ret;
-+
-+ *bufp++ = PSB_2D_FENCE_BH;
-+
-+ *bufp++ = PSB_2D_DST_SURF_BH |
-+ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
-+ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
-+
-+ *bufp++ = PSB_2D_BLIT_BH |
-+ PSB_2D_ROT_NONE |
-+ PSB_2D_COPYORDER_TL2BR |
-+ PSB_2D_DSTCK_DISABLE |
-+ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
-+
-+ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
-+ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
-+ (0 << PSB_2D_DST_YSTART_SHIFT);
-+ *bufp++ = (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
-+
-+ *bufp++ = PSB_2D_FLUSH_BH;
-+
-+ psb_2d_lock(dev_priv);
-+ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
-+ psb_2d_unlock(dev_priv);
-+
-+ if (!ret)
-+ psb_schedule_watchdog(dev_priv);
-+ return ret;
-+}
-+
-+int psb_emit_2d_copy_blit(struct drm_device *dev,
-+ uint32_t src_offset,
-+ uint32_t dst_offset, uint32_t pages, int direction)
-+{
-+ uint32_t cur_pages;
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ uint32_t buf[10];
-+ uint32_t *bufp;
-+ uint32_t xstart;
-+ uint32_t ystart;
-+ uint32_t blit_cmd;
-+ uint32_t pg_add;
-+ int ret = 0;
-+
-+ if (!dev_priv)
-+ return 0;
-+
-+ if (direction) {
-+ pg_add = (pages - 1) << PAGE_SHIFT;
-+ src_offset += pg_add;
-+ dst_offset += pg_add;
-+ }
-+
-+ blit_cmd = PSB_2D_BLIT_BH |
-+ PSB_2D_ROT_NONE |
-+ PSB_2D_DSTCK_DISABLE |
-+ PSB_2D_SRCCK_DISABLE |
-+ PSB_2D_USE_PAT |
-+ PSB_2D_ROP3_SRCCOPY |
-+ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
-+ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
-+
-+ psb_2d_lock(dev_priv);
-+ while (pages > 0) {
-+ cur_pages = pages;
-+ if (cur_pages > 2048)
-+ cur_pages = 2048;
-+ pages -= cur_pages;
-+ ystart = (direction) ? cur_pages - 1 : 0;
-+
-+ bufp = buf;
-+ *bufp++ = PSB_2D_FENCE_BH;
-+
-+ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
-+ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
-+ *bufp++ = dst_offset;
-+ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
-+ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
-+ *bufp++ = src_offset;
-+ *bufp++ =
-+ PSB_2D_SRC_OFF_BH | (xstart << PSB_2D_SRCOFF_XSTART_SHIFT) |
-+ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
-+ *bufp++ = blit_cmd;
-+ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
-+ (ystart << PSB_2D_DST_YSTART_SHIFT);
-+ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
-+ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
-+
-+ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
-+ if (ret)
-+ goto out;
-+ pg_add = (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
-+ src_offset += pg_add;
-+ dst_offset += pg_add;
-+ }
-+ out:
-+ psb_2d_unlock(dev_priv);
-+ return ret;
-+}
-+
-+void psb_init_2d(struct drm_psb_private *dev_priv)
-+{
-+ dev_priv->sequence_lock = SPIN_LOCK_UNLOCKED;
-+ psb_reset(dev_priv, 1);
-+ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
-+ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
-+ (void)PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
-+}
-+
-+int psb_idle_2d(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned long _end = jiffies + DRM_HZ;
-+ int busy = 0;
-+
-+ /*
-+ * First idle the 2D engine.
-+ */
-+
-+ if (dev_priv->engine_lockup_2d)
-+ return -EBUSY;
-+
-+ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
-+ goto out;
-+
-+ do {
-+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
-+ } while (busy && !time_after_eq(jiffies, _end));
-+
-+ if (busy)
-+ busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
-+ if (busy)
-+ goto out;
-+
-+ do {
-+ busy =
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
-+ != 0);
-+ } while (busy && !time_after_eq(jiffies, _end));
-+ if (busy)
-+ busy =
-+ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY)
-+ != 0);
-+
-+ out:
-+ if (busy)
-+ dev_priv->engine_lockup_2d = 1;
-+
-+ return (busy) ? -EBUSY : 0;
-+}
-+
-+int psb_idle_3d(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ struct psb_scheduler *scheduler = &dev_priv->scheduler;
-+ int ret;
-+
-+ ret = wait_event_timeout(scheduler->idle_queue,
-+ psb_scheduler_finished(dev_priv), DRM_HZ * 10);
-+
-+ return (ret < 1) ? -EBUSY : 0;
-+}
-+
-+static void psb_dereference_buffers_locked(struct psb_buflist_item *buffers,
-+ unsigned num_buffers)
-+{
-+ while (num_buffers--)
-+ drm_bo_usage_deref_locked(&((buffers++)->bo));
-+
-+}
-+
-+static int psb_check_presumed(struct drm_bo_op_arg *arg,
-+ struct drm_buffer_object *bo,
-+ uint32_t __user * data, int *presumed_ok)
-+{
-+ struct drm_bo_op_req *req = &arg->d.req;
-+ uint32_t hint_offset;
-+ uint32_t hint = req->bo_req.hint;
-+
-+ *presumed_ok = 0;
-+
-+ if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
-+ return 0;
-+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
-+ *presumed_ok = 1;
-+ return 0;
-+ }
-+ if (bo->offset == req->bo_req.presumed_offset) {
-+ *presumed_ok = 1;
-+ return 0;
-+ }
-+
-+ /*
-+ * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
-+ * the user-space IOCTL argument list, since the buffer has moved,
-+ * we're about to apply relocations and we might subsequently
-+ * hit an -EAGAIN. In that case the argument list will be reused by
-+ * user-space, but the presumed offset is no longer valid.
-+ *
-+ * Needless to say, this is a bit ugly.
-+ */
-+
-+ hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
-+ hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
-+ return __put_user(hint, data + hint_offset);
-+}
-+
-+static int psb_validate_buffer_list(struct drm_file *file_priv,
-+ unsigned fence_class,
-+ unsigned long data,
-+ struct psb_buflist_item *buffers,
-+ unsigned *num_buffers)
-+{
-+ struct drm_bo_op_arg arg;
-+ struct drm_bo_op_req *req = &arg.d.req;
-+ int ret = 0;
-+ unsigned buf_count = 0;
-+ struct psb_buflist_item *item = buffers;
-+
-+ do {
-+ if (buf_count >= *num_buffers) {
-+ DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ item = buffers + buf_count;
-+ item->bo = NULL;
-+
-+ if (copy_from_user(&arg, (void __user *)data, sizeof(arg))) {
-+ ret = -EFAULT;
-+ DRM_ERROR("Error copying validate list.\n"
-+ "\tbuffer %d, user addr 0x%08lx %d\n",
-+ buf_count, (unsigned long)data, sizeof(arg));
-+ goto out_err;
-+ }
-+
-+ ret = 0;
-+ if (req->op != drm_bo_validate) {
-+ DRM_ERROR
-+ ("Buffer object operation wasn't \"validate\".\n");
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ item->ret = 0;
-+ item->data = (void *)__user data;
-+ ret = drm_bo_handle_validate(file_priv,
-+ req->bo_req.handle,
-+ fence_class,
-+ req->bo_req.flags,
-+ req->bo_req.mask,
-+ req->bo_req.hint,
-+ 0, NULL, &item->bo);
-+ if (ret)
-+ goto out_err;
-+
-+ PSB_DEBUG_GENERAL("Validated buffer at 0x%08lx\n",
-+ buffers[buf_count].bo->offset);
-+
-+ buf_count++;
-+
-+
-+ ret = psb_check_presumed(&arg, item->bo,
-+ (uint32_t __user *)
-+ (unsigned long) data,
-+ &item->presumed_offset_correct);
-+
-+ if (ret)
-+ goto out_err;
-+
-+ data = arg.next;
-+ } while (data);
-+
-+ *num_buffers = buf_count;
-+
-+ return 0;
-+ out_err:
-+
-+ *num_buffers = buf_count;
-+ item->ret = (ret != -EAGAIN) ? ret : 0;
-+ return ret;
-+}
-+
-+int
-+psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
-+ unsigned int cmds)
-+{
-+ int i;
-+
-+ /*
-+ * cmds is 32-bit words.
-+ */
-+
-+ cmds >>= 1;
-+ for (i = 0; i < cmds; ++i) {
-+ PSB_WSGX32(regs[1], regs[0]);
-+ regs += 2;
-+ }
-+ wmb();
-+ return 0;
-+}
-+
-+/*
-+ * Security: Block user-space writing to MMU mapping registers.
-+ * This is important for security and brings Poulsbo DRM
-+ * up to par with the other DRM drivers. Using this,
-+ * user-space should not be able to map arbitrary memory
-+ * pages to graphics memory, but all user-space processes
-+ * basically have access to all buffer objects mapped to
-+ * graphics memory.
-+ */
-+
-+int
-+psb_submit_copy_cmdbuf(struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset,
-+ unsigned long cmd_size,
-+ int engine, uint32_t * copy_buffer)
-+{
-+ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
-+ struct drm_psb_private *dev_priv = dev->dev_private;
-+ unsigned long cmd_page_offset = cmd_offset - (cmd_offset & PAGE_MASK);
-+ unsigned long cmd_next;
-+ struct drm_bo_kmap_obj cmd_kmap;
-+ uint32_t *cmd_page;
-+ unsigned cmds;
-+ int is_iomem;
-+ int ret = 0;
-+
-+ if (cmd_size == 0)
-+ return 0;
-+
-+ if (engine == PSB_ENGINE_2D)
-+ psb_2d_lock(dev_priv);
-+
-+ do {
-+ cmd_next = drm_bo_offset_end(cmd_offset, cmd_end);
-+ ret = drm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
-+ 1, &cmd_kmap);
-+
-+ if (ret)
-+ return ret;
-+ cmd_page = drm_bmo_virtual(&cmd_kmap, &is_iomem);
-+ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
-+ cmds = (cmd_next - cmd_offset) >> 2;
-+
-+ switch (engine) {
-+ case PSB_ENGINE_2D:
-+ ret =
-+ psb_2d_submit(dev_priv, cmd_page + cmd_page_offset,
-+ cmds);
-+ break;
-+ case PSB_ENGINE_RASTERIZER:
-+ case PSB_ENGINE_TA:
-+ case PSB_ENGINE_HPRAST:
-+ PSB_DEBUG_GENERAL("Reg copy.\n");
-+ ret = psb_memcpy_check(copy_buffer,
-+ cmd_page + cmd_page_offset,
-+ cmds * sizeof(uint32_t));
-+ copy_buffer += cmds;
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ }
-+ drm_bo_kunmap(&cmd_kmap);
-+ if (ret)
-+ break;
-+ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
-+
-+ if (engine == PSB_ENGINE_2D)
-+ psb_2d_unlock(dev_priv);
-+
-+ return ret;
-+}
-+
-+static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
-+{
-+ if (dst_cache->dst_page) {
-+ drm_bo_kunmap(&dst_cache->dst_kmap);
-+ dst_cache->dst_page = NULL;
-+ }
-+ dst_cache->dst_buf = NULL;
-+ dst_cache->dst = ~0;
-+ dst_cache->use_page = NULL;
-+}
-+
-+static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
-+ struct psb_buflist_item *buffers,
-+ unsigned int dst, unsigned long dst_offset)
-+{
-+ int ret;
-+
-+ PSB_DEBUG_RELOC("Destination buffer is %d.\n", dst);
-+
-+ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
-+ psb_clear_dstbuf_cache(dst_cache);
-+ dst_cache->dst = dst;
-+ dst_cache->dst_buf = buffers[dst].bo;
-+ }
-+
-+ if (unlikely(dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
-+ DRM_ERROR("Relocation destination out of bounds.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (!drm_bo_same_page(dst_cache->dst_offset, dst_offset) ||
-+ NULL == dst_cache->dst_page) {
-+ if (NULL != dst_cache->dst_page) {
-+ drm_bo_kunmap(&dst_cache->dst_kmap);
-+ dst_cache->dst_page = NULL;
-+ }
-+
-+ ret = drm_bo_kmap(dst_cache->dst_buf, dst_offset >> PAGE_SHIFT,
-+ 1, &dst_cache->dst_kmap);
-+ if (ret) {
-+ DRM_ERROR("Could not map destination buffer for "
-+ "relocation.\n");
-+ return ret;
-+ }
-+
-+ dst_cache->dst_page = drm_bmo_virtual(&dst_cache->dst_kmap,
-+ &dst_cache->dst_is_iomem);
-+ dst_cache->dst_offset = dst_offset & PAGE_MASK;
-+ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
-+ }
-+ return 0;
-+}
-+
-+static int psb_apply_reloc(struct drm_psb_private *dev_priv,
-+ uint32_t fence_class,
-+ const struct drm_psb_reloc *reloc,
-+ struct psb_buflist_item *buffers,
-+ int num_buffers,
-+ struct psb_dstbuf_cache *dst_cache,
-+ int no_wait, int interruptible)
-+{
-+ int reg;
-+ uint32_t val;
-+ uint32_t background;
-+ unsigned int index;
-+ int ret;
-+ unsigned int shift;
-+ unsigned int align_shift;
-+ uint32_t fence_type;
-+ struct drm_buffer_object *reloc_bo;
-+
-+ PSB_DEBUG_RELOC("Reloc type %d\n"
-+ "\t where 0x%04x\n"
-+ "\t buffer 0x%04x\n"
-+ "\t mask 0x%08x\n"
-+ "\t shift 0x%08x\n"
-+ "\t pre_add 0x%08x\n"
-+ "\t background 0x%08x\n"
-+ "\t dst_buffer 0x%08x\n"
-+ "\t arg0 0x%08x\n"
-+ "\t arg1 0x%08x\n",
-+ reloc->reloc_op,
-+ reloc->where,
-+ reloc->buffer,
-+ reloc->mask,
-+ reloc->shift,
-+ reloc->pre_add,
-+ reloc->background,
-+ reloc->dst_buffer, reloc->arg0, reloc->arg1);
-+
-+ if (unlikely(reloc->buffer >= num_buffers)) {
-+ DRM_ERROR("Illegal relocation buffer %d.\n", reloc->buffer);
-+ return -EINVAL;
-+ }
-+
-+ if (buffers[reloc->buffer].presumed_offset_correct)
-+ return 0;
-+
-+ if (unlikely(reloc->dst_buffer >= num_buffers)) {
-+ DRM_ERROR("Illegal destination buffer for relocation %d.\n",
-+ reloc->dst_buffer);
-+ return -EINVAL;
-+ }
-+
-+ ret = psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
-+ reloc->where << 2);
-+ if (ret)
-+ return ret;
-+
-+ reloc_bo = buffers[reloc->buffer].bo;
-+
-+ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
-+ DRM_ERROR("Illegal relocation offset add.\n");
-+ return -EINVAL;
-+ }
-+
-+ switch (reloc->reloc_op) {
-+ case PSB_RELOC_OP_OFFSET:
-+ val = reloc_bo->offset + reloc->pre_add;
-+ break;
-+ case PSB_RELOC_OP_2D_OFFSET:
-+ val = reloc_bo->offset + reloc->pre_add -
-+ dev_priv->mmu_2d_offset;
-+ if (unlikely(val >= PSB_2D_SIZE)) {
-+ DRM_ERROR("2D relocation out of bounds\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PSB_RELOC_OP_PDS_OFFSET:
-+ val = reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
-+ if (unlikely(val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
-+ DRM_ERROR("PDS relocation out of bounds\n");
-+ return -EINVAL;
-+ }
-+ break;
-+ case PSB_RELOC_OP_USE_OFFSET:
-+ case PSB_RELOC_OP_USE_REG:
-+
-+ /*
-+ * Security:
-+ * Only allow VERTEX or PIXEL data masters, as
-+ * shaders run under other data masters may in theory
-+ * alter MMU mappings.
-+ */
-+
-+ if (unlikely(reloc->arg1 != _PSB_CUC_DM_PIXEL &&
-+ reloc->arg1 != _PSB_CUC_DM_VERTEX)) {
-+ DRM_ERROR("Invalid data master in relocation. %d\n",
-+ reloc->arg1);
-+ return -EPERM;
-+ }
-+
-+ fence_type = reloc_bo->fence_type;
-+ ret = psb_grab_use_base(dev_priv,
-+ reloc_bo->offset +
-+ reloc->pre_add, reloc->arg0,
-+ reloc->arg1, fence_class,
-+ fence_type, no_wait,
-+ interruptible, &reg, &val);
-+ if (ret)
-+ return ret;
-+
-+ val = (reloc->reloc_op == PSB_RELOC_OP_USE_REG) ? reg : val;
-+ break;
-+ default:
-+ DRM_ERROR("Unimplemented relocation.\n");
-+ return -EINVAL;
-+ }
-+
-+ shift = (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
-+ align_shift = (reloc->shift & PSB_RELOC_ALSHIFT_MASK) >>
-+ PSB_RELOC_ALSHIFT_SHIFT;
-+
-+ val = ((val >> align_shift) << shift);
-+ index = reloc->where - dst_cache->dst_page_offset;
-+
-+ background = reloc->background;
-+
-+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET) {
-+ if (dst_cache->use_page == dst_cache->dst_page &&
-+ dst_cache->use_index == index)
-+ background = dst_cache->use_background;
-+ else
-+ background = dst_cache->dst_page[index];
-+ }
-+#if 0
-+ if (dst_cache->dst_page[index] != PSB_RELOC_MAGIC &&
-+ reloc->reloc_op != PSB_RELOC_OP_USE_OFFSET)
-+ DRM_ERROR("Inconsistent relocation 0x%08lx.\n",
-+ (unsigned long)dst_cache->dst_page[index]);
-+#endif
-+
-+ val = (background & ~reloc->mask) | (val & reloc->mask);
-+ dst_cache->dst_page[index] = val;
-+
-+ if (reloc->reloc_op == PSB_RELOC_OP_USE_OFFSET ||
-+ reloc->reloc_op == PSB_RELOC_OP_USE_REG) {
-+ dst_cache->use_page = dst_cache->dst_page;
-+ dst_cache->use_index = index;
-+ dst_cache->use_background = val;
-+ }
-+
-+ PSB_DEBUG_RELOC("Reloc buffer %d index 0x%08x, value 0x%08x\n",
-+ reloc->dst_buffer, index, dst_cache->dst_page[index]);
-+
-+ return 0;
-+}
-+
-+static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
-+ unsigned int num_pages)
-+{
-+ int ret = 0;
-+
-+ spin_lock(&dev_priv->reloc_lock);
-+ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
-+ dev_priv->rel_mapped_pages += num_pages;
-+ ret = 1;
-+ }
-+ spin_unlock(&dev_priv->reloc_lock);
-+ return ret;
-+}
-+
-+static int psb_fixup_relocs(struct drm_file *file_priv,
-+ uint32_t fence_class,
-+ unsigned int num_relocs,
-+ unsigned int reloc_offset,
-+ uint32_t reloc_handle,
-+ struct psb_buflist_item *buffers,
-+ unsigned int num_buffers,
-+ int no_wait, int interruptible)
-+{
-+ struct drm_device *dev = file_priv->minor->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_buffer_object *reloc_buffer = NULL;
-+ unsigned int reloc_num_pages;
-+ unsigned int reloc_first_page;
-+ unsigned int reloc_last_page;
-+ struct psb_dstbuf_cache dst_cache;
-+ struct drm_psb_reloc *reloc;
-+ struct drm_bo_kmap_obj reloc_kmap;
-+ int reloc_is_iomem;
-+ int count;
-+ int ret = 0;
-+ int registered = 0;
-+ int short_circuit = 1;
-+ int i;
-+
-+ if (num_relocs == 0)
-+ return 0;
-+
-+ for (i=0; i<num_buffers; ++i) {
-+ if (!buffers[i].presumed_offset_correct) {
-+ short_circuit = 0;
-+ break;
-+ }
-+ }
-+
-+ if (short_circuit)
-+ return 0;
-+
-+ memset(&dst_cache, 0, sizeof(dst_cache));
-+ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
-+
-+ mutex_lock(&dev->struct_mutex);
-+ reloc_buffer = drm_lookup_buffer_object(file_priv, reloc_handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!reloc_buffer)
-+ goto out;
-+
-+ reloc_first_page = reloc_offset >> PAGE_SHIFT;
-+ reloc_last_page =
-+ (reloc_offset +
-+ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
-+ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
-+ reloc_offset &= ~PAGE_MASK;
-+
-+ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
-+ DRM_ERROR("Relocation buffer is too large\n");
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
-+ (registered =
-+ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
-+
-+ if (ret == -EINTR) {
-+ ret = -EAGAIN;
-+ goto out;
-+ }
-+ if (ret) {
-+ DRM_ERROR("Error waiting for space to map "
-+ "relocation buffer.\n");
-+ goto out;
-+ }
-+
-+ ret = drm_bo_kmap(reloc_buffer, reloc_first_page,
-+ reloc_num_pages, &reloc_kmap);
-+
-+ if (ret) {
-+ DRM_ERROR("Could not map relocation buffer.\n"
-+ "\tReloc buffer id 0x%08x.\n"
-+ "\tReloc first page %d.\n"
-+ "\tReloc num pages %d.\n",
-+ reloc_handle, reloc_first_page, reloc_num_pages);
-+ goto out;
-+ }
-+
-+ reloc = (struct drm_psb_reloc *)
-+ ((unsigned long)drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem) +
-+ reloc_offset);
-+
-+ for (count = 0; count < num_relocs; ++count) {
-+ ret = psb_apply_reloc(dev_priv, fence_class,
-+ reloc, buffers,
-+ num_buffers, &dst_cache,
-+ no_wait, interruptible);
-+ if (ret)
-+ goto out1;
-+ reloc++;
-+ }
-+
-+ out1:
-+ drm_bo_kunmap(&reloc_kmap);
-+ out:
-+ if (registered) {
-+ spin_lock(&dev_priv->reloc_lock);
-+ dev_priv->rel_mapped_pages -= reloc_num_pages;
-+ spin_unlock(&dev_priv->reloc_lock);
-+ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
-+ }
-+
-+ psb_clear_dstbuf_cache(&dst_cache);
-+ if (reloc_buffer)
-+ drm_bo_usage_deref_unlocked(&reloc_buffer);
-+ return ret;
-+}
-+
-+static int psb_cmdbuf_2d(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_fence_arg *fence_arg)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ int ret;
-+
-+ ret = mutex_lock_interruptible(&dev_priv->reset_mutex);
-+ if (ret)
-+ return -EAGAIN;
-+
-+ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
-+ arg->cmdbuf_size, PSB_ENGINE_2D, NULL);
-+ if (ret)
-+ goto out_unlock;
-+
-+ psb_fence_or_sync(priv, PSB_ENGINE_2D, arg, fence_arg, NULL);
-+
-+ mutex_lock(&cmd_buffer->mutex);
-+ if (cmd_buffer->fence != NULL)
-+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
-+ mutex_unlock(&cmd_buffer->mutex);
-+ out_unlock:
-+ mutex_unlock(&dev_priv->reset_mutex);
-+ return ret;
-+}
-+
-+#if 0
-+static int psb_dump_page(struct drm_buffer_object *bo,
-+ unsigned int page_offset, unsigned int num)
-+{
-+ struct drm_bo_kmap_obj kmobj;
-+ int is_iomem;
-+ uint32_t *p;
-+ int ret;
-+ unsigned int i;
-+
-+ ret = drm_bo_kmap(bo, page_offset, 1, &kmobj);
-+ if (ret)
-+ return ret;
-+
-+ p = drm_bmo_virtual(&kmobj, &is_iomem);
-+ for (i = 0; i < num; ++i)
-+ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
-+
-+ drm_bo_kunmap(&kmobj);
-+ return 0;
-+}
-+#endif
-+
-+static void psb_idle_engine(struct drm_device *dev, int engine)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ uint32_t dummy;
-+
-+ switch (engine) {
-+ case PSB_ENGINE_2D:
-+
-+ /*
-+ * Make sure we flush 2D properly using a dummy
-+ * fence sequence emit.
-+ */
-+
-+ (void)psb_fence_emit_sequence(dev, PSB_ENGINE_2D, 0,
-+ &dummy, &dummy);
-+ psb_2d_lock(dev_priv);
-+ (void)psb_idle_2d(dev);
-+ psb_2d_unlock(dev_priv);
-+ break;
-+ case PSB_ENGINE_TA:
-+ case PSB_ENGINE_RASTERIZER:
-+ case PSB_ENGINE_HPRAST:
-+ (void)psb_idle_3d(dev);
-+ break;
-+ default:
-+
-+ /*
-+ * FIXME: Insert video engine idle command here.
-+ */
-+
-+ break;
-+ }
-+}
-+
-+void psb_fence_or_sync(struct drm_file *priv,
-+ int engine,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_fence_arg *fence_arg,
-+ struct drm_fence_object **fence_p)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ int ret;
-+ struct drm_fence_object *fence;
-+
-+ ret = drm_fence_buffer_objects(dev, NULL, arg->fence_flags,
-+ NULL, &fence);
-+
-+ if (ret) {
-+
-+ /*
-+ * Fence creation failed.
-+ * Fall back to synchronous operation and idle the engine.
-+ */
-+
-+ psb_idle_engine(dev, engine);
-+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-+
-+ /*
-+ * Communicate to user-space that
-+ * fence creation has failed and that
-+ * the engine is idle.
-+ */
-+
-+ fence_arg->handle = ~0;
-+ fence_arg->error = ret;
-+ }
-+
-+ drm_putback_buffer_objects(dev);
-+ if (fence_p)
-+ *fence_p = NULL;
-+ return;
-+ }
-+
-+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-+
-+ ret = drm_fence_add_user_object(priv, fence,
-+ arg->fence_flags &
-+ DRM_FENCE_FLAG_SHAREABLE);
-+ if (!ret)
-+ drm_fence_fill_arg(fence, fence_arg);
-+ else {
-+ /*
-+ * Fence user object creation failed.
-+ * We must idle the engine here as well, as user-
-+ * space expects a fence object to wait on. Since we
-+ * have a fence object we wait for it to signal
-+ * to indicate engine "sufficiently" idle.
-+ */
-+
-+ (void)drm_fence_object_wait(fence, 0, 1, fence->type);
-+ drm_fence_usage_deref_unlocked(&fence);
-+ fence_arg->handle = ~0;
-+ fence_arg->error = ret;
-+ }
-+ }
-+
-+ if (fence_p)
-+ *fence_p = fence;
-+ else if (fence)
-+ drm_fence_usage_deref_unlocked(&fence);
-+}
-+
-+int psb_handle_copyback(struct drm_device *dev,
-+ struct psb_buflist_item *buffers,
-+ unsigned int num_buffers, int ret, void *data)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ struct drm_bo_op_arg arg;
-+ struct psb_buflist_item *item = buffers;
-+ struct drm_buffer_object *bo;
-+ int err = ret;
-+ int i;
-+
-+ /*
-+ * Clear the unfenced use base register lists and buffer lists.
-+ */
-+
-+ if (ret) {
-+ drm_regs_fence(&dev_priv->use_manager, NULL);
-+ drm_putback_buffer_objects(dev);
-+ }
-+
-+ if (ret != -EAGAIN) {
-+ for (i = 0; i < num_buffers; ++i) {
-+ arg.handled = 1;
-+ arg.d.rep.ret = item->ret;
-+ bo = item->bo;
-+ mutex_lock(&bo->mutex);
-+ drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
-+ mutex_unlock(&bo->mutex);
-+ if (copy_to_user(item->data, &arg, sizeof(arg)))
-+ err = -EFAULT;
-+ ++item;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+static int psb_cmdbuf_video(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ unsigned int num_buffers,
-+ struct drm_buffer_object *cmd_buffer,
-+ struct drm_fence_arg *fence_arg)
-+{
-+ struct drm_device *dev = priv->minor->dev;
-+ struct drm_fence_object *fence;
-+ int ret;
-+
-+ /*
-+ * Check this. Doesn't seem right. Have fencing done AFTER command
-+ * submission and make sure drm_psb_idle idles the MSVDX completely.
-+ */
-+
-+ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, arg, fence_arg, &fence);
-+ ret = psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
-+ arg->cmdbuf_size, fence);
-+
-+ if (ret)
-+ return ret;
-+
-+ drm_fence_usage_deref_unlocked(&fence);
-+ mutex_lock(&cmd_buffer->mutex);
-+ if (cmd_buffer->fence != NULL)
-+ drm_fence_usage_deref_unlocked(&cmd_buffer->fence);
-+ mutex_unlock(&cmd_buffer->mutex);
-+ return 0;
-+}
-+
-+int psb_feedback_buf(struct drm_file *file_priv,
-+ uint32_t feedback_ops,
-+ uint32_t handle,
-+ uint32_t offset,
-+ uint32_t feedback_breakpoints,
-+ uint32_t feedback_size, struct psb_feedback_info *feedback)
-+{
-+ struct drm_buffer_object *bo;
-+ struct page *page;
-+ uint32_t page_no;
-+ uint32_t page_offset;
-+ int ret;
-+
-+ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
-+ DRM_ERROR("Illegal feedback op.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (feedback_breakpoints != 0) {
-+ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
-+ return -EINVAL;
-+ }
-+
-+ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
-+ DRM_ERROR("Feedback buffer size too small.\n");
-+ return -EINVAL;
-+ }
-+
-+ page_offset = offset & ~PAGE_MASK;
-+ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
-+ < page_offset) {
-+ DRM_ERROR("Illegal feedback buffer alignment.\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = drm_bo_handle_validate(file_priv,
-+ handle,
-+ PSB_ENGINE_TA,
-+ DRM_BO_FLAG_MEM_LOCAL |
-+ DRM_BO_FLAG_CACHED |
-+ DRM_BO_FLAG_WRITE |
-+ PSB_BO_FLAG_FEEDBACK,
-+ DRM_BO_MASK_MEM |
-+ DRM_BO_FLAG_CACHED |
-+ DRM_BO_FLAG_WRITE |
-+ PSB_BO_FLAG_FEEDBACK, 0, 0, NULL, &bo);
-+ if (ret)
-+ return ret;
-+
-+ page_no = offset >> PAGE_SHIFT;
-+ if (page_no >= bo->num_pages) {
-+ ret = -EINVAL;
-+ DRM_ERROR("Illegal feedback buffer offset.\n");
-+ goto out_unref;
-+ }
-+
-+ if (bo->ttm == NULL) {
-+ ret = -EINVAL;
-+ DRM_ERROR("Vistest buffer without TTM.\n");
-+ goto out_unref;
-+ }
-+
-+ page = drm_ttm_get_page(bo->ttm, page_no);
-+ if (!page) {
-+ ret = -ENOMEM;
-+ goto out_unref;
-+ }
-+
-+ feedback->page = page;
-+ feedback->bo = bo;
-+ feedback->offset = page_offset;
-+ return 0;
-+
-+ out_unref:
-+ drm_bo_usage_deref_unlocked(&bo);
-+ return ret;
-+}
-+
-+int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ drm_psb_cmdbuf_arg_t *arg = data;
-+ int ret = 0;
-+ unsigned num_buffers;
-+ struct drm_buffer_object *cmd_buffer = NULL;
-+ struct drm_buffer_object *ta_buffer = NULL;
-+ struct drm_buffer_object *oom_buffer = NULL;
-+ struct drm_fence_arg fence_arg;
-+ struct drm_psb_scene user_scene;
-+ struct psb_scene_pool *pool = NULL;
-+ struct psb_scene *scene = NULL;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
-+ int engine;
-+ struct psb_feedback_info feedback;
-+
-+ if (!dev_priv)
-+ return -EINVAL;
-+
-+ ret = drm_bo_read_lock(&dev->bm.bm_lock);
-+ if (ret)
-+ return ret;
-+
-+ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
-+
-+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-+ if (ret) {
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return -EAGAIN;
-+ }
-+ if (unlikely(dev_priv->buffers == NULL)) {
-+ dev_priv->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
-+ sizeof(*dev_priv->buffers));
-+ if (dev_priv->buffers == NULL) {
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return -ENOMEM;
-+ }
-+ }
-+
-+
-+ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
-+ PSB_ENGINE_TA : arg->engine;
-+
-+ ret =
-+ psb_validate_buffer_list(file_priv, engine,
-+ (unsigned long)arg->buffer_list,
-+ dev_priv->buffers, &num_buffers);
-+ if (ret)
-+ goto out_err0;
-+
-+ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
-+ arg->reloc_offset, arg->reloc_handle,
-+ dev_priv->buffers, num_buffers, 0, 1);
-+ if (ret)
-+ goto out_err0;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ cmd_buffer = drm_lookup_buffer_object(file_priv, arg->cmdbuf_handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!cmd_buffer) {
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+
-+ switch (arg->engine) {
-+ case PSB_ENGINE_2D:
-+ ret = psb_cmdbuf_2d(file_priv, arg, cmd_buffer, &fence_arg);
-+ if (ret)
-+ goto out_err0;
-+ break;
-+ case PSB_ENGINE_VIDEO:
-+ ret =
-+ psb_cmdbuf_video(file_priv, arg, num_buffers, cmd_buffer,
-+ &fence_arg);
-+ if (ret)
-+ goto out_err0;
-+ break;
-+ case PSB_ENGINE_RASTERIZER:
-+ ret = psb_cmdbuf_raster(file_priv, arg, cmd_buffer, &fence_arg);
-+ if (ret)
-+ goto out_err0;
-+ break;
-+ case PSB_ENGINE_TA:
-+ if (arg->ta_handle == arg->cmdbuf_handle) {
-+ mutex_lock(&dev->struct_mutex);
-+ atomic_inc(&cmd_buffer->usage);
-+ ta_buffer = cmd_buffer;
-+ mutex_unlock(&dev->struct_mutex);
-+ } else {
-+ mutex_lock(&dev->struct_mutex);
-+ ta_buffer =
-+ drm_lookup_buffer_object(file_priv,
-+ arg->ta_handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!ta_buffer) {
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+ }
-+ if (arg->oom_size != 0) {
-+ if (arg->oom_handle == arg->cmdbuf_handle) {
-+ mutex_lock(&dev->struct_mutex);
-+ atomic_inc(&cmd_buffer->usage);
-+ oom_buffer = cmd_buffer;
-+ mutex_unlock(&dev->struct_mutex);
-+ } else {
-+ mutex_lock(&dev->struct_mutex);
-+ oom_buffer =
-+ drm_lookup_buffer_object(file_priv,
-+ arg->oom_handle,
-+ 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!oom_buffer) {
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+ }
-+ }
-+
-+ ret = copy_from_user(&user_scene, (void __user *)
-+ ((unsigned long)arg->scene_arg),
-+ sizeof(user_scene));
-+ if (ret)
-+ goto out_err0;
-+
-+ if (!user_scene.handle_valid) {
-+ pool = psb_scene_pool_alloc(file_priv, 0,
-+ user_scene.num_buffers,
-+ user_scene.w, user_scene.h);
-+ if (!pool) {
-+ ret = -ENOMEM;
-+ goto out_err0;
-+ }
-+
-+ user_scene.handle = psb_scene_pool_handle(pool);
-+ user_scene.handle_valid = 1;
-+ ret = copy_to_user((void __user *)
-+ ((unsigned long)arg->scene_arg),
-+ &user_scene, sizeof(user_scene));
-+
-+ if (ret)
-+ goto out_err0;
-+ } else {
-+ mutex_lock(&dev->struct_mutex);
-+ pool = psb_scene_pool_lookup_devlocked(file_priv,
-+ user_scene.
-+ handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!pool) {
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+ }
-+
-+ mutex_lock(&dev_priv->reset_mutex);
-+ ret = psb_validate_scene_pool(pool, 0, 0, 0,
-+ user_scene.w,
-+ user_scene.h,
-+ arg->ta_flags &
-+ PSB_TA_FLAG_LASTPASS, &scene);
-+ mutex_unlock(&dev_priv->reset_mutex);
-+
-+ if (ret)
-+ goto out_err0;
-+
-+ memset(&feedback, 0, sizeof(feedback));
-+ if (arg->feedback_ops) {
-+ ret = psb_feedback_buf(file_priv,
-+ arg->feedback_ops,
-+ arg->feedback_handle,
-+ arg->feedback_offset,
-+ arg->feedback_breakpoints,
-+ arg->feedback_size, &feedback);
-+ if (ret)
-+ goto out_err0;
-+ }
-+ ret = psb_cmdbuf_ta(file_priv, arg, cmd_buffer, ta_buffer,
-+ oom_buffer, scene, &feedback, &fence_arg);
-+ if (ret)
-+ goto out_err0;
-+ break;
-+ default:
-+ DRM_ERROR("Unimplemented command submission mechanism (%x).\n",
-+ arg->engine);
-+ ret = -EINVAL;
-+ goto out_err0;
-+ }
-+
-+ if (!(arg->fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-+ ret = copy_to_user((void __user *)
-+ ((unsigned long)arg->fence_arg),
-+ &fence_arg, sizeof(fence_arg));
-+ }
-+
-+ out_err0:
-+ ret =
-+ psb_handle_copyback(dev, dev_priv->buffers, num_buffers, ret, data);
-+ mutex_lock(&dev->struct_mutex);
-+ if (scene)
-+ psb_scene_unref_devlocked(&scene);
-+ if (pool)
-+ psb_scene_pool_unref_devlocked(&pool);
-+ if (cmd_buffer)
-+ drm_bo_usage_deref_locked(&cmd_buffer);
-+ if (ta_buffer)
-+ drm_bo_usage_deref_locked(&ta_buffer);
-+ if (oom_buffer)
-+ drm_bo_usage_deref_locked(&oom_buffer);
-+
-+ psb_dereference_buffers_locked(dev_priv->buffers, num_buffers);
-+ mutex_unlock(&dev->struct_mutex);
-+ mutex_unlock(&dev_priv->cmdbuf_mutex);
-+
-+ drm_bo_read_unlock(&dev->bm.bm_lock);
-+ return ret;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_xhw.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_xhw.c 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,614 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ * Make calls into closed source X server code.
-+ */
-+
-+#include "drmP.h"
-+#include "psb_drv.h"
-+
-+void
-+psb_xhw_clean_buf(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
-+{
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ list_del_init(&buf->head);
-+ if (dev_priv->xhw_cur_buf == buf)
-+ dev_priv->xhw_cur_buf = NULL;
-+ atomic_set(&buf->done, 1);
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+}
-+
-+static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf)
-+{
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ atomic_set(&buf->done, 0);
-+ if (unlikely(!dev_priv->xhw_submit_ok)) {
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ DRM_ERROR("No Xpsb 3D extension available.\n");
-+ return -EINVAL;
-+ }
-+ if (!list_empty(&buf->head)) {
-+ DRM_ERROR("Recursive list adding.\n");
-+ goto out;
-+ }
-+ list_add_tail(&buf->head, &dev_priv->xhw_in);
-+ wake_up_interruptible(&dev_priv->xhw_queue);
-+ out:
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return 0;
-+}
-+
-+int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t w,
-+ uint32_t h,
-+ uint32_t * hw_cookie,
-+ uint32_t * bo_size,
-+ uint32_t * clear_p_start, uint32_t * clear_num_pages)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_SCENE_INFO;
-+ xa->irq_op = 0;
-+ xa->issue_irq = 0;
-+ xa->arg.si.w = w;
-+ xa->arg.si.h = h;
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), DRM_HZ);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ if (!xa->ret) {
-+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
-+ *bo_size = xa->arg.si.size;
-+ *clear_p_start = xa->arg.si.clear_p_start;
-+ *clear_num_pages = xa->arg.si.clear_num_pages;
-+ }
-+ return xa->ret;
-+}
-+
-+int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t fire_flags)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ buf->copy_back = 0;
-+ xa->op = PSB_XHW_FIRE_RASTER;
-+ xa->issue_irq = 0;
-+ xa->arg.sb.fire_flags = 0;
-+
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+int psb_xhw_vistest(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_VISTEST;
-+ /*
-+ * Could perhaps decrease latency somewhat by
-+ * issuing an irq in this case.
-+ */
-+ xa->issue_irq = 0;
-+ xa->irq_op = PSB_UIRQ_VISTEST;
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t fire_flags,
-+ uint32_t hw_context,
-+ uint32_t * cookie,
-+ uint32_t * oom_cmds,
-+ uint32_t num_oom_cmds,
-+ uint32_t offset, uint32_t engine, uint32_t flags)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
-+ xa->op = PSB_XHW_SCENE_BIND_FIRE;
-+ xa->issue_irq = (buf->copy_back) ? 1 : 0;
-+ if (unlikely(buf->copy_back))
-+ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
-+ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
-+ else
-+ xa->irq_op = 0;
-+ xa->arg.sb.fire_flags = fire_flags;
-+ xa->arg.sb.hw_context = hw_context;
-+ xa->arg.sb.offset = offset;
-+ xa->arg.sb.engine = engine;
-+ xa->arg.sb.flags = flags;
-+ xa->arg.sb.num_oom_cmds = num_oom_cmds;
-+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
-+ if (num_oom_cmds)
-+ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
-+ sizeof(uint32_t) * num_oom_cmds);
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_RESET_DPM;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), 3 * DRM_HZ);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ return xa->ret;
-+}
-+
-+int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * value)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ *value = 0;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_CHECK_LOCKUP;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), DRM_HZ * 3);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ if (!xa->ret)
-+ *value = xa->arg.cl.value;
-+
-+ return xa->ret;
-+}
-+
-+static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ unsigned long irq_flags;
-+
-+ buf->copy_back = 0;
-+ xa->op = PSB_XHW_TERMINATE;
-+ xa->issue_irq = 0;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ dev_priv->xhw_submit_ok = 0;
-+ atomic_set(&buf->done, 0);
-+ if (!list_empty(&buf->head)) {
-+ DRM_ERROR("Recursive list adding.\n");
-+ goto out;
-+ }
-+ list_add_tail(&buf->head, &dev_priv->xhw_in);
-+ out:
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ wake_up_interruptible(&dev_priv->xhw_queue);
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), DRM_HZ / 10);
-+
-+ if (!atomic_read(&buf->done)) {
-+ DRM_ERROR("Xpsb terminate timeout.\n");
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ return 0;
-+}
-+
-+int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t pages, uint32_t * hw_cookie, uint32_t * size)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_TA_MEM_INFO;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+ xa->arg.bi.pages = pages;
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), DRM_HZ);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ if (!xa->ret)
-+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
-+
-+ *size = xa->arg.bi.size;
-+ return xa->ret;
-+}
-+
-+int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t flags,
-+ uint32_t param_offset,
-+ uint32_t pt_offset, uint32_t * hw_cookie)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+ int ret;
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_TA_MEM_LOAD;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+ xa->arg.bl.flags = flags;
-+ xa->arg.bl.param_offset = param_offset;
-+ xa->arg.bl.pt_offset = pt_offset;
-+ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
-+
-+ ret = psb_xhw_add(dev_priv, buf);
-+ if (ret)
-+ return ret;
-+
-+ (void)wait_event_timeout(dev_priv->xhw_caller_queue,
-+ atomic_read(&buf->done), 3 * DRM_HZ);
-+
-+ if (!atomic_read(&buf->done)) {
-+ psb_xhw_clean_buf(dev_priv, buf);
-+ return -EBUSY;
-+ }
-+
-+ if (!xa->ret)
-+ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
-+
-+ return xa->ret;
-+}
-+
-+int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ /*
-+ * This calls the extensive closed source
-+ * OOM handler, which resolves the condition and
-+ * sends a reply telling the scheduler what to do
-+ * with the task.
-+ */
-+
-+ buf->copy_back = 1;
-+ xa->op = PSB_XHW_OOM;
-+ xa->issue_irq = 1;
-+ xa->irq_op = PSB_UIRQ_OOM_REPLY;
-+ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
-+
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t * cookie,
-+ uint32_t * bca, uint32_t * rca, uint32_t * flags)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ /*
-+ * Get info about how to schedule an OOM task.
-+ */
-+
-+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
-+ *bca = xa->arg.oom.bca;
-+ *rca = xa->arg.oom.rca;
-+ *flags = xa->arg.oom.flags;
-+}
-+
-+void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
-+}
-+
-+int psb_xhw_resume(struct drm_psb_private *dev_priv, struct psb_xhw_buf *buf)
-+{
-+ struct drm_psb_xhw_arg *xa = &buf->arg;
-+
-+ buf->copy_back = 0;
-+ xa->op = PSB_XHW_RESUME;
-+ xa->issue_irq = 0;
-+ xa->irq_op = 0;
-+ return psb_xhw_add(dev_priv, buf);
-+}
-+
-+void psb_xhw_takedown(struct drm_psb_private *dev_priv)
-+{
-+}
-+
-+int psb_xhw_init(struct drm_device *dev)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ unsigned long irq_flags;
-+
-+ INIT_LIST_HEAD(&dev_priv->xhw_in);
-+ dev_priv->xhw_lock = SPIN_LOCK_UNLOCKED;
-+ atomic_set(&dev_priv->xhw_client, 0);
-+ init_waitqueue_head(&dev_priv->xhw_queue);
-+ init_waitqueue_head(&dev_priv->xhw_caller_queue);
-+ mutex_init(&dev_priv->xhw_mutex);
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ dev_priv->xhw_on = 0;
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+
-+ return 0;
-+}
-+
-+static int psb_xhw_init_init(struct drm_device *dev,
-+ struct drm_file *file_priv,
-+ struct drm_psb_xhw_init_arg *arg)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ int ret;
-+ int is_iomem;
-+
-+ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
-+ unsigned long irq_flags;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ dev_priv->xhw_bo =
-+ drm_lookup_buffer_object(file_priv, arg->buffer_handle, 1);
-+ mutex_unlock(&dev->struct_mutex);
-+ if (!dev_priv->xhw_bo) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+ ret = drm_bo_kmap(dev_priv->xhw_bo, 0,
-+ dev_priv->xhw_bo->num_pages,
-+ &dev_priv->xhw_kmap);
-+ if (ret) {
-+ DRM_ERROR("Failed mapping X server "
-+ "communications buffer.\n");
-+ goto out_err0;
-+ }
-+ dev_priv->xhw = drm_bmo_virtual(&dev_priv->xhw_kmap, &is_iomem);
-+ if (is_iomem) {
-+ DRM_ERROR("X server communications buffer"
-+ "is in device memory.\n");
-+ ret = -EINVAL;
-+ goto out_err1;
-+ }
-+ dev_priv->xhw_file = file_priv;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ dev_priv->xhw_on = 1;
-+ dev_priv->xhw_submit_ok = 1;
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return 0;
-+ } else {
-+ DRM_ERROR("Xhw is already initialized.\n");
-+ return -EBUSY;
-+ }
-+ out_err1:
-+ dev_priv->xhw = NULL;
-+ drm_bo_kunmap(&dev_priv->xhw_kmap);
-+ out_err0:
-+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
-+ out_err:
-+ atomic_dec(&dev_priv->xhw_client);
-+ return ret;
-+}
-+
-+static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
-+{
-+ struct psb_xhw_buf *cur_buf, *next;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ dev_priv->xhw_submit_ok = 0;
-+
-+ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
-+ list_del_init(&cur_buf->head);
-+ if (cur_buf->copy_back) {
-+ cur_buf->arg.ret = -EINVAL;
-+ }
-+ atomic_set(&cur_buf->done, 1);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ wake_up(&dev_priv->xhw_caller_queue);
-+}
-+
-+void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
-+ struct drm_file *file_priv, int closing)
-+{
-+
-+ if (dev_priv->xhw_file == file_priv &&
-+ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
-+
-+ if (closing)
-+ psb_xhw_queue_empty(dev_priv);
-+ else {
-+ struct psb_xhw_buf buf;
-+ INIT_LIST_HEAD(&buf.head);
-+
-+ psb_xhw_terminate(dev_priv, &buf);
-+ psb_xhw_queue_empty(dev_priv);
-+ }
-+
-+ dev_priv->xhw = NULL;
-+ drm_bo_kunmap(&dev_priv->xhw_kmap);
-+ drm_bo_usage_deref_unlocked(&dev_priv->xhw_bo);
-+ dev_priv->xhw_file = NULL;
-+ }
-+}
-+
-+int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_psb_xhw_init_arg *arg = (struct drm_psb_xhw_init_arg *)data;
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+
-+ switch (arg->operation) {
-+ case PSB_XHW_INIT:
-+ return psb_xhw_init_init(dev, file_priv, arg);
-+ case PSB_XHW_TAKEDOWN:
-+ psb_xhw_init_takedown(dev_priv, file_priv, 0);
-+ }
-+ return 0;
-+}
-+
-+static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
-+{
-+ int empty;
-+ unsigned long irq_flags;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ empty = list_empty(&dev_priv->xhw_in);
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return empty;
-+}
-+
-+int psb_xhw_handler(struct drm_psb_private *dev_priv)
-+{
-+ unsigned long irq_flags;
-+ struct drm_psb_xhw_arg *xa;
-+ struct psb_xhw_buf *buf;
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+
-+ if (!dev_priv->xhw_on) {
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return -EINVAL;
-+ }
-+
-+ buf = dev_priv->xhw_cur_buf;
-+ if (buf && buf->copy_back) {
-+ xa = &buf->arg;
-+ memcpy(xa, dev_priv->xhw, sizeof(*xa));
-+ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
-+ atomic_set(&buf->done, 1);
-+ wake_up(&dev_priv->xhw_caller_queue);
-+ } else
-+ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
-+
-+ dev_priv->xhw_cur_buf = 0;
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ return 0;
-+}
-+
-+int psb_xhw_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv)
-+{
-+ struct drm_psb_private *dev_priv =
-+ (struct drm_psb_private *)dev->dev_private;
-+ unsigned long irq_flags;
-+ struct drm_psb_xhw_arg *xa;
-+ int ret;
-+ struct list_head *list;
-+ struct psb_xhw_buf *buf;
-+
-+ if (!dev_priv)
-+ return -EINVAL;
-+
-+ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
-+ return -EAGAIN;
-+
-+ if (psb_forced_user_interrupt(dev_priv)) {
-+ mutex_unlock(&dev_priv->xhw_mutex);
-+ return -EINVAL;
-+ }
-+
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ while (list_empty(&dev_priv->xhw_in)) {
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
-+ !psb_xhw_in_empty
-+ (dev_priv), DRM_HZ);
-+ if (ret == -ERESTARTSYS || ret == 0) {
-+ mutex_unlock(&dev_priv->xhw_mutex);
-+ return -EAGAIN;
-+ }
-+ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
-+ }
-+
-+ list = dev_priv->xhw_in.next;
-+ list_del_init(list);
-+
-+ buf = list_entry(list, struct psb_xhw_buf, head);
-+ xa = &buf->arg;
-+ memcpy(dev_priv->xhw, xa, sizeof(*xa));
-+
-+ if (unlikely(buf->copy_back))
-+ dev_priv->xhw_cur_buf = buf;
-+ else {
-+ atomic_set(&buf->done, 1);
-+ dev_priv->xhw_cur_buf = NULL;
-+ }
-+
-+ if (xa->op == PSB_XHW_TERMINATE) {
-+ dev_priv->xhw_on = 0;
-+ wake_up(&dev_priv->xhw_caller_queue);
-+ }
-+ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
-+
-+ mutex_unlock(&dev_priv->xhw_mutex);
-+
-+ return 0;
-+}
-Index: linux-2.6.28/drivers/gpu/drm/Kconfig
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/Kconfig 2009-02-20 12:22:54.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/Kconfig 2009-02-20 12:23:06.000000000 +0000
-@@ -129,3 +129,10 @@
- help
- Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
- chipset. If M is selected the module will be called savage.
-+
-+config DRM_PSB
-+ tristate "Intel Poulsbo"
-+ depends on DRM && PCI && I2C_ALGOBIT
-+ select DRM_INTEL_COMMON
-+ help
-+ Choose this option if you have an Intel Poulsbo chipset.
-Index: linux-2.6.28/include/drm/drm_objects.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/include/drm/drm_objects.h 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,717 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ **************************************************************************/
-+/*
-+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
-+ */
-+
-+#ifndef _DRM_OBJECTS_H
-+#define _DRM_OBJECTS_H
-+
-+struct drm_device;
-+struct drm_bo_mem_reg;
-+
-+/***************************************************
-+ * User space objects. (drm_object.c)
-+ */
-+
-+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-+
-+enum drm_object_type {
-+ drm_fence_type,
-+ drm_buffer_type,
-+ drm_lock_type,
-+ /*
-+ * Add other user space object types here.
-+ */
-+ drm_driver_type0 = 256,
-+ drm_driver_type1,
-+ drm_driver_type2,
-+ drm_driver_type3,
-+ drm_driver_type4
-+};
-+
-+/*
-+ * A user object is a structure that helps the drm give out user handles
-+ * to kernel internal objects and to keep track of these objects so that
-+ * they can be destroyed, for example when the user space process exits.
-+ * Designed to be accessible using a user space 32-bit handle.
-+ */
-+
-+struct drm_user_object {
-+ struct drm_hash_item hash;
-+ struct list_head list;
-+ enum drm_object_type type;
-+ atomic_t refcount;
-+ int shareable;
-+ struct drm_file *owner;
-+ void (*ref_struct_locked) (struct drm_file *priv,
-+ struct drm_user_object *obj,
-+ enum drm_ref_type ref_action);
-+ void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
-+ enum drm_ref_type unref_action);
-+ void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
-+};
-+
-+/*
-+ * A ref object is a structure which is used to
-+ * keep track of references to user objects and to keep track of these
-+ * references so that they can be destroyed for example when the user space
-+ * process exits. Designed to be accessible using a pointer to the _user_ object.
-+ */
-+
-+struct drm_ref_object {
-+ struct drm_hash_item hash;
-+ struct list_head list;
-+ atomic_t refcount;
-+ enum drm_ref_type unref_action;
-+};
-+
-+/**
-+ * Must be called with the struct_mutex held.
-+ */
-+
-+extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
-+ int shareable);
-+/**
-+ * Must be called with the struct_mutex held.
-+ */
-+
-+extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
-+ uint32_t key);
-+
-+/*
-+ * Must be called with the struct_mutex held. May temporarily release it.
-+ */
-+
-+extern int drm_add_ref_object(struct drm_file *priv,
-+ struct drm_user_object *referenced_object,
-+ enum drm_ref_type ref_action);
-+
-+/*
-+ * Must be called with the struct_mutex held.
-+ */
-+
-+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
-+ struct drm_user_object *referenced_object,
-+ enum drm_ref_type ref_action);
-+/*
-+ * Must be called with the struct_mutex held.
-+ * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
-+ * release the struct_mutex before calling drm_remove_ref_object.
-+ * This function may temporarily release the struct_mutex.
-+ */
-+
-+extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
-+extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
-+ enum drm_object_type type,
-+ struct drm_user_object **object);
-+extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
-+ enum drm_object_type type);
-+
-+/***************************************************
-+ * Fence objects. (drm_fence.c)
-+ */
-+
-+struct drm_fence_object {
-+ struct drm_user_object base;
-+ struct drm_device *dev;
-+ atomic_t usage;
-+
-+ /*
-+ * The below three fields are protected by the fence manager spinlock.
-+ */
-+
-+ struct list_head ring;
-+ int fence_class;
-+ uint32_t native_types;
-+ uint32_t type;
-+ uint32_t signaled_types;
-+ uint32_t sequence;
-+ uint32_t waiting_types;
-+ uint32_t error;
-+};
-+
-+#define _DRM_FENCE_CLASSES 8
-+
-+struct drm_fence_class_manager {
-+ struct list_head ring;
-+ uint32_t pending_flush;
-+ uint32_t waiting_types;
-+ wait_queue_head_t fence_queue;
-+ uint32_t highest_waiting_sequence;
-+ uint32_t latest_queued_sequence;
-+};
-+
-+struct drm_fence_manager {
-+ int initialized;
-+ rwlock_t lock;
-+ struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
-+ uint32_t num_classes;
-+ atomic_t count;
-+};
-+
-+struct drm_fence_driver {
-+ unsigned long *waiting_jiffies;
-+ uint32_t num_classes;
-+ uint32_t wrap_diff;
-+ uint32_t flush_diff;
-+ uint32_t sequence_mask;
-+
-+ /*
-+ * Driver implemented functions:
-+ * has_irq() : 1 if the hardware can update the indicated type_flags using an
-+ * irq handler. 0 if polling is required.
-+ *
-+ * emit() : Emit a sequence number to the command stream.
-+ * Return the sequence number.
-+ *
-+ * flush() : Make sure the flags indicated in fc->pending_flush will eventually
-+ * signal for fc->highest_received_sequence and all preceding sequences.
-+ * Acknowledge by clearing the flags fc->pending_flush.
-+ *
-+ * poll() : Call drm_fence_handler with any new information.
-+ *
-+ * needed_flush() : Given the current state of the fence->type flags and previusly
-+ * executed or queued flushes, return the type_flags that need flushing.
-+ *
-+ * wait(): Wait for the "mask" flags to signal on a given fence, performing
-+ * whatever's necessary to make this happen.
-+ */
-+
-+ int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
-+ uint32_t flags);
-+ int (*emit) (struct drm_device *dev, uint32_t fence_class,
-+ uint32_t flags, uint32_t *breadcrumb,
-+ uint32_t *native_type);
-+ void (*flush) (struct drm_device *dev, uint32_t fence_class);
-+ void (*poll) (struct drm_device *dev, uint32_t fence_class,
-+ uint32_t types);
-+ uint32_t (*needed_flush) (struct drm_fence_object *fence);
-+ int (*wait) (struct drm_fence_object *fence, int lazy,
-+ int interruptible, uint32_t mask);
-+};
-+
-+extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
-+ int interruptible, uint32_t mask,
-+ unsigned long end_jiffies);
-+extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t sequence, uint32_t type,
-+ uint32_t error);
-+extern void drm_fence_manager_init(struct drm_device *dev);
-+extern void drm_fence_manager_takedown(struct drm_device *dev);
-+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
-+ uint32_t sequence);
-+extern int drm_fence_object_flush(struct drm_fence_object *fence,
-+ uint32_t type);
-+extern int drm_fence_object_signaled(struct drm_fence_object *fence,
-+ uint32_t type);
-+extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
-+extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
-+extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
-+extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
-+ struct drm_fence_object *src);
-+extern int drm_fence_object_wait(struct drm_fence_object *fence,
-+ int lazy, int ignore_signals, uint32_t mask);
-+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
-+ uint32_t fence_flags, uint32_t fence_class,
-+ struct drm_fence_object **c_fence);
-+extern int drm_fence_object_emit(struct drm_fence_object *fence,
-+ uint32_t fence_flags, uint32_t class,
-+ uint32_t type);
-+extern void drm_fence_fill_arg(struct drm_fence_object *fence,
-+ struct drm_fence_arg *arg);
-+
-+extern int drm_fence_add_user_object(struct drm_file *priv,
-+ struct drm_fence_object *fence,
-+ int shareable);
-+
-+extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+/**************************************************
-+ *TTMs
-+ */
-+
-+/*
-+ * The ttm backend GTT interface. (In our case AGP).
-+ * Any similar type of device (PCIE?)
-+ * needs only to implement these functions to be usable with the TTM interface.
-+ * The AGP backend implementation lives in drm_agpsupport.c
-+ * basically maps these calls to available functions in agpgart.
-+ * Each drm device driver gets an
-+ * additional function pointer that creates these types,
-+ * so that the device can choose the correct aperture.
-+ * (Multiple AGP apertures, etc.)
-+ * Most device drivers will let this point to the standard AGP implementation.
-+ */
-+
-+#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
-+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
-+
-+struct drm_ttm_backend;
-+struct drm_ttm_backend_func {
-+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
-+ int (*populate) (struct drm_ttm_backend *backend,
-+ unsigned long num_pages, struct page **pages);
-+ void (*clear) (struct drm_ttm_backend *backend);
-+ int (*bind) (struct drm_ttm_backend *backend,
-+ struct drm_bo_mem_reg *bo_mem);
-+ int (*unbind) (struct drm_ttm_backend *backend);
-+ void (*destroy) (struct drm_ttm_backend *backend);
-+};
-+
-+
-+struct drm_ttm_backend {
-+ struct drm_device *dev;
-+ uint32_t flags;
-+ struct drm_ttm_backend_func *func;
-+};
-+
-+struct drm_ttm {
-+ struct page *dummy_read_page;
-+ struct page **pages;
-+ uint32_t page_flags;
-+ unsigned long num_pages;
-+ atomic_t vma_count;
-+ struct drm_device *dev;
-+ int destroy;
-+ uint32_t mapping_offset;
-+ struct drm_ttm_backend *be;
-+ enum {
-+ ttm_bound,
-+ ttm_evicted,
-+ ttm_unbound,
-+ ttm_unpopulated,
-+ } state;
-+
-+};
-+
-+extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
-+extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
-+extern void drm_ttm_unbind(struct drm_ttm *ttm);
-+extern void drm_ttm_evict(struct drm_ttm *ttm);
-+extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
-+extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
-+extern void drm_ttm_cache_flush(void);
-+extern int drm_ttm_populate(struct drm_ttm *ttm);
-+extern int drm_ttm_set_user(struct drm_ttm *ttm,
-+ struct task_struct *tsk,
-+ int write,
-+ unsigned long start,
-+ unsigned long num_pages,
-+ struct page *dummy_read_page);
-+unsigned long drm_ttm_size(struct drm_device *dev,
-+ unsigned long num_pages,
-+ int user_bo);
-+
-+
-+/*
-+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
-+ * this which calls this function iff there are no vmas referencing it anymore.
-+ * Otherwise it is called when the last vma exits.
-+ */
-+
-+extern int drm_destroy_ttm(struct drm_ttm *ttm);
-+
-+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
-+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
-+}
-+
-+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
-+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
-+
-+/*
-+ * Page flags.
-+ */
-+
-+#define DRM_TTM_PAGE_UNCACHED (1 << 0)
-+#define DRM_TTM_PAGE_USED (1 << 1)
-+#define DRM_TTM_PAGE_BOUND (1 << 2)
-+#define DRM_TTM_PAGE_PRESENT (1 << 3)
-+#define DRM_TTM_PAGE_VMALLOC (1 << 4)
-+#define DRM_TTM_PAGE_USER (1 << 5)
-+#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
-+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
-+#define DRM_TTM_PAGE_USER_DMA (1 << 8)
-+
-+/***************************************************
-+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
-+ */
-+
-+struct drm_bo_mem_reg {
-+ struct drm_mm_node *mm_node;
-+ unsigned long size;
-+ unsigned long num_pages;
-+ uint32_t page_alignment;
-+ uint32_t mem_type;
-+ uint64_t flags;
-+ uint64_t mask;
-+ uint32_t desired_tile_stride;
-+ uint32_t hw_tile_stride;
-+};
-+
-+enum drm_bo_type {
-+ drm_bo_type_dc,
-+ drm_bo_type_user,
-+ drm_bo_type_kernel, /* for initial kernel allocations */
-+};
-+
-+struct drm_buffer_object {
-+ struct drm_device *dev;
-+ struct drm_user_object base;
-+
-+ /*
-+ * If there is a possibility that the usage variable is zero,
-+ * then dev->struct_mutext should be locked before incrementing it.
-+ */
-+
-+ atomic_t usage;
-+ unsigned long buffer_start;
-+ enum drm_bo_type type;
-+ unsigned long offset;
-+ atomic_t mapped;
-+ struct drm_bo_mem_reg mem;
-+
-+ struct list_head lru;
-+ struct list_head ddestroy;
-+
-+ uint32_t fence_type;
-+ uint32_t fence_class;
-+ uint32_t new_fence_type;
-+ uint32_t new_fence_class;
-+ struct drm_fence_object *fence;
-+ uint32_t priv_flags;
-+ wait_queue_head_t event_queue;
-+ struct mutex mutex;
-+ unsigned long num_pages;
-+ unsigned long reserved_size;
-+
-+ /* For pinned buffers */
-+ struct drm_mm_node *pinned_node;
-+ uint32_t pinned_mem_type;
-+ struct list_head pinned_lru;
-+
-+ /* For vm */
-+ struct drm_ttm *ttm;
-+ struct drm_map_list map_list;
-+ uint32_t memory_type;
-+ unsigned long bus_offset;
-+ uint32_t vm_flags;
-+ void *iomap;
-+
-+#ifdef DRM_ODD_MM_COMPAT
-+ /* dev->struct_mutex only protected. */
-+ struct list_head vma_list;
-+ struct list_head p_mm_list;
-+#endif
-+
-+};
-+
-+#define _DRM_BO_FLAG_UNFENCED 0x00000001
-+#define _DRM_BO_FLAG_EVICTED 0x00000002
-+
-+struct drm_mem_type_manager {
-+ int has_type;
-+ int use_type;
-+ struct drm_mm manager;
-+ struct list_head lru;
-+ struct list_head pinned;
-+ uint32_t flags;
-+ uint32_t drm_bus_maptype;
-+ unsigned long gpu_offset;
-+ unsigned long io_offset;
-+ unsigned long io_size;
-+ void *io_addr;
-+};
-+
-+struct drm_bo_lock {
-+ struct drm_user_object base;
-+ wait_queue_head_t queue;
-+ atomic_t write_lock_pending;
-+ atomic_t readers;
-+};
-+
-+#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
-+#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
-+#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
-+#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
-+ before kernel access. */
-+#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
-+#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
-+
-+struct drm_buffer_manager {
-+ struct drm_bo_lock bm_lock;
-+ struct mutex evict_mutex;
-+ int nice_mode;
-+ int initialized;
-+ struct drm_file *last_to_validate;
-+ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
-+ struct list_head unfenced;
-+ struct list_head ddestroy;
-+ struct delayed_work wq;
-+ uint32_t fence_type;
-+ unsigned long cur_pages;
-+ atomic_t count;
-+ struct page *dummy_read_page;
-+};
-+
-+struct drm_bo_driver {
-+ const uint32_t *mem_type_prio;
-+ const uint32_t *mem_busy_prio;
-+ uint32_t num_mem_type_prio;
-+ uint32_t num_mem_busy_prio;
-+ struct drm_ttm_backend *(*create_ttm_backend_entry)
-+ (struct drm_device *dev);
-+ int (*backend_size) (struct drm_device *dev,
-+ unsigned long num_pages);
-+ int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
-+ uint32_t *type);
-+ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
-+ int (*init_mem_type) (struct drm_device *dev, uint32_t type,
-+ struct drm_mem_type_manager *man);
-+ uint32_t(*evict_mask) (struct drm_buffer_object *bo);
-+ int (*move) (struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
-+ void (*ttm_cache_flush)(struct drm_ttm *ttm);
-+
-+ /*
-+ * command_stream_barrier
-+ *
-+ * @dev: The drm device.
-+ *
-+ * @bo: The buffer object to validate.
-+ *
-+ * @new_fence_class: The new fence class for the buffer object.
-+ *
-+ * @new_fence_type: The new fence type for the buffer object.
-+ *
-+ * @no_wait: whether this should give up and return -EBUSY
-+ * if this operation would require sleeping
-+ *
-+ * Insert a command stream barrier that makes sure that the
-+ * buffer is idle once the commands associated with the
-+ * current validation are starting to execute. If an error
-+ * condition is returned, or the function pointer is NULL,
-+ * the drm core will force buffer idle
-+ * during validation.
-+ */
-+
-+ int (*command_stream_barrier) (struct drm_buffer_object *bo,
-+ uint32_t new_fence_class,
-+ uint32_t new_fence_type,
-+ int no_wait);
-+};
-+
-+/*
-+ * buffer objects (drm_bo.c)
-+ */
-+extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
-+extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-+extern int drm_bo_driver_finish(struct drm_device *dev);
-+extern int drm_bo_driver_init(struct drm_device *dev);
-+extern int drm_bo_pci_offset(struct drm_device *dev,
-+ struct drm_bo_mem_reg *mem,
-+ unsigned long *bus_base,
-+ unsigned long *bus_offset,
-+ unsigned long *bus_size);
-+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
-+
-+extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
-+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
-+extern void drm_putback_buffer_objects(struct drm_device *dev);
-+extern int drm_fence_buffer_objects(struct drm_device *dev,
-+ struct list_head *list,
-+ uint32_t fence_flags,
-+ struct drm_fence_object *fence,
-+ struct drm_fence_object **used_fence);
-+extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
-+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
-+ enum drm_bo_type type, uint64_t mask,
-+ uint32_t hint, uint32_t page_alignment,
-+ unsigned long buffer_start,
-+ struct drm_buffer_object **bo);
-+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
-+ int no_wait);
-+extern int drm_bo_mem_space(struct drm_buffer_object *bo,
-+ struct drm_bo_mem_reg *mem, int no_wait);
-+extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
-+ uint64_t new_mem_flags,
-+ int no_wait, int move_unfenced);
-+extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
-+extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
-+ unsigned long p_offset, unsigned long p_size);
-+extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
-+ uint32_t fence_class, uint64_t flags,
-+ uint64_t mask, uint32_t hint,
-+ int use_old_fence_class,
-+ struct drm_bo_info_rep *rep,
-+ struct drm_buffer_object **bo_rep);
-+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
-+ uint32_t handle,
-+ int check_owner);
-+extern int drm_bo_do_validate(struct drm_buffer_object *bo,
-+ uint64_t flags, uint64_t mask, uint32_t hint,
-+ uint32_t fence_class,
-+ int no_wait,
-+ struct drm_bo_info_rep *rep);
-+extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
-+ struct drm_bo_info_rep *rep);
-+/*
-+ * Buffer object memory move- and map helpers.
-+ * drm_bo_move.c
-+ */
-+
-+extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
-+ int evict, int no_wait,
-+ struct drm_bo_mem_reg *new_mem);
-+extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
-+ int evict,
-+ int no_wait, struct drm_bo_mem_reg *new_mem);
-+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
-+ int evict, int no_wait,
-+ uint32_t fence_class, uint32_t fence_type,
-+ uint32_t fence_flags,
-+ struct drm_bo_mem_reg *new_mem);
-+extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
-+extern unsigned long drm_bo_offset_end(unsigned long offset,
-+ unsigned long end);
-+
-+struct drm_bo_kmap_obj {
-+ void *virtual;
-+ struct page *page;
-+ enum {
-+ bo_map_iomap,
-+ bo_map_vmap,
-+ bo_map_kmap,
-+ bo_map_premapped,
-+ } bo_kmap_type;
-+};
-+
-+static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
-+{
-+ *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
-+ map->bo_kmap_type == bo_map_premapped);
-+ return map->virtual;
-+}
-+extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
-+extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
-+ unsigned long num_pages, struct drm_bo_kmap_obj *map);
-+
-+
-+/*
-+ * drm_regman.c
-+ */
-+
-+struct drm_reg {
-+ struct list_head head;
-+ struct drm_fence_object *fence;
-+ uint32_t fence_type;
-+ uint32_t new_fence_type;
-+};
-+
-+struct drm_reg_manager {
-+ struct list_head free;
-+ struct list_head lru;
-+ struct list_head unfenced;
-+
-+ int (*reg_reusable)(const struct drm_reg *reg, const void *data);
-+ void (*reg_destroy)(struct drm_reg *reg);
-+};
-+
-+extern int drm_regs_alloc(struct drm_reg_manager *manager,
-+ const void *data,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int interruptible,
-+ int no_wait,
-+ struct drm_reg **reg);
-+
-+extern void drm_regs_fence(struct drm_reg_manager *regs,
-+ struct drm_fence_object *fence);
-+
-+extern void drm_regs_free(struct drm_reg_manager *manager);
-+extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
-+extern void drm_regs_init(struct drm_reg_manager *manager,
-+ int (*reg_reusable)(const struct drm_reg *,
-+ const void *),
-+ void (*reg_destroy)(struct drm_reg *));
-+
-+extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
-+ void **virtual);
-+extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
-+ void *virtual);
-+/*
-+ * drm_bo_lock.c
-+ * Simple replacement for the hardware lock on buffer manager init and clean.
-+ */
-+
-+
-+extern void drm_bo_init_lock(struct drm_bo_lock *lock);
-+extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
-+extern int drm_bo_read_lock(struct drm_bo_lock *lock);
-+extern int drm_bo_write_lock(struct drm_bo_lock *lock,
-+ struct drm_file *file_priv);
-+
-+extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
-+ struct drm_file *file_priv);
-+
-+#ifdef CONFIG_DEBUG_MUTEXES
-+#define DRM_ASSERT_LOCKED(_mutex) \
-+ BUG_ON(!mutex_is_locked(_mutex) || \
-+ ((_mutex)->owner != current_thread_info()))
-+#else
-+#define DRM_ASSERT_LOCKED(_mutex)
-+#endif
-+#endif
-Index: linux-2.6.28/drivers/gpu/drm/drm_crtc.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/drm_crtc.c 2009-02-20 12:22:54.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/drm_crtc.c 2009-02-20 12:23:06.000000000 +0000
-@@ -807,6 +807,53 @@
- }
- EXPORT_SYMBOL(drm_mode_config_init);
-
-+/**
-+ * drm_get_buffer_object - find the buffer object for a given handle
-+ * @dev: DRM device
-+ * @bo: pointer to caller's buffer_object pointer
-+ * @handle: handle to lookup
-+ *
-+ * LOCKING:
-+ * Must take @dev's struct_mutex to protect buffer object lookup.
-+ *
-+ * Given @handle, lookup the buffer object in @dev and put it in the caller's
-+ * @bo pointer.
-+ *
-+ * RETURNS:
-+ * Zero on success, -EINVAL if the handle couldn't be found.
-+ */
-+static int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
-+{
-+ struct drm_user_object *uo;
-+ struct drm_hash_item *hash;
-+ int ret;
-+
-+ *bo = NULL;
-+
-+ mutex_lock(&dev->struct_mutex);
-+ ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
-+ if (ret) {
-+ DRM_ERROR("Couldn't find handle.\n");
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
-+ if (uo->type != drm_buffer_type) {
-+ ret = -EINVAL;
-+ goto out_err;
-+ }
-+
-+ *bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-+ ret = 0;
-+out_err:
-+ mutex_unlock(&dev->struct_mutex);
-+ return ret;
-+}
-+
-+char drm_init_mode[32];
-+EXPORT_SYMBOL(drm_init_mode);
-+
- int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
- {
- uint32_t total_objects = 0;
-@@ -1588,6 +1635,8 @@
- struct drm_mode_fb_cmd *r = data;
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_framebuffer *fb;
-+ struct drm_buffer_object *bo;
-+ struct drm_crtc *crtc;
- int ret = 0;
-
- if ((config->min_width > r->width) || (r->width > config->max_width)) {
-@@ -1600,20 +1649,46 @@
- }
-
- mutex_lock(&dev->mode_config.mutex);
-+ /* TODO check limits are okay */
-+ ret = drm_get_buffer_object(dev, &bo, r->handle);
-+ if (ret || !bo) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-
- /* TODO check buffer is sufficently large */
- /* TODO setup destructor callback */
-
-- fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
-+ fb = kzalloc(sizeof(struct drm_framebuffer), GFP_KERNEL);
-+ if (!fb) {
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+
-+ drm_framebuffer_init(dev, fb, NULL);
- if (!fb) {
- DRM_ERROR("could not create framebuffer\n");
- ret = -EINVAL;
- goto out;
- }
-
-+ fb->width = r->width;
-+ fb->height = r->height;
-+ fb->pitch = r->pitch;
-+ fb->bits_per_pixel = r->bpp;
-+ fb->depth = r->depth;
-+ fb->offset = bo->offset;
-+ fb->bo = bo;
-+
- r->fb_id = fb->base.id;
- list_add(&fb->filp_head, &file_priv->fbs);
-
-+ /* FIXME: bind the fb to the right crtc */
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+ crtc->fb = fb;
-+ dev->driver->fb_probe(dev, crtc);
-+ }
-+
- out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-@@ -1669,8 +1744,10 @@
- /* TODO release all crtc connected to the framebuffer */
- /* TODO unhock the destructor from the buffer object */
-
-- list_del(&fb->filp_head);
-- fb->funcs->destroy(fb);
-+ if (fb->bo->type != drm_bo_type_kernel)
-+ drm_framebuffer_cleanup(fb);
-+ else
-+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
-
- out:
- mutex_unlock(&dev->mode_config.mutex);
-@@ -1716,7 +1793,7 @@
- r->depth = fb->depth;
- r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitch;
-- fb->funcs->create_handle(fb, file_priv, &r->handle);
-+ r->handle = fb->bo->base.hash.key;
-
- out:
- mutex_unlock(&dev->mode_config.mutex);
-@@ -1746,7 +1823,10 @@
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- list_del(&fb->filp_head);
-- fb->funcs->destroy(fb);
-+ if (fb->bo->type != drm_bo_type_kernel)
-+ drm_framebuffer_cleanup(fb);
-+ else
-+ dev->driver->fb_remove(dev, drm_crtc_from_fb(dev, fb));
- }
- mutex_unlock(&dev->mode_config.mutex);
- }
-Index: linux-2.6.28/include/drm/drm_crtc.h
-===================================================================
---- linux-2.6.28.orig/include/drm/drm_crtc.h 2009-02-20 12:22:53.000000000 +0000
-+++ linux-2.6.28/include/drm/drm_crtc.h 2009-02-20 12:23:06.000000000 +0000
-@@ -50,6 +50,8 @@
- uint32_t type;
- };
-
-+#include <drm/drm_objects.h>
-+
- /*
- * Note on terminology: here, for brevity and convenience, we refer to connector
- * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
-@@ -258,6 +260,9 @@
- int flags;
- void *fbdev;
- u32 pseudo_palette[17];
-+ unsigned long offset;
-+ struct drm_buffer_object *bo;
-+ struct drm_bo_kmap_obj kmap;
- struct list_head filp_head;
- };
-
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_lvds.c 2009-02-20 12:22:54.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c 2009-02-20 12:23:06.000000000 +0000
-@@ -36,6 +36,259 @@
- #include "i915_drm.h"
- #include "i915_drv.h"
-
-+#include <acpi/acpi_drivers.h>
-+
-+#define BLC_I2C_TYPE 0x01
-+#define BLC_PWM_TYPE 0x02
-+#define BRIGHTNESS_MASK 0xff
-+#define BRIGHTNESS_MAX_LEVEL 100
-+#define BLC_POLARITY_NORMAL 0
-+#define BLC_POLARITY_INVERSE 1
-+#define BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xfffe)
-+#define BACKLIGHT_PWM_CTL_SHIFT (16)
-+#define BLC_MAX_PWM_REG_FREQ 0xfffe
-+#define BLC_MIN_PWM_REG_FREQ 0x2
-+#define BLC_PWM_LEGACY_MODE_ENABLE 0x0001
-+#define BLC_PWM_PRECISION_FACTOR 10//10000000
-+#define BLC_PWM_FREQ_CALC_CONSTANT 32
-+#define MHz 1000000
-+#define OFFSET_OPREGION_VBT 0x400
-+
-+typedef struct OpRegion_Header
-+{
-+ char sign[16];
-+ u32 size;
-+ u32 over;
-+ char sver[32];
-+ char vver[16];
-+ char gver[16];
-+ u32 mbox;
-+ char rhd1[164];
-+} OpRegionRec, *OpRegionPtr;
-+
-+struct vbt_header2
-+{
-+ char signature[20]; /**< Always starts with 'VBT$' */
-+ u16 version; /**< decimal */
-+ u16 header_size; /**< in bytes */
-+ u16 vbt_size; /**< in bytes */
-+ u8 vbt_checksum;
-+ u8 reserved0;
-+ u32 bdb_offset; /**< from beginning of VBT */
-+ u32 aim1_offset; /**< from beginning of VBT */
-+ u32 aim2_offset; /**< from beginning of VBT */
-+ u32 aim3_offset; /**< from beginning of VBT */
-+ u32 aim4_offset; /**< from beginning of VBT */
-+} __attribute__ ((packed));
-+
-+struct bdb_header2
-+{
-+ char signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
-+ u16 version; /**< decimal */
-+ u16 header_size; /**< in bytes */
-+ u16 bdb_size; /**< in bytes */
-+} __attribute__ ((packed));
-+
-+#define LVDS_CAP_EDID (1 << 6)
-+#define LVDS_CAP_DITHER (1 << 5)
-+#define LVDS_CAP_PFIT_AUTO_RATIO (1 << 4)
-+#define LVDS_CAP_PFIT_GRAPHICS_MODE (1 << 3)
-+#define LVDS_CAP_PFIT_TEXT_MODE (1 << 2)
-+#define LVDS_CAP_PFIT_GRAPHICS (1 << 1)
-+#define LVDS_CAP_PFIT_TEXT (1 << 0)
-+struct lvds_bdb_1
-+{
-+ u8 id; /**< 40 */
-+ u16 size;
-+ u8 panel_type;
-+ u8 reserved0;
-+ u16 caps;
-+} __attribute__ ((packed));
-+
-+struct lvds_bdb_2_fp_params
-+{
-+ u16 x_res;
-+ u16 y_res;
-+ u32 lvds_reg;
-+ u32 lvds_reg_val;
-+ u32 pp_on_reg;
-+ u32 pp_on_reg_val;
-+ u32 pp_off_reg;
-+ u32 pp_off_reg_val;
-+ u32 pp_cycle_reg;
-+ u32 pp_cycle_reg_val;
-+ u32 pfit_reg;
-+ u32 pfit_reg_val;
-+ u16 terminator;
-+} __attribute__ ((packed));
-+
-+struct lvds_bdb_2_fp_edid_dtd
-+{
-+ u16 dclk; /**< In 10khz */
-+ u8 hactive;
-+ u8 hblank;
-+ u8 high_h; /**< 7:4 = hactive 11:8, 3:0 = hblank 11:8 */
-+ u8 vactive;
-+ u8 vblank;
-+ u8 high_v; /**< 7:4 = vactive 11:8, 3:0 = vblank 11:8 */
-+ u8 hsync_off;
-+ u8 hsync_pulse_width;
-+ u8 vsync_off;
-+ u8 high_hsync_off; /**< 7:6 = hsync off 9:8 */
-+ u8 h_image;
-+ u8 v_image;
-+ u8 max_hv;
-+ u8 h_border;
-+ u8 v_border;
-+ u8 flags;
-+#define FP_EDID_FLAG_VSYNC_POSITIVE (1 << 2)
-+#define FP_EDID_FLAG_HSYNC_POSITIVE (1 << 1)
-+} __attribute__ ((packed));
-+
-+struct lvds_bdb_2_entry
-+{
-+ u16 fp_params_offset; /**< From beginning of BDB */
-+ u8 fp_params_size;
-+ u16 fp_edid_dtd_offset;
-+ u8 fp_edid_dtd_size;
-+ u16 fp_edid_pid_offset;
-+ u8 fp_edid_pid_size;
-+} __attribute__ ((packed));
-+
-+struct lvds_bdb_2
-+{
-+ u8 id; /**< 41 */
-+ u16 size;
-+ u8 table_size; /* not sure on this one */
-+ struct lvds_bdb_2_entry panels[16];
-+} __attribute__ ((packed));
-+
-+
-+struct lvds_bdb_blc
-+{
-+ u8 id; /**< 43 */
-+ u16 size;
-+ u8 table_size;
-+} __attribute__ ((packed));
-+
-+struct lvds_blc
-+{
-+ u8 type:2;
-+ u8 pol:1;
-+ u8 gpio:3;
-+ u8 gmbus:2;
-+ u16 freq;
-+ u8 minbrightness;
-+ u8 i2caddr;
-+ u8 brightnesscmd;
-+ /* more... */
-+} __attribute__ ((packed));
-+
-+int drm_intel_ignore_acpi = 0;
-+MODULE_PARM_DESC(ignore_acpi, "Ignore ACPI");
-+module_param_named(ignore_acpi, drm_intel_ignore_acpi, int, 0600);
-+
-+uint8_t blc_type;
-+uint8_t blc_pol;
-+uint8_t blc_freq;
-+uint8_t blc_minbrightness;
-+uint8_t blc_i2caddr;
-+uint8_t blc_brightnesscmd;
-+int lvds_backlight; /* restore backlight to this value */
-+
-+struct intel_i2c_chan *lvds_i2c_bus;
-+u32 CoreClock;
-+u32 PWMControlRegFreq;
-+
-+unsigned char * dev_OpRegion = NULL;
-+unsigned int dev_OpRegionSize;
-+
-+#define PCI_PORT5_REG80_FFUSE 0xD0058000
-+#define PCI_PORT5_REG80_MAXRES_INT_EN 0x0040
-+#define MAX_HDISPLAY 800
-+#define MAX_VDISPLAY 480
-+bool sku_bMaxResEnableInt = false;
-+
-+/** Set BLC through I2C*/
-+static int
-+LVDSI2CSetBacklight(struct drm_device *dev, unsigned char ch)
-+{
-+ u8 out_buf[2];
-+ struct i2c_msg msgs[] = {
-+ {
-+ .addr = lvds_i2c_bus->slave_addr,
-+ .flags = 0,
-+ .len = 2,
-+ .buf = out_buf,
-+ }
-+ };
-+
-+ DRM_INFO("LVDSI2CSetBacklight: the slave_addr is 0x%x, the backlight value is %d\n", lvds_i2c_bus->slave_addr, ch);
-+
-+ out_buf[0] = blc_brightnesscmd;
-+ out_buf[1] = ch;
-+
-+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
-+ {
-+ DRM_INFO("LVDSI2CSetBacklight: i2c_transfer done\n");
-+ return true;
-+ }
-+
-+ DRM_ERROR("msg: i2c_transfer error\n");
-+ return false;
-+}
-+
-+/**
-+ * Calculate PWM control register value.
-+ */
-+static int
-+LVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
-+{
-+ unsigned long value = 0;
-+
-+ DRM_INFO("Enter LVDSCalculatePWMCtrlRegFreq.\n");
-+ if (blc_freq == 0) {
-+ DRM_ERROR("LVDSCalculatePWMCtrlRegFreq: Frequency Requested is 0.\n");
-+ return FALSE;
-+ }
-+ value = (CoreClock * MHz);
-+ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
-+ value = (value * BLC_PWM_PRECISION_FACTOR);
-+ value = (value / blc_freq);
-+ value = (value / BLC_PWM_PRECISION_FACTOR);
-+
-+ if (value > (unsigned long)BLC_MAX_PWM_REG_FREQ ||
-+ value < (unsigned long)BLC_MIN_PWM_REG_FREQ) {
-+ return FALSE;
-+ } else {
-+ PWMControlRegFreq = ((u32)value & ~BLC_PWM_LEGACY_MODE_ENABLE);
-+ return TRUE;
-+ }
-+}
-+
-+/**
-+ * Returns the maximum level of the backlight duty cycle field.
-+ */
-+static u32
-+LVDSGetPWMMaxBacklight(struct drm_device *dev)
-+{
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+ u32 max_pwm_blc = 0;
-+
-+ max_pwm_blc = ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> \
-+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-+
-+ if (!(max_pwm_blc & BLC_MAX_PWM_REG_FREQ)) {
-+ if (LVDSCalculatePWMCtrlRegFreq(dev)) {
-+ max_pwm_blc = PWMControlRegFreq;
-+ }
-+ }
-+
-+ DRM_INFO("LVDSGetPWMMaxBacklight: the max_pwm_blc is %d.\n", max_pwm_blc);
-+ return max_pwm_blc;
-+}
-+
-+
- /**
- * Sets the backlight level.
- *
-@@ -43,12 +296,48 @@
- */
- static void intel_lvds_set_backlight(struct drm_device *dev, int level)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-+ /*
- u32 blc_pwm_ctl;
-
- blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
- (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
-+ */
-+ u32 newbacklight = 0;
-+
-+ DRM_INFO("intel_lvds_set_backlight: the level is %d\n", level);
-+
-+ if(blc_type == BLC_I2C_TYPE){
-+ newbacklight = BRIGHTNESS_MASK & ((unsigned long)level * \
-+ BRIGHTNESS_MASK /BRIGHTNESS_MAX_LEVEL);
-+
-+ if (blc_pol == BLC_POLARITY_INVERSE) {
-+ newbacklight = BRIGHTNESS_MASK - newbacklight;
-+ }
-+
-+ LVDSI2CSetBacklight(dev, newbacklight);
-+
-+ } else if (blc_type == BLC_PWM_TYPE) {
-+ u32 max_pwm_blc = LVDSGetPWMMaxBacklight(dev);
-+
-+ u32 blc_pwm_duty_cycle;
-+
-+ /* Provent LVDS going to total black */
-+ if ( level < 20) {
-+ level = 20;
-+ }
-+ blc_pwm_duty_cycle = level * max_pwm_blc/BRIGHTNESS_MAX_LEVEL;
-+
-+ if (blc_pol == BLC_POLARITY_INVERSE) {
-+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
-+ }
-+
-+ blc_pwm_duty_cycle &= BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
-+
-+ I915_WRITE(BLC_PWM_CTL,
-+ (max_pwm_blc << BACKLIGHT_PWM_CTL_SHIFT)| (blc_pwm_duty_cycle));
-+ }
- }
-
- /**
-@@ -56,10 +345,13 @@
- */
- static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ return BRIGHTNESS_MAX_LEVEL;
-+ /*
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
- return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
- BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-+ */
- }
-
- /**
-@@ -77,7 +369,7 @@
- pp_status = I915_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
-
-- intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
-+ intel_lvds_set_backlight(dev, lvds_backlight);
- } else {
- intel_lvds_set_backlight(dev, 0);
-
-@@ -93,6 +385,7 @@
- {
- struct drm_device *dev = encoder->dev;
-
-+ DRM_INFO("intel_lvds_dpms: the mode is %d\n", mode);
- if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(dev, true);
- else
-@@ -152,6 +445,13 @@
- return MODE_PANEL;
- }
-
-+ if (IS_POULSBO(dev) && sku_bMaxResEnableInt) {
-+ if (mode->hdisplay > MAX_HDISPLAY)
-+ return MODE_PANEL;
-+ if (mode->vdisplay > MAX_VDISPLAY)
-+ return MODE_PANEL;
-+ }
-+
- return MODE_OK;
- }
-
-@@ -185,20 +485,20 @@
- * with the panel scaling set up to source from the H/VDisplay
- * of the original mode.
- */
-- if (dev_priv->panel_fixed_mode != NULL) {
-- adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
-+ if (dev_priv_common->panel_fixed_mode != NULL) {
-+ adjusted_mode->hdisplay = dev_priv_common->panel_fixed_mode->hdisplay;
- adjusted_mode->hsync_start =
-- dev_priv->panel_fixed_mode->hsync_start;
-+ dev_priv_common->panel_fixed_mode->hsync_start;
- adjusted_mode->hsync_end =
-- dev_priv->panel_fixed_mode->hsync_end;
-- adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
-- adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
-+ dev_priv_common->panel_fixed_mode->hsync_end;
-+ adjusted_mode->htotal = dev_priv_common->panel_fixed_mode->htotal;
-+ adjusted_mode->vdisplay = dev_priv_common->panel_fixed_mode->vdisplay;
- adjusted_mode->vsync_start =
-- dev_priv->panel_fixed_mode->vsync_start;
-+ dev_priv_common->panel_fixed_mode->vsync_start;
- adjusted_mode->vsync_end =
-- dev_priv->panel_fixed_mode->vsync_end;
-- adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
-- adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
-+ dev_priv_common->panel_fixed_mode->vsync_end;
-+ adjusted_mode->vtotal = dev_priv_common->panel_fixed_mode->vtotal;
-+ adjusted_mode->clock = dev_priv_common->panel_fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- }
-
-@@ -214,10 +514,10 @@
- static void intel_lvds_prepare(struct drm_encoder *encoder)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
-- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-- dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-+ dev_priv_common->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-+ dev_priv_common->backlight_duty_cycle = (dev_priv_common->saveBLC_PWM_CTL &
- BACKLIGHT_DUTY_CYCLE_MASK);
-
- intel_lvds_set_power(dev, false);
-@@ -226,10 +526,11 @@
- static void intel_lvds_commit( struct drm_encoder *encoder)
- {
- struct drm_device *dev = encoder->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
-
-- if (dev_priv->backlight_duty_cycle == 0)
-- dev_priv->backlight_duty_cycle =
-+ if (dev_priv_common->backlight_duty_cycle == 0)
-+ //dev_priv_common->backlight_duty_cycle =
-+ lvds_backlight =
- intel_lvds_get_max_backlight(dev);
-
- intel_lvds_set_power(dev, true);
-@@ -291,10 +592,12 @@
- {
- struct drm_device *dev = connector->dev;
- struct intel_output *intel_output = to_intel_output(connector);
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- int ret = 0;
-
-+ mutex_lock(&dev->mode_config.mutex);
- ret = intel_ddc_get_modes(intel_output);
-+ mutex_unlock(&dev->mode_config.mutex);
-
- if (ret)
- return ret;
-@@ -308,11 +611,11 @@
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
-
-- if (dev_priv->panel_fixed_mode != NULL) {
-+ if (dev_priv_common->panel_fixed_mode != NULL) {
- struct drm_display_mode *mode;
-
- mutex_unlock(&dev->mode_config.mutex);
-- mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
-+ mode = drm_mode_duplicate(dev, dev_priv_common->panel_fixed_mode);
- drm_mode_probed_add(connector, mode);
- mutex_unlock(&dev->mode_config.mutex);
-
-@@ -333,8 +636,11 @@
- {
- struct intel_output *intel_output = to_intel_output(connector);
-
-+ if(dev_OpRegion != NULL)
-+ iounmap(dev_OpRegion);
- if (intel_output->ddc_bus)
- intel_i2c_destroy(intel_output->ddc_bus);
-+ intel_i2c_destroy(lvds_i2c_bus);
- drm_sysfs_connector_remove(connector);
- drm_connector_cleanup(connector);
- kfree(connector);
-@@ -373,7 +679,45 @@
- };
-
-
--
-+int intel_get_acpi_dod(char *method)
-+{
-+ int status;
-+ int found = 0;
-+ int i;
-+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-+ union acpi_object *dod = NULL;
-+ union acpi_object *obj;
-+
-+ status = acpi_evaluate_object(NULL, method, NULL, &buffer);
-+ if (ACPI_FAILURE(status))
-+ return -ENODEV;
-+
-+ dod = buffer.pointer;
-+ if (!dod || (dod->type != ACPI_TYPE_PACKAGE)) {
-+ status = -EFAULT;
-+ goto out;
-+ }
-+
-+ DRM_DEBUG("Found %d video heads in _DOD\n", dod->package.count);
-+
-+ for (i = 0; i < dod->package.count; i++) {
-+ obj = &dod->package.elements[i];
-+
-+ if (obj->type != ACPI_TYPE_INTEGER) {
-+ DRM_DEBUG("Invalid _DOD data\n");
-+ } else {
-+ DRM_DEBUG("dod element[%d] = 0x%x\n", i,
-+ (int)obj->integer.value);
-+
-+ /* look for an LVDS type */
-+ if (obj->integer.value & 0x00000400)
-+ found = 1;
-+ }
-+ }
-+ out:
-+ kfree(buffer.pointer);
-+ return found;
-+}
- /**
- * intel_lvds_init - setup LVDS connectors on this device
- * @dev: drm device
-@@ -383,7 +727,7 @@
- */
- void intel_lvds_init(struct drm_device *dev)
- {
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_output *intel_output;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
-@@ -391,12 +735,38 @@
- struct drm_crtc *crtc;
- u32 lvds;
- int pipe;
-+ u32 OpRegion_Phys;
-+ unsigned int OpRegion_Size = 0x100;
-+ OpRegionPtr OpRegion;
-+ char *OpRegion_String = "IntelGraphicsMem";
-+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
-+ u32 clock;
-+ u32 sku_value = 0;
-+ unsigned int CoreClocks[] = {
-+ 100,
-+ 133,
-+ 150,
-+ 178,
-+ 200,
-+ 266,
-+ 266,
-+ 266
-+ };
-+ struct vbt_header *vbt;
-+ struct bdb_header *bdb;
-+ int vbt_off, bdb_off, bdb_block_off, block_size;
-+ int panel_type = -1;
-+ unsigned char *bios;
-+ unsigned char *vbt_buf;
-
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
- if (!intel_output) {
- return;
- }
-
-+ //if (!drm_intel_ignore_acpi && !intel_get_acpi_dod(ACPI_DOD))
-+ // return;
-+
- connector = &intel_output->base;
- encoder = &intel_output->enc;
- drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs,
-@@ -414,16 +784,139 @@
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
-
-+ //initialize the I2C bus and BLC data
-+ lvds_i2c_bus = intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
-+ if (!lvds_i2c_bus) {
-+ dev_printk(KERN_ERR, &dev->pdev->dev, "i2c bus registration "
-+ "failed.\n");
-+ return;
-+ }
-+ lvds_i2c_bus->slave_addr = 0x2c;//0x58;
-+ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
-+ blc_type = 0;
-+ blc_pol = 0;
-
-- /*
-- * LVDS discovery:
-- * 1) check for EDID on DDC
-- * 2) check for VBT data
-- * 3) check to see if LVDS is already on
-- * if none of the above, no panel
-- * 4) make sure lid is open
-- * if closed, act like it's not there for now
-- */
-+ //get the BLC init data from VBT
-+
-+
-+
-+
-+ pci_read_config_dword(dev->pdev, 0xFC, &OpRegion_Phys);
-+
-+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_Size);
-+ dev_OpRegionSize = OpRegion_Size;
-+
-+ OpRegion = (OpRegionPtr) dev_OpRegion;
-+
-+ if (!memcmp(OpRegion->sign, OpRegion_String, 16)) {
-+ unsigned int OpRegion_NewSize;
-+
-+ OpRegion_NewSize = OpRegion->size * 1024;
-+
-+ dev_OpRegionSize = OpRegion_NewSize;
-+
-+ iounmap(dev_OpRegion);
-+ dev_OpRegion = ioremap(OpRegion_Phys, OpRegion_NewSize);
-+ } else {
-+ iounmap(dev_OpRegion);
-+ dev_OpRegion = NULL;
-+ }
-+
-+ if((dev_OpRegion != NULL)&&(dev_OpRegionSize >= OFFSET_OPREGION_VBT)) {
-+ DRM_INFO("intel_lvds_init: OpRegion has the VBT address\n");
-+ vbt_buf = dev_OpRegion + OFFSET_OPREGION_VBT;
-+ vbt = (struct vbt_header *)(dev_OpRegion + OFFSET_OPREGION_VBT);
-+ } else {
-+ DRM_INFO("intel_lvds_init: No OpRegion, use the bios at fixed address 0xc0000\n");
-+ bios = phys_to_virt(0xC0000);
-+ if(*((u16 *)bios) != 0xAA55){
-+ bios = NULL;
-+ DRM_ERROR("the bios is incorrect\n");
-+ goto blc_out;
-+ }
-+ vbt_off = bios[0x1a] | (bios[0x1a + 1] << 8);
-+ DRM_INFO("intel_lvds_init: the vbt off is %x\n", vbt_off);
-+ vbt_buf = bios + vbt_off;
-+ vbt = (struct vbt_header *)(bios + vbt_off);
-+ }
-+
-+ bdb_off = vbt->bdb_offset;
-+ bdb = (struct bdb_header *)(vbt_buf + bdb_off);
-+
-+ DRM_INFO("intel_lvds_init: The bdb->signature is %s, the bdb_off is %d\n",bdb->signature, bdb_off);
-+
-+ if (memcmp(bdb->signature, "BIOS_DATA_BLOCK ", 16) != 0) {
-+ DRM_ERROR("the vbt is error\n");
-+ goto blc_out;
-+ }
-+
-+ for (bdb_block_off = bdb->header_size; bdb_block_off < bdb->bdb_size;
-+ bdb_block_off += block_size) {
-+ int start = bdb_off + bdb_block_off;
-+ int id, num_entries;
-+ struct lvds_bdb_1 *lvds1;
-+ struct lvds_blc *lvdsblc;
-+ struct lvds_bdb_blc *bdbblc;
-+
-+ id = vbt_buf[start];
-+ block_size = (vbt_buf[start + 1] | (vbt_buf[start + 2] << 8)) + 3;
-+ switch (id) {
-+ case 40:
-+ lvds1 = (struct lvds_bdb_1 *)(vbt_buf+ start);
-+ panel_type = lvds1->panel_type;
-+ //if (lvds1->caps & LVDS_CAP_DITHER)
-+ // *panelWantsDither = TRUE;
-+ break;
-+
-+ case 43:
-+ bdbblc = (struct lvds_bdb_blc *)(vbt_buf + start);
-+ num_entries = bdbblc->table_size? (bdbblc->size - \
-+ sizeof(bdbblc->table_size))/bdbblc->table_size : 0;
-+ if (num_entries << 16 && bdbblc->table_size == sizeof(struct lvds_blc)) {
-+ lvdsblc = (struct lvds_blc *)(vbt_buf + start + sizeof(struct lvds_bdb_blc));
-+ lvdsblc += panel_type;
-+ blc_type = lvdsblc->type;
-+ blc_pol = lvdsblc->pol;
-+ blc_freq = lvdsblc->freq;
-+ blc_minbrightness = lvdsblc->minbrightness;
-+ blc_i2caddr = lvdsblc->i2caddr;
-+ blc_brightnesscmd = lvdsblc->brightnesscmd;
-+ DRM_INFO("intel_lvds_init: BLC Data in BIOS VBT tables: datasize=%d paneltype=%d \
-+ type=0x%02x pol=0x%02x freq=0x%04x minlevel=0x%02x \
-+ i2caddr=0x%02x cmd=0x%02x \n",
-+ 0,
-+ panel_type,
-+ lvdsblc->type,
-+ lvdsblc->pol,
-+ lvdsblc->freq,
-+ lvdsblc->minbrightness,
-+ lvdsblc->i2caddr,
-+ lvdsblc->brightnesscmd);
-+ }
-+ break;
-+ }
-+ }
-+
-+ //get the Core Clock for calculating MAX PWM value
-+ //check whether the MaxResEnableInt is
-+
-+ if(pci_root)
-+ {
-+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
-+ pci_read_config_dword(pci_root, 0xD4, &clock);
-+ CoreClock = CoreClocks[clock & 0x07];
-+ DRM_INFO("intel_lvds_init: the CoreClock is %d\n", CoreClock);
-+
-+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
-+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
-+ sku_bMaxResEnableInt = (sku_value & PCI_PORT5_REG80_MAXRES_INT_EN)? true : false;
-+ DRM_INFO("intel_lvds_init: sku_value is 0x%08x\n", sku_value);
-+ DRM_INFO("intel_lvds_init: sku_bMaxResEnableInt is %d\n", sku_bMaxResEnableInt);
-+ }
-+
-+
-+
-+blc_out:
-
- /* Set up the DDC bus. */
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
-@@ -437,12 +930,14 @@
- * Attempt to get the fixed panel mode from DDC. Assume that the
- * preferred mode is the right one.
- */
-+ mutex_lock(&dev->mode_config.mutex);
- intel_ddc_get_modes(intel_output);
-+ mutex_unlock(&dev->mode_config.mutex);
-
- list_for_each_entry(scan, &connector->probed_modes, head) {
- mutex_lock(&dev->mode_config.mutex);
- if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-- dev_priv->panel_fixed_mode =
-+ dev_priv_common->panel_fixed_mode =
- drm_mode_duplicate(dev, scan);
- mutex_unlock(&dev->mode_config.mutex);
- goto out; /* FIXME: check for quirks */
-@@ -450,21 +945,6 @@
- mutex_unlock(&dev->mode_config.mutex);
- }
-
-- /* Failed to get EDID, what about VBT? */
-- if (dev_priv->vbt_mode) {
-- mutex_lock(&dev->mode_config.mutex);
-- dev_priv->panel_fixed_mode =
-- drm_mode_duplicate(dev, dev_priv->vbt_mode);
-- mutex_unlock(&dev->mode_config.mutex);
-- if (dev_priv->panel_fixed_mode) {
-- dev_priv->panel_fixed_mode->type |=
-- DRM_MODE_TYPE_PREFERRED;
-- drm_mode_probed_add(connector,
-- dev_priv->panel_fixed_mode);
-- goto out;
-- }
-- }
--
- /*
- * If we didn't get EDID, try checking if the panel is already turned
- * on. If so, assume that whatever is currently programmed is the
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_sdvo.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_sdvo.c 2009-02-20 12:22:54.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_sdvo.c 2009-02-20 12:23:06.000000000 +0000
-@@ -37,6 +37,9 @@
-
- #undef SDVO_DEBUG
-
-+#define PCI_PORT5_REG80_FFUSE 0xD0058000
-+#define PCI_PORT5_REG80_SDVO_DISABLE 0x0020
-+
- struct intel_sdvo_priv {
- struct intel_i2c_chan *i2c_bus;
- int slaveaddr;
-@@ -989,6 +992,21 @@
- int i;
- int encoder_type, output_id;
-
-+ if (IS_POULSBO(dev)) {
-+ struct pci_dev * pci_root = pci_get_bus_and_slot(0, 0);
-+ u32 sku_value = 0;
-+ bool sku_bSDVOEnable = true;
-+ if(pci_root) {
-+ pci_write_config_dword(pci_root, 0xD0, PCI_PORT5_REG80_FFUSE);
-+ pci_read_config_dword(pci_root, 0xD4, &sku_value);
-+ sku_bSDVOEnable = (sku_value & PCI_PORT5_REG80_SDVO_DISABLE)?false : true;
-+ DRM_INFO("intel_sdvo_init: sku_value is 0x%08x\n", sku_value);
-+ DRM_INFO("intel_sdvo_init: sku_bSDVOEnable is %d\n", sku_bSDVOEnable);
-+ if (sku_bSDVOEnable == false)
-+ return false;
-+ }
-+ }
-+
- intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
- if (!intel_output) {
- return false;
-Index: linux-2.6.28/drivers/gpu/drm/psb/psb_priv.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/psb/psb_priv.h 2009-02-20 12:23:06.000000000 +0000
-@@ -0,0 +1,181 @@
-+#include "psb_drm.h"
-+#include "psb_reg.h"
-+#include "psb_schedule.h"
-+#include "../i915/i915_common.h"
-+
-+#define DRM_DRIVER_PRIVATE_T struct drm_i915_common_private
-+
-+struct drm_psb_uopt {
-+ int clock_gating;
-+};
-+
-+struct drm_psb_private {
-+ /* common is assumed to be the first item in this structure */
-+ struct drm_i915_common_private common;
-+
-+ unsigned long chipset;
-+ uint8_t psb_rev_id;
-+
-+ struct psb_xhw_buf resume_buf;
-+ struct drm_psb_dev_info_arg dev_info;
-+ struct drm_psb_uopt uopt;
-+
-+ struct psb_gtt *pg;
-+
-+ struct page *scratch_page;
-+ struct page *comm_page;
-+
-+ volatile uint32_t *comm;
-+ uint32_t comm_mmu_offset;
-+ uint32_t mmu_2d_offset;
-+ uint32_t sequence[PSB_NUM_ENGINES];
-+ uint32_t last_sequence[PSB_NUM_ENGINES];
-+ int idle[PSB_NUM_ENGINES];
-+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
-+ int engine_lockup_2d;
-+
-+ /** Protects user_irq_refcount and irq_mask_reg */
-+ spinlock_t user_irq_lock;
-+ u32 pipestat[2];
-+
-+ struct psb_mmu_driver *mmu;
-+ struct psb_mmu_pd *pf_pd;
-+
-+ uint8_t *sgx_reg;
-+ //uint8_t *vdc_reg;
-+ uint8_t *msvdx_reg;
-+
-+ /*
-+ * MSVDX
-+ */
-+ int msvdx_needs_reset;
-+ int has_msvdx;
-+ uint32_t gatt_free_offset;
-+ atomic_t msvdx_mmu_invaldc;
-+
-+ /*
-+ * Fencing / irq.
-+ */
-+
-+ uint32_t sgx_irq_mask;
-+ uint32_t sgx2_irq_mask;
-+ uint32_t vdc_irq_mask;
-+
-+ spinlock_t irqmask_lock;
-+ spinlock_t sequence_lock;
-+ int fence0_irq_on;
-+ int irq_enabled;
-+ unsigned int irqen_count_2d;
-+ wait_queue_head_t event_2d_queue;
-+
-+ wait_queue_head_t queue_2d;
-+ atomic_t lock_2d;
-+ atomic_t ta_wait_2d;
-+ atomic_t ta_wait_2d_irq;
-+ atomic_t waiters_2d;
-+
-+ uint32_t msvdx_current_sequence;
-+ uint32_t msvdx_last_sequence;
-+#define MSVDX_MAX_IDELTIME HZ*30
-+ uint32_t msvdx_finished_sequence;
-+ uint32_t msvdx_start_idle;
-+ unsigned long msvdx_idle_start_jiffies;
-+
-+ int fence2_irq_on;
-+
-+ /*
-+ * MSVDX Rendec Memory
-+ */
-+ struct drm_buffer_object *ccb0;
-+ uint32_t base_addr0;
-+ struct drm_buffer_object *ccb1;
-+ uint32_t base_addr1;
-+
-+ /*
-+ * Memory managers
-+ */
-+
-+ int have_vram;
-+ int have_tt;
-+ int have_mem_mmu;
-+ int have_mem_aper;
-+ int have_mem_kernel;
-+ int have_mem_pds;
-+ int have_mem_rastgeom;
-+ struct mutex temp_mem;
-+
-+ /*
-+ * Relocation buffer mapping.
-+ */
-+
-+ spinlock_t reloc_lock;
-+ unsigned int rel_mapped_pages;
-+ wait_queue_head_t rel_mapped_queue;
-+
-+ /*
-+ * Register state
-+ */
-+ uint32_t saveCLOCKGATING;
-+
-+ /*
-+ * USE code base register management.
-+ */
-+
-+ struct drm_reg_manager use_manager;
-+
-+ /*
-+ * Xhw
-+ */
-+
-+ uint32_t *xhw;
-+ struct drm_buffer_object *xhw_bo;
-+ struct drm_bo_kmap_obj xhw_kmap;
-+ struct list_head xhw_in;
-+ spinlock_t xhw_lock;
-+ atomic_t xhw_client;
-+ struct drm_file *xhw_file;
-+ wait_queue_head_t xhw_queue;
-+ wait_queue_head_t xhw_caller_queue;
-+ struct mutex xhw_mutex;
-+ struct psb_xhw_buf *xhw_cur_buf;
-+ int xhw_submit_ok;
-+ int xhw_on;
-+
-+ /*
-+ * Scheduling.
-+ */
-+
-+ struct mutex reset_mutex;
-+ struct mutex cmdbuf_mutex;
-+ struct psb_scheduler scheduler;
-+ struct psb_buflist_item *buffers;
-+ uint32_t ta_mem_pages;
-+ struct psb_ta_mem *ta_mem;
-+ int force_ta_mem_load;
-+
-+ /*
-+ * Watchdog
-+ */
-+
-+ spinlock_t watchdog_lock;
-+ struct timer_list watchdog_timer;
-+ struct work_struct watchdog_wq;
-+ struct work_struct msvdx_watchdog_wq;
-+ int timer_available;
-+
-+ /*
-+ * msvdx command queue
-+ */
-+ spinlock_t msvdx_lock;
-+ struct mutex msvdx_mutex;
-+ struct list_head msvdx_queue;
-+ int msvdx_busy;
-+
-+};
-+
-+
-+extern void intel_modeset_init(struct drm_device *dev);
-+extern void intel_modeset_cleanup(struct drm_device *dev);
-+
-+extern void intel_crtc_mode_restore(struct drm_crtc *crtc);
-+extern void intel_crtc_mode_save(struct drm_crtc *crtc);
-Index: linux-2.6.28/drivers/gpu/drm/i915/intel_display.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/intel_display.c 2009-02-20 12:22:54.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/intel_display.c 2009-02-20 12:23:06.000000000 +0000
-@@ -342,60 +342,25 @@
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- udelay(20000);
- }
-+EXPORT_SYMBOL(intel_wait_for_vblank);
-
- static void
- intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
- {
- struct drm_device *dev = crtc->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-- struct intel_framebuffer *intel_fb;
-- struct drm_i915_gem_object *obj_priv;
-- struct drm_gem_object *obj;
- int pipe = intel_crtc->pipe;
- unsigned long Start, Offset;
- int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
-- u32 dspcntr, alignment;
--
-- /* no fb bound */
-- if (!crtc->fb) {
-- DRM_DEBUG("No FB bound\n");
-- return;
-- }
--
-- intel_fb = to_intel_framebuffer(crtc->fb);
-- obj = intel_fb->obj;
-- obj_priv = obj->driver_private;
--
-- switch (obj_priv->tiling_mode) {
-- case I915_TILING_NONE:
-- alignment = 64 * 1024;
-- break;
-- case I915_TILING_X:
-- if (IS_I9XX(dev))
-- alignment = 1024 * 1024;
-- else
-- alignment = 512 * 1024;
-- break;
-- case I915_TILING_Y:
-- /* FIXME: Is this true? */
-- DRM_ERROR("Y tiled not allowed for scan out buffers\n");
-- return;
-- default:
-- BUG();
-- }
-+ u32 dspcntr;
-
-- if (i915_gem_object_pin(intel_fb->obj, alignment))
-- return;
--
-- i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
--
-- Start = obj_priv->gtt_offset;
-+ Start = crtc->fb->offset;
- Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
- I915_WRITE(dspstride, crtc->fb->pitch);
-@@ -434,13 +399,6 @@
- I915_READ(dspbase);
- }
-
-- intel_wait_for_vblank(dev);
--
-- if (old_fb) {
-- intel_fb = to_intel_framebuffer(old_fb);
-- i915_gem_object_unpin(intel_fb->obj);
-- }
--
- if (!dev->primary->master)
- return;
-
-@@ -642,7 +600,7 @@
- return 400000;
- else if (IS_I915G(dev))
- return 333000;
-- else if (IS_I945GM(dev) || IS_845G(dev))
-+ else if (IS_I945GM(dev) || IS_POULSBO(dev) || IS_845G(dev))
- return 200000;
- else if (IS_I915GM(dev)) {
- u16 gcfgc = 0;
-@@ -786,13 +744,15 @@
-
- dpll = DPLL_VGA_MODE_DIS;
- if (IS_I9XX(dev)) {
-- if (is_lvds)
-+ if (is_lvds) {
- dpll |= DPLLB_MODE_LVDS;
-- else
-+ if (IS_POULSBO(dev))
-+ dpll |= DPLL_DVO_HIGH_SPEED;
-+ } else
- dpll |= DPLLB_MODE_DAC_SERIAL;
- if (is_sdvo) {
- dpll |= DPLL_DVO_HIGH_SPEED;
-- if (IS_I945G(dev) || IS_I945GM(dev)) {
-+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_POULSBO(dev)) {
- int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
-@@ -959,7 +919,7 @@
- void intel_crtc_load_lut(struct drm_crtc *crtc)
- {
- struct drm_device *dev = crtc->dev;
-- struct drm_i915_private *dev_priv = dev->dev_private;
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
- int i;
-@@ -1021,7 +981,7 @@
- ret = -ENOMEM;
- goto fail;
- }
--
-+#if 0
- /* we only need to pin inside GTT if cursor is non-phy */
- if (!dev_priv->cursor_needs_physical) {
- ret = i915_gem_object_pin(bo, PAGE_SIZE);
-@@ -1038,7 +998,7 @@
- }
- addr = obj_priv->phys_obj->handle->busaddr;
- }
--
-+#endif
- temp = 0;
- /* set the pipe for the cursor */
- temp |= (pipe << 28);
-@@ -1049,6 +1009,7 @@
- I915_WRITE(base, addr);
-
- if (intel_crtc->cursor_bo) {
-+#if 0
- if (dev_priv->cursor_needs_physical) {
- if (intel_crtc->cursor_bo != bo)
- i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
-@@ -1057,6 +1018,7 @@
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(intel_crtc->cursor_bo);
- mutex_unlock(&dev->struct_mutex);
-+#endif
- }
-
- intel_crtc->cursor_addr = addr;
-@@ -1456,7 +1418,8 @@
- {
- struct drm_connector *connector;
-
-- intel_crt_init(dev);
-+ if (!IS_POULSBO(dev))
-+ intel_crt_init(dev);
-
- /* Set up integrated LVDS */
- if (IS_MOBILE(dev) && !IS_I830(dev))
-@@ -1472,12 +1435,9 @@
- found = intel_sdvo_init(dev, SDVOC);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
- intel_hdmi_init(dev, SDVOC);
-- } else
-+ } else
- intel_dvo_init(dev);
-
-- if (IS_I9XX(dev) && IS_MOBILE(dev))
-- intel_tv_init(dev);
--
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct intel_output *intel_output = to_intel_output(connector);
- struct drm_encoder *encoder = &intel_output->enc;
-@@ -1525,8 +1485,8 @@
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_device *dev = fb->dev;
-
-- if (fb->fbdev)
-- intelfb_remove(dev, fb);
-+ //if (fb->fbdev)
-+ // intelfb_remove(dev, fb);
-
- drm_framebuffer_cleanup(fb);
- mutex_lock(&dev->struct_mutex);
-@@ -1603,7 +1563,7 @@
-
- static const struct drm_mode_config_funcs intel_mode_funcs = {
- .fb_create = intel_user_framebuffer_create,
-- .fb_changed = intelfb_probe,
-+// .fb_changed = intelfb_probe,
- };
-
- void intel_modeset_init(struct drm_device *dev)
-Index: linux-2.6.28/drivers/gpu/drm/i915/i915_irq.c
-===================================================================
---- linux-2.6.28.orig/drivers/gpu/drm/i915/i915_irq.c 2009-02-20 12:22:54.000000000 +0000
-+++ linux-2.6.28/drivers/gpu/drm/i915/i915_irq.c 2009-02-20 12:23:06.000000000 +0000
-@@ -536,6 +536,7 @@
-
- int i915_driver_irq_postinstall(struct drm_device *dev)
- {
-+ struct drm_i915_common_private *dev_priv_common = dev->dev_private;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch
new file mode 100644
index 000000000..2655acfaa
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch
@@ -0,0 +1,486 @@
+From 84e7ccff650b8f124585ba7d5b9a1544f53457e7 Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+Date: Fri, 27 Feb 2009 16:53:11 +0100
+Subject: [PATCH 1/8] drm: Split out the mm declarations in a separate header. Add atomic operations.
+
+Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+---
+ drivers/gpu/drm/drm_mm.c | 173 ++++++++++++++++++++++++++++++++++++++--------
+ include/drm/drmP.h | 37 +----------
+ include/drm/drm_mm.h | 90 ++++++++++++++++++++++++
+ 3 files changed, 235 insertions(+), 65 deletions(-)
+ create mode 100644 include/drm/drm_mm.h
+
+Index: linux-2.6.28/drivers/gpu/drm/drm_mm.c
+===================================================================
+--- linux-2.6.28.orig/drivers/gpu/drm/drm_mm.c 2009-03-09 19:19:52.000000000 +0000
++++ linux-2.6.28/drivers/gpu/drm/drm_mm.c 2009-03-12 13:15:05.000000000 +0000
+@@ -42,8 +43,11 @@
+ */
+
+ #include "drmP.h"
++#include "drm_mm.h"
+ #include <linux/slab.h>
+
++#define MM_UNUSED_TARGET 4
++
+ unsigned long drm_mm_tail_space(struct drm_mm *mm)
+ {
+ struct list_head *tail_node;
+@@ -74,16 +78,66 @@
+ return 0;
+ }
+
++static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
++{
++ struct drm_mm_node *child;
++
++ if (atomic) {
++ child =
++ (struct drm_mm_node *)kmalloc(sizeof(*child), GFP_ATOMIC);
++ } else {
++ child =
++ (struct drm_mm_node *)kmalloc(sizeof(*child), GFP_KERNEL);
++ }
++
++ if (unlikely(child == NULL)) {
++ spin_lock(&mm->unused_lock);
++ if (list_empty(&mm->unused_nodes))
++ child = NULL;
++ else {
++ child =
++ list_entry(mm->unused_nodes.next,
++ struct drm_mm_node, fl_entry);
++ list_del(&child->fl_entry);
++ --mm->num_unused;
++ }
++ spin_unlock(&mm->unused_lock);
++ }
++ return child;
++}
++
++int drm_mm_pre_get(struct drm_mm *mm)
++{
++ struct drm_mm_node *node;
++
++ spin_lock(&mm->unused_lock);
++ while (mm->num_unused < MM_UNUSED_TARGET) {
++ spin_unlock(&mm->unused_lock);
++ node = kmalloc(sizeof(*node), GFP_KERNEL);
++ spin_lock(&mm->unused_lock);
++
++ if (unlikely(node == NULL)) {
++ int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
++ spin_unlock(&mm->unused_lock);
++ return ret;
++ }
++ ++mm->num_unused;
++ list_add_tail(&node->fl_entry, &mm->unused_nodes);
++ }
++ spin_unlock(&mm->unused_lock);
++ return 0;
++}
++
++EXPORT_SYMBOL(drm_mm_pre_get);
+
+ static int drm_mm_create_tail_node(struct drm_mm *mm,
+- unsigned long start,
+- unsigned long size)
++ unsigned long start,
++ unsigned long size, int atomic)
+ {
+ struct drm_mm_node *child;
+
+- child = (struct drm_mm_node *)
+- drm_alloc(sizeof(*child), DRM_MEM_MM);
+- if (!child)
++ child = drm_mm_kmalloc(mm, atomic);
++ if (unlikely(child == NULL))
+ return -ENOMEM;
+
+ child->free = 1;
+@@ -97,8 +151,7 @@
+ return 0;
+ }
+
+-
+-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
++int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
+ {
+ struct list_head *tail_node;
+ struct drm_mm_node *entry;
+@@ -106,20 +159,21 @@
+ tail_node = mm->ml_entry.prev;
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ if (!entry->free) {
+- return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
++ return drm_mm_create_tail_node(mm, entry->start + entry->size,
++ size, atomic);
+ }
+ entry->size += size;
+ return 0;
+ }
+
+ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+- unsigned long size)
++ unsigned long size,
++ int atomic)
+ {
+ struct drm_mm_node *child;
+
+- child = (struct drm_mm_node *)
+- drm_alloc(sizeof(*child), DRM_MEM_MM);
+- if (!child)
++ child = drm_mm_kmalloc(parent->mm, atomic);
++ if (unlikely(child == NULL))
+ return NULL;
+
+ INIT_LIST_HEAD(&child->fl_entry);
+@@ -151,8 +205,9 @@
+ tmp = parent->start % alignment;
+
+ if (tmp) {
+- align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+- if (!align_splitoff)
++ align_splitoff =
++ drm_mm_split_at_start(parent, alignment - tmp, 0);
++ if (unlikely(align_splitoff == NULL))
+ return NULL;
+ }
+
+@@ -161,7 +216,7 @@
+ parent->free = 0;
+ return parent;
+ } else {
+- child = drm_mm_split_at_start(parent, size);
++ child = drm_mm_split_at_start(parent, size, 0);
+ }
+
+ if (align_splitoff)
+@@ -169,14 +224,50 @@
+
+ return child;
+ }
++
+ EXPORT_SYMBOL(drm_mm_get_block);
+
++struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
++ unsigned long size,
++ unsigned alignment)
++{
++
++ struct drm_mm_node *align_splitoff = NULL;
++ struct drm_mm_node *child;
++ unsigned tmp = 0;
++
++ if (alignment)
++ tmp = parent->start % alignment;
++
++ if (tmp) {
++ align_splitoff =
++ drm_mm_split_at_start(parent, alignment - tmp, 1);
++ if (unlikely(align_splitoff == NULL))
++ return NULL;
++ }
++
++ if (parent->size == size) {
++ list_del_init(&parent->fl_entry);
++ parent->free = 0;
++ return parent;
++ } else {
++ child = drm_mm_split_at_start(parent, size, 1);
++ }
++
++ if (align_splitoff)
++ drm_mm_put_block(align_splitoff);
++
++ return child;
++}
++
++EXPORT_SYMBOL(drm_mm_get_block_atomic);
++
+ /*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+-void drm_mm_put_block(struct drm_mm_node * cur)
++void drm_mm_put_block(struct drm_mm_node *cur)
+ {
+
+ struct drm_mm *mm = cur->mm;
+@@ -188,21 +279,27 @@
+ int merged = 0;
+
+ if (cur_head->prev != root_head) {
+- prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
++ prev_node =
++ list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+ if (prev_node->free) {
+ prev_node->size += cur->size;
+ merged = 1;
+ }
+ }
+ if (cur_head->next != root_head) {
+- next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
++ next_node =
++ list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+ if (next_node->free) {
+ if (merged) {
+ prev_node->size += next_node->size;
+ list_del(&next_node->ml_entry);
+ list_del(&next_node->fl_entry);
+- drm_free(next_node, sizeof(*next_node),
+- DRM_MEM_MM);
++ if (mm->num_unused < MM_UNUSED_TARGET) {
++ list_add(&next_node->fl_entry,
++ &mm->unused_nodes);
++ ++mm->num_unused;
++ } else
++ kfree(next_node);
+ } else {
+ next_node->size += cur->size;
+ next_node->start = cur->start;
+@@ -215,14 +312,19 @@
+ list_add(&cur->fl_entry, &mm->fl_entry);
+ } else {
+ list_del(&cur->ml_entry);
+- drm_free(cur, sizeof(*cur), DRM_MEM_MM);
++ if (mm->num_unused < MM_UNUSED_TARGET) {
++ list_add(&cur->fl_entry, &mm->unused_nodes);
++ ++mm->num_unused;
++ } else
++ kfree(cur);
+ }
+ }
++
+ EXPORT_SYMBOL(drm_mm_put_block);
+
+-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+- unsigned long size,
+- unsigned alignment, int best_match)
++struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
++ unsigned long size,
++ unsigned alignment, int best_match)
+ {
+ struct list_head *list;
+ const struct list_head *free_stack = &mm->fl_entry;
+@@ -247,7 +349,6 @@
+ wasted += alignment - tmp;
+ }
+
+-
+ if (entry->size >= size + wasted) {
+ if (!best_match)
+ return entry;
+@@ -260,6 +361,7 @@
+
+ return best;
+ }
++EXPORT_SYMBOL(drm_mm_search_free);
+
+ int drm_mm_clean(struct drm_mm * mm)
+ {
+@@ -267,14 +369,17 @@
+
+ return (head->next->next == head);
+ }
+-EXPORT_SYMBOL(drm_mm_search_free);
++EXPORT_SYMBOL(drm_mm_clean);
+
+ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+ {
+ INIT_LIST_HEAD(&mm->ml_entry);
+ INIT_LIST_HEAD(&mm->fl_entry);
++ INIT_LIST_HEAD(&mm->unused_nodes);
++ mm->num_unused = 0;
++ spin_lock_init(&mm->unused_lock);
+
+- return drm_mm_create_tail_node(mm, start, size);
++ return drm_mm_create_tail_node(mm, start, size, 0);
+ }
+ EXPORT_SYMBOL(drm_mm_init);
+
+@@ -282,6 +387,7 @@
+ {
+ struct list_head *bnode = mm->fl_entry.next;
+ struct drm_mm_node *entry;
++ struct drm_mm_node *next;
+
+ entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+
+@@ -293,7 +399,16 @@
+
+ list_del(&entry->fl_entry);
+ list_del(&entry->ml_entry);
++ kfree(entry);
++
++ spin_lock(&mm->unused_lock);
++ list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
++ list_del(&entry->fl_entry);
++ kfree(entry);
++ --mm->num_unused;
++ }
++ spin_unlock(&mm->unused_lock);
+
+- drm_free(entry, sizeof(*entry), DRM_MEM_MM);
++ BUG_ON(mm->num_unused != 0);
+ }
+ EXPORT_SYMBOL(drm_mm_takedown);
+Index: linux-2.6.28/include/drm/drmP.h
+===================================================================
+--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:13:54.000000000 +0000
++++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:37:59.000000000 +0000
+@@ -86,6 +86,7 @@
+
+ #include "drm_os_linux.h"
+ #include "drm_hashtab.h"
++#include "drm_mm.h"
+
+ /***********************************************************************/
+ /** \name DRM template customization defaults */
+@@ -502,26 +503,6 @@
+ };
+
+
+-/*
+- * Generic memory manager structs
+- */
+-
+-struct drm_mm_node {
+- struct list_head fl_entry;
+- struct list_head ml_entry;
+- int free;
+- unsigned long start;
+- unsigned long size;
+- struct drm_mm *mm;
+- void *private;
+-};
+-
+-struct drm_mm {
+- struct list_head fl_entry;
+- struct list_head ml_entry;
+-};
+-
+-
+ /**
+ * Mappings list
+ */
+@@ -1307,22 +1288,6 @@
+ extern int drm_sysfs_connector_add(struct drm_connector *connector);
+ extern void drm_sysfs_connector_remove(struct drm_connector *connector);
+
+-/*
+- * Basic memory manager support (drm_mm.c)
+- */
+-extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+- unsigned long size,
+- unsigned alignment);
+-extern void drm_mm_put_block(struct drm_mm_node * cur);
+-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
+- unsigned alignment, int best_match);
+-extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
+-extern void drm_mm_takedown(struct drm_mm *mm);
+-extern int drm_mm_clean(struct drm_mm *mm);
+-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+-extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
+-
+ /* Graphics Execution Manager library functions (drm_gem.c) */
+ int drm_gem_init(struct drm_device *dev);
+ void drm_gem_destroy(struct drm_device *dev);
+Index: linux-2.6.28/include/drm/drm_mm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.28/include/drm/drm_mm.h 2009-03-12 13:15:05.000000000 +0000
+@@ -0,0 +1,90 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Authors:
++ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _DRM_MM_H_
++#define _DRM_MM_H_
++
++/*
++ * Generic range manager structs
++ */
++#include <linux/list.h>
++
++struct drm_mm_node {
++ struct list_head fl_entry;
++ struct list_head ml_entry;
++ int free;
++ unsigned long start;
++ unsigned long size;
++ struct drm_mm *mm;
++ void *private;
++};
++
++struct drm_mm {
++ struct list_head fl_entry;
++ struct list_head ml_entry;
++ struct list_head unused_nodes;
++ int num_unused;
++ spinlock_t unused_lock;
++};
++
++/*
++ * Basic range manager support (drm_mm.c)
++ */
++
++extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
++ unsigned long size,
++ unsigned alignment);
++extern struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
++ unsigned long size,
++ unsigned alignment);
++extern void drm_mm_put_block(struct drm_mm_node *cur);
++extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
++ unsigned long size,
++ unsigned alignment,
++ int best_match);
++extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
++ unsigned long size);
++extern void drm_mm_takedown(struct drm_mm *mm);
++extern int drm_mm_clean(struct drm_mm *mm);
++extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
++extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
++ unsigned long size);
++extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
++ unsigned long size, int atomic);
++extern int drm_mm_pre_get(struct drm_mm *mm);
++
++static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
++{
++ return block->mm;
++}
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch
new file mode 100644
index 000000000..3f07b91f2
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0002-drm-Add-a-tracker-for-global-objects.patch
@@ -0,0 +1,191 @@
+From cd04a0500d70ea012089ec38183f20c0c30f8ba5 Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+Date: Fri, 27 Feb 2009 12:31:58 +0100
+Subject: [PATCH 2/8] drm: Add a tracker for global objects.
+
+Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+---
+ drivers/gpu/drm/Makefile | 3 +-
+ drivers/gpu/drm/drm_drv.c | 3 +
+ drivers/gpu/drm/drm_global.c | 107 ++++++++++++++++++++++++++++++++++++++++++
+ include/drm/drmP.h | 20 ++++++++
+ 4 files changed, 132 insertions(+), 1 deletions(-)
+ create mode 100644 drivers/gpu/drm/drm_global.c
+
+Index: linux-2.6.28/drivers/gpu/drm/Makefile
+===================================================================
+--- linux-2.6.28.orig/drivers/gpu/drm/Makefile 2009-03-12 13:13:54.000000000 +0000
++++ linux-2.6.28/drivers/gpu/drm/Makefile 2009-03-12 13:15:18.000000000 +0000
+@@ -10,7 +10,8 @@
+ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+- drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
++ drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
++ drm_global.o
+
+ drm-$(CONFIG_COMPAT) += drm_ioc32.o
+
+Index: linux-2.6.28/drivers/gpu/drm/drm_drv.c
+===================================================================
+--- linux-2.6.28.orig/drivers/gpu/drm/drm_drv.c 2009-03-12 13:13:54.000000000 +0000
++++ linux-2.6.28/drivers/gpu/drm/drm_drv.c 2009-03-12 13:37:56.000000000 +0000
+@@ -382,6 +382,8 @@
+
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
++ drm_global_init();
++
+ return 0;
+ err_p3:
+ drm_sysfs_destroy();
+@@ -395,6 +397,7 @@
+
+ static void __exit drm_core_exit(void)
+ {
++ drm_global_release();
+ remove_proc_entry("dri", NULL);
+ drm_sysfs_destroy();
+
+Index: linux-2.6.28/drivers/gpu/drm/drm_global.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.28/drivers/gpu/drm/drm_global.c 2009-03-12 13:15:18.000000000 +0000
+@@ -0,0 +1,107 @@
++/**************************************************************************
++ *
++ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++#include <drmP.h>
++struct drm_global_item {
++ struct mutex mutex;
++ void *object;
++ int refcount;
++};
++
++static struct drm_global_item glob[DRM_GLOBAL_NUM];
++
++void drm_global_init(void)
++{
++ int i;
++
++ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
++ struct drm_global_item *item = &glob[i];
++ mutex_init(&item->mutex);
++ item->object = NULL;
++ item->refcount = 0;
++ }
++}
++
++void drm_global_release(void)
++{
++ int i;
++ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
++ struct drm_global_item *item = &glob[i];
++ BUG_ON(item->object != NULL);
++ BUG_ON(item->refcount != 0);
++ }
++}
++
++int drm_global_item_ref(struct drm_global_reference *ref)
++{
++ int ret;
++ struct drm_global_item *item = &glob[ref->global_type];
++ void *object;
++
++ mutex_lock(&item->mutex);
++ if (item->refcount == 0) {
++ item->object = kmalloc(ref->size, GFP_KERNEL);
++ if (unlikely(item->object == NULL)) {
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ ref->object = item->object;
++ ret = ref->init(ref);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ ++item->refcount;
++ }
++ ref->object = item->object;
++ object = item->object;
++ mutex_unlock(&item->mutex);
++ return 0;
++ out_err:
++ kfree(item->object);
++ mutex_unlock(&item->mutex);
++ item->object = NULL;
++ return ret;
++}
++
++EXPORT_SYMBOL(drm_global_item_ref);
++
++void drm_global_item_unref(struct drm_global_reference *ref)
++{
++ struct drm_global_item *item = &glob[ref->global_type];
++
++ mutex_lock(&item->mutex);
++ BUG_ON(item->refcount == 0);
++ BUG_ON(ref->object != item->object);
++ if (--item->refcount == 0) {
++ ref->release(ref);
++ kfree(item->object);
++ item->object = NULL;
++ }
++ mutex_unlock(&item->mutex);
++}
++
++EXPORT_SYMBOL(drm_global_item_unref);
+Index: linux-2.6.28/include/drm/drmP.h
+===================================================================
+--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:15:05.000000000 +0000
++++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:37:56.000000000 +0000
+@@ -1412,5 +1412,25 @@
+
+ /*@}*/
+
++enum drm_global_types {
++ DRM_GLOBAL_TTM_MEM = 0,
++ DRM_GLOBAL_TTM_BO,
++ DRM_GLOBAL_TTM_OBJECT,
++ DRM_GLOBAL_NUM
++};
++
++struct drm_global_reference {
++ enum drm_global_types global_type;
++ size_t size;
++ void *object;
++ int (*init) (struct drm_global_reference *);
++ void (*release) (struct drm_global_reference *);
++};
++
++extern void drm_global_init(void);
++extern void drm_global_release(void);
++extern int drm_global_item_ref(struct drm_global_reference *ref);
++extern void drm_global_item_unref(struct drm_global_reference *ref);
++
+ #endif /* __KERNEL__ */
+ #endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch
new file mode 100644
index 000000000..a54a3cf28
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0003-drm-Export-hash-table-functionality.patch
@@ -0,0 +1,58 @@
+From 723cc597790fb648506a44e811415eb88b9dcdfa Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+Date: Fri, 27 Feb 2009 17:18:37 +0100
+Subject: [PATCH 3/8] drm: Export hash table functionality.
+
+Also fix include file.
+
+Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+---
+ drivers/gpu/drm/drm_hashtab.c | 4 ++++
+ include/drm/drm_hashtab.h | 1 +
+ 2 files changed, 5 insertions(+), 0 deletions(-)
+
+Index: linux-2.6.28/drivers/gpu/drm/drm_hashtab.c
+===================================================================
+--- linux-2.6.28.orig/drivers/gpu/drm/drm_hashtab.c 2009-03-09 19:19:52.000000000 +0000
++++ linux-2.6.28/drivers/gpu/drm/drm_hashtab.c 2009-03-12 13:15:25.000000000 +0000
+@@ -62,6 +62,7 @@
+ }
+ return 0;
+ }
++EXPORT_SYMBOL(drm_ht_create);
+
+ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
+ {
+@@ -156,6 +157,7 @@
+ }
+ return 0;
+ }
++EXPORT_SYMBOL(drm_ht_just_insert_please);
+
+ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+ struct drm_hash_item **item)
+@@ -169,6 +171,7 @@
+ *item = hlist_entry(list, struct drm_hash_item, head);
+ return 0;
+ }
++EXPORT_SYMBOL(drm_ht_find_item);
+
+ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
+ {
+@@ -202,3 +205,4 @@
+ ht->table = NULL;
+ }
+ }
++EXPORT_SYMBOL(drm_ht_remove);
+Index: linux-2.6.28/include/drm/drm_hashtab.h
+===================================================================
+--- linux-2.6.28.orig/include/drm/drm_hashtab.h 2008-12-24 23:26:37.000000000 +0000
++++ linux-2.6.28/include/drm/drm_hashtab.h 2009-03-12 13:15:25.000000000 +0000
+@@ -34,6 +34,7 @@
+
+ #ifndef DRM_HASHTAB_H
+ #define DRM_HASHTAB_H
++#include <linux/list.h>
+
+ #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch
new file mode 100644
index 000000000..a475cc1b7
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch
@@ -0,0 +1,53 @@
+From a5fef5986c407d56f4e4cf618d6099e122a096ef Mon Sep 17 00:00:00 2001
+From: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+Date: Fri, 27 Feb 2009 13:04:46 +0100
+Subject: [PATCH 7/8] drm: Add unlocked IOCTL functionality from the drm repo.
+
+---
+ drivers/gpu/drm/drm_drv.c | 11 ++++++++++-
+ include/drm/drmP.h | 2 ++
+ 2 files changed, 12 insertions(+), 1 deletions(-)
+
+Index: linux-2.6.28/drivers/gpu/drm/drm_drv.c
+===================================================================
+--- linux-2.6.28.orig/drivers/gpu/drm/drm_drv.c 2009-03-12 13:15:18.000000000 +0000
++++ linux-2.6.28/drivers/gpu/drm/drm_drv.c 2009-03-12 13:15:41.000000000 +0000
+@@ -448,9 +450,16 @@
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * previleges if so required, and dispatches to the respective function.
+ */
++
+ int drm_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+ {
++ return drm_unlocked_ioctl(filp, cmd, arg);
++}
++EXPORT_SYMBOL(drm_ioctl);
++
++long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_ioctl_desc *ioctl;
+@@ -527,7 +536,7 @@
+ return retcode;
+ }
+
+-EXPORT_SYMBOL(drm_ioctl);
++EXPORT_SYMBOL(drm_unlocked_ioctl);
+
+ drm_local_map_t *drm_getsarea(struct drm_device *dev)
+ {
+Index: linux-2.6.28/include/drm/drmP.h
+===================================================================
+--- linux-2.6.28.orig/include/drm/drmP.h 2009-03-12 13:15:18.000000000 +0000
++++ linux-2.6.28/include/drm/drmP.h 2009-03-12 13:15:41.000000000 +0000
+@@ -1025,6 +1025,8 @@
+ extern void drm_exit(struct drm_driver *driver);
+ extern int drm_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
++extern long drm_unlocked_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg);
+ extern long drm_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+ extern int drm_lastclose(struct drm_device *dev);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic
index b52043508..edf61c21a 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-netbook
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-generic
@@ -1,14 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27
-# Wed Nov 5 17:17:12 2008
+# Linux kernel version: 2.6.28.rc7-4.netbook
+# Mon Dec 8 01:05:27 2008
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
# CONFIG_X86_64 is not set
CONFIG_X86=y
CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
-# CONFIG_GENERIC_LOCKBREAK is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_CLOCKSOURCE_WATCHDOG=y
@@ -24,16 +23,14 @@ CONFIG_GENERIC_ISA_DMA=y
CONFIG_GENERIC_IOMAP=y
CONFIG_GENERIC_BUG=y
CONFIG_GENERIC_HWEIGHT=y
-# CONFIG_GENERIC_GPIO is not set
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
# CONFIG_GENERIC_TIME_VSYSCALL is not set
CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
@@ -42,12 +39,12 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_ZONE_DMA32 is not set
CONFIG_ARCH_POPULATES_NODE_MAP=y
# CONFIG_AUDIT_ARCH is not set
-CONFIG_ARCH_SUPPORTS_AOUT=y
CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_X86_SMP=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_X86_32_SMP=y
CONFIG_X86_HT=y
CONFIG_X86_BIOS_REBOOT=y
@@ -61,7 +58,7 @@ CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_EXPERIMENTAL=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_LOCALVERSION="-netbook"
+CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
@@ -69,15 +66,9 @@ CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_AUDITSYSCALL=y
-CONFIG_AUDIT_TREE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_CGROUPS is not set
CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
@@ -87,7 +78,7 @@ CONFIG_RELAY=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
-CONFIG_USER_NS=y
+# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
@@ -100,6 +91,7 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_KALLSYMS_STRIP_GENERATED=y
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -114,7 +106,9 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
@@ -127,11 +121,7 @@ CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-CONFIG_USE_GENERIC_SMP_HELPERS=y
-# CONFIG_HAVE_CLK is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@@ -146,7 +136,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBD=y
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_LSF is not set
CONFIG_BLK_DEV_BSG=y
@@ -165,6 +155,7 @@ CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# Processor type and features
@@ -174,26 +165,33 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP=y
+# CONFIG_SPARSE_IRQ is not set
CONFIG_X86_FIND_SMP_CONFIG=y
CONFIG_X86_MPPARSE=y
-CONFIG_X86_PC=y
+# CONFIG_X86_PC is not set
# CONFIG_X86_ELAN is not set
# CONFIG_X86_VOYAGER is not set
-# CONFIG_X86_GENERICARCH is not set
+CONFIG_X86_GENERICARCH=y
+# CONFIG_X86_NUMAQ is not set
+# CONFIG_X86_SUMMIT is not set
+# CONFIG_X86_ES7000 is not set
+# CONFIG_X86_BIGSMP is not set
# CONFIG_X86_VSMP is not set
# CONFIG_X86_RDC321X is not set
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
# CONFIG_PARAVIRT_GUEST is not set
# CONFIG_MEMTEST is not set
+CONFIG_X86_CYCLONE_TIMER=y
# CONFIG_M386 is not set
# CONFIG_M486 is not set
# CONFIG_M586 is not set
# CONFIG_M586TSC is not set
# CONFIG_M586MMX is not set
-CONFIG_M686=y
+# CONFIG_M686 is not set
# CONFIG_MPENTIUMII is not set
# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUMM is not set
+CONFIG_MPENTIUMM=y
# CONFIG_MPENTIUM4 is not set
# CONFIG_MK6 is not set
# CONFIG_MK7 is not set
@@ -201,7 +199,6 @@ CONFIG_M686=y
# CONFIG_MCRUSOE is not set
# CONFIG_MEFFICEON is not set
# CONFIG_MWINCHIPC6 is not set
-# CONFIG_MWINCHIP2 is not set
# CONFIG_MWINCHIP3D is not set
# CONFIG_MGEODEGX1 is not set
# CONFIG_MGEODE_LX is not set
@@ -211,78 +208,93 @@ CONFIG_M686=y
# CONFIG_MPSC is not set
# CONFIG_MCORE2 is not set
# CONFIG_GENERIC_CPU is not set
-# CONFIG_X86_GENERIC is not set
+CONFIG_X86_GENERIC=y
CONFIG_X86_CPU=y
CONFIG_X86_CMPXCHG=y
-CONFIG_X86_L1_CACHE_SHIFT=5
+CONFIG_X86_L1_CACHE_SHIFT=7
CONFIG_X86_XADD=y
-# CONFIG_X86_PPRO_FENCE is not set
CONFIG_X86_WP_WORKS_OK=y
CONFIG_X86_INVLPG=y
CONFIG_X86_BSWAP=y
CONFIG_X86_POPAD_OK=y
+CONFIG_X86_INTEL_USERCOPY=y
CONFIG_X86_USE_PPRO_CHECKSUM=y
CONFIG_X86_TSC=y
+CONFIG_X86_CMPXCHG64=y
CONFIG_X86_CMOV=y
CONFIG_X86_MINIMUM_CPU_FAMILY=4
CONFIG_X86_DEBUGCTLMSR=y
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR_32=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+# CONFIG_X86_DS is not set
+# CONFIG_X86_PTRACE_BTS is not set
CONFIG_HPET_TIMER=y
CONFIG_HPET_EMULATE_RTC=y
CONFIG_DMI=y
# CONFIG_IOMMU_HELPER is not set
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=8
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y
# CONFIG_PREEMPT_NONE is not set
-CONFIG_PREEMPT_VOLUNTARY=y
-# CONFIG_PREEMPT is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_PREEMPT_TRACER is not set
CONFIG_X86_LOCAL_APIC=y
CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
CONFIG_X86_MCE=y
# CONFIG_X86_MCE_NONFATAL is not set
-# CONFIG_X86_MCE_P4THERMAL is not set
+CONFIG_X86_MCE_P4THERMAL=y
CONFIG_VM86=y
-# CONFIG_TOSHIBA is not set
-# CONFIG_I8K is not set
-# CONFIG_X86_REBOOTFIXUPS is not set
+CONFIG_TOSHIBA=m
+CONFIG_I8K=m
+CONFIG_X86_REBOOTFIXUPS=y
CONFIG_MICROCODE=y
+CONFIG_MICROCODE_INTEL=y
+# CONFIG_MICROCODE_AMD is not set
CONFIG_MICROCODE_OLD_INTERFACE=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
# CONFIG_NOHIGHMEM is not set
-CONFIG_HIGHMEM4G=y
-# CONFIG_HIGHMEM64G is not set
+# CONFIG_HIGHMEM4G is not set
+CONFIG_HIGHMEM64G=y
CONFIG_PAGE_OFFSET=0xC0000000
CONFIG_HIGHMEM=y
-CONFIG_NEED_NODE_MEMMAP_SIZE=y
+CONFIG_X86_PAE=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_SELECT_MEMORY_MODEL=y
-# CONFIG_FLATMEM_MANUAL is not set
+CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
-CONFIG_SPARSEMEM_MANUAL=y
-CONFIG_SPARSEMEM=y
-CONFIG_HAVE_MEMORY_PRESENT=y
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_SPARSEMEM_STATIC=y
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
-
-#
-# Memory hotplug is currently incompatible with Software Suspend
-#
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-# CONFIG_HIGHPTE is not set
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HIGHPTE=y
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW_64K=y
# CONFIG_MATH_EMULATION is not set
CONFIG_MTRR=y
-# CONFIG_MTRR_SANITIZER is not set
-# CONFIG_X86_PAT is not set
-# CONFIG_EFI is not set
-# CONFIG_IRQBALANCE is not set
+CONFIG_MTRR_SANITIZER=y
+CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
+CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
+CONFIG_X86_PAT=y
+CONFIG_EFI=y
# CONFIG_SECCOMP is not set
# CONFIG_HZ_100 is not set
# CONFIG_HZ_250 is not set
@@ -290,18 +302,20 @@ CONFIG_MTRR=y
CONFIG_HZ_1000=y
CONFIG_HZ=1000
CONFIG_SCHED_HRTICK=y
-CONFIG_KEXEC=y
-CONFIG_CRASH_DUMP=y
-# CONFIG_KEXEC_JUMP is not set
-CONFIG_PHYSICAL_START=0x400000
-CONFIG_RELOCATABLE=y
-CONFIG_PHYSICAL_ALIGN=0x200000
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x100000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x400000
CONFIG_HOTPLUG_CPU=y
-CONFIG_COMPAT_VDSO=y
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_CMDLINE is not set
+# CONFIG_CMDLINE_OVERRIDE is not set
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
#
-# Power management options
+# Power management and ACPI options
#
CONFIG_PM=y
CONFIG_PM_DEBUG=y
@@ -328,19 +342,16 @@ CONFIG_ACPI_BUTTON=y
CONFIG_ACPI_VIDEO=y
CONFIG_ACPI_FAN=y
CONFIG_ACPI_DOCK=y
-# CONFIG_ACPI_BAY is not set
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_THERMAL=y
-CONFIG_ACPI_WMI=m
-CONFIG_ACPI_ASUS=y
-# CONFIG_ACPI_TOSHIBA is not set
+CONFIG_ACPI_WMI=y
+CONFIG_ACPI_ASUS=m
+CONFIG_ACPI_TOSHIBA=m
# CONFIG_ACPI_CUSTOM_DSDT is not set
-CONFIG_ACPI_BLACKLIST_YEAR=0
+CONFIG_ACPI_BLACKLIST_YEAR=1999
# CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_EC=y
# CONFIG_ACPI_PCI_SLOT is not set
-CONFIG_ACPI_POWER=y
CONFIG_ACPI_SYSTEM=y
CONFIG_X86_PM_TIMER=y
CONFIG_ACPI_CONTAINER=y
@@ -353,12 +364,12 @@ CONFIG_ACPI_SBS=m
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TABLE=y
CONFIG_CPU_FREQ_DEBUG=y
-CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
@@ -406,34 +417,54 @@ CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCIEPORTBUS=y
-CONFIG_PCIEAER=y
-CONFIG_PCIEASPM=y
+# CONFIG_PCIEAER is not set
+# CONFIG_PCIEASPM is not set
# CONFIG_PCIEASPM_DEBUG is not set
CONFIG_ARCH_SUPPORTS_MSI=y
CONFIG_PCI_MSI=y
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_DEBUG is not set
-CONFIG_HT_IRQ=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_HT_IRQ is not set
CONFIG_ISA_DMA_API=y
-# CONFIG_ISA is not set
+CONFIG_ISA=y
+# CONFIG_EISA is not set
# CONFIG_MCA is not set
# CONFIG_SCx200 is not set
# CONFIG_OLPC is not set
-CONFIG_K8_NB=y
-# CONFIG_PCCARD is not set
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+# CONFIG_PCMCIA is not set
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=y
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+CONFIG_PCMCIA_PROBE=y
+CONFIG_PCCARD_NONSTATIC=y
# CONFIG_HOTPLUG_PCI is not set
#
# Executable file formats / Emulations
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
# CONFIG_BINFMT_AOUT is not set
CONFIG_BINFMT_MISC=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
CONFIG_NET=y
#
# Networking options
#
+# CONFIG_NET_NS is not set
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@@ -489,7 +520,6 @@ CONFIG_DEFAULT_CUBIC=y
# CONFIG_DEFAULT_RENO is not set
CONFIG_DEFAULT_TCP_CONG="cubic"
CONFIG_TCP_MD5SIG=y
-# CONFIG_IP_VS is not set
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
@@ -511,7 +541,6 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
# CONFIG_IPV6_MROUTE is not set
-CONFIG_NETLABEL=y
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
@@ -542,18 +571,19 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
+# CONFIG_NETFILTER_TPROXY is not set
CONFIG_NETFILTER_XTABLES=y
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
CONFIG_NETFILTER_XT_TARGET_RATEEST=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
@@ -564,19 +594,21 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -584,20 +616,20 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=y
CONFIG_NF_CONNTRACK_IPV4=y
# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_TARGET_LOG=m
@@ -605,8 +637,8 @@ CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_NF_NAT_PROTO_GRE=m
CONFIG_NF_NAT_PROTO_UDPLITE=m
@@ -619,11 +651,10 @@ CONFIG_NF_NAT_PPTP=m
CONFIG_NF_NAT_H323=m
CONFIG_NF_NAT_SIP=m
CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_RAW=m
-# CONFIG_IP_NF_SECURITY is not set
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
@@ -634,26 +665,26 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_RAW=m
-# CONFIG_IP6_NF_SECURITY is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@@ -665,6 +696,7 @@ CONFIG_IP6_NF_RAW=m
# CONFIG_WAN_ROUTER is not set
# CONFIG_NET_SCHED is not set
CONFIG_NET_CLS_ROUTE=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -673,22 +705,20 @@ CONFIG_NET_CLS_ROUTE=y
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
-CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-# CONFIG_BT_BNEP_MC_FILTER is not set
-# CONFIG_BT_BNEP_PROTO_FILTER is not set
-# CONFIG_BT_HIDP is not set
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
#
# Bluetooth device drivers
#
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
-# CONFIG_BT_HCIBTUSB is not set
+CONFIG_BT_HCIBTUSB=y
CONFIG_BT_HCIBTSDIO=m
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_H4=y
@@ -699,22 +729,28 @@ CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
CONFIG_FIB_RULES=y
-
-#
-# Wireless
-#
-CONFIG_CFG80211=m
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_REG_DEBUG is not set
CONFIG_NL80211=y
+CONFIG_WIRELESS_OLD_REGULATORY=y
CONFIG_WIRELESS_EXT=y
-# CONFIG_WIRELESS_EXT_SYSFS is not set
-CONFIG_MAC80211=m
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+CONFIG_MAC80211=y
#
# Rate control algorithm selection
#
CONFIG_MAC80211_RC_PID=y
+# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_MAC80211_RC_DEFAULT_PID=y
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
CONFIG_MAC80211_RC_DEFAULT="pid"
CONFIG_MAC80211_MESH=y
CONFIG_MAC80211_LEDS=y
@@ -725,8 +761,10 @@ CONFIG_IEEE80211=m
CONFIG_IEEE80211_CRYPT_WEP=m
CONFIG_IEEE80211_CRYPT_CCMP=m
CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_RFKILL=m
-CONFIG_RFKILL_INPUT=m
+CONFIG_WIMAX=m
+CONFIG_WIMAX_DEBUG_LEVEL=8
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
CONFIG_RFKILL_LEDS=y
# CONFIG_NET_9P is not set
@@ -737,7 +775,7 @@ CONFIG_RFKILL_LEDS=y
#
# Generic Driver Options
#
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_UEVENT_HELPER_PATH=""
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
@@ -746,19 +784,21 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_DEBUG_DRIVER is not set
CONFIG_DEBUG_DEVRES=y
# CONFIG_SYS_HYPERVISOR is not set
-CONFIG_CONNECTOR=y
-CONFIG_PROC_EVENTS=y
+# CONFIG_CONNECTOR is not set
# CONFIG_MTD is not set
# CONFIG_PARPORT is not set
CONFIG_PNP=y
-# CONFIG_PNP_DEBUG is not set
+# CONFIG_PNP_DEBUG_MESSAGES is not set
#
# Protocols
#
+# CONFIG_ISAPNP is not set
+# CONFIG_PNPBIOS is not set
CONFIG_PNPACPI=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_XD is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
@@ -769,7 +809,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
-# CONFIG_BLK_DEV_RAM is not set
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
# CONFIG_CDROM_PKTCDVD_WCACHE is not set
@@ -781,19 +824,29 @@ CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_93CX6=m
# CONFIG_SGI_IOC4 is not set
CONFIG_TIFM_CORE=m
-CONFIG_TIFM_7XX1=m
+# CONFIG_TIFM_7XX1 is not set
# CONFIG_ACER_WMI is not set
-# CONFIG_FUJITSU_LAPTOP is not set
-# CONFIG_TC1100_WMI is not set
-# CONFIG_HP_WMI is not set
-# CONFIG_MSI_LAPTOP is not set
-# CONFIG_COMPAL_LAPTOP is not set
-# CONFIG_SONY_LAPTOP is not set
-# CONFIG_THINKPAD_ACPI is not set
+CONFIG_ASUS_LAPTOP=m
+CONFIG_FUJITSU_LAPTOP=m
+# CONFIG_FUJITSU_LAPTOP_DEBUG is not set
+CONFIG_TC1100_WMI=m
+CONFIG_HP_WMI=m
+# CONFIG_ICS932S401 is not set
+CONFIG_MSI_LAPTOP=m
+CONFIG_PANASONIC_LAPTOP=m
+CONFIG_COMPAL_LAPTOP=m
+CONFIG_SONY_LAPTOP=m
+# CONFIG_SONYPI_COMPAT is not set
+CONFIG_THINKPAD_ACPI=m
+# CONFIG_THINKPAD_ACPI_DEBUG is not set
+CONFIG_THINKPAD_ACPI_BAY=y
+CONFIG_THINKPAD_ACPI_VIDEO=y
+CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
# CONFIG_INTEL_MENLOW is not set
-CONFIG_EEEPC_LAPTOP=y
+# CONFIG_EEEPC_LAPTOP is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -811,12 +864,12 @@ CONFIG_SCSI_PROC_FS=y
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
+# CONFIG_CHR_DEV_ST is not set
# CONFIG_CHR_DEV_OSST is not set
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
-# CONFIG_CHR_DEV_SG is not set
-CONFIG_CHR_DEV_SCH=m
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
@@ -838,9 +891,13 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_7000FASST is not set
# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
@@ -848,29 +905,42 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_AIC94XX is not set
# CONFIG_SCSI_DPT_I2O is not set
# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_LIBFC is not set
+# CONFIG_FCOE is not set
# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_DTC3280 is not set
# CONFIG_SCSI_EATA is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_NCR53C406A is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_QLA_FC is not set
# CONFIG_SCSI_QLA_ISCSI is not set
# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_SYM53C416 is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_ULTRASTOR is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
# CONFIG_SCSI_SRP is not set
@@ -909,17 +979,19 @@ CONFIG_ATA_PIIX=y
# CONFIG_PATA_CS5536 is not set
# CONFIG_PATA_CYPRESS is not set
# CONFIG_PATA_EFAR is not set
-# CONFIG_ATA_GENERIC is not set
+CONFIG_ATA_GENERIC=y
# CONFIG_PATA_HPT366 is not set
# CONFIG_PATA_HPT37X is not set
# CONFIG_PATA_HPT3X2N is not set
# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_ISAPNP is not set
# CONFIG_PATA_IT821X is not set
# CONFIG_PATA_IT8213 is not set
# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_LEGACY is not set
# CONFIG_PATA_TRIFLEX is not set
# CONFIG_PATA_MARVELL is not set
-# CONFIG_PATA_MPIIX is not set
+CONFIG_PATA_MPIIX=y
# CONFIG_PATA_OLDPIIX is not set
# CONFIG_PATA_NETCELL is not set
# CONFIG_PATA_NINJA32 is not set
@@ -928,6 +1000,7 @@ CONFIG_ATA_PIIX=y
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_QDI is not set
# CONFIG_PATA_RADISYS is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
@@ -937,9 +1010,27 @@ CONFIG_ATA_PIIX=y
# CONFIG_PATA_SIS is not set
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
+# CONFIG_PATA_WINBOND_VLB is not set
CONFIG_PATA_SCH=y
-# CONFIG_MD is not set
-# CONFIG_FUSION is not set
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+# CONFIG_DM_CRYPT is not set
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_DELAY=m
+# CONFIG_DM_UEVENT is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+CONFIG_FUSION_FC=m
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=40
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LAN=m
+CONFIG_FUSION_LOGGING=y
#
# IEEE 1394 (FireWire) support
@@ -957,7 +1048,7 @@ CONFIG_NETDEVICES=y
# CONFIG_BONDING is not set
CONFIG_MACVLAN=m
# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
+CONFIG_TUN=y
# CONFIG_VETH is not set
# CONFIG_NET_SB1000 is not set
# CONFIG_ARCNET is not set
@@ -978,42 +1069,97 @@ CONFIG_ICPLUS_PHY=m
CONFIG_REALTEK_PHY=m
CONFIG_MDIO_BITBANG=m
CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
-CONFIG_HAPPYMEAL=m
-CONFIG_SUNGEM=m
-CONFIG_CASSINI=m
-CONFIG_NET_VENDOR_3COM=y
-# CONFIG_VORTEX is not set
-# CONFIG_TYPHOON is not set
+CONFIG_MII=y
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_LANCE is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
# CONFIG_NET_TULIP is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
-# CONFIG_NET_PCI is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=m
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_AC3200 is not set
+# CONFIG_APRICOT is not set
# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+CONFIG_8139TOO_PIO=y
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_R6040 is not set
+CONFIG_SIS900=m
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_SC92031 is not set
+CONFIG_ATL2=m
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
-# CONFIG_E1000 is not set
-# CONFIG_E1000E is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
# CONFIG_IP1000 is not set
-# CONFIG_IGB is not set
+CONFIG_IGB=y
+# CONFIG_IGB_LRO is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
-# CONFIG_SIS190 is not set
+CONFIG_SIS190=m
# CONFIG_SKGE is not set
-# CONFIG_SKY2 is not set
+CONFIG_SKY2=m
+# CONFIG_SKY2_DEBUG is not set
# CONFIG_VIA_VELOCITY is not set
-# CONFIG_TIGON3 is not set
-# CONFIG_BNX2 is not set
+CONFIG_TIGON3=m
+CONFIG_BNX2=m
# CONFIG_QLA3XXX is not set
CONFIG_ATL1=m
-CONFIG_ATL1E=m
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_JME is not set
+CONFIG_NETDEV_10000=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_ENIC is not set
+CONFIG_IXGBE=m
+CONFIG_IXGB=m
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+CONFIG_BNX2X=m
+# CONFIG_QLGE is not set
+# CONFIG_SFC is not set
# CONFIG_TR is not set
#
@@ -1021,10 +1167,21 @@ CONFIG_ATL1E=m
#
CONFIG_WLAN_PRE80211=y
# CONFIG_STRIP is not set
+# CONFIG_ARLAN is not set
+# CONFIG_WAVELAN is not set
CONFIG_WLAN_80211=y
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
+CONFIG_IPW2100=m
+# CONFIG_IPW2100_MONITOR is not set
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2200=m
+# CONFIG_IPW2200_MONITOR is not set
+CONFIG_IPW2200_QOS=y
+# CONFIG_IPW2200_DEBUG is not set
+# CONFIG_LIBIPW_DEBUG is not set
# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_LIBERTAS_USB is not set
+# CONFIG_LIBERTAS_DEBUG is not set
# CONFIG_AIRO is not set
# CONFIG_HERMES is not set
# CONFIG_ATMEL is not set
@@ -1035,49 +1192,62 @@ CONFIG_RTL8180=m
CONFIG_RTL8187=m
# CONFIG_ADM8211 is not set
# CONFIG_MAC80211_HWSIM is not set
-# CONFIG_P54_COMMON is not set
-CONFIG_ATH5K=m
-# CONFIG_ATH5K_DEBUG is not set
-# CONFIG_ATH9K is not set
+CONFIG_P54_COMMON=m
+CONFIG_P54_USB=m
+CONFIG_P54_PCI=m
+# CONFIG_ATH5K is not set
+CONFIG_ATH9K=m
+# CONFIG_ATH9K_DEBUG is not set
CONFIG_IWLWIFI=m
CONFIG_IWLCORE=m
# CONFIG_IWLWIFI_LEDS is not set
CONFIG_IWLWIFI_RFKILL=y
# CONFIG_IWLWIFI_DEBUG is not set
-# CONFIG_IWLAGN is not set
+CONFIG_IWLAGN=m
+# CONFIG_IWLAGN_SPECTRUM_MEASUREMENT is not set
+# CONFIG_IWLAGN_LEDS is not set
+CONFIG_IWL4965=y
+CONFIG_IWL5000=y
CONFIG_IWL3945=m
CONFIG_IWL3945_RFKILL=y
# CONFIG_IWL3945_SPECTRUM_MEASUREMENT is not set
# CONFIG_IWL3945_LEDS is not set
# CONFIG_IWL3945_DEBUG is not set
# CONFIG_HOSTAP is not set
-# CONFIG_B43 is not set
+CONFIG_B43=m
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_RFKILL=y
+# CONFIG_B43_DEBUG is not set
# CONFIG_B43LEGACY is not set
# CONFIG_ZD1211RW is not set
CONFIG_RT2X00=m
-CONFIG_RT2X00_LIB=m
-CONFIG_RT2X00_LIB_PCI=m
-CONFIG_RT2X00_LIB_USB=m
-CONFIG_RT2X00_LIB_FIRMWARE=y
-CONFIG_RT2X00_LIB_RFKILL=y
-CONFIG_RT2X00_LIB_LEDS=y
CONFIG_RT2400PCI=m
-CONFIG_RT2400PCI_RFKILL=y
-CONFIG_RT2400PCI_LEDS=y
CONFIG_RT2500PCI=m
-CONFIG_RT2500PCI_RFKILL=y
-CONFIG_RT2500PCI_LEDS=y
CONFIG_RT61PCI=m
-CONFIG_RT61PCI_RFKILL=y
-CONFIG_RT61PCI_LEDS=y
CONFIG_RT2500USB=m
-CONFIG_RT2500USB_LEDS=y
CONFIG_RT73USB=m
-CONFIG_RT73USB_LEDS=y
-CONFIG_RT2X00_LIB_DEBUGFS=y
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_RFKILL=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_LIB_DEBUGFS is not set
# CONFIG_RT2X00_DEBUG is not set
#
+# WiMAX Wireless Broadband devices
+#
+CONFIG_WIMAX_I2400M_USB=m
+CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
+# CONFIG_WIMAX_I2400M_SDIO is not set
+#
+#
+
+#
# USB Network Adapters
#
CONFIG_USB_CATC=m
@@ -1088,6 +1258,7 @@ CONFIG_USB_USBNET=m
CONFIG_USB_NET_AX8817X=m
CONFIG_USB_NET_CDCETHER=m
CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SMSC95XX=m
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_NET1080=m
CONFIG_USB_NET_PLUSB=m
@@ -1101,7 +1272,7 @@ CONFIG_USB_ARMLINUX=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
CONFIG_USB_NET_ZAURUS=m
-# CONFIG_USB_HSO is not set
+CONFIG_USB_HSO=m
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -1111,18 +1282,16 @@ CONFIG_PPP_FILTER=y
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
-# CONFIG_PPP_BSDCOMP is not set
+CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
CONFIG_PPPOL2TP=m
# CONFIG_SLIP is not set
CONFIG_SLHC=m
-CONFIG_NET_FC=y
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL=y
-CONFIG_NETPOLL_TRAP=y
-CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_NET_FC is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -1161,10 +1330,14 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_LIFEBOOK=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+CONFIG_MOUSE_PS2_TOUCHKIT=y
CONFIG_MOUSE_SERIAL=m
# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
CONFIG_MOUSE_VSXXXAA=m
CONFIG_INPUT_JOYSTICK=y
# CONFIG_JOYSTICK_ANALOG is not set
@@ -1193,14 +1366,18 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_FUJITSU=m
CONFIG_TOUCHSCREEN_GUNZE=m
CONFIG_TOUCHSCREEN_ELO=m
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
CONFIG_TOUCHSCREEN_MTOUCH=m
CONFIG_TOUCHSCREEN_INEXIO=m
CONFIG_TOUCHSCREEN_MK712=m
+CONFIG_TOUCHSCREEN_HTCPEN=m
CONFIG_TOUCHSCREEN_PENMOUNT=m
CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
CONFIG_TOUCHSCREEN_TOUCHWIN=m
-CONFIG_TOUCHSCREEN_UCB1400=m
-# CONFIG_TOUCHSCREEN_WM97XX is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
CONFIG_TOUCHSCREEN_USB_EGALAX=y
CONFIG_TOUCHSCREEN_USB_PANJIT=y
@@ -1214,17 +1391,19 @@ CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
CONFIG_TOUCHSCREEN_USB_GOTOP=y
CONFIG_TOUCHSCREEN_TOUCHIT213=m
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
CONFIG_INPUT_MISC=y
# CONFIG_INPUT_PCSPKR is not set
# CONFIG_INPUT_APANEL is not set
-# CONFIG_INPUT_WISTRON_BTNS is not set
-CONFIG_INPUT_ATLAS_BTNS=m
-CONFIG_INPUT_ATI_REMOTE=m
-CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_WISTRON_BTNS=m
+# CONFIG_INPUT_ATLAS_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
CONFIG_INPUT_KEYSPAN_REMOTE=m
CONFIG_INPUT_POWERMATE=m
CONFIG_INPUT_YEALINK=m
-CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_UINPUT is not set
#
# Hardware I/O ports
@@ -1253,18 +1432,31 @@ CONFIG_VT_HW_CONSOLE_BINDING=y
#
# Serial drivers
#
-# CONFIG_SERIAL_8250 is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_CONSOLE is not set
CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
#
# Non-8250 serial port support
#
+CONFIG_SERIAL_CORE=y
# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_NVRAM is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_HW_RANDOM_INTEL is not set
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_GEODE is not set
+# CONFIG_HW_RANDOM_VIA is not set
+CONFIG_NVRAM=m
+# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_SONYPI is not set
@@ -1282,8 +1474,14 @@ CONFIG_DEVPORT=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
# CONFIG_I2C_CHARDEV is not set
-CONFIG_I2C_HELPER_AUTO=y
-CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_HELPER_AUTO is not set
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
#
# I2C Hardware Bus support
@@ -1328,6 +1526,7 @@ CONFIG_I2C_ALGOBIT=y
#
# Other I2C/SMBus bus drivers
#
+# CONFIG_I2C_PCA_ISA is not set
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
# CONFIG_SCx200_ACB is not set
@@ -1338,6 +1537,8 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_DS1682 is not set
# CONFIG_AT24 is not set
# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
@@ -1356,6 +1557,7 @@ CONFIG_POWER_SUPPLY=y
# CONFIG_POWER_SUPPLY_DEBUG is not set
# CONFIG_PDA_POWER is not set
# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
# CONFIG_SENSORS_ABITUGURU is not set
@@ -1368,6 +1570,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_K8TEMP is not set
@@ -1396,6 +1599,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_LM90 is not set
# CONFIG_SENSORS_LM92 is not set
# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4245 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_MAX6650 is not set
# CONFIG_SENSORS_PC87360 is not set
@@ -1419,17 +1623,25 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
# CONFIG_SENSORS_APPLESMC is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
CONFIG_THERMAL=y
-# CONFIG_THERMAL_HWMON is not set
+CONFIG_THERMAL_HWMON=y
# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
-# CONFIG_SSB is not set
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
#
# Multifunction device drivers
@@ -1437,7 +1649,12 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_REGULATOR is not set
#
# Multimedia devices
@@ -1450,26 +1667,29 @@ CONFIG_VIDEO_DEV=y
CONFIG_VIDEO_V4L2_COMMON=y
# CONFIG_VIDEO_ALLOW_V4L1 is not set
CONFIG_VIDEO_V4L1_COMPAT=y
-CONFIG_DVB_CORE=y
-CONFIG_VIDEO_MEDIA=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
#
# Multimedia drivers
#
-# CONFIG_MEDIA_ATTACH is not set
-CONFIG_MEDIA_TUNER=y
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=m
# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
-CONFIG_MEDIA_TUNER_SIMPLE=y
-CONFIG_MEDIA_TUNER_TDA8290=y
-CONFIG_MEDIA_TUNER_TDA9887=y
-CONFIG_MEDIA_TUNER_TEA5761=y
-CONFIG_MEDIA_TUNER_TEA5767=y
-CONFIG_MEDIA_TUNER_MT20XX=y
-CONFIG_MEDIA_TUNER_XC2028=y
-CONFIG_MEDIA_TUNER_XC5000=y
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
CONFIG_VIDEO_V4L2=y
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEOBUF_VMALLOC=m
CONFIG_VIDEO_CAPTURE_DRIVERS=y
# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
# CONFIG_VIDEO_VIVI is not set
# CONFIG_VIDEO_BT848 is not set
@@ -1481,24 +1701,49 @@ CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
# CONFIG_VIDEO_CX88 is not set
# CONFIG_VIDEO_CX23885 is not set
# CONFIG_VIDEO_AU0828 is not set
+# CONFIG_VIDEO_IVTV is not set
# CONFIG_VIDEO_CX18 is not set
# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_SOC_CAMERA is not set
CONFIG_V4L_USB_DRIVERS=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
-# CONFIG_USB_GSPCA is not set
+CONFIG_USB_GSPCA=m
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+# CONFIG_USB_GSPCA_ZC3XX is not set
# CONFIG_VIDEO_PVRUSB2 is not set
# CONFIG_VIDEO_EM28XX is not set
# CONFIG_VIDEO_USBVISION is not set
-# CONFIG_USB_ET61X251 is not set
-# CONFIG_USB_SN9C102 is not set
-# CONFIG_USB_ZC0301 is not set
-# CONFIG_USB_ZR364XX is not set
-# CONFIG_USB_STKWEBCAM is not set
-# CONFIG_USB_S2255 is not set
-# CONFIG_SOC_CAMERA is not set
-# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+CONFIG_USB_ET61X251=m
+CONFIG_USB_SN9C102=m
+CONFIG_USB_ZC0301=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_DVB_DYNAMIC_MINORS is not set
# CONFIG_DVB_CAPTURE_DRIVERS is not set
# CONFIG_DAB is not set
@@ -1509,7 +1754,7 @@ CONFIG_AGP=y
# CONFIG_AGP_ALI is not set
# CONFIG_AGP_ATI is not set
# CONFIG_AGP_AMD is not set
-CONFIG_AGP_AMD64=y
+# CONFIG_AGP_AMD64 is not set
CONFIG_AGP_INTEL=y
# CONFIG_AGP_NVIDIA is not set
# CONFIG_AGP_SIS is not set
@@ -1523,33 +1768,43 @@ CONFIG_DRM=y
CONFIG_DRM_I810=y
# CONFIG_DRM_I830 is not set
CONFIG_DRM_I915=y
+# CONFIG_DRM_I915_KMS is not set
# CONFIG_DRM_MGA is not set
# CONFIG_DRM_SIS is not set
# CONFIG_DRM_VIA is not set
# CONFIG_DRM_SAVAGE is not set
# CONFIG_VGASTATE is not set
+CONFIG_DRM_PSB=m
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_DDC=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-# CONFIG_FB_SYS_FILLRECT is not set
-# CONFIG_FB_SYS_COPYAREA is not set
-# CONFIG_FB_SYS_IMAGEBLIT is not set
-# CONFIG_FB_FOREIGN_ENDIAN is not set
-# CONFIG_FB_SYS_FOPS is not set
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-# CONFIG_FB_BACKLIGHT is not set
-CONFIG_FB_MODE_HELPERS=y
-# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_TRIDENT_ACCEL is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+
+
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_CORGI is not set
+# CONFIG_BACKLIGHT_PROGEAR is not set
+# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+
#
# Frame buffer hardware drivers
#
+# CONFIG_FB_TILEBLITTING is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
+# CONFIG_FB_3DFX_ACCEL is not set
# CONFIG_FB_CIRRUS is not set
# CONFIG_FB_PM2 is not set
# CONFIG_FB_CYBER2000 is not set
@@ -1557,7 +1812,6 @@ CONFIG_FB_MODE_HELPERS=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_VGA16 is not set
-# CONFIG_FB_UVESA is not set
# CONFIG_FB_VESA is not set
# CONFIG_FB_EFI is not set
# CONFIG_FB_N411 is not set
@@ -1565,18 +1819,26 @@ CONFIG_FB_MODE_HELPERS=y
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
-# CONFIG_FB_I810 is not set
+CONFIG_FB_I810=m
+# CONFIG_FB_I810_GTF is not set
# CONFIG_FB_LE80578 is not set
-CONFIG_FB_INTEL=y
-CONFIG_FB_INTEL_DEBUG=y
-CONFIG_FB_INTEL_I2C=y
+# CONFIG_FB_CARILLO_RANCH is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_INTEL_DEBUG is not set
+# CONFIG_FB_INTEL_I2C is not set
# CONFIG_FB_MATROX is not set
# CONFIG_FB_RADEON is not set
+CONFIG_FB_RADEON_I2C=y
+# CONFIG_FB_RADEON_BACKLIGHT is not set
+# CONFIG_FB_RADEON_DEBUG is not set
# CONFIG_FB_ATY128 is not set
# CONFIG_FB_ATY is not set
# CONFIG_FB_S3 is not set
# CONFIG_FB_SAVAGE is not set
# CONFIG_FB_SIS is not set
+# CONFIG_FB_SIS_300 is not set
+# CONFIG_FB_SIS_315 is not set
+# CONFIG_FB_VIA is not set
# CONFIG_FB_NEOMAGIC is not set
# CONFIG_FB_KYRO is not set
# CONFIG_FB_3DFX is not set
@@ -1589,23 +1851,13 @@ CONFIG_FB_INTEL_I2C=y
# CONFIG_FB_CARMINE is not set
# CONFIG_FB_GEODE is not set
# CONFIG_FB_VIRTUAL is not set
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-# CONFIG_LCD_ILI9320 is not set
-CONFIG_LCD_PLATFORM=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_CORGI is not set
-# CONFIG_BACKLIGHT_PROGEAR is not set
-CONFIG_BACKLIGHT_MBP_NVIDIA=y
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
#
# Display device support
#
-CONFIG_DISPLAY_SUPPORT=y
-
-#
-# Display hardware drivers
-#
+# CONFIG_DISPLAY_SUPPORT is not set
#
# Console display driver support
@@ -1613,16 +1865,13 @@ CONFIG_DISPLAY_SUPPORT=y
CONFIG_VGA_CONSOLE=y
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
-CONFIG_VIDEO_SELECT=y
+# CONFIG_MDA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
CONFIG_FONT_8x16=y
# CONFIG_LOGO is not set
CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
CONFIG_SND=y
CONFIG_SND_TIMER=y
CONFIG_SND_PCM=y
@@ -1630,9 +1879,11 @@ CONFIG_SND_HWDEP=y
CONFIG_SND_RAWMIDI=m
CONFIG_SND_SEQUENCER=y
CONFIG_SND_SEQ_DUMMY=y
+# CONFIG_SND_OSSEMUL is not set
# CONFIG_SND_MIXER_OSS is not set
# CONFIG_SND_PCM_OSS is not set
# CONFIG_SND_SEQUENCER_OSS is not set
+# CONFIG_SND_HRTIMER is not set
CONFIG_SND_DYNAMIC_MINORS=y
# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND_VERBOSE_PROCFS=y
@@ -1643,7 +1894,6 @@ CONFIG_SND_PCM_XRUN_DEBUG=y
CONFIG_SND_VMASTER=y
CONFIG_SND_AC97_CODEC=y
CONFIG_SND_DRIVERS=y
-# CONFIG_SND_PCSP is not set
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_VIRMIDI is not set
# CONFIG_SND_MTPAV is not set
@@ -1651,6 +1901,7 @@ CONFIG_SND_DRIVERS=y
# CONFIG_SND_MPU401 is not set
CONFIG_SND_AC97_POWER_SAVE=y
CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
+# CONFIG_SND_ISA is not set
CONFIG_SND_PCI=y
# CONFIG_SND_AD1889 is not set
# CONFIG_SND_ALS300 is not set
@@ -1692,17 +1943,21 @@ CONFIG_SND_PCI=y
# CONFIG_SND_FM801 is not set
CONFIG_SND_HDA_INTEL=y
CONFIG_SND_HDA_HWDEP=y
+# CONFIG_SND_HDA_RECONFIG is not set
+# CONFIG_SND_HDA_INPUT_BEEP is not set
CONFIG_SND_HDA_CODEC_REALTEK=y
CONFIG_SND_HDA_CODEC_ANALOG=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_VIA=y
CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_NVHDMI=y
+CONFIG_SND_HDA_CODEC_INTELHDMI=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_HDA_CODEC_CMEDIA=y
CONFIG_SND_HDA_CODEC_SI3054=y
CONFIG_SND_HDA_GENERIC=y
CONFIG_SND_HDA_POWER_SAVE=y
-CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=5
# CONFIG_SND_HDSP is not set
# CONFIG_SND_HDSPM is not set
# CONFIG_SND_HIFIER is not set
@@ -1732,6 +1987,7 @@ CONFIG_SND_USB_AUDIO=m
CONFIG_SND_USB_USX2Y=m
CONFIG_SND_USB_CAIAQ=m
CONFIG_SND_USB_CAIAQ_INPUT=y
+# CONFIG_SND_USB_US122L is not set
# CONFIG_SND_SOC is not set
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=y
@@ -1744,15 +2000,37 @@ CONFIG_HIDRAW=y
# USB Input Devices
#
CONFIG_USB_HID=y
-CONFIG_USB_HIDINPUT_POWERBOOK=y
-CONFIG_HID_FF=y
CONFIG_HID_PID=y
-CONFIG_LOGITECH_FF=y
-# CONFIG_LOGIRUMBLEPAD2_FF is not set
-CONFIG_PANTHERLORD_FF=y
-CONFIG_THRUSTMASTER_FF=y
-CONFIG_ZEROPLUS_FF=y
CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_BRIGHT=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DELL=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+# CONFIG_GREENASIA_FF is not set
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1770,6 +2048,8 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_SUSPEND=y
# CONFIG_USB_OTG is not set
CONFIG_USB_MON=y
+CONFIG_USB_WUSB=m
+# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
@@ -1778,30 +2058,34 @@ CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
-# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+CONFIG_USB_ISP116X_HCD=m
# CONFIG_USB_ISP1760_HCD is not set
-CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_UHCI_HCD=y
CONFIG_USB_U132_HCD=m
CONFIG_USB_SL811_HCD=m
# CONFIG_USB_R8A66597_HCD is not set
+CONFIG_USB_WHCI_HCD=m
+CONFIG_USB_HWA_HCD=m
#
# USB Device Class drivers
#
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
-# CONFIG_USB_WDM is not set
+CONFIG_USB_WDM=m
+# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1817,7 +2101,7 @@ CONFIG_USB_STORAGE_ALAUDA=y
# CONFIG_USB_STORAGE_ONETOUCH is not set
CONFIG_USB_STORAGE_KARMA=y
# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
-# CONFIG_USB_LIBUSUAL is not set
+CONFIG_USB_LIBUSUAL=y
#
# USB Imaging devices
@@ -1877,12 +2161,14 @@ CONFIG_USB_SERIAL_OTI6858=m
CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
+# CONFIG_USB_SERIAL_OPTICON is not set
CONFIG_USB_SERIAL_DEBUG=m
#
@@ -1891,6 +2177,7 @@ CONFIG_USB_SERIAL_DEBUG=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
CONFIG_USB_ADUTUX=m
+# CONFIG_USB_SEVSEG is not set
# CONFIG_USB_RIO500 is not set
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
@@ -1912,47 +2199,45 @@ CONFIG_USB_TRANCEVIBRATOR=m
CONFIG_USB_IOWARRIOR=m
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
-CONFIG_MMC=m
+CONFIG_UWB=m
+CONFIG_UWB_HWA=m
+CONFIG_UWB_WHCI=m
+# CONFIG_UWB_WLP is not set
+# CONFIG_UWB_I1480U is not set
+CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
-CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
CONFIG_SDIO_UART=m
# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
-CONFIG_MMC_SDHCI=m
-# CONFIG_MMC_SDHCI_PCI is not set
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+# CONFIG_MMC_RICOH_MMC is not set
CONFIG_MMC_WBSD=m
CONFIG_MMC_TIFM_SD=m
-CONFIG_MEMSTICK=m
-CONFIG_MEMSTICK_DEBUG=y
-
-#
-# MemoryStick drivers
-#
-# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
-CONFIG_MSPRO_BLOCK=m
-
-#
-# MemoryStick Host Controller Drivers
-#
-# CONFIG_MEMSTICK_TIFM_MS is not set
-# CONFIG_MEMSTICK_JMICRON_38X is not set
+# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
+# CONFIG_MMC_CEATA_WR is not set
+# CONFIG_MMC_SPI is not set
#
# LED drivers
#
+# CONFIG_LEDS_ALIX2 is not set
# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_HP_DISK is not set
# CONFIG_LEDS_CLEVO_MAIL is not set
# CONFIG_LEDS_PCA955X is not set
@@ -1962,6 +2247,7 @@ CONFIG_LEDS_CLASS=m
CONFIG_LEDS_TRIGGERS=y
# CONFIG_LEDS_TRIGGER_TIMER is not set
# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
@@ -1995,6 +2281,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
#
# SPI RTC drivers
@@ -2004,56 +2291,96 @@ CONFIG_RTC_INTF_DEV=y
# Platform RTC drivers
#
CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
# on-CPU RTC drivers
#
-# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_ET131X is not set
+# CONFIG_SLICOSS is not set
+# CONFIG_SXG is not set
+# CONFIG_ME4000 is not set
+# CONFIG_MEILHAUS is not set
+# CONFIG_VIDEO_GO7007 is not set
+CONFIG_USB_IP_COMMON=m
+CONFIG_USB_IP_VHCI_HCD=m
+CONFIG_USB_IP_HOST=m
+# CONFIG_W35UND is not set
+CONFIG_PRISM2_USB=m
+# CONFIG_ECHO is not set
+CONFIG_RT2860=m
+CONFIG_RT2870=m
+# CONFIG_BENET is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_USB_ATMEL is not set
+# CONFIG_AGNX is not set
+# CONFIG_OTUS is not set
+# CONFIG_ALTERA_PCIE_CHDMA is not set
+# CONFIG_RTL8187SE is not set
+# CONFIG_INPUT_MIMIO is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_EPL is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID is not set
+# CONFIG_ANDROID_BINDER_IPC is not set
+# CONFIG_ANDROID_LOGGER is not set
+# CONFIG_ANDROID_RAM_CONSOLE is not set
+# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set
+CONFIG_X86_PLATFORM_DEVICES=y
#
# Firmware Drivers
#
# CONFIG_EDD is not set
CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_EFI_VARS=m
# CONFIG_DELL_RBU is not set
# CONFIG_DCDBAS is not set
-# CONFIG_DMIID is not set
+CONFIG_DMIID=y
# CONFIG_ISCSI_IBFT_FIND is not set
#
# File systems
#
-# CONFIG_EXT2_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-# CONFIG_QFMT_V1 is not set
-CONFIG_QFMT_V2=y
-CONFIG_QUOTACTL=y
+# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
@@ -2082,28 +2409,31 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
# Pseudo filesystems
#
CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_PROC_VMCORE=y
+# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=m
#
# Miscellaneous filesystems
#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
-# CONFIG_ECRYPT_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -2116,7 +2446,12 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_WEAK_PW_HASH=y
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
@@ -2126,20 +2461,21 @@ CONFIG_NETWORK_FILESYSTEMS=y
#
CONFIG_PARTITION_ADVANCED=y
# CONFIG_ACORN_PARTITION is not set
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
# CONFIG_ATARI_PARTITION is not set
-CONFIG_MAC_PARTITION=y
+# CONFIG_MAC_PARTITION is not set
CONFIG_MSDOS_PARTITION=y
CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-# CONFIG_LDM_PARTITION is not set
-CONFIG_SGI_PARTITION=y
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
CONFIG_EFI_PARTITION=y
# CONFIG_SYSV68_PARTITION is not set
CONFIG_NLS=y
@@ -2193,7 +2529,7 @@ CONFIG_PRINTK_TIME=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
-CONFIG_UNUSED_SYMBOLS=y
+# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
@@ -2201,58 +2537,75 @@ CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
-CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
-# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_LOCK_ALLOC is not set
# CONFIG_PROVE_LOCKING is not set
+# CONFIG_DEBUG_LOCKDEP is not set
# CONFIG_LOCK_STAT is not set
CONFIG_DEBUG_SPINLOCK_SLEEP=y
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_HIGHMEM is not set
+CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_BUGVERBOSE=y
-# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
# CONFIG_DEBUG_WRITECOUNT is not set
-CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
CONFIG_DEBUG_LIST=y
# CONFIG_DEBUG_SG is not set
+CONFIG_DEBUG_NOTIFIERS=y
CONFIG_FRAME_POINTER=y
CONFIG_BOOT_PRINTK_DELAY=y
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_HAVE_FTRACE=y
+CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_TRACING=y
-# CONFIG_FTRACE is not set
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+
+# CONFIG_X86_VISWS is not set
+# CONFIG_FTRACE_STARTUP_TEST is not set
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
CONFIG_SYSPROF_TRACER=y
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_FTRACE_STARTUP_TEST is not set
+CONFIG_OPEN_CLOSE_TRACER=y
+# CONFIG_BOOT_TRACER is not set
+CONFIG_POWER_TRACER=y
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
-# CONFIG_STRICT_DEVMEM is not set
+CONFIG_STRICT_DEVMEM=y
CONFIG_X86_VERBOSE_BOOTUP=y
CONFIG_EARLY_PRINTK=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_PAGEALLOC is not set
# CONFIG_DEBUG_PER_CPU_MAPS is not set
-CONFIG_X86_PTDUMP=y
+# CONFIG_X86_PTDUMP is not set
CONFIG_DEBUG_RODATA=y
# CONFIG_DEBUG_RODATA_TEST is not set
# CONFIG_DEBUG_NX_TEST is not set
@@ -2275,25 +2628,21 @@ CONFIG_DEBUG_BOOT_PARAMS=y
#
# Security options
#
-CONFIG_KEYS=y
-CONFIG_KEYS_DEBUG_PROC_KEYS=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_NETWORK_XFRM=y
-CONFIG_SECURITY_FILE_CAPABILITIES=y
-# CONFIG_SECURITY_ROOTPLUG is not set
-CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
-# CONFIG_SECURITY_SELINUX is not set
-# CONFIG_SECURITY_SMACK is not set
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_AEAD=m
-CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_RNG=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_GF128MUL=m
CONFIG_CRYPTO_NULL=m
@@ -2314,7 +2663,7 @@ CONFIG_CRYPTO_SEQIV=m
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_CTR=m
# CONFIG_CRYPTO_CTS is not set
-CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -2329,6 +2678,7 @@ CONFIG_CRYPTO_XCBC=m
# Digest
#
CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -2345,10 +2695,10 @@ CONFIG_CRYPTO_WP512=m
#
# Ciphers
#
-CONFIG_CRYPTO_AES=m
-# CONFIG_CRYPTO_AES_586 is not set
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
@@ -2357,19 +2707,24 @@ CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
-# CONFIG_CRYPTO_SALSA20_586 is not set
+CONFIG_CRYPTO_SALSA20_586=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_TWOFISH_COMMON=m
-# CONFIG_CRYPTO_TWOFISH_586 is not set
+CONFIG_CRYPTO_TWOFISH_586=m
#
# Compression
#
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_CRYPTO_DEV_PADLOCK is not set
# CONFIG_CRYPTO_DEV_GEODE is not set
@@ -2385,12 +2740,11 @@ CONFIG_GENERIC_FIND_FIRST_BIT=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
-CONFIG_CRC_T10DIF=y
+# CONFIG_CRC_T10DIF is not set
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
CONFIG_LIBCRC32C=m
-CONFIG_AUDIT_GENERIC=y
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_TEXTSEARCH=y
@@ -2401,3 +2755,90 @@ CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_CHECK_SIGNATURE=y
+
+
+# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_LIB80211_DEBUG is not set
+# CONFIG_DNET is not set
+# CONFIG_BE2NET is not set
+
+
+
+# CONFIG_LNW_IPC is not set
+# CONFIG_MRST is not set
+# CONFIG_SFI is not set
+# CONFIG_MDIO_GPIO is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+# CONFIG_GPIO_LANGWELL is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_ANDROID_TIMED_GPIO is not set
+# CONFIG_X86_MRST_EARLY_PRINTK is not set
+
+# CONFIG_APB_TIMER is not set
+# CONFIG_MRST_SPI_UART_BOOT_MSG is not set
+# CONFIG_SFI_DEBUG is not set
+# CONFIG_SFI_PROCFS is not set
+# CONFIG_TOUCHSCREEN_UCB1400 is not set
+# CONFIG_GPIO_LNWPMIC is not set
+# CONFIG_RTC_DRV_VRTC is not set
+# CONFIG_MRST_NAND is not set
+# CONFIG_USB_LANGWELL_OTG is not set
+# CONFIG_KEYBOARD_MRST is not set
+# CONFIG_I2C_MRST is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_SND_PCM_OSS_PLUGINS is not set
+# CONFIG_SND_INTEL_LPE is not set
+# CONFIG_LPE_IPC_NOT_INCLUDED is not set
+# CONFIG_SND_INTELMID is not set
+# CONFIG_TOUCHSCREEN_INTEL_MRST is not set
+# CONFIG_ATL1C is not set
+# CONFIG_MRST_MMC_WR is not set
+
+
+# CONFIG_VIDEO_MRSTCI is not set
+# CONFIG_VIDEO_MRST_ISP is not set
+# CONFIG_VIDEO_MRST_SENSOR is not set
+# CONFIG_VIDEO_MRST_OV2650 is not set
+# CONFIG_VIDEO_MRST_OV5630 is not set
+# CONFIG_SENSORS_MRST_THERMAL is not set
+# CONFIG_SPI2_MRST is not set
+
+# CONFIG_SFI_PM is not set
+# CONFIG_SFI_CPUIDLE is not set
+# CONFIG_SFI_PROCESSOR_PM is not set
+# CONFIG_X86_SFI_CPUFREQ is not set
+# CONFIG_MSTWN_POWER_MGMT is not set
+# CONFIG_USB_NET_MBM is not set
+
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_LANGWELL is not set
+
+# CONFIG_INTEL_LNW_DMAC1 is not set
+# CONFIG_INTEL_LNW_DMAC2 is not set
+# CONFIG_LNW_DMA_DEBUG is not set
+# CONFIG_NET_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_8688_RC is not set
+# CONFIG_SSB_SILENT is not set
+
+# CONFIG_TOUCHSCREEN_TSC2003 is not set
+# CONFIG_MFD_TIMBERDALE is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_MRST is not set
+# CONFIG_GPE is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi
new file mode 100644
index 000000000..0f61bd77e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-ivi
@@ -0,0 +1,127 @@
+CONFIG_LOCALVERSION="-ivi"
+CONFIG_INTEL_MENLOW=y
+CONFIG_DRM_PSB=y
+
+#
+# Cgroups
+#
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+# CONFIG_CPUSETS is not set
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_RESOURCE_COUNTERS is not set
+
+CONFIG_4KSTACKS=y
+CONFIG_ACER_WMI=y
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+# CONFIG_ATH5K_DEBUG is not set
+CONFIG_ATH5K=y
+CONFIG_ATL1E=y
+# CONFIG_BNX2X is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
+CONFIG_COMPAT_NET_DEV_OPS=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG=m
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_EEEPC_LAPTOP=y
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_MACMODES is not set
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_TMIO is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GPIOLIB=y
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_TIMBERDALE=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_TOPSEED=y
+CONFIG_I2C_ALGOBIT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_OCORES=m
+# CONFIG_IOMMU_API is not set
+# CONFIG_KS8842 is not set
+CONFIG_LIBIPW=m
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MFD_CORE=y
+CONFIG_MFD_TIMBERDALE_DMA=m
+CONFIG_MFD_TIMBERDALE_I2S=m
+CONFIG_MFD_TIMBERDALE=y
+CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_R8169=y
+# CONFIG_RT2860 is not set
+# CONFIG_RT2870 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SPI_ATTRS=m
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+CONFIG_SERIAL_TIMBERDALE=m
+CONFIG_SND_HDA_ELD=y
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+CONFIG_SND_JACK=y
+CONFIG_SND_SPI=y
+CONFIG_SPI_BITBANG=m
+# CONFIG_SPI_DEBUG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_MASTER=y
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_SPI_XILINX=m
+CONFIG_SPI_XILINX_PLTFM=m
+CONFIG_SPI=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+CONFIG_TOUCHSCREEN_TSC2003=m
+CONFIG_TOUCHSCREEN_TSC2007=m
+CONFIG_TRACEPOINTS=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_VGASTATE=m
+CONFIG_VIDEO_TIMBERDALE=m
+CONFIG_WIMAX_I2400M=m
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow
new file mode 100644
index 000000000..3f66175e1
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-menlow
@@ -0,0 +1,8 @@
+CONFIG_LOCALVERSION="-menlow"
+
+CONFIG_INTEL_MENLOW=y
+CONFIG_DRM_PSB=y
+
+# LIBERTAS works with Menlow sd8686
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_SDIO=m
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst
new file mode 100644
index 000000000..8b067c47c
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-mrst
@@ -0,0 +1,2316 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.29-rc8
+# Wed Mar 25 08:57:27 2009
+#
+
+#
+# MRST DRIVERS
+#
+
+# Option GTM501L SPI 3G driver
+CONFIG_SPI_MRST_GTM501=y
+
+# Marvell 8688 WiFi and BT
+CONFIG_8688_RC=y
+
+# Ericsson MBM 3G Driver
+CONFIG_USB_NET_MBM=y
+
+# MRST Poulsbo gfx driver
+CONFIG_DRM_PSB=y
+
+# MRST NAND DRIVER
+CONFIG_MRST_NAND=y
+CONFIG_MRST_NAND_POLL=y
+# CONFIG_MRST_NAND_CDMA is not set
+# CONFIG_MRST_NAND_ESL is not set
+# CONFIG_MRST_NAND_EMU is not set
+
+# MRST SFI C and P states
+CONFIG_SFI=y
+CONFIG_SFI_CPUIDLE=y
+CONFIG_SFI_PM=y
+CONFIG_SFI_PROCESSOR_PM=y
+CONFIG_X86_SFI_CPUFREQ=y
+
+# MRST MMC
+CONFIG_MRST_MMC_WR=y
+CONFIG_MMC_CEATA_WR=n
+
+# MRST THERMAL
+CONFIG_SENSORS_MRST_THERMAL=y
+
+# MRST SPI2
+CONFIG_SPI2_MRST=y
+
+# MRST I2C
+CONFIG_I2C_MRST=y
+
+# MRST KEYPAD
+CONFIG_KEYBOARD_MRST=y
+
+# MRST RESISTIVE TOUCHSCREEN
+CONFIG_TOUCHSCREEN_INTEL_MRST=y
+
+# USB OTG CLIENT
+CONFIG_USB_GADGET_LANGWELL=y
+CONFIG_USB_LANGWELL=m
+
+# MRST CAMERA
+CONFIG_VIDEO_V4L2=y
+CONFIG_VIDEO_MRSTCI=y
+CONFIG_I2C=y
+CONFIG_VIDEO_MRST_ISP=y
+CONFIG_VIDEO_MRST_SENSOR=y
+CONFIG_VIDEO_MRST_OV2650=y
+CONFIG_VIDEO_MRST_OV5630=y
+
+# MRST AUDIO
+CONFIG_SND_INTEL_LPE=y
+CONFIG_LPE_OSPM_SUPPORT=y
+CONFIG_LPE_DBG_PRINT=y
+# CONFIG_LPE_IPC_NOT_INCLUDED is not set
+CONFIG_SND_INTELMID=y
+CONFIG_MID_DBG_PRINT=y
+
+# MRST OSPM
+CONFIG_MSTWN_POWER_MGMT=y
+
+# CONFIG_64BIT is not set
+CONFIG_X86_32=y
+# CONFIG_X86_64 is not set
+CONFIG_X86=y
+CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_FAST_CMPXCHG_LOCAL=y
+CONFIG_MMU=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_GENERIC_TIME_VSYSCALL is not set
+CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+# CONFIG_AUDIT_ARCH is not set
+CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_PENDING_IRQ=y
+CONFIG_X86_SMP=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_X86_32_SMP=y
+CONFIG_X86_HT=y
+CONFIG_X86_BIOS_REBOOT=y
+CONFIG_X86_TRAMPOLINE=y
+CONFIG_KTIME_SCALAR=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION="-mrst"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_AUDIT=y
+CONFIG_AUDITSYSCALL=y
+CONFIG_AUDIT_TREE=y
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+# CONFIG_USER_SCHED is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_NS=y
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_CGROUP_MEM_RES_CTLR is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_MARKERS=y
+# CONFIG_OPROFILE is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_FREEZER is not set
+
+#
+# Processor type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SMP=y
+# CONFIG_SPARSE_IRQ is not set
+CONFIG_X86_FIND_SMP_CONFIG=y
+CONFIG_X86_MPPARSE=y
+CONFIG_X86_PC=y
+# CONFIG_X86_ELAN is not set
+# CONFIG_X86_VOYAGER is not set
+# CONFIG_X86_GENERICARCH is not set
+# CONFIG_X86_VSMP is not set
+# CONFIG_X86_RDC321X is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+# CONFIG_PARAVIRT_GUEST is not set
+# CONFIG_MEMTEST is not set
+# CONFIG_M386 is not set
+# CONFIG_M486 is not set
+CONFIG_M586=y
+# CONFIG_M586TSC is not set
+# CONFIG_M586MMX is not set
+# CONFIG_M686 is not set
+# CONFIG_MPENTIUMII is not set
+# CONFIG_MPENTIUMIII is not set
+# CONFIG_MPENTIUMM is not set
+# CONFIG_MPENTIUM4 is not set
+# CONFIG_MK6 is not set
+# CONFIG_MK7 is not set
+# CONFIG_MK8 is not set
+# CONFIG_MCRUSOE is not set
+# CONFIG_MEFFICEON is not set
+# CONFIG_MWINCHIPC6 is not set
+# CONFIG_MWINCHIP3D is not set
+# CONFIG_MGEODEGX1 is not set
+# CONFIG_MGEODE_LX is not set
+# CONFIG_MCYRIXIII is not set
+# CONFIG_MVIAC3_2 is not set
+# CONFIG_MVIAC7 is not set
+# CONFIG_MPSC is not set
+# CONFIG_MCORE2 is not set
+# CONFIG_GENERIC_CPU is not set
+CONFIG_X86_GENERIC=y
+CONFIG_X86_CPU=y
+CONFIG_X86_CMPXCHG=y
+CONFIG_X86_L1_CACHE_SHIFT=7
+CONFIG_X86_XADD=y
+# CONFIG_X86_PPRO_FENCE is not set
+CONFIG_X86_F00F_BUG=y
+CONFIG_X86_WP_WORKS_OK=y
+CONFIG_X86_INVLPG=y
+CONFIG_X86_BSWAP=y
+CONFIG_X86_POPAD_OK=y
+CONFIG_X86_ALIGNMENT_16=y
+CONFIG_X86_INTEL_USERCOPY=y
+CONFIG_X86_MINIMUM_CPU_FAMILY=4
+# CONFIG_PROCESSOR_SELECT is not set
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR_32=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+# CONFIG_HPET_TIMER is not set
+CONFIG_APB_TIMER=y
+CONFIG_LNW_IPC=y
+# CONFIG_DMI is not set
+# CONFIG_IOMMU_HELPER is not set
+# CONFIG_IOMMU_API is not set
+CONFIG_NR_CPUS=64
+CONFIG_SCHED_SMT=y
+# CONFIG_SCHED_MC is not set
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+CONFIG_X86_LOCAL_APIC=y
+CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
+# CONFIG_X86_MCE is not set
+# CONFIG_VM86 is not set
+# CONFIG_TOSHIBA is not set
+# CONFIG_I8K is not set
+CONFIG_X86_REBOOTFIXUPS=y
+CONFIG_MICROCODE=y
+CONFIG_MICROCODE_INTEL=y
+# CONFIG_MICROCODE_AMD is not set
+CONFIG_MICROCODE_OLD_INTERFACE=y
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_3G_OPT is not set
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_2G_OPT is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_X86_PAE is not set
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+# CONFIG_X86_RESERVE_LOW_64K is not set
+# CONFIG_MATH_EMULATION is not set
+CONFIG_MTRR=y
+# CONFIG_MTRR_SANITIZER is not set
+# CONFIG_X86_PAT is not set
+# CONFIG_SECCOMP is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_KEXEC=y
+CONFIG_PHYSICAL_START=0x100000
+CONFIG_RELOCATABLE=y
+CONFIG_PHYSICAL_ALIGN=0x100000
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_MRST=y
+CONFIG_MRST_SPI_UART_BOOT_MSG=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_VERBOSE=y
+# CONFIG_SUSPEND is not set
+# CONFIG_HIBERNATION is not set
+# CONFIG_ACPI is not set
+CONFIG_SFI=y
+# CONFIG_SFI_DEBUG is not set
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_IDLE=n
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI=y
+# CONFIG_PCI_GOBIOS is not set
+# CONFIG_PCI_GOMMCONFIG is not set
+# CONFIG_PCI_GODIRECT is not set
+# CONFIG_PCI_GOOLPC is not set
+CONFIG_PCI_GOANY=y
+CONFIG_PCI_BIOS=y
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEAER is not set
+# CONFIG_PCIEASPM is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_PCI_MSI=y
+# CONFIG_PCI_LEGACY is not set
+CONFIG_PCI_DEBUG=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_HT_IRQ is not set
+CONFIG_ISA_DMA_API=y
+# CONFIG_ISA is not set
+# CONFIG_MCA is not set
+# CONFIG_SCx200 is not set
+# CONFIG_OLPC is not set
+CONFIG_K8_NB=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Executable file formats / Emulations
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_COMPAT_NET_DEV_OPS=y
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_LRO=y
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+CONFIG_TCP_CONG_CUBIC=y
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_TCP_CONG_HSTCP is not set
+# CONFIG_TCP_CONG_HYBLA is not set
+# CONFIG_TCP_CONG_VEGAS is not set
+# CONFIG_TCP_CONG_SCALABLE is not set
+# CONFIG_TCP_CONG_LP is not set
+# CONFIG_TCP_CONG_VENO is not set
+# CONFIG_TCP_CONG_YEAH is not set
+# CONFIG_TCP_CONG_ILLINOIS is not set
+# CONFIG_DEFAULT_BIC is not set
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_HTCP is not set
+# CONFIG_DEFAULT_VEGAS is not set
+# CONFIG_DEFAULT_WESTWOOD is not set
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+CONFIG_NETLABEL=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_NF_NAT_FTP=y
+CONFIG_NF_NAT_IRC=y
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_NF_NAT_SIP=y
+CONFIG_IP_NF_MANGLE=y
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_TARGET_LOG=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_INGRESS is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+# CONFIG_NET_EMATCH_CMP is not set
+# CONFIG_NET_EMATCH_NBYTE is not set
+# CONFIG_NET_EMATCH_U32 is not set
+# CONFIG_NET_EMATCH_META is not set
+# CONFIG_NET_EMATCH_TEXT is not set
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_IPT is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+CONFIG_HAMRADIO=y
+
+#
+# Packet Radio protocols
+#
+# CONFIG_AX25 is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+CONFIG_BT_HCIBFUSB=y
+CONFIG_BT_HCIVHCI=y
+# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_NL80211=y
+CONFIG_WIRELESS_OLD_REGULATORY=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=y
+
+#
+# Rate control algorithm selection
+#
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE="mrvl/sd8688.bin mrvl/helper_sd.bin"
+CONFIG_EXTRA_FIRMWARE_DIR="firmware"
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_DEBUG_DEVRES=y
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=y
+# CONFIG_SCSI_FC_ATTRS is not set
+CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# Enable only one of the two stacks, unless you know what you are doing
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_IFB is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_NET_ETHERNET is not set
+CONFIG_MII=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_CDCETHER=y
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+CONFIG_USB_NET_NET1080=y
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+# CONFIG_USB_ALI_M5632 is not set
+# CONFIG_USB_AN2720 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_ARMLINUX is not set
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_KC2190 is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+CONFIG_INPUT_POLLDEV=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_JOYSTICK_XPAD is not set
+CONFIG_INPUT_TABLET=y
+# CONFIG_TABLET_USB_ACECAD is not set
+# CONFIG_TABLET_USB_AIPTEK is not set
+# CONFIG_TABLET_USB_GTCO is not set
+# CONFIG_TABLET_USB_KBTAB is not set
+# CONFIG_TABLET_USB_WACOM is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_WISTRON_BTNS is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_UINPUT is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_COMPUTONE is not set
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_DIGIEPCA is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_ISI is not set
+# CONFIG_SYNCLINK is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_N_HDLC is not set
+# CONFIG_RISCOM8 is not set
+# CONFIG_SPECIALIX is not set
+# CONFIG_SX is not set
+# CONFIG_RIO is not set
+# CONFIG_STALDRV is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+CONFIG_FIX_EARLYCON_MEM=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_INTEL=y
+CONFIG_HW_RANDOM_AMD=y
+CONFIG_HW_RANDOM_GEODE=y
+CONFIG_HW_RANDOM_VIA=y
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_SONYPI is not set
+# CONFIG_MWAVE is not set
+# CONFIG_PC8736x_GPIO is not set
+# CONFIG_NSC_GPIO is not set
+# CONFIG_CS5535_GPIO is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_CHARDEV is not set
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+CONFIG_I2C_I801=y
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Graphics adapter I2C/DDC channel drivers
+#
+# CONFIG_I2C_VOODOO3 is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_SCx200_ACB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=y
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_MRST=y
+CONFIG_SPI_MRST_DMA=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_MRST_MAX3110=y
+# CONFIG_MRST_MAX3110_IRQ is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPE=y
+CONFIG_GPIO_LANGWELL=y
+CONFIG_GPIO_LNWPMIC=y
+# CONFIG_GPIO_LNWPMIC_NEC_WORKAROUND is not set
+CONFIG_MRST_PMIC_BUTTON=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_REGULATOR is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_COMMON=y
+# CONFIG_VIDEO_ALLOW_V4L1 is not set
+# CONFIG_VIDEO_V4L1_COMPAT is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_VIDEO_MEDIA=y
+
+#
+# Multimedia drivers
+#
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=y
+CONFIG_MEDIA_TUNER_CUSTOMIZE=y
+# CONFIG_MEDIA_TUNER_SIMPLE is not set
+# CONFIG_MEDIA_TUNER_TDA8290 is not set
+# CONFIG_MEDIA_TUNER_TDA827X is not set
+# CONFIG_MEDIA_TUNER_TDA18271 is not set
+# CONFIG_MEDIA_TUNER_TDA9887 is not set
+# CONFIG_MEDIA_TUNER_TEA5761 is not set
+# CONFIG_MEDIA_TUNER_TEA5767 is not set
+# CONFIG_MEDIA_TUNER_MT20XX is not set
+# CONFIG_MEDIA_TUNER_MT2060 is not set
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MT2131 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_XC2028 is not set
+# CONFIG_MEDIA_TUNER_XC5000 is not set
+# CONFIG_MEDIA_TUNER_MXL5005S is not set
+# CONFIG_MEDIA_TUNER_MXL5007T is not set
+CONFIG_VIDEO_V4L2=y
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+
+#
+# Encoders/decoders and other helper chips
+#
+
+#
+# Audio decoders
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TDA9875 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_M52790 is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_TCM825X is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# MPEG video encoders
+#
+# CONFIG_VIDEO_CX2341X is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_BT848 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_SAA7134 is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_CX88 is not set
+# CONFIG_VIDEO_IVTV is not set
+# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_V4L_USB_DRIVERS is not set
+CONFIG_VIDEO_MRSTCI=y
+CONFIG_VIDEO_MRST_ISP=y
+CONFIG_VIDEO_MRST_SENSOR=y
+CONFIG_VIDEO_MRST_OV2650=y
+CONFIG_VIDEO_MRST_OV5630=y
+# CONFIG_RADIO_ADAPTERS is not set
+CONFIG_DAB=y
+# CONFIG_USB_DABUSB is not set
+
+#
+# Graphics support
+#
+CONFIG_AGP=y
+# CONFIG_AGP_ALI is not set
+# CONFIG_AGP_ATI is not set
+# CONFIG_AGP_AMD is not set
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+# CONFIG_AGP_NVIDIA is not set
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_SWORKS is not set
+# CONFIG_AGP_VIA is not set
+# CONFIG_AGP_EFFICEON is not set
+CONFIG_DRM=y
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_R128 is not set
+# CONFIG_DRM_RADEON is not set
+# CONFIG_DRM_I810 is not set
+# CONFIG_DRM_I830 is not set
+CONFIG_DRM_I915=y
+# CONFIG_DRM_I915_KMS is not set
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_VESA is not set
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I810 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_INTEL is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_CYBLA is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_PROGEAR is not set
+# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_VGACON_SOFT_SCROLLBACK=y
+CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_PCM_OSS_PLUGINS is not set
+# CONFIG_SND_HRTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_PCI=y
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5530 is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HIFIER is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_SIS7019 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_USB is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_COMPAT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_GREENASIA_FF is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+CONFIG_USB_LANGWELL_OTG=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+CONFIG_USB_PRINTER=y
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+#
+
+#
+# see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_UWB is not set
+CONFIG_MMC=y
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+# CONFIG_LEDS_CLASS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+CONFIG_RTC_DRV_VRTC=y
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+# CONFIG_INTEL_IOATDMA is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+# CONFIG_X86_PLATFORM_DEVICES is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_EDD is not set
+CONFIG_FIRMWARE_MEMMAP=y
+# CONFIG_DELL_RBU is not set
+# CONFIG_DCDBAS is not set
+# CONFIG_ISCSI_IBFT_FIND is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=y
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_REGISTER_V4 is not set
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+# CONFIG_PROVE_LOCKING is not set
+CONFIG_LOCKDEP=y
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_LOCKDEP is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SYSPROF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_POWER_TRACER is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_MMIOTRACE is not set
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_X86_MRST_EARLY_PRINTK=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_STACK_USAGE=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_X86_PTDUMP is not set
+CONFIG_DEBUG_RODATA=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_4KSTACKS is not set
+CONFIG_DOUBLEFAULT=y
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
+CONFIG_IO_DELAY_TYPE_0X80=0
+CONFIG_IO_DELAY_TYPE_0XED=1
+CONFIG_IO_DELAY_TYPE_UDELAY=2
+CONFIG_IO_DELAY_TYPE_NONE=3
+CONFIG_IO_DELAY_0X80=y
+# CONFIG_IO_DELAY_0XED is not set
+# CONFIG_IO_DELAY_UDELAY is not set
+# CONFIG_IO_DELAY_NONE is not set
+CONFIG_DEFAULT_IO_DELAY_TYPE=0
+CONFIG_DEBUG_BOOT_PARAMS=y
+# CONFIG_CPA_DEBUG is not set
+CONFIG_OPTIMIZE_INLINING=y
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
+CONFIG_SECURITY_FILE_CAPABILITIES=y
+# CONFIG_SECURITY_ROOTPLUG is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
+# CONFIG_SECURITY_SMACK is not set
+CONFIG_CRYPTO=y
+# CONFIG_SECURITY_SELINUX is not set
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_AES_586=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SALSA20_586 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_TWOFISH_586 is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_PADLOCK is not set
+# CONFIG_CRYPTO_DEV_GEODE is not set
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+CONFIG_HAVE_KVM=y
+CONFIG_VIRTUALIZATION=y
+# CONFIG_KVM is not set
+# CONFIG_LGUEST is not set
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_BALLOON is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_FIRST_BIT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC_T10DIF=y
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+
+CONFIG_INTEL_LNW_DMAC1=y
+CONFIG_INTEL_LNW_DMAC2=y
+# CONFIG_LNW_DMA_DEBUG is not set
+# CONFIG_NET_DMA is not set
+# CONFIG_DMATEST is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook
new file mode 100644
index 000000000..9174ff6d5
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/config-netbook
@@ -0,0 +1,52 @@
+CONFIG_LOCALVERSION="-netbook"
+
+CONFIG_ACER_WMI=y
+
+CONFIG_EEEPC_LAPTOP=m
+
+CONFIG_R8169=y
+# CONFIG_R8169_VLAN is not set
+
+CONFIG_ATL1E=y
+
+CONFIG_ATH5K=y
+# CONFIG_ATH5K_DEBUG is not set
+
+CONFIG_RT2860=m
+
+CONFIG_RT2860=m
+
+CONFIG_RTL8187SE=m
+
+
+CONFIG_DRM_I915_KMS=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_6x11=y
+CONFIG_FONT_7x14=y
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+CONFIG_FONT_10x18=y
+
+
+#
+# Enable KVM
+#
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+# CONFIG_KVM_AMD is not set
+# CONFIG_KVM_TRACE is not set
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_BALLOON is not set
+
+#
+# For VMWARE support
+#
+CONFIG_FUSION_SPI=y
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow
index 30c165622..ec7e1d66e 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/defconfig-menlow
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-menlow
@@ -1,14 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27
-# Wed Jan 14 11:45:36 2009
+# Linux kernel version: 2.6.29-rc8
+# Wed Apr 1 13:38:03 2009
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
# CONFIG_X86_64 is not set
CONFIG_X86=y
CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
-# CONFIG_GENERIC_LOCKBREAK is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_CLOCKSOURCE_WATCHDOG=y
@@ -24,16 +23,14 @@ CONFIG_GENERIC_ISA_DMA=y
CONFIG_GENERIC_IOMAP=y
CONFIG_GENERIC_BUG=y
CONFIG_GENERIC_HWEIGHT=y
-# CONFIG_GENERIC_GPIO is not set
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
# CONFIG_GENERIC_TIME_VSYSCALL is not set
CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
# CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
@@ -42,12 +39,12 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_ZONE_DMA32 is not set
CONFIG_ARCH_POPULATES_NODE_MAP=y
# CONFIG_AUDIT_ARCH is not set
-CONFIG_ARCH_SUPPORTS_AOUT=y
CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_PENDING_IRQ=y
CONFIG_X86_SMP=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_X86_32_SMP=y
CONFIG_X86_HT=y
CONFIG_X86_BIOS_REBOOT=y
@@ -75,12 +72,21 @@ CONFIG_TASK_DELAY_ACCT=y
CONFIG_AUDIT=y
CONFIG_AUDITSYSCALL=y
CONFIG_AUDIT_TREE=y
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
-# CONFIG_CGROUPS is not set
CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
@@ -89,11 +95,12 @@ CONFIG_NAMESPACES=y
# CONFIG_IPC_NS is not set
# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_FASTBOOT is not set
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
# CONFIG_EMBEDDED is not set
CONFIG_UID16=y
CONFIG_SYSCTL_SYSCALL=y
@@ -105,20 +112,22 @@ CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
CONFIG_PCSPKR_PLATFORM=y
-CONFIG_COMPAT_BRK=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
# CONFIG_MARKERS is not set
# CONFIG_OPROFILE is not set
CONFIG_HAVE_OPROFILE=y
@@ -127,15 +136,10 @@ CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-CONFIG_USE_GENERIC_SMP_HELPERS=y
-# CONFIG_HAVE_CLK is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -143,12 +147,10 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_LBD=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_LSF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -164,7 +166,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# Processor type and features
@@ -174,6 +176,7 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP=y
+# CONFIG_SPARSE_IRQ is not set
CONFIG_X86_FIND_SMP_CONFIG=y
CONFIG_X86_MPPARSE=y
# CONFIG_X86_PC is not set
@@ -186,7 +189,7 @@ CONFIG_X86_GENERICARCH=y
# CONFIG_X86_BIGSMP is not set
# CONFIG_X86_VSMP is not set
# CONFIG_X86_RDC321X is not set
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
# CONFIG_PARAVIRT_GUEST is not set
# CONFIG_MEMTEST is not set
CONFIG_X86_CYCLONE_TIMER=y
@@ -206,7 +209,6 @@ CONFIG_M586=y
# CONFIG_MCRUSOE is not set
# CONFIG_MEFFICEON is not set
# CONFIG_MWINCHIPC6 is not set
-# CONFIG_MWINCHIP2 is not set
# CONFIG_MWINCHIP3D is not set
# CONFIG_MGEODEGX1 is not set
# CONFIG_MGEODE_LX is not set
@@ -230,9 +232,16 @@ CONFIG_X86_POPAD_OK=y
CONFIG_X86_ALIGNMENT_16=y
CONFIG_X86_INTEL_USERCOPY=y
CONFIG_X86_MINIMUM_CPU_FAMILY=4
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR_32=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
CONFIG_HPET_TIMER=y
CONFIG_DMI=y
# CONFIG_IOMMU_HELPER is not set
+# CONFIG_IOMMU_API is not set
CONFIG_NR_CPUS=8
# CONFIG_SCHED_SMT is not set
CONFIG_SCHED_MC=y
@@ -241,6 +250,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
CONFIG_X86_LOCAL_APIC=y
CONFIG_X86_IO_APIC=y
+# CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set
CONFIG_X86_MCE=y
CONFIG_X86_MCE_NONFATAL=y
# CONFIG_X86_MCE_P4THERMAL is not set
@@ -249,6 +259,8 @@ CONFIG_VM86=y
# CONFIG_I8K is not set
CONFIG_X86_REBOOTFIXUPS=y
CONFIG_MICROCODE=m
+CONFIG_MICROCODE_INTEL=y
+# CONFIG_MICROCODE_AMD is not set
CONFIG_MICROCODE_OLD_INTERFACE=y
CONFIG_X86_MSR=m
CONFIG_X86_CPUID=m
@@ -257,27 +269,32 @@ CONFIG_HIGHMEM4G=y
# CONFIG_HIGHMEM64G is not set
CONFIG_PAGE_OFFSET=0xC0000000
CONFIG_HIGHMEM=y
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_SPARSEMEM_STATIC=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
CONFIG_HIGHPTE=y
+# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set
+CONFIG_X86_RESERVE_LOW_64K=y
# CONFIG_MATH_EMULATION is not set
CONFIG_MTRR=y
# CONFIG_MTRR_SANITIZER is not set
# CONFIG_X86_PAT is not set
CONFIG_EFI=y
-# CONFIG_IRQBALANCE is not set
CONFIG_SECCOMP=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
@@ -293,13 +310,18 @@ CONFIG_PHYSICAL_START=0x100000
CONFIG_PHYSICAL_ALIGN=0x100000
CONFIG_HOTPLUG_CPU=y
CONFIG_COMPAT_VDSO=y
+# CONFIG_CMDLINE_BOOL is not set
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
#
-# Power management options
+# Power management and ACPI options
#
CONFIG_PM=y
-# CONFIG_PM_DEBUG is not set
+CONFIG_PM_DEBUG=y
+CONFIG_PM_VERBOSE=y
+CONFIG_CAN_PM_TRACE=y
+CONFIG_PM_TRACE=y
+CONFIG_PM_TRACE_RTC=y
CONFIG_PM_SLEEP_SMP=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
@@ -318,21 +340,14 @@ CONFIG_ACPI_BUTTON=y
CONFIG_ACPI_VIDEO=y
CONFIG_ACPI_FAN=y
CONFIG_ACPI_DOCK=y
-# CONFIG_ACPI_BAY is not set
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_THERMAL=y
-# CONFIG_ACPI_WMI is not set
-# CONFIG_ACPI_ASUS is not set
-# CONFIG_ACPI_TOSHIBA is not set
CONFIG_ACPI_CUSTOM_DSDT_FILE=""
# CONFIG_ACPI_CUSTOM_DSDT is not set
CONFIG_ACPI_BLACKLIST_YEAR=2001
# CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_EC=y
# CONFIG_ACPI_PCI_SLOT is not set
-CONFIG_ACPI_POWER=y
-CONFIG_ACPI_SYSTEM=y
CONFIG_X86_PM_TIMER=y
CONFIG_ACPI_CONTAINER=y
CONFIG_ACPI_SBS=y
@@ -343,7 +358,6 @@ CONFIG_APM_DO_ENABLE=y
# CONFIG_APM_CPU_IDLE is not set
CONFIG_APM_DISPLAY_BLANK=y
CONFIG_APM_ALLOW_INTS=y
-# CONFIG_APM_REAL_MODE_POWER_OFF is not set
#
# CPU Frequency scaling
@@ -385,7 +399,6 @@ CONFIG_X86_P4_CLOCKMOD=m
#
# shared options
#
-# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
CONFIG_X86_SPEEDSTEP_LIB=m
CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y
CONFIG_CPU_IDLE=y
@@ -413,6 +426,7 @@ CONFIG_ARCH_SUPPORTS_MSI=y
CONFIG_PCI_MSI=y
CONFIG_PCI_LEGACY=y
# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
CONFIG_HT_IRQ=y
CONFIG_ISA_DMA_API=y
CONFIG_ISA=y
@@ -436,13 +450,17 @@ CONFIG_HOTPLUG_PCI_SHPC=m
# Executable file formats / Emulations
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+CONFIG_HAVE_ATOMIC_IOMAP=y
CONFIG_NET=y
#
# Networking options
#
+CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_PACKET=m
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@@ -507,36 +525,6 @@ CONFIG_TCP_CONG_VENO=m
CONFIG_DEFAULT_RENO=y
CONFIG_DEFAULT_TCP_CONG="reno"
# CONFIG_TCP_MD5SIG is not set
-CONFIG_IP_VS=m
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
CONFIG_IPV6=m
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
@@ -575,8 +563,8 @@ CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
@@ -586,37 +574,70 @@ CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
CONFIG_NETFILTER_XT_MATCH_ESP=m
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
CONFIG_NETFILTER_XT_MATCH_REALM=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
# CONFIG_NETFILTER_XT_MATCH_TIME is not set
# CONFIG_NETFILTER_XT_MATCH_U32 is not set
-# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+CONFIG_IP_VS=m
+# CONFIG_IP_VS_IPV6 is not set
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
#
# IP: Netfilter Configuration
#
+# CONFIG_NF_DEFRAG_IPV4 is not set
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
@@ -635,16 +656,16 @@ CONFIG_IP_NF_ARP_MANGLE=m
#
CONFIG_IP6_NF_QUEUE=m
CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AH=m
# CONFIG_IP6_NF_MATCH_MH is not set
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_TARGET_HL=m
@@ -655,10 +676,6 @@ CONFIG_IP6_NF_RAW=m
# DECnet: Netfilter Configuration
#
CONFIG_DECNET_NF_GRABULATOR=m
-
-#
-# Bridge: Netfilter Configuration
-#
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -683,17 +700,15 @@ CONFIG_BRIDGE_EBT_ULOG=m
# CONFIG_BRIDGE_EBT_NFLOG is not set
CONFIG_IP_DCCP=m
CONFIG_INET_DCCP_DIAG=m
-CONFIG_IP_DCCP_ACKVEC=y
#
# DCCP CCIDs Configuration (EXPERIMENTAL)
#
-CONFIG_IP_DCCP_CCID2=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=m
+CONFIG_IP_DCCP_CCID3=y
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
CONFIG_IP_DCCP_CCID3_RTO=100
-CONFIG_IP_DCCP_TFRC_LIB=m
+CONFIG_IP_DCCP_TFRC_LIB=y
#
# DCCP Kernel Hacking
@@ -715,6 +730,7 @@ CONFIG_ATM_BR2684=m
# CONFIG_ATM_BR2684_IPFILTER is not set
CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
# CONFIG_VLAN_8021Q_GVRP is not set
CONFIG_DECNET=m
@@ -748,6 +764,7 @@ CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_ATM=m
CONFIG_NET_SCH_PRIO=m
+# CONFIG_NET_SCH_MULTIQ is not set
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
@@ -755,6 +772,7 @@ CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
CONFIG_NET_SCH_INGRESS=m
#
@@ -782,8 +800,10 @@ CONFIG_NET_ACT_IPT=m
# CONFIG_NET_ACT_NAT is not set
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
+# CONFIG_NET_ACT_SKBEDIT is not set
# CONFIG_NET_CLS_IND is not set
CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
@@ -805,8 +825,6 @@ CONFIG_BT_HIDP=m
#
# Bluetooth device drivers
#
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
# CONFIG_BT_HCIBTUSB is not set
# CONFIG_BT_HCIBTSDIO is not set
CONFIG_BT_HCIUART=m
@@ -818,20 +836,19 @@ CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
CONFIG_FIB_RULES=y
-
-#
-# Wireless
-#
+CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
# CONFIG_MAC80211 is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -857,6 +874,7 @@ CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
CONFIG_MTD_CONCAT=m
CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_REDBOOT_PARTS=m
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
@@ -867,6 +885,7 @@ CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
# User Modules And Translation Layers
#
CONFIG_MTD_CHAR=m
+CONFIG_HAVE_MTD_OTP=y
CONFIG_MTD_BLKDEVS=m
CONFIG_MTD_BLOCK=m
# CONFIG_MTD_BLOCK_RO is not set
@@ -912,9 +931,7 @@ CONFIG_MTD_ABSENT=m
#
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=m
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x4000000
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
CONFIG_MTD_SC520CDP=m
CONFIG_MTD_NETSC520=m
CONFIG_MTD_TS5500=m
@@ -980,12 +997,17 @@ CONFIG_MTD_ONENAND_OTP=y
# CONFIG_MTD_ONENAND_SIM is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
# CONFIG_PARPORT is not set
CONFIG_PNP=y
-# CONFIG_PNP_DEBUG is not set
+CONFIG_PNP_DEBUG_MESSAGES=y
#
# Protocols
@@ -1020,21 +1042,20 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_MISC_DEVICES=y
# CONFIG_IBM_ASM is not set
# CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
-# CONFIG_ACER_WMI is not set
-# CONFIG_ASUS_LAPTOP is not set
-# CONFIG_FUJITSU_LAPTOP is not set
-# CONFIG_TC1100_WMI is not set
-# CONFIG_MSI_LAPTOP is not set
-# CONFIG_COMPAL_LAPTOP is not set
-# CONFIG_SONY_LAPTOP is not set
-# CONFIG_THINKPAD_ACPI is not set
-# CONFIG_INTEL_MENLOW is not set
-# CONFIG_EEEPC_LAPTOP is not set
+# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -1078,6 +1099,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_7000FASST is not set
@@ -1098,6 +1120,8 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_MEGARAID_SAS is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_LIBFC is not set
+# CONFIG_FCOE is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_DTC3280 is not set
# CONFIG_SCSI_EATA is not set
@@ -1252,6 +1276,9 @@ CONFIG_SMSC_PHY=m
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
@@ -1293,6 +1320,9 @@ CONFIG_ULI526X=m
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
@@ -1302,7 +1332,6 @@ CONFIG_NET_PCI=y
# CONFIG_B44 is not set
# CONFIG_FORCEDETH is not set
# CONFIG_CS89x0 is not set
-# CONFIG_EEPRO100 is not set
CONFIG_E100=m
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
@@ -1316,22 +1345,24 @@ CONFIG_8139TOO_8129=y
# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
CONFIG_EPIC100=m
+# CONFIG_SMSC9420 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
CONFIG_E1000=m
-CONFIG_E1000_DISABLE_PACKET_SPLIT=y
# CONFIG_E1000E is not set
# CONFIG_IP1000 is not set
# CONFIG_IGB is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
+CONFIG_R8169=m
+CONFIG_R8169_VLAN=y
# CONFIG_SIS190 is not set
CONFIG_SKGE=y
# CONFIG_SKGE_DEBUG is not set
@@ -1343,18 +1374,24 @@ CONFIG_SKY2=y
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_JME is not set
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
CONFIG_IXGB=m
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
# CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_TR is not set
@@ -1363,13 +1400,6 @@ CONFIG_IXGB=m
#
# CONFIG_WLAN_PRE80211 is not set
CONFIG_WLAN_80211=y
-CONFIG_IPW2100=m
-# CONFIG_IPW2100_MONITOR is not set
-# CONFIG_IPW2100_DEBUG is not set
-CONFIG_IPW2200=m
-# CONFIG_IPW2200_MONITOR is not set
-# CONFIG_IPW2200_QOS is not set
-# CONFIG_IPW2200_DEBUG is not set
# CONFIG_LIBERTAS is not set
# CONFIG_AIRO is not set
# CONFIG_HERMES is not set
@@ -1377,10 +1407,23 @@ CONFIG_IPW2200=m
# CONFIG_PRISM54 is not set
# CONFIG_USB_ZD1201 is not set
# CONFIG_USB_NET_RNDIS_WLAN is not set
+CONFIG_IPW2100=m
+# CONFIG_IPW2100_MONITOR is not set
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_IPW2200=m
+# CONFIG_IPW2200_MONITOR is not set
+# CONFIG_IPW2200_QOS is not set
+# CONFIG_IPW2200_DEBUG is not set
+CONFIG_LIBIPW=m
+# CONFIG_LIBIPW_DEBUG is not set
# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_HOSTAP is not set
#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
# USB Network Adapters
#
CONFIG_USB_CATC=m
@@ -1391,6 +1434,7 @@ CONFIG_USB_USBNET=y
CONFIG_USB_NET_AX8817X=y
CONFIG_USB_NET_CDCETHER=m
# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC95XX is not set
CONFIG_USB_NET_GL620A=m
CONFIG_USB_NET_NET1080=m
CONFIG_USB_NET_PLUSB=m
@@ -1419,6 +1463,7 @@ CONFIG_ATM_DRIVERS=y
# CONFIG_ATM_IA is not set
# CONFIG_ATM_FORE200E is not set
# CONFIG_ATM_HE is not set
+# CONFIG_ATM_SOLOS is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
CONFIG_PPP=m
@@ -1482,6 +1527,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_LIFEBOOK=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
CONFIG_MOUSE_SERIAL=m
# CONFIG_MOUSE_APPLETOUCH is not set
@@ -1521,6 +1567,7 @@ CONFIG_TOUCHSCREEN_ADS7846=m
# CONFIG_TOUCHSCREEN_FUJITSU is not set
CONFIG_TOUCHSCREEN_GUNZE=m
CONFIG_TOUCHSCREEN_ELO=m
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
CONFIG_TOUCHSCREEN_MTOUCH=m
# CONFIG_TOUCHSCREEN_INEXIO is not set
CONFIG_TOUCHSCREEN_MK712=m
@@ -1528,10 +1575,10 @@ CONFIG_TOUCHSCREEN_MK712=m
# CONFIG_TOUCHSCREEN_PENMOUNT is not set
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
# CONFIG_TOUCHSCREEN_WM97XX is not set
# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=y
# CONFIG_INPUT_APANEL is not set
@@ -1542,6 +1589,7 @@ CONFIG_INPUT_WISTRON_BTNS=m
# CONFIG_INPUT_KEYSPAN_REMOTE is not set
# CONFIG_INPUT_POWERMATE is not set
# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
CONFIG_INPUT_UINPUT=m
#
@@ -1574,7 +1622,6 @@ CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_ROCKETPORT is not set
# CONFIG_CYCLADES is not set
# CONFIG_DIGIEPCA is not set
-# CONFIG_ESPSERIAL is not set
# CONFIG_MOXA_INTELLIO is not set
# CONFIG_MOXA_SMARTIO is not set
# CONFIG_ISI is not set
@@ -1612,6 +1659,7 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_JSM=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=64
CONFIG_IPMI_HANDLER=m
@@ -1704,8 +1752,6 @@ CONFIG_SCx200_ACB=m
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_AT24 is not set
-CONFIG_SENSORS_EEPROM=m
CONFIG_SENSORS_PCF8574=m
# CONFIG_PCF8575 is not set
CONFIG_SENSORS_PCA9539=m
@@ -1728,7 +1774,6 @@ CONFIG_SPI_BITBANG=m
#
# SPI Protocol Masters
#
-# CONFIG_SPI_AT25 is not set
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
@@ -1748,13 +1793,16 @@ CONFIG_W1_MASTER_DS2482=m
#
CONFIG_W1_SLAVE_THERM=m
CONFIG_W1_SLAVE_SMEM=m
+# CONFIG_W1_SLAVE_DS2431 is not set
CONFIG_W1_SLAVE_DS2433=m
CONFIG_W1_SLAVE_DS2433_CRC=y
# CONFIG_W1_SLAVE_DS2760 is not set
+# CONFIG_W1_SLAVE_BQ27000 is not set
CONFIG_POWER_SUPPLY=y
# CONFIG_POWER_SUPPLY_DEBUG is not set
# CONFIG_PDA_POWER is not set
# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
CONFIG_HWMON=y
CONFIG_HWMON_VID=m
# CONFIG_SENSORS_ABITUGURU is not set
@@ -1768,8 +1816,10 @@ CONFIG_HWMON_VID=m
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
# CONFIG_SENSORS_K8TEMP is not set
# CONFIG_SENSORS_ASB100 is not set
# CONFIG_SENSORS_ATXP1 is not set
@@ -1799,6 +1849,8 @@ CONFIG_SENSORS_LM85=m
# CONFIG_SENSORS_LM90 is not set
# CONFIG_SENSORS_LM92 is not set
# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_MAX1111 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_MAX6650 is not set
# CONFIG_SENSORS_PC87360 is not set
@@ -1822,16 +1874,17 @@ CONFIG_SENSORS_LM85=m
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
# CONFIG_SENSORS_APPLESMC is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
CONFIG_THERMAL=y
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@@ -1841,6 +1894,10 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_REGULATOR is not set
#
# Multimedia devices
@@ -1881,6 +1938,7 @@ CONFIG_VIDEO_TVEEPROM=m
CONFIG_VIDEO_TUNER=m
CONFIG_VIDEO_CAPTURE_DRIVERS=y
# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
CONFIG_VIDEO_IR_I2C=m
CONFIG_VIDEO_MSP3400=m
@@ -1897,12 +1955,10 @@ CONFIG_VIDEO_CX2341X=m
# CONFIG_VIDEO_CPIA2 is not set
# CONFIG_VIDEO_SAA5246A is not set
# CONFIG_VIDEO_SAA5249 is not set
-# CONFIG_TUNER_3036 is not set
# CONFIG_VIDEO_STRADIS is not set
# CONFIG_VIDEO_ZORAN is not set
# CONFIG_VIDEO_SAA7134 is not set
# CONFIG_VIDEO_MXB is not set
-# CONFIG_VIDEO_DPC is not set
# CONFIG_VIDEO_HEXIUM_ORION is not set
# CONFIG_VIDEO_HEXIUM_GEMINI is not set
# CONFIG_VIDEO_CX88 is not set
@@ -1911,6 +1967,7 @@ CONFIG_VIDEO_CX2341X=m
# CONFIG_VIDEO_IVTV is not set
# CONFIG_VIDEO_CX18 is not set
# CONFIG_VIDEO_CAFE_CCIC is not set
+# CONFIG_SOC_CAMERA is not set
CONFIG_V4L_USB_DRIVERS=y
# CONFIG_USB_VIDEO_CLASS is not set
# CONFIG_USB_GSPCA is not set
@@ -1940,8 +1997,6 @@ CONFIG_USB_PWC=m
# CONFIG_USB_ZR364XX is not set
# CONFIG_USB_STKWEBCAM is not set
# CONFIG_USB_S2255 is not set
-# CONFIG_SOC_CAMERA is not set
-# CONFIG_VIDEO_SH_MOBILE_CEU is not set
CONFIG_RADIO_ADAPTERS=y
# CONFIG_RADIO_CADET is not set
# CONFIG_RADIO_RTRACK is not set
@@ -1959,6 +2014,9 @@ CONFIG_RADIO_ADAPTERS=y
# CONFIG_RADIO_ZOLTRIX is not set
# CONFIG_USB_DSBR is not set
# CONFIG_USB_SI470X is not set
+# CONFIG_USB_MR800 is not set
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_DVB_DYNAMIC_MINORS is not set
CONFIG_DVB_CAPTURE_DRIVERS=y
#
@@ -1993,10 +2051,12 @@ CONFIG_DVB_USB_DTT200U=m
# CONFIG_DVB_USB_OPERA1 is not set
# CONFIG_DVB_USB_AF9005 is not set
# CONFIG_DVB_USB_DW2102 is not set
+# CONFIG_DVB_USB_CINERGY_T2 is not set
# CONFIG_DVB_USB_ANYSEE is not set
+# CONFIG_DVB_USB_DTV5100 is not set
+# CONFIG_DVB_USB_AF9015 is not set
# CONFIG_DVB_TTUSB_BUDGET is not set
# CONFIG_DVB_TTUSB_DEC is not set
-# CONFIG_DVB_CINERGYT2 is not set
# CONFIG_DVB_SIANO_SMS1XXX is not set
#
@@ -2014,6 +2074,16 @@ CONFIG_DVB_USB_DTT200U=m
# CONFIG_DVB_PLUTO2 is not set
#
+# Supported SDMC DM1105 Adapters
+#
+# CONFIG_DVB_DM1105 is not set
+
+#
+# Supported FireWire (IEEE 1394) Adapters
+#
+# CONFIG_DVB_FIREDTV is not set
+
+#
# Supported DVB Frontends
#
@@ -2023,19 +2093,31 @@ CONFIG_DVB_USB_DTT200U=m
# CONFIG_DVB_FE_CUSTOMISE is not set
#
+# Multistandard (satellite) frontends
+#
+# CONFIG_DVB_STB0899 is not set
+# CONFIG_DVB_STB6100 is not set
+
+#
# DVB-S (satellite) frontends
#
CONFIG_DVB_CX24110=m
CONFIG_DVB_CX24123=m
CONFIG_DVB_MT312=m
CONFIG_DVB_S5H1420=m
+# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_STB6000 is not set
CONFIG_DVB_STV0299=m
CONFIG_DVB_TDA8083=m
CONFIG_DVB_TDA10086=m
+# CONFIG_DVB_TDA8261 is not set
CONFIG_DVB_VES1X93=m
# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
CONFIG_DVB_TDA826X=m
CONFIG_DVB_TUA6100=m
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_SI21XX is not set
#
# DVB-T (terrestrial) frontends
@@ -2072,11 +2154,17 @@ CONFIG_DVB_OR51211=m
CONFIG_DVB_OR51132=m
CONFIG_DVB_BCM3510=m
CONFIG_DVB_LGDT330X=m
+# CONFIG_DVB_LGDT3304 is not set
CONFIG_DVB_S5H1409=m
# CONFIG_DVB_AU8522 is not set
CONFIG_DVB_S5H1411=m
#
+# ISDB-T (terrestrial) frontends
+#
+# CONFIG_DVB_S921 is not set
+
+#
# Digital terrestrial only tuners/PLL
#
CONFIG_DVB_PLL=m
@@ -2088,6 +2176,13 @@ CONFIG_DVB_PLL=m
CONFIG_DVB_LNBP21=m
# CONFIG_DVB_ISL6405 is not set
CONFIG_DVB_ISL6421=m
+# CONFIG_DVB_LGS8GL5 is not set
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+# CONFIG_DVB_AF9013 is not set
CONFIG_DAB=y
CONFIG_USB_DABUSB=m
@@ -2109,22 +2204,24 @@ CONFIG_DRM=m
# CONFIG_DRM_TDFX is not set
# CONFIG_DRM_R128 is not set
# CONFIG_DRM_RADEON is not set
+CONFIG_DRM_INTEL_COMMON=m
# CONFIG_DRM_I810 is not set
# CONFIG_DRM_I830 is not set
-# CONFIG_DRM_I915 is not set
+CONFIG_DRM_I915=m
+CONFIG_DRM_I915_KMS=y
# CONFIG_DRM_MGA is not set
# CONFIG_DRM_SIS is not set
# CONFIG_DRM_VIA is not set
# CONFIG_DRM_SAVAGE is not set
-CONFIG_DRM_PSB=m
CONFIG_VGASTATE=m
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_DDC=m
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_BOOT_VESA_SUPPORT=y
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
@@ -2148,9 +2245,8 @@ CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_IMSTT is not set
CONFIG_FB_VGA16=m
# CONFIG_FB_UVESA is not set
-CONFIG_FB_VESA=y
+# CONFIG_FB_VESA is not set
# CONFIG_FB_EFI is not set
-# CONFIG_FB_IMAC is not set
# CONFIG_FB_N411 is not set
# CONFIG_FB_HGA is not set
# CONFIG_FB_S1D13XXX is not set
@@ -2183,6 +2279,7 @@ CONFIG_FB_ATY_BACKLIGHT=y
# CONFIG_FB_S3 is not set
# CONFIG_FB_SAVAGE is not set
# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
# CONFIG_FB_NEOMAGIC is not set
# CONFIG_FB_KYRO is not set
# CONFIG_FB_3DFX is not set
@@ -2195,16 +2292,20 @@ CONFIG_FB_ATY_BACKLIGHT=y
# CONFIG_FB_CARMINE is not set
# CONFIG_FB_GEODE is not set
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=m
# CONFIG_LCD_LTV350QV is not set
# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
# CONFIG_LCD_VGG2432A4 is not set
# CONFIG_LCD_PLATFORM is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_CORGI is not set
+CONFIG_BACKLIGHT_GENERIC=y
# CONFIG_BACKLIGHT_PROGEAR is not set
# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
#
# Display device support
@@ -2217,7 +2318,6 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_VGA_CONSOLE=y
CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
-CONFIG_VIDEO_SELECT=y
CONFIG_MDA_CONSOLE=m
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -2228,11 +2328,13 @@ CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
# CONFIG_LOGO is not set
CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
CONFIG_SND_HWDEP=m
CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_OSSEMUL=y
@@ -2240,6 +2342,7 @@ CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_HRTIMER is not set
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
@@ -2330,11 +2433,15 @@ CONFIG_SND_PCI=y
# CONFIG_SND_FM801 is not set
CONFIG_SND_HDA_INTEL=m
# CONFIG_SND_HDA_HWDEP is not set
+# CONFIG_SND_HDA_INPUT_BEEP is not set
CONFIG_SND_HDA_CODEC_REALTEK=y
CONFIG_SND_HDA_CODEC_ANALOG=y
CONFIG_SND_HDA_CODEC_SIGMATEL=y
CONFIG_SND_HDA_CODEC_VIA=y
CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_NVHDMI=y
+CONFIG_SND_HDA_CODEC_INTELHDMI=y
+CONFIG_SND_HDA_ELD=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_HDA_CODEC_CMEDIA=y
CONFIG_SND_HDA_CODEC_SI3054=y
@@ -2369,6 +2476,7 @@ CONFIG_SND_USB=y
CONFIG_SND_USB_AUDIO=m
# CONFIG_SND_USB_USX2Y is not set
# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
# CONFIG_SND_SOC is not set
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=m
@@ -2381,15 +2489,37 @@ CONFIG_HID=y
# USB Input Devices
#
CONFIG_USB_HID=y
-CONFIG_USB_HIDINPUT_POWERBOOK=y
-CONFIG_HID_FF=y
CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
CONFIG_LOGITECH_FF=y
# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_TOPSEED=y
CONFIG_THRUSTMASTER_FF=y
# CONFIG_ZEROPLUS_FF is not set
-CONFIG_USB_HIDDEV=y
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -2407,6 +2537,8 @@ CONFIG_USB_DEVICE_CLASS=y
CONFIG_USB_SUSPEND=y
# CONFIG_USB_OTG is not set
CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
@@ -2415,6 +2547,7 @@ CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
CONFIG_USB_OHCI_HCD=y
@@ -2424,6 +2557,8 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=y
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
@@ -2432,20 +2567,20 @@ CONFIG_USB_UHCI_HCD=y
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
CONFIG_USB_STORAGE_DATAFAB=y
CONFIG_USB_STORAGE_FREECOM=y
# CONFIG_USB_STORAGE_ISD200 is not set
-CONFIG_USB_STORAGE_DPCM=y
CONFIG_USB_STORAGE_USBAT=y
CONFIG_USB_STORAGE_SDDR09=y
CONFIG_USB_STORAGE_SDDR55=y
@@ -2514,12 +2649,14 @@ CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
+# CONFIG_USB_SERIAL_OPTICON is not set
# CONFIG_USB_SERIAL_DEBUG is not set
#
@@ -2528,6 +2665,7 @@ CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
@@ -2546,6 +2684,7 @@ CONFIG_USB_LD=m
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
CONFIG_USB_ATM=m
CONFIG_USB_SPEEDTOUCH=m
CONFIG_USB_CXACRU=m
@@ -2555,20 +2694,24 @@ CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
CONFIG_USB_GADGET_DEBUG_FILES=y
# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-CONFIG_USB_GADGET_AMD5536UDC=y
-CONFIG_USB_AMD5536UDC=y
+# CONFIG_USB_GADGET_AT91 is not set
# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA25X is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+CONFIG_USB_GADGET_AMD5536UDC=y
+CONFIG_USB_AMD5536UDC=y
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_ZERO is not set
@@ -2581,12 +2724,17 @@ CONFIG_USB_FILE_STORAGE_TEST=y
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
CONFIG_MMC_UNSAFE_RESUME=y
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
@@ -2594,7 +2742,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
CONFIG_MMC_SDHCI=y
# CONFIG_MMC_SDHCI_PCI is not set
@@ -2607,6 +2755,7 @@ CONFIG_LEDS_CLASS=m
#
# LED drivers
#
+# CONFIG_LEDS_ALIX2 is not set
# CONFIG_LEDS_PCA9532 is not set
# CONFIG_LEDS_CLEVO_MAIL is not set
# CONFIG_LEDS_PCA955X is not set
@@ -2617,6 +2766,7 @@ CONFIG_LEDS_CLASS=m
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
@@ -2648,26 +2798,32 @@ CONFIG_RTC_DRV_PCF8583=m
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
#
# SPI RTC drivers
#
# CONFIG_RTC_DRV_M41T94 is not set
# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
CONFIG_RTC_DRV_MAX6902=m
# CONFIG_RTC_DRV_R9701 is not set
CONFIG_RTC_DRV_RS5C348=m
+# CONFIG_RTC_DRV_DS3234 is not set
#
# Platform RTC drivers
#
# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
CONFIG_RTC_DRV_DS1553=m
CONFIG_RTC_DRV_DS1742=m
# CONFIG_RTC_DRV_STK17TA8 is not set
CONFIG_RTC_DRV_M48T86=m
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
CONFIG_RTC_DRV_V3020=m
#
@@ -2675,6 +2831,21 @@ CONFIG_RTC_DRV_V3020=m
#
# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_ASUS_LAPTOP is not set
+# CONFIG_FUJITSU_LAPTOP is not set
+# CONFIG_TC1100_WMI is not set
+# CONFIG_MSI_LAPTOP is not set
+# CONFIG_PANASONIC_LAPTOP is not set
+# CONFIG_COMPAL_LAPTOP is not set
+# CONFIG_SONY_LAPTOP is not set
+# CONFIG_THINKPAD_ACPI is not set
+# CONFIG_INTEL_MENLOW is not set
+# CONFIG_EEEPC_LAPTOP is not set
+# CONFIG_ACPI_WMI is not set
+# CONFIG_ACPI_ASUS is not set
+# CONFIG_ACPI_TOSHIBA is not set
#
# Firmware Drivers
@@ -2700,7 +2871,7 @@ CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
@@ -2716,15 +2887,18 @@ CONFIG_JFS_SECURITY=y
# CONFIG_JFS_DEBUG is not set
CONFIG_JFS_STATISTICS=y
CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
CONFIG_QUOTA=y
# CONFIG_QUOTA_NETLINK_INTERFACE is not set
CONFIG_PRINT_QUOTA_WARNING=y
+CONFIG_QUOTA_TREE=m
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_QUOTACTL=y
@@ -2760,16 +2934,14 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
CONFIG_ADFS_FS=m
# CONFIG_ADFS_FS_RW is not set
CONFIG_AFFS_FS=m
@@ -2798,6 +2970,7 @@ CONFIG_JFFS2_CMODE_PRIORITY=y
# CONFIG_JFFS2_CMODE_SIZE is not set
# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
CONFIG_VXFS_FS=m
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -2825,6 +2998,7 @@ CONFIG_NFS_ACL_SUPPORT=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
+# CONFIG_SUNRPC_REGISTER_V4 is not set
CONFIG_RPCSEC_GSS_KRB5=m
CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_SMB_FS=y
@@ -2942,31 +3116,51 @@ CONFIG_TIMER_STATS=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
# CONFIG_DEBUG_WRITECOUNT is not set
CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
# CONFIG_FRAME_POINTER is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-CONFIG_HAVE_FTRACE=y
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
-# CONFIG_FTRACE is not set
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SYSPROF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_POWER_TRACER is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_MMIOTRACE is not set
# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
# CONFIG_STRICT_DEVMEM is not set
CONFIG_X86_VERBOSE_BOOTUP=y
CONFIG_EARLY_PRINTK=y
+# CONFIG_EARLY_PRINTK_DBGP is not set
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_PAGEALLOC is not set
@@ -2976,7 +3170,7 @@ CONFIG_EARLY_PRINTK=y
# CONFIG_DEBUG_NX_TEST is not set
# CONFIG_4KSTACKS is not set
CONFIG_DOUBLEFAULT=y
-# CONFIG_MMIOTRACE is not set
+CONFIG_HAVE_MMIOTRACE_SUPPORT=y
CONFIG_IO_DELAY_TYPE_0X80=0
CONFIG_IO_DELAY_TYPE_0XED=1
CONFIG_IO_DELAY_TYPE_UDELAY=2
@@ -2996,8 +3190,10 @@ CONFIG_DEFAULT_IO_DELAY_TYPE=0
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
CONFIG_SECURITY_NETWORK=y
# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
# CONFIG_SECURITY_ROOTPLUG is not set
CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
@@ -3008,18 +3204,24 @@ CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_SELINUX_DEVELOP=y
CONFIG_SECURITY_SELINUX_AVC_STATS=y
CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
CONFIG_CRYPTO_NULL=m
# CONFIG_CRYPTO_CRYPTD is not set
@@ -3054,6 +3256,7 @@ CONFIG_CRYPTO_HMAC=y
# Digest
#
CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -3095,6 +3298,11 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m
#
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_DEV_PADLOCK=m
CONFIG_CRYPTO_DEV_PADLOCK_AES=m
@@ -3114,6 +3322,7 @@ CONFIG_VIRTUALIZATION=y
CONFIG_BITREVERSE=y
CONFIG_GENERIC_FIND_FIRST_BIT=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
# CONFIG_CRC_T10DIF is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook
index b52043508..67373a29d 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.27/defconfig-netbook
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/defconfig-netbook
@@ -1003,7 +1003,7 @@ CONFIG_NETDEV_1000=y
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
+CONFIG_R8169=y
# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
# CONFIG_SKY2 is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch
new file mode 100644
index 000000000..bd65daf51
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6-build-nonintconfig.patch
@@ -0,0 +1,128 @@
+diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
+index 32e8c5a..8020453 100644
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -24,6 +24,11 @@ oldconfig: $(obj)/conf
+ silentoldconfig: $(obj)/conf
+ $< -s $(Kconfig)
+
++nonint_oldconfig: $(obj)/conf
++ $< -b $(Kconfig)
++loose_nonint_oldconfig: $(obj)/conf
++ $< -B $(Kconfig)
++
+ # Create new linux.pot file
+ # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files
+ # The symlink is used to repair a deficiency in arch/um
+diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
+index fda6313..ed33b66 100644
+--- a/scripts/kconfig/conf.c
++++ b/scripts/kconfig/conf.c
+@@ -22,6 +22,8 @@
+ ask_all,
+ ask_new,
+ ask_silent,
++ dont_ask,
++ dont_ask_dont_tell,
+ set_default,
+ set_yes,
+ set_mod,
+@@ -39,6 +41,8 @@
+
+ static char nohelp_text[] = N_("Sorry, no help available for this option yet.\n");
+
++static int return_value = 0;
++
+ static const char *get_help(struct menu *menu)
+ {
+ if (menu_has_help(menu))
+@@ -359,7 +363,10 @@
+
+ switch (prop->type) {
+ case P_MENU:
+- if (input_mode == ask_silent && rootEntry != menu) {
++ if ((input_mode == ask_silent ||
++ input_mode == dont_ask ||
++ input_mode == dont_ask_dont_tell) &&
++ rootEntry != menu) {
+ check_conf(menu);
+ return;
+ }
+@@ -417,12 +424,21 @@
+ if (sym && !sym_has_value(sym)) {
+ if (sym_is_changable(sym) ||
+ (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
++ if (input_mode == dont_ask ||
++ input_mode == dont_ask_dont_tell) {
++ if (input_mode == dont_ask &&
++ sym->name && !sym_is_choice_value(sym)) {
++ fprintf(stderr,"CONFIG_%s\n",sym->name);
++ ++return_value;
++ }
++ } else {
+ if (!conf_cnt++)
+ printf(_("*\n* Restart config...\n*\n"));
+ rootEntry = menu_get_parent_menu(menu);
+ conf(rootEntry);
+ }
+ }
++ }
+
+ for (child = menu->list; child; child = child->next)
+ check_conf(child);
+@@ -438,7 +454,7 @@
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+
+- while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) {
++ while ((opt = getopt(ac, av, "osbBdD:nmyrh")) != -1) {
+ switch (opt) {
+ case 'o':
+ input_mode = ask_silent;
+@@ -447,6 +463,12 @@
+ input_mode = ask_silent;
+ sync_kconfig = 1;
+ break;
++ case 'b':
++ input_mode = dont_ask;
++ break;
++ case 'B':
++ input_mode = dont_ask_dont_tell;
++ break;
+ case 'd':
+ input_mode = set_default;
+ break;
+@@ -510,6 +532,8 @@
+ case ask_silent:
+ case ask_all:
+ case ask_new:
++ case dont_ask:
++ case dont_ask_dont_tell:
+ conf_read(NULL);
+ break;
+ case set_no:
+@@ -571,12 +595,16 @@
+ conf(&rootmenu);
+ input_mode = ask_silent;
+ /* fall through */
++ case dont_ask:
++ case dont_ask_dont_tell:
+ case ask_silent:
+ /* Update until a loop caused no more changes */
+ do {
+ conf_cnt = 0;
+ check_conf(&rootmenu);
+- } while (conf_cnt);
++ } while (conf_cnt &&
++ (input_mode != dont_ask &&
++ input_mode != dont_ask_dont_tell));
+ break;
+ }
+
+@@ -598,5 +626,5 @@
+ exit(1);
+ }
+ }
+- return 0;
++ return return_value;
+ }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch
new file mode 100644
index 000000000..32b99a99b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.19-modesetting-by-default.patch
@@ -0,0 +1,11 @@
+--- linux-2.6.28/drivers/gpu/drm/i915/i915_drv.c~ 2009-02-20 21:36:06.000000000 -0800
++++ linux-2.6.28/drivers/gpu/drm/i915/i915_drv.c 2009-02-20 21:36:06.000000000 -0800
+@@ -35,7 +35,7 @@
+ #include "drm_pciids.h"
+ #include <linux/console.h>
+
+-static unsigned int i915_modeset = -1;
++static unsigned int i915_modeset = 1;
+ module_param_named(modeset, i915_modeset, int, 0400);
+
+ unsigned int i915_fbpercrtc = 0;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0002-fastboot-remove-wait-for-all-devices-before-mounti.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch
index 9ea6d62a6..02a4474ca 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0002-fastboot-remove-wait-for-all-devices-before-mounti.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-dont-wait-for-mouse.patch
@@ -22,16 +22,21 @@ Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
---
--- a/init/do_mounts.c 2009-01-07 18:42:10.000000000 -0800
+++ b/init/do_mounts.c 2009-01-07 18:43:02.000000000 -0800
-@@ -370,10 +370,12 @@ void __init prepare_namespace(void)
+@@ -370,14 +370,17 @@ void __init prepare_namespace(void)
ssleep(root_delay);
}
+#if 0
- /* wait for the known devices to complete their probing */
- while (driver_probe_done() != 0)
- msleep(100);
+ /*
+ * wait for the known devices to complete their probing
+ *
+ * Note: this is a potential source of long boot delays.
+ * For example, it is not atypical to wait 5 seconds here
+ * for the touchpad of a laptop to initialize.
+ */
+ wait_for_device_probe();
+#endif
- async_synchronize_full();
++ async_synchronize_full();
md_run_setup();
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch
new file mode 100644
index 000000000..a8d68338b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch
@@ -0,0 +1,56 @@
+From 2b5cde2b272f56ec67b56a2af8c067d42eff7328 Mon Sep 17 00:00:00 2001
+From: Li Peng <peng.li@intel.com>
+Date: Fri, 13 Mar 2009 10:25:07 +0800
+Subject: drm/i915: Fix LVDS dither setting
+
+Update bdb_lvds_options structure according to its defination in
+2D driver. Then we can parse and set 'lvds_dither' bit correctly
+on non-965 chips.
+
+Signed-off-by: Li Peng <peng.li@intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+---
+ drivers/gpu/drm/i915/intel_bios.h | 12 ++++++------
+ drivers/gpu/drm/i915/intel_lvds.c | 2 +-
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
+index 5ea715a..de621aa 100644
+--- a/drivers/gpu/drm/i915/intel_bios.h
++++ b/drivers/gpu/drm/i915/intel_bios.h
+@@ -162,13 +162,13 @@ struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+- u8 rsvd2:1;
+- u8 lvds_edid:1;
+- u8 pixel_dither:1;
+- u8 pfit_ratio_auto:1;
+- u8 pfit_gfx_mode_enhanced:1;
+- u8 pfit_text_mode_enhanced:1;
+ u8 pfit_mode:2;
++ u8 pfit_text_mode_enhanced:1;
++ u8 pfit_gfx_mode_enhanced:1;
++ u8 pfit_ratio_auto:1;
++ u8 pixel_dither:1;
++ u8 lvds_edid:1;
++ u8 rsvd2:1;
+ u8 rsvd4;
+ } __attribute__((packed));
+
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 0d211af..6619f26 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ pfit_control = 0;
+
+ if (!IS_I965G(dev)) {
+- if (dev_priv->panel_wants_dither)
++ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ }
+ else
+--
+1.6.1.3
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0006-Revert-drm-i915-GEM-on-PAE-has-problems-disable.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch
index 77c9fa6ef..850fa161e 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0006-Revert-drm-i915-GEM-on-PAE-has-problems-disable.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-drm-revert.patch
@@ -23,8 +23,8 @@ index afa8a12..553dd4b 100644
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
@@ -830,14 +830,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
-
- dev_priv->regs = ioremap(base, size);
+ "performance may suffer.\n");
+ }
-#ifdef CONFIG_HIGHMEM64G
- /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
@@ -34,9 +34,9 @@ index afa8a12..553dd4b 100644
- dev_priv->has_gem = 1;
-#endif
-
- i915_gem_load(dev);
-
- /* Init HWS */
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ if (IS_GM45(dev))
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b3cc473..adc972c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch
new file mode 100644
index 000000000..9291362f0
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch
@@ -0,0 +1,208 @@
+From b55de80e49892002a1878013ab9aee1a30970be6 Mon Sep 17 00:00:00 2001
+From: Bruce Allan <bruce.w.allan@intel.com>
+Date: Sat, 21 Mar 2009 13:25:25 -0700
+Subject: [PATCH] e100: add support for 82552 10/100 adapter
+
+This patch enables support for the new Intel 82552 adapter (new PHY paired
+with the existing MAC in the ICH7 chipset). No new features are added to
+the driver, however there are minor changes due to updated registers and a
+few workarounds for hardware errata.
+
+Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/e100.c | 93 +++++++++++++++++++++++++++++++++++++++++++---------
+ 1 files changed, 77 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/e100.c b/drivers/net/e100.c
+index 861d2ee..0504db9 100644
+--- a/drivers/net/e100.c
++++ b/drivers/net/e100.c
+@@ -167,7 +167,7 @@
+
+ #define DRV_NAME "e100"
+ #define DRV_EXT "-NAPI"
+-#define DRV_VERSION "3.5.23-k6"DRV_EXT
++#define DRV_VERSION "3.5.24-k2"DRV_EXT
+ #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
+ #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
+ #define PFX DRV_NAME ": "
+@@ -240,6 +240,7 @@ static struct pci_device_id e100_id_table[] = {
+ INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
++ INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
+ INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
+ INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
+@@ -275,6 +276,7 @@ enum phy {
+ phy_82562_em = 0x032002A8,
+ phy_82562_ek = 0x031002A8,
+ phy_82562_eh = 0x017002A8,
++ phy_82552_v = 0xd061004d,
+ phy_unknown = 0xFFFFFFFF,
+ };
+
+@@ -943,6 +945,22 @@ static int mdio_read(struct net_device *netdev, int addr, int reg)
+
+ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
+ {
++ struct nic *nic = netdev_priv(netdev);
++
++ if ((nic->phy == phy_82552_v) && (reg == MII_BMCR) &&
++ (data & (BMCR_ANRESTART | BMCR_ANENABLE))) {
++ u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
++
++ /*
++ * Workaround Si issue where sometimes the part will not
++ * autoneg to 100Mbps even when advertised.
++ */
++ if (advert & ADVERTISE_100FULL)
++ data |= BMCR_SPEED100 | BMCR_FULLDPLX;
++ else if (advert & ADVERTISE_100HALF)
++ data |= BMCR_SPEED100;
++ }
++
+ mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
+ }
+
+@@ -1276,16 +1294,12 @@ static int e100_phy_init(struct nic *nic)
+ if (addr == 32)
+ return -EAGAIN;
+
+- /* Selected the phy and isolate the rest */
+- for (addr = 0; addr < 32; addr++) {
+- if (addr != nic->mii.phy_id) {
+- mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
+- } else {
+- bmcr = mdio_read(netdev, addr, MII_BMCR);
+- mdio_write(netdev, addr, MII_BMCR,
+- bmcr & ~BMCR_ISOLATE);
+- }
+- }
++ /* Isolate all the PHY ids */
++ for (addr = 0; addr < 32; addr++)
++ mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
++ /* Select the discovered PHY */
++ bmcr &= ~BMCR_ISOLATE;
++ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
+
+ /* Get phy ID */
+ id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
+@@ -1303,7 +1317,18 @@ static int e100_phy_init(struct nic *nic)
+ mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
+ }
+
+- if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
++ if (nic->phy == phy_82552_v) {
++ u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
++
++ /* Workaround Si not advertising flow-control during autoneg */
++ advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
++ mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
++
++ /* Reset for the above changes to take effect */
++ bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
++ bmcr |= BMCR_RESET;
++ mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
++ } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
+ (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
+ !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ /* enable/disable MDI/MDI-X auto-switching. */
+@@ -2134,6 +2159,9 @@ err_clean_rx:
+ }
+
+ #define MII_LED_CONTROL 0x1B
++#define E100_82552_LED_OVERRIDE 0x19
++#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
++#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
+ static void e100_blink_led(unsigned long data)
+ {
+ struct nic *nic = (struct nic *)data;
+@@ -2143,10 +2171,19 @@ static void e100_blink_led(unsigned long data)
+ led_on_559 = 0x05,
+ led_on_557 = 0x07,
+ };
++ u16 led_reg = MII_LED_CONTROL;
++
++ if (nic->phy == phy_82552_v) {
++ led_reg = E100_82552_LED_OVERRIDE;
+
+- nic->leds = (nic->leds & led_on) ? led_off :
+- (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
+- mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
++ nic->leds = (nic->leds == E100_82552_LED_ON) ?
++ E100_82552_LED_OFF : E100_82552_LED_ON;
++ } else {
++ nic->leds = (nic->leds & led_on) ? led_off :
++ (nic->mac < mac_82559_D101M) ? led_on_557 :
++ led_on_559;
++ }
++ mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
+ mod_timer(&nic->blink_timer, jiffies + HZ / 4);
+ }
+
+@@ -2375,13 +2412,15 @@ static void e100_diag_test(struct net_device *netdev,
+ static int e100_phys_id(struct net_device *netdev, u32 data)
+ {
+ struct nic *nic = netdev_priv(netdev);
++ u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
++ MII_LED_CONTROL;
+
+ if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
+ data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+ mod_timer(&nic->blink_timer, jiffies);
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&nic->blink_timer);
+- mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
++ mdio_write(netdev, nic->mii.phy_id, led_reg, 0);
+
+ return 0;
+ }
+@@ -2686,6 +2725,9 @@ static void __devexit e100_remove(struct pci_dev *pdev)
+ }
+ }
+
++#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
++#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
++#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
+ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+ {
+ struct net_device *netdev = pci_get_drvdata(pdev);
+@@ -2698,6 +2740,15 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+ pci_save_state(pdev);
+
+ if ((nic->flags & wol_magic) | e100_asf(nic)) {
++ /* enable reverse auto-negotiation */
++ if (nic->phy == phy_82552_v) {
++ u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
++ E100_82552_SMARTSPEED);
++
++ mdio_write(netdev, nic->mii.phy_id,
++ E100_82552_SMARTSPEED, smartspeed |
++ E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
++ }
+ if (pci_enable_wake(pdev, PCI_D3cold, true))
+ pci_enable_wake(pdev, PCI_D3hot, true);
+ } else {
+@@ -2721,6 +2772,16 @@ static int e100_resume(struct pci_dev *pdev)
+ /* ack any pending wake events, disable PME */
+ pci_enable_wake(pdev, 0, 0);
+
++ /* disbale reverse auto-negotiation */
++ if (nic->phy == phy_82552_v) {
++ u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
++ E100_82552_SMARTSPEED);
++
++ mdio_write(netdev, nic->mii.phy_id,
++ E100_82552_SMARTSPEED,
++ smartspeed & ~(E100_82552_REV_ANEG));
++ }
++
+ netif_device_attach(netdev);
+ if (netif_running(netdev))
+ e100_up(nic);
+--
+1.5.5.1
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0005-fastboot-async-enable-default.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch
index 6eea4f6e1..6eea4f6e1 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0005-fastboot-async-enable-default.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-enable-async-by-default.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch
new file mode 100644
index 000000000..80d1edf0a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-even-faster-kms.patch
@@ -0,0 +1,20 @@
+--- linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c.org 2009-03-21 19:57:13.000000000 -0700
++++ linux-2.6.28/drivers/gpu/drm/i915/intel_lvds.c 2009-03-21 19:57:25.000000000 -0700
+@@ -221,7 +221,7 @@ static void intel_lvds_prepare(struct dr
+ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+- intel_lvds_set_power(dev, false);
++// intel_lvds_set_power(dev, false);
+ }
+
+ static void intel_lvds_commit( struct drm_encoder *encoder)
+@@ -233,7 +233,7 @@ static void intel_lvds_commit( struct dr
+ dev_priv->backlight_duty_cycle =
+ intel_lvds_get_max_backlight(dev);
+
+- intel_lvds_set_power(dev, true);
++// intel_lvds_set_power(dev, true);
+ }
+
+ static void intel_lvds_mode_set(struct drm_encoder *encoder,
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0003-fastboot-remove-duplicate-unpack_to_rootfs.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch
index ea4c617ed..ea4c617ed 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0003-fastboot-remove-duplicate-unpack_to_rootfs.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-initrd.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch
new file mode 100644
index 000000000..f213958bf
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-fast-kms.patch
@@ -0,0 +1,285 @@
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index 1c3a8c5..144624a 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -29,6 +29,8 @@
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
++#include <linux/async.h>
++
+ #include "drmP.h"
+ #include "drm_crtc.h"
+ #include "drm_crtc_helper.h"
+@@ -42,6 +44,8 @@ static struct drm_display_mode std_modes[] = {
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ };
+
++LIST_HEAD(drm_async_list);
++
+ /**
+ * drm_helper_probe_connector_modes - get complete set of display modes
+ * @dev: DRM device
+@@ -137,6 +141,26 @@ int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
+ }
+ EXPORT_SYMBOL(drm_helper_probe_connector_modes);
+
++int drm_helper_probe_connector_modes_fast(struct drm_device *dev, uint32_t maxX,
++ uint32_t maxY)
++{
++ struct drm_connector *connector;
++ int count = 0;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++ count += drm_helper_probe_single_connector_modes(connector,
++ maxX, maxY);
++ /*
++ * If we found a 'good' connector, we stop probing futher.
++ */
++ if (count > 0)
++ break;
++ }
++
++ return count;
++}
++EXPORT_SYMBOL(drm_helper_probe_connector_modes_fast);
++
+ static void drm_helper_add_std_modes(struct drm_device *dev,
+ struct drm_connector *connector)
+ {
+@@ -882,6 +906,24 @@ bool drm_helper_plugged_event(struct drm_device *dev)
+ /* FIXME: send hotplug event */
+ return true;
+ }
++
++static void async_notify_fb_changed(void *data, async_cookie_t cookie)
++{
++ struct drm_device *dev = data;
++ dev->mode_config.funcs->fb_changed(dev);
++}
++
++static void async_probe_hard(void *data, async_cookie_t cookie)
++{
++ struct drm_device *dev = data;
++ /* Need to wait for async_notify_fb_changed to be done */
++ async_synchronize_cookie_domain(cookie, &drm_async_list);
++ drm_helper_probe_connector_modes(dev,
++ dev->mode_config.max_width,
++ dev->mode_config.max_height);
++}
++
++
+ /**
+ * drm_initial_config - setup a sane initial connector configuration
+ * @dev: DRM device
+@@ -902,7 +944,7 @@ bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
+ struct drm_connector *connector;
+ int count = 0;
+
+- count = drm_helper_probe_connector_modes(dev,
++ count = drm_helper_probe_connector_modes_fast(dev,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+
+@@ -921,7 +963,9 @@ bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
+ drm_setup_crtcs(dev);
+
+ /* alert the driver fb layer */
+- dev->mode_config.funcs->fb_changed(dev);
++ async_schedule_domain(async_notify_fb_changed, dev, &drm_async_list);
++ /* probe further outputs */
++ async_schedule_domain(async_probe_hard, dev, &drm_async_list);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 14c7a23..ef52021 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -48,6 +48,7 @@
+
+ #include "drmP.h"
+ #include "drm_core.h"
++#include <linux/async.h>
+
+ static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+@@ -345,6 +346,9 @@ void drm_exit(struct drm_driver *driver)
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
++ /* make sure all async DRM operations are finished */
++ async_synchronize_full_domain(&drm_async_list);
++
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_cleanup(dev);
+
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index a839a28..069b189 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -588,20 +588,22 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
+ {
+ struct i2c_algo_bit_data *algo_data = adapter->algo_data;
+ unsigned char *edid = NULL;
++ int divider = 5;
+ int i, j;
+
+ algo_data->setscl(algo_data->data, 1);
+
+- for (i = 0; i < 1; i++) {
++ for (i = 0; i < 2; i++) {
+ /* For some old monitors we need the
+ * following process to initialize/stop DDC
+ */
++
+ algo_data->setsda(algo_data->data, 1);
+- msleep(13);
++ msleep(13 / divider);
+
+ algo_data->setscl(algo_data->data, 1);
+ for (j = 0; j < 5; j++) {
+- msleep(10);
++ msleep(10 / divider);
+ if (algo_data->getscl(algo_data->data))
+ break;
+ }
+@@ -609,31 +611,33 @@ static unsigned char *drm_ddc_read(struct i2c_adapter *adapter)
+ continue;
+
+ algo_data->setsda(algo_data->data, 0);
+- msleep(15);
++ msleep(15 / divider);
+ algo_data->setscl(algo_data->data, 0);
+- msleep(15);
++ msleep(15 / divider);
+ algo_data->setsda(algo_data->data, 1);
+- msleep(15);
++ msleep(15 / divider);
+
+ /* Do the real work */
+ edid = drm_do_probe_ddc_edid(adapter);
+ algo_data->setsda(algo_data->data, 0);
+ algo_data->setscl(algo_data->data, 0);
+- msleep(15);
++ msleep(15 / divider);
+
+ algo_data->setscl(algo_data->data, 1);
+ for (j = 0; j < 10; j++) {
+- msleep(10);
++ msleep(10 / divider);
+ if (algo_data->getscl(algo_data->data))
+ break;
+ }
+
+ algo_data->setsda(algo_data->data, 1);
+- msleep(15);
++ msleep(15 / divider);
+ algo_data->setscl(algo_data->data, 0);
+ algo_data->setsda(algo_data->data, 0);
++
+ if (edid)
+ break;
++ divider = 1;
+ }
+ /* Release the DDC lines when done or the Apple Cinema HD display
+ * will switch off
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index a283427..6f2eced 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -319,7 +319,7 @@ void
+ intel_wait_for_vblank(struct drm_device *dev)
+ {
+ /* Wait for 20ms, i.e. one cycle at 50hz. */
+- udelay(20000);
++ mdelay(20);
+ }
+
+ static int
+@@ -1466,12 +1466,12 @@ static void intel_setup_outputs(struct drm_device *dev)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+
+- intel_crt_init(dev);
+-
+- /* Set up integrated LVDS */
++ /* Set up integrated LVDS -- will skip if the lid is closed */
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ intel_lvds_init(dev);
+
++ intel_crt_init(dev);
++
+ if (IS_I9XX(dev)) {
+ int found;
+
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 957daef..22a74bd 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -81,6 +81,7 @@ struct intel_output {
+ int type;
+ struct intel_i2c_chan *i2c_bus; /* for control functions */
+ struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
++ struct edid *edid;
+ bool load_detect_temp;
+ bool needs_tv_clock;
+ void *dev_priv;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 0d211af..dc4fecc 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -336,6 +336,7 @@ static void intel_lvds_destroy(struct drm_connector *connector)
+ intel_i2c_destroy(intel_output->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
++ kfree(intel_output->edid);
+ kfree(connector);
+ }
+
+@@ -516,5 +517,6 @@ failed:
+ if (intel_output->ddc_bus)
+ intel_i2c_destroy(intel_output->ddc_bus);
+ drm_connector_cleanup(connector);
++ kfree(intel_output->edid);
+ kfree(connector);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
+index e42019e..8c0d5f6 100644
+--- a/drivers/gpu/drm/i915/intel_modes.c
++++ b/drivers/gpu/drm/i915/intel_modes.c
+@@ -70,13 +70,21 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
+ struct edid *edid;
+ int ret = 0;
+
++ if (intel_output->edid) {
++ printk(KERN_INFO "Skipping EDID probe due to cached edid\n");
++ return ret;
++ }
++
+ edid = drm_get_edid(&intel_output->base,
+ &intel_output->ddc_bus->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&intel_output->base,
+ edid);
+ ret = drm_add_edid_modes(&intel_output->base, edid);
+- kfree(edid);
++ if (intel_output->type == INTEL_OUTPUT_LVDS)
++ intel_output->edid = edid;
++ else
++ kfree(edid);
+ }
+
+ return ret;
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index e5f4ae9..69ce4f4 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -304,6 +304,7 @@ struct drm_vma_entry {
+ pid_t pid;
+ };
+
++extern struct list_head drm_async_list;
+ /**
+ * DMA buffer.
+ */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch
new file mode 100644
index 000000000..eda77564c
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-flip-ide-net.patch
@@ -0,0 +1,40 @@
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Mon, 26 Jan 2009 18:58:11 -0800
+Subject: [PATCH] ide/net: flip the order of SATA and network init
+
+this patch flips the order in which sata and network drivers are initialized.
+
+SATA probing takes quite a bit of time, and with the asynchronous infrastructure
+other drivers that run after it can execute in parallel. Network drivers do tend
+to take some real time talking to the hardware, so running these later is
+a good thing (the sata probe then runs concurrent)
+
+This saves about 15% of my kernels boot time.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ drivers/Makefile | 5 +++--
+ 1 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/Makefile b/drivers/Makefile
+index c1bf417..2618a61 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -36,13 +36,14 @@
+ obj-$(CONFIG_FB_INTEL) += video/intelfb/
+ obj-y += serial/
+ obj-$(CONFIG_PARPORT) += parport/
+-obj-y += base/ block/ misc/ mfd/ net/ media/
++obj-y += base/ block/ misc/ mfd/ media/
+ obj-$(CONFIG_NUBUS) += nubus/
+-obj-$(CONFIG_ATM) += atm/
+ obj-y += macintosh/
+ obj-$(CONFIG_IDE) += ide/
+ obj-$(CONFIG_SCSI) += scsi/
+ obj-$(CONFIG_ATA) += ata/
++obj-y += net/
++obj-$(CONFIG_ATM) += atm/
+ obj-$(CONFIG_FUSION) += message/
+ obj-$(CONFIG_FIREWIRE) += firewire/
+ obj-y += ieee1394/
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch
new file mode 100644
index 000000000..1ae825720
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch
@@ -0,0 +1,92 @@
+From 2c5ccde448ae5f4062802bcd6002f856acbd268f Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Tue, 3 Feb 2009 16:26:16 -0800
+Subject: [PATCH] input: introduce a tougher i8042.reset
+
+Some bad touchpads don't reset right the first time (MSI Wind U-100 for
+example). This patch will retry the reset up to 5 times.
+
+In addition, this patch also adds a module parameter to not treat
+reset failures as fatal to the usage of the device. This prevents
+a touchpad failure from also disabling the keyboard....
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ Documentation/kernel-parameters.txt | 2 ++
+ drivers/input/serio/i8042.c | 33 ++++++++++++++++++++++++---------
+ 2 files changed, 26 insertions(+), 9 deletions(-)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index ac613a6..a43e3bd 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -855,6 +855,8 @@ and is between 256 and 4096 characters. It is defined in the file
+ [HW] Frequency with which keyboard LEDs should blink
+ when kernel panics (default is 0.5 sec)
+ i8042.reset [HW] Reset the controller during init and cleanup
++ i8042.nonfatal [HW] Don't treat i8042.reset failures as fatal for the
++ device initialization.
+ i8042.unlock [HW] Unlock (ignore) the keylock
+
+ i810= [HW,DRM]
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 170f71e..2473a9a 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -47,6 +47,10 @@ static unsigned int i8042_reset;
+ module_param_named(reset, i8042_reset, bool, 0);
+ MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
+
++static unsigned int i8042_nonfatal;
++module_param_named(nonfatal, i8042_nonfatal, bool, 0);
++MODULE_PARM_DESC(reset, "Treat controller test failures as non-fatal.");
++
+ static unsigned int i8042_direct;
+ module_param_named(direct, i8042_direct, bool, 0);
+ MODULE_PARM_DESC(direct, "Put keyboard port into non-translated mode.");
+@@ -712,22 +716,33 @@ static int i8042_controller_check(void)
+ static int i8042_controller_selftest(void)
+ {
+ unsigned char param;
++ int i = 0;
+
+ if (!i8042_reset)
+ return 0;
+
+- if (i8042_command(&param, I8042_CMD_CTL_TEST)) {
+- printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n");
+- return -ENODEV;
+- }
++ /*
++ * We try this 5 times; on some really fragile systems this does not
++ * take the first time...
++ */
++ do {
++
++ if (i8042_command(&param, I8042_CMD_CTL_TEST)) {
++ printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n");
++ return -ENODEV;
++ }
++
++ if (param == I8042_RET_CTL_TEST)
++ return 0;
+
+- if (param != I8042_RET_CTL_TEST) {
+ printk(KERN_ERR "i8042.c: i8042 controller selftest failed. (%#x != %#x)\n",
+- param, I8042_RET_CTL_TEST);
+- return -EIO;
+- }
++ param, I8042_RET_CTL_TEST);
++ msleep(50);
++ } while (i++ < 5);
+
+- return 0;
++ if (i8042_nonfatal)
++ return 0;
++ return -EIO;
+ }
+
+ /*
+--
+1.6.0.6
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch
new file mode 100644
index 000000000..d7bd92151
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-jbd-longer-commit-interval.patch
@@ -0,0 +1,28 @@
+From 0143f8eb8afcaccba5a78196fb3db4361e0097a7 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Mon, 9 Feb 2009 21:25:32 -0800
+Subject: [PATCH] jbd: longer commit interval
+
+... 5 seconds is rather harsh on ssd's..
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ include/linux/jbd.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/include/linux/jbd.h b/include/linux/jbd.h
+index 64246dc..d64b7fd 100644
+--- a/include/linux/jbd.h
++++ b/include/linux/jbd.h
+@@ -46,7 +46,7 @@
+ /*
+ * The default maximum commit age, in seconds.
+ */
+-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
++#define JBD_DEFAULT_MAX_COMMIT_AGE 15
+
+ #ifdef CONFIG_JBD_DEBUG
+ /*
+--
+1.6.0.6
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch
new file mode 100644
index 000000000..663b36797
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-kms-after-sata.patch
@@ -0,0 +1,32 @@
+--- linux-2.6.28/drivers/Makefile~ 2009-03-21 21:23:28.000000000 -0700
++++ linux-2.6.28/drivers/Makefile 2009-03-21 21:23:28.000000000 -0700
+@@ -25,15 +25,8 @@
+ # default.
+ obj-y += char/
+
+-# gpu/ comes after char for AGP vs DRM startup
+-obj-y += gpu/
+-
+ obj-$(CONFIG_CONNECTOR) += connector/
+
+-# i810fb and intelfb depend on char/agp/
+-obj-$(CONFIG_FB_I810) += video/i810/
+-obj-$(CONFIG_FB_INTEL) += video/intelfb/
+-
+ obj-y += serial/
+ obj-$(CONFIG_PARPORT) += parport/
+ obj-y += base/ block/ misc/ mfd/ media/
+@@ -43,6 +36,13 @@
+ obj-$(CONFIG_SCSI) += scsi/
+ obj-$(CONFIG_ATA) += ata/
+ obj-y += net/
++
++# gpu/ comes after char for AGP vs DRM startup
++obj-y += gpu/
++# i810fb and intelfb depend on char/agp/
++obj-$(CONFIG_FB_I810) += video/i810/
++obj-$(CONFIG_FB_INTEL) += video/intelfb/
++
+ obj-$(CONFIG_ATM) += atm/
+ obj-$(CONFIG_FUSION) += message/
+ obj-$(CONFIG_FIREWIRE) += firewire/
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch
new file mode 100644
index 000000000..e7fded41e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-msiwind.patch
@@ -0,0 +1,57 @@
+Patch to get the touchpad on the MSI Wind U-100 working
+
+
+--- linux-2.6.28/drivers/input/serio/i8042-x86ia64io.h.org 2009-02-01 18:31:29.000000000 -0800
++++ linux-2.6.28/drivers/input/serio/i8042-x86ia64io.h 2009-02-01 18:35:26.000000000 -0800
+@@ -378,6 +378,13 @@ static struct dmi_system_id __initdata i
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ },
+ },
++ {
++ .ident = "MSI Wind U-100",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
++ },
++ },
+ { }
+ };
+ #endif
+@@ -448,6 +455,25 @@ static struct dmi_system_id __initdata i
+ { }
+ };
+
++static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
++ {
++ .ident = "MSI Wind U-100",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "U-100"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
++ },
++ },
++ {
++ .ident = "LG Electronics X110",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "X110"),
++ DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."),
++ },
++ },
++ { }
++};
++
++
+ #endif /* CONFIG_X86 */
+
+ #ifdef CONFIG_PNP
+@@ -564,6 +583,11 @@ static int __init i8042_pnp_init(void)
+ i8042_nopnp = 1;
+ #endif
+
++ if (dmi_check_system(i8042_dmi_reset_table)) {
++ i8042_reset = 1;
++ i8042_nonfatal = 1;
++ }
++
+ if (i8042_nopnp) {
+ printk(KERN_INFO "i8042: PNP detection disabled\n");
+ return 0;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch
new file mode 100644
index 000000000..77e553956
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-agp.patch
@@ -0,0 +1,83 @@
+From eaf05431b9ea8676d23106e6373b7d2b8ff2d97d Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shaohua.li@intel.com>
+Date: Mon, 23 Feb 2009 15:19:16 +0800
+Subject: agp/intel: Add support for new intel chipset.
+
+This is a G33-like desktop and mobile chipset.
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+---
+ drivers/char/agp/intel-agp.c | 21 ++++++++++++++++++---
+ 1 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index c771418..0232cfc 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -26,6 +26,10 @@
+ #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
+ #define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
+ #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
++#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
++#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
++#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
++#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
+ #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
+ #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
+ #define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
+@@ -60,7 +64,12 @@
+
+ #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
+- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
++
++#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+
+ #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
+@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
+ size = 512;
+ }
+ size += 4; /* add in BIOS popup space */
+- } else if (IS_G33) {
++ } else if (IS_G33 && !IS_IGD) {
+ /* G33's GTT size defined in gmch_ctrl */
+ switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
+ case G33_PGETBL_SIZE_1M:
+@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
+ size = 512;
+ }
+ size += 4;
+- } else if (IS_G4X) {
++ } else if (IS_G4X || IS_IGD) {
+ /* On 4 series hardware, GTT stolen is separate from graphics
+ * stolen, ignore it in stolen gtt entries counting. However,
+ * 4KB of the stolen memory doesn't get mapped to the GTT.
+@@ -2159,6 +2168,10 @@ static const struct intel_driver_description {
+ NULL, &intel_g33_driver },
+ { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
+ NULL, &intel_g33_driver },
++ { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
++ NULL, &intel_g33_driver },
++ { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
++ NULL, &intel_g33_driver },
+ { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
+ "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
+@@ -2353,6 +2366,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
+ ID(PCI_DEVICE_ID_INTEL_82945G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
+ ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
++ ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
++ ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
+ ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
+ ID(PCI_DEVICE_ID_INTEL_82G35_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
+--
+1.6.1.3
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch
new file mode 100644
index 000000000..1e7b86694
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-drm.patch
@@ -0,0 +1,336 @@
+From 8b941bea1d0fe0c5cf0de938cd0bd89ce6640dbb Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shaohua.li@intel.com>
+Date: Mon, 23 Feb 2009 15:19:19 +0800
+Subject: drm/i915: Add support for new G33-like chipset.
+
+This chip is nearly the same, but has new clock settings required.
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+---
+ drivers/gpu/drm/i915/i915_drv.h | 10 +++-
+ drivers/gpu/drm/i915/i915_reg.h | 4 +
+ drivers/gpu/drm/i915/intel_display.c | 111 +++++++++++++++++++++++++++++-----
+ include/drm/drm_pciids.h | 2 +
+ 4 files changed, 109 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 0e27854..36d6bc3 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -787,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+ (dev)->pci_device == 0x2E22 || \
+ IS_GM45(dev))
+
++#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
++#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
++#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
++
+ #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
+ (dev)->pci_device == 0x29B2 || \
+- (dev)->pci_device == 0x29D2)
++ (dev)->pci_device == 0x29D2 || \
++ (IS_IGD(dev)))
+
+ #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+
+ #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
++ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
++ IS_IGD(dev))
+
+ #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
+ /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 9d6539a..f07d315 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -358,6 +358,7 @@
+ #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+ #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
++#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
+
+ #define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
+ #define I915_CRC_ERROR_ENABLE (1UL<<29)
+@@ -434,6 +435,7 @@
+ */
+ #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+ #define DPLL_FPA01_P1_POST_DIV_SHIFT 16
++#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
+ /* i830, required in DVO non-gang */
+ #define PLL_P2_DIVIDE_BY_4 (1 << 23)
+ #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+@@ -500,10 +502,12 @@
+ #define FPB0 0x06048
+ #define FPB1 0x0604c
+ #define FP_N_DIV_MASK 0x003f0000
++#define FP_N_IGD_DIV_MASK 0x00ff0000
+ #define FP_N_DIV_SHIFT 16
+ #define FP_M1_DIV_MASK 0x00003f00
+ #define FP_M1_DIV_SHIFT 8
+ #define FP_M2_DIV_MASK 0x0000003f
++#define FP_M2_IGD_DIV_MASK 0x000000ff
+ #define FP_M2_DIV_SHIFT 0
+ #define DPLL_TEST 0x606c
+ #define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index a283427..1702564 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -90,18 +90,32 @@ typedef struct {
+ #define I9XX_DOT_MAX 400000
+ #define I9XX_VCO_MIN 1400000
+ #define I9XX_VCO_MAX 2800000
++#define IGD_VCO_MIN 1700000
++#define IGD_VCO_MAX 3500000
+ #define I9XX_N_MIN 1
+ #define I9XX_N_MAX 6
++/* IGD's Ncounter is a ring counter */
++#define IGD_N_MIN 3
++#define IGD_N_MAX 6
+ #define I9XX_M_MIN 70
+ #define I9XX_M_MAX 120
++#define IGD_M_MIN 2
++#define IGD_M_MAX 256
+ #define I9XX_M1_MIN 10
+ #define I9XX_M1_MAX 22
+ #define I9XX_M2_MIN 5
+ #define I9XX_M2_MAX 9
++/* IGD M1 is reserved, and must be 0 */
++#define IGD_M1_MIN 0
++#define IGD_M1_MAX 0
++#define IGD_M2_MIN 0
++#define IGD_M2_MAX 254
+ #define I9XX_P_SDVO_DAC_MIN 5
+ #define I9XX_P_SDVO_DAC_MAX 80
+ #define I9XX_P_LVDS_MIN 7
+ #define I9XX_P_LVDS_MAX 98
++#define IGD_P_LVDS_MIN 7
++#define IGD_P_LVDS_MAX 112
+ #define I9XX_P1_MIN 1
+ #define I9XX_P1_MAX 8
+ #define I9XX_P2_SDVO_DAC_SLOW 10
+@@ -115,6 +129,8 @@ typedef struct {
+ #define INTEL_LIMIT_I8XX_LVDS 1
+ #define INTEL_LIMIT_I9XX_SDVO_DAC 2
+ #define INTEL_LIMIT_I9XX_LVDS 3
++#define INTEL_LIMIT_IGD_SDVO_DAC 4
++#define INTEL_LIMIT_IGD_LVDS 5
+
+ static const intel_limit_t intel_limits[] = {
+ { /* INTEL_LIMIT_I8XX_DVO_DAC */
+@@ -168,6 +184,32 @@ static const intel_limit_t intel_limits[] = {
+ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
+ },
++ { /* INTEL_LIMIT_IGD_SDVO */
++ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
++ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
++ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
++ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
++ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
++ .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
++ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
++ .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
++ },
++ { /* INTEL_LIMIT_IGD_LVDS */
++ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
++ .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
++ .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
++ .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
++ .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
++ .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
++ .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
++ .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
++ /* IGD only supports single-channel mode. */
++ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
++ },
++
+ };
+
+ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+@@ -175,11 +217,16 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+ struct drm_device *dev = crtc->dev;
+ const intel_limit_t *limit;
+
+- if (IS_I9XX(dev)) {
++ if (IS_I9XX(dev) && !IS_IGD(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ else
+ limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
++ } else if (IS_IGD(dev)) {
++ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
++ else
++ limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
+ } else {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
+@@ -189,8 +236,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+ return limit;
+ }
+
+-static void intel_clock(int refclk, intel_clock_t *clock)
++/* m1 is reserved as 0 in IGD, n is a ring counter */
++static void igd_clock(int refclk, intel_clock_t *clock)
+ {
++ clock->m = clock->m2 + 2;
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / clock->n;
++ clock->dot = clock->vco / clock->p;
++}
++
++static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
++{
++ if (IS_IGD(dev)) {
++ igd_clock(refclk, clock);
++ return;
++ }
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+@@ -226,6 +286,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
+ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+ {
+ const intel_limit_t *limit = intel_limit (crtc);
++ struct drm_device *dev = crtc->dev;
+
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid ("p1 out of range\n");
+@@ -235,7 +296,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+ INTELPllInvalid ("m2 out of range\n");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ INTELPllInvalid ("m1 out of range\n");
+- if (clock->m1 <= clock->m2)
++ if (clock->m1 <= clock->m2 && !IS_IGD(dev))
+ INTELPllInvalid ("m1 <= m2\n");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ INTELPllInvalid ("m out of range\n");
+@@ -289,15 +350,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
+ memset (best_clock, 0, sizeof (*best_clock));
+
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+- for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
+- clock.m2 <= limit->m2.max; clock.m2++) {
++ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
++ /* m1 is always 0 in IGD */
++ if (clock.m2 >= clock.m1 && !IS_IGD(dev))
++ break;
+ for (clock.n = limit->n.min; clock.n <= limit->n.max;
+ clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
+ int this_err;
+
+- intel_clock(refclk, &clock);
++ intel_clock(dev, refclk, &clock);
+
+ if (!intel_PLL_is_valid(crtc, &clock))
+ continue;
+@@ -634,7 +697,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
+ return 400000;
+ else if (IS_I915G(dev))
+ return 333000;
+- else if (IS_I945GM(dev) || IS_845G(dev))
++ else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
+ return 200000;
+ else if (IS_I915GM(dev)) {
+ u16 gcfgc = 0;
+@@ -782,7 +845,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ return -EINVAL;
+ }
+
+- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
++ if (IS_IGD(dev))
++ fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
++ else
++ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (IS_I9XX(dev)) {
+@@ -799,7 +865,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ }
+
+ /* compute bitmask from p1 value */
+- dpll |= (1 << (clock.p1 - 1)) << 16;
++ if (IS_IGD(dev))
++ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
++ else
++ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ switch (clock.p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+@@ -1279,10 +1348,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+ fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+- clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+- clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
++ if (IS_IGD(dev)) {
++ clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
++ clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
++ } else {
++ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
++ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
++ }
++
+ if (IS_I9XX(dev)) {
+- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
++ if (IS_IGD(dev))
++ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
++ else
++ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+@@ -1301,7 +1380,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+ }
+
+ /* XXX: Handle the 100Mhz refclk */
+- intel_clock(96000, &clock);
++ intel_clock(dev, 96000, &clock);
+ } else {
+ bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+
+@@ -1313,9 +1392,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+- intel_clock(66000, &clock);
++ intel_clock(dev, 66000, &clock);
+ } else
+- intel_clock(48000, &clock);
++ intel_clock(dev, 48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+@@ -1328,7 +1407,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+ else
+ clock.p2 = 2;
+
+- intel_clock(48000, &clock);
++ intel_clock(dev, 48000, &clock);
+ }
+ }
+
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 5165f24..76c4c82 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -418,4 +418,6 @@
+ {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
++ {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
++ {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0, 0, 0}
+--
+1.6.1.3
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch
new file mode 100644
index 000000000..c16350f9f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-gtt-size.patch
@@ -0,0 +1,21 @@
+IGD device only has last 1 page used by GTT. this should align to AGP gart code.
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+---
+ drivers/gpu/drm/i915/i915_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux/drivers/gpu/drm/i915/i915_dma.c
+===================================================================
+--- linux.orig/drivers/gpu/drm/i915/i915_dma.c 2009-03-13 15:36:12.000000000 +0800
++++ linux/drivers/gpu/drm/i915/i915_dma.c 2009-03-13 15:37:26.000000000 +0800
+@@ -880,7 +880,7 @@ static int i915_probe_agp(struct drm_dev
+ * Some of the preallocated space is taken by the GTT
+ * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
+ */
+- if (IS_G4X(dev))
++ if (IS_G4X(dev) || IS_IGD(dev))
+ overhead = 4096;
+ else
+ overhead = (*aperture_size / 1024) + 4096;
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch
new file mode 100644
index 000000000..00a6cf481
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-pnv-fix-i2c.patch
@@ -0,0 +1,38 @@
+In IGD, DPCUNIT_CLOCK_GATE_DISABLE bit should be set, otherwise i2c
+access will be wrong.
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+---
+ drivers/gpu/drm/i915/i915_reg.h | 1 +
+ drivers/gpu/drm/i915/intel_display.c | 5 +++++
+ 2 files changed, 6 insertions(+)
+
+Index: linux/drivers/gpu/drm/i915/i915_reg.h
+===================================================================
+--- linux.orig/drivers/gpu/drm/i915/i915_reg.h 2009-03-16 14:18:27.000000000 +0800
++++ linux/drivers/gpu/drm/i915/i915_reg.h 2009-03-16 14:28:09.000000000 +0800
+@@ -523,6 +523,7 @@
+ #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+ #define D_STATE 0x6104
+ #define CG_2D_DIS 0x6200
++#define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
+ #define CG_3D_DIS 0x6204
+
+ /*
+Index: linux/drivers/gpu/drm/i915/intel_display.c
+===================================================================
+--- linux.orig/drivers/gpu/drm/i915/intel_display.c 2009-03-16 14:16:11.000000000 +0800
++++ linux/drivers/gpu/drm/i915/intel_display.c 2009-03-16 14:27:46.000000000 +0800
+@@ -1545,6 +1545,11 @@ static void intel_setup_outputs(struct d
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+
++ /* When using bit bashing for I2C, this bit needs to be set to 1 */
++ if (IS_IGD(dev))
++ I915_WRITE(CG_2D_DIS,
++ I915_READ(CG_2D_DIS) | DPCUNIT_CLOCK_GATE_DISABLE);
++
+ intel_crt_init(dev);
+
+ /* Set up integrated LVDS */
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch
new file mode 100644
index 000000000..100376553
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch
@@ -0,0 +1,28 @@
+diff --git a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c
+index 67934c0..8fc5221 100644
+--- a/drivers/gpu/drm/psb/psb_fb.c
++++ b/drivers/gpu/drm/psb/psb_fb.c
+@@ -896,8 +896,10 @@ static int psbfb_kms_off(struct drm_device *dev, int suspend)
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ struct fb_info *info = fb->fbdev;
+
+- if (suspend)
++ if (suspend) {
+ fb_set_suspend(info, 1);
++ psbfb_blank(FB_BLANK_POWERDOWN, info);
++ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+@@ -928,8 +930,10 @@ static int psbfb_kms_on(struct drm_device *dev, int resume)
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ struct fb_info *info = fb->fbdev;
+
+- if (resume)
++ if (resume) {
+ fb_set_suspend(info, 0);
++ psbfb_blank(FB_BLANK_UNBLANK, info);
++ }
+
+ }
+ mutex_unlock(&dev->mode_config.mutex);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch
new file mode 100644
index 000000000..4ffda75e1
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-psb-driver.patch
@@ -0,0 +1,37524 @@
+diff -uNr a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+--- a/drivers/gpu/drm/Kconfig 2009-03-23 15:12:14.000000000 -0800
++++ b/drivers/gpu/drm/Kconfig 2009-04-07 13:28:38.000000000 -0700
+@@ -122,3 +122,14 @@
+ help
+ Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+ chipset. If M is selected the module will be called savage.
++
++config DRM_PSB
++ tristate "Intel Poulsbo/Moorestown"
++ depends on DRM && PCI
++ select FB_CFB_COPYAREA
++ select FB_CFB_FILLRECT
++ select FB_CFB_IMAGEBLIT
++ help
++ Choose this option if you have a Poulsbo or Moorestown platform.
++ If M is selected the module will be called psb.
++
+diff -uNr a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+--- a/drivers/gpu/drm/Makefile 2009-03-23 15:12:14.000000000 -0800
++++ b/drivers/gpu/drm/Makefile 2009-04-07 13:28:38.000000000 -0700
+@@ -25,4 +25,5 @@
+ obj-$(CONFIG_DRM_SIS) += sis/
+ obj-$(CONFIG_DRM_SAVAGE)+= savage/
+ obj-$(CONFIG_DRM_VIA) +=via/
++obj-$(CONFIG_DRM_PSB) +=psb/
+
+diff -uNr a/drivers/gpu/drm/psb/lnc_topaz.c b/drivers/gpu/drm/psb/lnc_topaz.c
+--- a/drivers/gpu/drm/psb/lnc_topaz.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/lnc_topaz.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,695 @@
++/**
++ * file lnc_topaz.c
++ * TOPAZ I/O operations and IRQ handling
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++/* include headers */
++/* #define DRM_DEBUG_CODE 2 */
++
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "lnc_topaz.h"
++
++#include <linux/io.h>
++#include <linux/delay.h>
++
++static int drm_psb_ospmxxx = 0x0;
++
++/* static function define */
++static int lnc_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd);
++static int lnc_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq);
++static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
++static int lnc_topaz_dequeue_send(struct drm_device *dev);
++static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence);
++
++void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t clr_flag = lnc_topaz_queryirq(dev);
++
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ /* ignore non-SYNC interrupts */
++ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
++ return;
++
++ dev_priv->topaz_current_sequence =
++ *(uint32_t *)dev_priv->topaz_sync_addr;
++
++ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n",
++ dev_priv->topaz_current_sequence,
++ dev_priv->sequence[LNC_ENGINE_ENCODE]);
++
++ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
++
++ dev_priv->topaz_busy = 1;
++ lnc_topaz_dequeue_send(dev);
++}
++
++static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long irq_flags;
++ int ret = 0;
++ void *cmd;
++ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
++
++ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
++
++ /* # lock topaz's mutex [msvdx_mutex] */
++ mutex_lock(&dev_priv->topaz_mutex);
++
++ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", dev_priv->topaz_busy);
++
++ if (dev_priv->topaz_fw_loaded == 0) {
++ /* #.# load fw to driver */
++ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
++ ret = topaz_init_fw(dev);
++ if (ret != 0) {
++ mutex_unlock(&dev_priv->topaz_mutex);
++
++ /* FIXME: find a proper return value */
++ DRM_ERROR("TOPAX:load /lib/firmware/topaz_fw.bin fail,"
++ "ensure udevd is configured correctly!\n");
++
++ return -EFAULT;
++ }
++ dev_priv->topaz_fw_loaded = 1;
++ } else {
++ /* OSPM power state change */
++ /* FIXME: why here? why not in the NEW_CODEC case? */
++ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX) {
++ psb_power_up_topaz(dev);
++ lnc_topaz_restore_mtx_state(dev);
++ }
++ }
++
++ /* # schedule watchdog */
++ /* psb_schedule_watchdog(dev_priv); */
++
++ /* # spin lock irq save [msvdx_lock] */
++ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
++
++ /* # if topaz need to reset, reset it */
++ if (dev_priv->topaz_needs_reset) {
++ /* #.# reset it */
++ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
++ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
++
++ if (lnc_topaz_reset(dev_priv)) {
++ mutex_unlock(&dev_priv->topaz_mutex);
++ ret = -EBUSY;
++ DRM_ERROR("TOPAZ: reset failed.\n");
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
++
++ /* #.# reset any related flags */
++ dev_priv->topaz_needs_reset = 0;
++ dev_priv->topaz_busy = 0;
++ PSB_DEBUG_GENERAL("XXX: does we need idle flag??\n");
++ dev_priv->topaz_start_idle = 0;
++
++ /* #.# init topaz */
++ lnc_topaz_init(dev);
++
++ /* avoid another fw init */
++ dev_priv->topaz_fw_loaded = 1;
++
++ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
++ }
++
++ if (!dev_priv->topaz_busy) {
++ /* # direct map topaz command if topaz is free */
++ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
++ sequence);
++
++ dev_priv->topaz_busy = 1;
++ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
++
++ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
++ mutex_unlock(&dev_priv->topaz_mutex);
++ return ret;
++ }
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
++ sequence);
++ cmd = NULL;
++
++ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
++
++ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (cmd == NULL || ret) {
++ DRM_ERROR("TOPAZ: map command for save fialed\n");
++ mutex_unlock(&dev_priv->topaz_mutex);
++ return ret;
++ }
++
++ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
++ if (ret)
++ DRM_ERROR("TOPAZ: save command failed\n");
++ }
++
++ /* OPSM D0IX power state change */
++ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
++ lnc_topaz_save_mtx_state(dev);
++
++ mutex_unlock(&dev_priv->topaz_mutex);
++
++ return ret;
++}
++
++static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct lnc_topaz_cmd_queue *topaz_cmd;
++ unsigned long irq_flags;
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
++ sequence);
++
++ topaz_cmd = drm_calloc(1, sizeof(struct lnc_topaz_cmd_queue),
++ DRM_MEM_DRIVER);
++ if (topaz_cmd == NULL) {
++ mutex_unlock(&dev_priv->topaz_mutex);
++ DRM_ERROR("TOPAZ: out of memory....\n");
++ return -ENOMEM;
++ }
++
++ topaz_cmd->cmd = cmd;
++ topaz_cmd->cmd_size = cmd_size;
++ topaz_cmd->sequence = sequence;
++
++ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
++ list_add_tail(&topaz_cmd->head, &dev_priv->topaz_queue);
++ if (!dev_priv->topaz_busy) {
++ /* dev_priv->topaz_busy = 1; */
++ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
++ lnc_topaz_dequeue_send(dev);
++ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
++ }
++
++ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
++
++ return 0;
++}
++
++
++int lnc_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence = NULL;
++ int ret;
++
++ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, fence);
++ if (ret)
++ return ret;
++
++#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
++ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
++ validate_list, fence_arg, &fence);
++
++ if (fence)
++ ttm_fence_object_unref(&fence);
++#endif
++
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++
++ return 0;
++}
++
++static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sync_cmd[3];
++ int count = 10000;
++#if 0
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[LNC_ENGINE_ENCODE];
++ unsigned long irq_flags;
++#endif
++ uint32_t *sync_p = (uint32_t *)dev_priv->topaz_sync_addr;
++
++ /* insert a SYNC command here */
++ dev_priv->topaz_sync_cmd_seq = (1 << 15) | dev_priv->topaz_cmd_seq++;
++ sync_cmd[0] = MTX_CMDID_SYNC | (3 << 8) |
++ (dev_priv->topaz_sync_cmd_seq << 16);
++ sync_cmd[1] = dev_priv->topaz_sync_offset;
++ sync_cmd[2] = sync_seq;
++
++ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x),"
++ "sync_seq (0x%08x)\n",
++ dev_priv->topaz_sync_cmd_seq, sync_seq);
++
++ lnc_mtx_send(dev_priv, sync_cmd);
++
++#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
++ /* # poll topaz register for certain times */
++ while (count && *sync_p != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
++ sync_seq, *sync_p);
++ return -EBUSY;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
++
++ dev_priv->topaz_busy = 0;
++
++ /* XXX: check psb_fence_handler is suitable for topaz */
++ dev_priv->topaz_current_sequence = *sync_p;
++#if 0
++ write_lock_irqsave(&fc->lock, irq_flags);
++ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
++ dev_priv->topaz_current_sequence,
++ _PSB_FENCE_TYPE_EXE, 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++#endif
++#endif
++ return 0;
++}
++
++int
++lnc_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd)
++{
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ struct ttm_bo_kmap_obj cmd_kmap;
++ bool is_iomem;
++ int ret;
++ unsigned char *cmd_start, *tmp;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
++ &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
++ return ret;
++ }
++ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
++ &is_iomem) + cmd_page_offset;
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
++ tmp = drm_calloc(1, cmd_size, DRM_MEM_DRIVER);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *topaz_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
++ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: commit commands failed.\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
++ cmd_size, sequence, copy_cmd);
++
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++int
++lnc_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++ unsigned char *command = (unsigned char *) cmd;
++ struct topaz_cmd_header *cur_cmd_header;
++ uint32_t cur_cmd_size, cur_cmd_id;
++ uint32_t codec;
++
++ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
++
++ while (cmd_size > 0) {
++ cur_cmd_header = (struct topaz_cmd_header *) command;
++ cur_cmd_size = cur_cmd_header->size * 4;
++ cur_cmd_id = cur_cmd_header->id;
++
++ switch (cur_cmd_id) {
++ case MTX_CMDID_SW_NEW_CODEC:
++ codec = *((uint32_t *) cmd + 1);
++
++ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
++ codec_to_string(codec), codec);
++ if (topaz_setup_fw(dev, codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++
++ dev_priv->topaz_cur_codec = codec;
++ break;
++
++ case MTX_CMDID_SW_ENTER_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ break;
++
++ case MTX_CMDID_SW_LEAVE_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ break;
++
++ /* ordinary commmand */
++ case MTX_CMDID_START_PIC:
++ /* XXX: specially handle START_PIC hw command */
++ CCB_CTRL_SET_QP(dev_priv,
++ *(command + cur_cmd_size - 4));
++ /* strip the QP parameter (it's software arg) */
++ cur_cmd_header->size--;
++ default:
++ cur_cmd_header->seq = 0x7fff &
++ dev_priv->topaz_cmd_seq++;
++
++ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
++ " seq (0x%04x)\n",
++ cmd_to_string(cur_cmd_id),
++ cur_cmd_size, cur_cmd_header->seq);
++ ret = lnc_mtx_send(dev_priv, command);
++ if (ret) {
++ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
++ goto out;
++ }
++ break;
++ }
++
++ command += cur_cmd_size;
++ cmd_size -= cur_cmd_size;
++ }
++ lnc_topaz_sync(dev, sync_seq);
++out:
++ return ret;
++}
++
++static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
++{
++ struct topaz_cmd_header *cur_cmd_header =
++ (struct topaz_cmd_header *) cmd;
++ uint32_t cmd_size = cur_cmd_header->size;
++ uint32_t read_index, write_index;
++ const uint32_t *cmd_pointer = (uint32_t *) cmd;
++
++ int ret = 0;
++
++ /* <msvdx does> # enable all clock */
++
++ write_index = dev_priv->topaz_cmd_windex;
++ if (write_index + cmd_size + 1 > dev_priv->topaz_ccb_size) {
++ int free_space = dev_priv->topaz_ccb_size - write_index;
++
++ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
++ if (free_space > 0) {
++ struct topaz_cmd_header pad_cmd;
++
++ pad_cmd.id = MTX_CMDID_NULL;
++ pad_cmd.size = free_space;
++ pad_cmd.seq = 0x7fff & dev_priv->topaz_cmd_seq++;
++
++ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
++ " size(%d),seq (0x%04x)\n",
++ pad_cmd.size, pad_cmd.seq);
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
++ TOPAZ_END_CCB(dev_priv, 1);
++ }
++ POLL_WB_RINDEX(dev_priv, 0);
++ if (ret == 0)
++ dev_priv->topaz_cmd_windex = 0;
++ else {
++ DRM_ERROR("TOPAZ: poll rindex timeout\n");
++ return ret; /* HW may hang, need reset */
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
++ }
++
++ read_index = CCB_CTRL_RINDEX(dev_priv);/* temperily use CCB CTRL */
++ write_index = dev_priv->topaz_cmd_windex;
++
++ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
++ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
++ TOPAZ_BEGIN_CCB(dev_priv);
++ while (cmd_size > 0) {
++ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
++ --cmd_size;
++ }
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ POLL_WB_RINDEX(dev_priv, dev_priv->topaz_cmd_windex);
++
++#if 0
++ DRM_UDELAY(1000);
++ lnc_topaz_clearirq(dev,
++ lnc_topaz_queryirq(dev));
++ LNC_TRACEL("TOPAZ: after clear, query again\n");
++ lnc_topaz_queryirq(dev_priv);
++#endif
++
++ return ret;
++}
++
++int lnc_topaz_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct lnc_topaz_cmd_queue *topaz_cmd = NULL;
++ int ret;
++
++ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
++
++ if (list_empty(&dev_priv->topaz_queue)) {
++ dev_priv->topaz_busy = 0;
++ return 0;
++ }
++
++ topaz_cmd = list_first_entry(&dev_priv->topaz_queue,
++ struct lnc_topaz_cmd_queue, head);
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
++ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
++ topaz_cmd->sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n");
++ ret = -EINVAL;
++ }
++
++ list_del(&topaz_cmd->head);
++ kfree(topaz_cmd->cmd);
++ drm_free(topaz_cmd, sizeof(struct lnc_topaz_cmd_queue),
++ DRM_MEM_DRIVER);
++
++ return ret;
++}
++
++void
++lnc_topaz_lockup(struct drm_psb_private *dev_priv,
++ int *topaz_lockup, int *topaz_idle)
++{
++ unsigned long irq_flags;
++ uint32_t tmp;
++
++ /* if have printk in this function, you will have plenties here */
++ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
++ *topaz_lockup = 0;
++ *topaz_idle = 1;
++
++ if (!dev_priv->has_topaz) {
++ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
++ return;
++ }
++
++ tmp = dev_priv->topaz_current_sequence
++ - dev_priv->sequence[LNC_ENGINE_ENCODE];
++ if (tmp > 0x0FFFFFFF) {
++ if (dev_priv->topaz_current_sequence ==
++ dev_priv->topaz_last_sequence) {
++ *topaz_lockup = 1;
++ } else {
++ dev_priv->topaz_last_sequence =
++ dev_priv->topaz_current_sequence;
++ *topaz_idle = 0;
++ }
++
++ if (dev_priv->topaz_start_idle)
++ dev_priv->topaz_start_idle = 0;
++ } else {
++ if (dev_priv->topaz_needs_reset == 0) {
++ if (dev_priv->topaz_start_idle &&
++ (dev_priv->topaz_finished_sequence
++ == dev_priv->topaz_current_sequence)) {
++ if (time_after_eq(jiffies,
++ dev_priv->topaz_idle_start_jiffies +
++ TOPAZ_MAX_IDELTIME)) {
++
++ /* XXX: disable clock <msvdx does> */
++ dev_priv->topaz_needs_reset = 1;
++ } else
++ *topaz_idle = 0;
++ } else {
++ dev_priv->topaz_start_idle = 1;
++ dev_priv->topaz_idle_start_jiffies = jiffies;
++ dev_priv->topaz_finished_sequence =
++ dev_priv->topaz_current_sequence;
++ *topaz_idle = 0;
++ }
++ }
++ }
++ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
++}
++
++
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
++{
++ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
++ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
++}
++
++/* power up msvdx, OSPM function */
++int psb_power_up_topaz(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ if (dev_priv->topaz_power_state == LNC_TOPAZ_POWERON)
++ return 0;
++
++ psb_up_island_power(dev, PSB_VIDEO_ENC_ISLAND);
++
++ PSB_DEBUG_GENERAL("FIXME: how to write clock state for topaz?"
++ " so many clock\n");
++ /* PSB_WMSVDX32(dev_priv->topaz_clk_state, MSVDX_MAN_CLK_ENABLE); */
++
++ PSB_DEBUG_GENERAL("FIXME restore registers or init msvdx\n");
++
++ PSB_DEBUG_GENERAL("FIXME: flush all mmu\n");
++
++ dev_priv->topaz_power_state = LNC_TOPAZ_POWERON;
++
++ return 0;
++}
++
++int psb_power_down_topaz(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ if (dev_priv->topaz_power_state == LNC_TOPAZ_POWEROFF)
++ return 0;
++
++ if (dev_priv->topaz_busy) {
++ PSB_DEBUG_GENERAL("FIXME: MSVDX is busy, should wait it\n");
++ return -EBUSY;
++ }
++ PSB_DEBUG_GENERAL("FIXME: how to read clock state for topaz?"
++ " so many clock\n");
++ /* dev_priv->topaz_clk_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE); */
++ PSB_DEBUG_GENERAL("FIXME: save MSVDX register\n");
++ PSB_DEBUG_GENERAL("FIXME: save MSVDX context\n");
++
++ psb_down_island_power(dev, PSB_VIDEO_ENC_ISLAND);
++
++ dev_priv->topaz_power_state = LNC_TOPAZ_POWEROFF;
++
++ return 0;
++}
++
++int lnc_prepare_topaz_suspend(struct drm_device *dev)
++{
++ /* FIXME: need reset when resume?
++ * Is mtx restore enough for encoder continue run? */
++ /* dev_priv->topaz_needs_reset = 1; */
++
++ /* make sure all IRQs are seviced */
++
++ /* make sure all the fence is signaled */
++
++ /* save mtx context into somewhere */
++ /* lnc_topaz_save_mtx_state(dev); */
++
++ return 0;
++}
++
++int lnc_prepare_topaz_resume(struct drm_device *dev)
++{
++ /* FIXME: need reset when resume?
++ * Is mtx restore enough for encoder continue run? */
++ /* dev_priv->topaz_needs_reset = 1; */
++
++ /* make sure IRQ is open */
++
++ /* restore mtx context */
++ /* lnc_topaz_restore_mtx_state(dev); */
++
++ return 0;
++}
+diff -uNr a/drivers/gpu/drm/psb/lnc_topaz.h b/drivers/gpu/drm/psb/lnc_topaz.h
+--- a/drivers/gpu/drm/psb/lnc_topaz.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/lnc_topaz.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,803 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#ifndef _LNC_TOPAZ_H_
++#define _LNC_TOPAZ_H_
++
++#include "psb_drv.h"
++
++#define LNC_TOPAZ_NO_IRQ 1
++#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
++#define ENABLE_TOPAZ_OSPM_D0IX (0x10)
++
++/* extern int drm_psb_ospm; */
++
++int psb_power_up_topaz(struct drm_device *dev);
++int psb_power_down_topaz(struct drm_device *dev);
++int lnc_prepare_topaz_suspend(struct drm_device *dev);
++int lnc_prepare_topaz_resume(struct drm_device *dev);
++
++/*
++ * MACROS to insert values into fields within a word. The basename of the
++ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
++ */
++#define MM_WRITE32(base, offset, value) \
++do { \
++ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
++ + base + offset)) = value; \
++} while (0)
++
++#define MM_READ32(base, offset, pointer) \
++do { \
++ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
++ + base + offset)); \
++} while (0)
++
++#define F_MASK(basename) (MASK_##basename)
++#define F_SHIFT(basename) (SHIFT_##basename)
++
++#define F_ENCODE(val, basename) \
++ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
++
++/* MVEA macro */
++#define MVEA_START 0x03000
++
++#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value)
++#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer);
++
++#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
++#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
++#define F_ENCODE_MVEA(val, basename) \
++ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
++
++/* VLC macro */
++#define TOPAZ_VLC_START 0x05000
++
++/* TOPAZ macro */
++#define TOPAZ_START 0x02000
++
++#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value)
++#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer)
++
++#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
++#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
++#define F_ENCODE_TOPAZ(val,basename) \
++ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
++
++/* MTX macro */
++#define MTX_START 0x0
++
++#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value)
++#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer)
++
++/* DMAC macro */
++#define DMAC_START 0x0f000
++
++#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value)
++#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer)
++
++#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
++#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
++#define F_ENCODE_DMAC(val,basename) \
++ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
++
++
++/* Register CR_IMG_TOPAZ_INTENAB */
++#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
++
++#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
++
++#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
++
++#define MTX_CCBCTRL_ROFF 0
++#define MTX_CCBCTRL_COMPLETE 4
++#define MTX_CCBCTRL_CCBSIZE 8
++#define MTX_CCBCTRL_QP 12
++#define MTX_CCBCTRL_INITQP 24
++
++#define TOPAZ_CR_MMU_STATUS 0x001C
++#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
++#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
++#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
++
++#define TOPAZ_CR_MMU_MEM_REQ 0x0020
++#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
++#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
++#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
++
++#define MTX_CR_MTX_KICK 0x0080
++#define MASK_MTX_MTX_KICK 0x0000FFFF
++#define SHIFT_MTX_MTX_KICK 0
++#define REGNUM_MTX_MTX_KICK 0x0080
++
++#define MTX_DATA_MEM_BASE 0x82880000
++
++#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
++#define MASK_MTX_MTX_MCMR 0x00000001
++#define SHIFT_MTX_MTX_MCMR 0
++#define REGNUM_MTX_MTX_MCMR 0x0108
++
++#define MASK_MTX_MTX_MCMID 0x0FF00000
++#define SHIFT_MTX_MTX_MCMID 20
++#define REGNUM_MTX_MTX_MCMID 0x0108
++
++#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
++#define SHIFT_MTX_MTX_MCM_ADDR 2
++#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
++#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
++#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
++#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
++
++#define MASK_MTX_MTX_MCMAI 0x00000002
++#define SHIFT_MTX_MTX_MCMAI 1
++#define REGNUM_MTX_MTX_MCMAI 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
++
++#define MVEA_CR_IMG_MVEA_SRST 0x0000
++#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
++#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
++#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
++#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
++#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
++#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
++#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
++#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
++#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
++#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
++#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
++#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
++#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
++
++#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
++#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
++
++#define TOPAZ_MTX_PC (0x00000005)
++#define PC_START_ADDRESS (0x80900000)
++
++#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
++#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
++
++#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
++#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
++#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
++#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
++#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
++
++#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
++
++#define TOPAZ_CR_MMU_CONTROL0 0x0024
++#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800
++#define SHIFT_TOPAZ_CR_MMU_BYPASS 11
++#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
++
++#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
++#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
++#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
++#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
++
++#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
++#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
++#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
++
++#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
++#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
++#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
++
++#define TOPAZ_CR_MMU_BANK_INDEX 0x0038
++#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
++#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
++#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038
++
++#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
++#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
++#define TXRPT_WAITONKICK_VALUE 0x8ade0000
++
++#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
++
++#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
++#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
++
++#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
++#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
++
++#define MTX_CR_MTX_SYSC_CDMAA 0x0344
++#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
++#define SHIFT_MTX_CDMAA_ADDRESS 2
++#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
++
++#define MTX_CR_MTX_SYSC_CDMAC 0x0340
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define MASK_MTX_BURSTSIZE 0x07000000
++#define SHIFT_MTX_BURSTSIZE 24
++#define REGNUM_MTX_BURSTSIZE 0x0340
++
++#define MASK_MTX_RNW 0x00020000
++#define SHIFT_MTX_RNW 17
++#define REGNUM_MTX_RNW 0x0340
++
++#define MASK_MTX_ENABLE 0x00010000
++#define SHIFT_MTX_ENABLE 16
++#define REGNUM_MTX_ENABLE 0x0340
++
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
++
++#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
++#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
++#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
++#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
++#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
++#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
++#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
++#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
++#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
++#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
++
++#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
++#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
++#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
++#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040
++
++#define MTX_CR_MTX_SYSC_CDMAT 0x0350
++#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
++#define SHIFT_MTX_TRANSFERDATA 0
++#define REGNUM_MTX_TRANSFERDATA 0x0350
++
++#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
++#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
++#define SHIFT_IMG_SOC_TRANSFER_FIN 17
++#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
++
++#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
++#define MASK_IMG_SOC_CNT 0x0000FFFF
++#define SHIFT_IMG_SOC_CNT 0
++#define REGNUM_IMG_SOC_CNT 0x0004
++
++#define MASK_IMG_SOC_EN 0x00010000
++#define SHIFT_IMG_SOC_EN 16
++#define REGNUM_IMG_SOC_EN 0x0004
++
++#define MASK_IMG_SOC_LIST_EN 0x00040000
++#define SHIFT_IMG_SOC_LIST_EN 18
++#define REGNUM_IMG_SOC_LIST_EN 0x0004
++
++#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
++#define MASK_IMG_SOC_PER_HOLD 0x0000007F
++#define SHIFT_IMG_SOC_PER_HOLD 0
++#define REGNUM_IMG_SOC_PER_HOLD 0x0018
++
++#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
++#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
++#define SHIFT_IMG_SOC_START_ADDRESS 0
++#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
++
++#define MASK_IMG_SOC_BSWAP 0x40000000
++#define SHIFT_IMG_SOC_BSWAP 30
++#define REGNUM_IMG_SOC_BSWAP 0x0004
++
++#define MASK_IMG_SOC_PW 0x18000000
++#define SHIFT_IMG_SOC_PW 27
++#define REGNUM_IMG_SOC_PW 0x0004
++
++#define MASK_IMG_SOC_DIR 0x04000000
++#define SHIFT_IMG_SOC_DIR 26
++#define REGNUM_IMG_SOC_DIR 0x0004
++
++#define MASK_IMG_SOC_PI 0x03000000
++#define SHIFT_IMG_SOC_PI 24
++#define REGNUM_IMG_SOC_PI 0x0004
++#define IMG_SOC_PI_1 0x00000002
++#define IMG_SOC_PI_2 0x00000001
++#define IMG_SOC_PI_4 0x00000000
++
++#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
++#define SHIFT_IMG_SOC_TRANSFER_IEN 29
++#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
++
++#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
++ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
++ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
++ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
++ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
++ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
++
++#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
++#define MASK_IMG_SOC_EXT_SA 0x0000000F
++#define SHIFT_IMG_SOC_EXT_SA 0
++#define REGNUM_IMG_SOC_EXT_SA 0x0008
++
++#define MASK_IMG_SOC_ACC_DEL 0xE0000000
++#define SHIFT_IMG_SOC_ACC_DEL 29
++#define REGNUM_IMG_SOC_ACC_DEL 0x0008
++
++#define MASK_IMG_SOC_INCR 0x08000000
++#define SHIFT_IMG_SOC_INCR 27
++#define REGNUM_IMG_SOC_INCR 0x0008
++
++#define MASK_IMG_SOC_BURST 0x07000000
++#define SHIFT_IMG_SOC_BURST 24
++#define REGNUM_IMG_SOC_BURST 0x0008
++
++#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
++((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
++(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
++(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
++
++#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
++#define MASK_IMG_SOC_ADDR 0x007FFFFF
++#define SHIFT_IMG_SOC_ADDR 0
++#define REGNUM_IMG_SOC_ADDR 0x0014
++
++/* **************** DMAC define **************** */
++enum DMAC_eBSwap {
++ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
++ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
++};
++
++enum DMAC_ePW {
++ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
++ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
++ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
++};
++
++enum DMAC_eAccDel {
++ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
++ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
++ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
++ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
++ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
++ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
++ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
++ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
++};
++
++enum DMAC_eBurst {
++ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
++ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
++ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
++ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
++ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
++ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
++ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
++ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
++};
++
++/* commands for topaz,shared with user space driver */
++enum drm_lnc_topaz_cmd {
++ MTX_CMDID_NULL = 0,
++ MTX_CMDID_DO_HEADER = 1,
++ MTX_CMDID_ENCODE_SLICE = 2,
++ MTX_CMDID_WRITEREG = 3,
++ MTX_CMDID_START_PIC = 4,
++ MTX_CMDID_END_PIC = 5,
++ MTX_CMDID_SYNC = 6,
++ MTX_CMDID_ENCODE_ONE_ROW = 7,
++ MTX_CMDID_FLUSH = 8,
++ MTX_CMDID_SW_LEAVE_LOWPOWER = 0xfc,
++ MTX_CMDID_SW_ENTER_LOWPOWER = 0xfe,
++ MTX_CMDID_SW_NEW_CODEC = 0xff
++};
++
++/* codecs topaz supports,shared with user space driver */
++enum drm_lnc_topaz_codec {
++ IMG_CODEC_JPEG = 0,
++ IMG_CODEC_H264_NO_RC,
++ IMG_CODEC_H264_VBR,
++ IMG_CODEC_H264_CBR,
++ IMG_CODEC_H263_NO_RC,
++ IMG_CODEC_H263_VBR,
++ IMG_CODEC_H263_CBR,
++ IMG_CODEC_MPEG4_NO_RC,
++ IMG_CODEC_MPEG4_VBR,
++ IMG_CODEC_MPEG4_CBR,
++ IMG_CODEC_NUM
++};
++
++/* XXX: it's a copy of msvdx cmd queue. should have some change? */
++struct lnc_topaz_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++
++struct topaz_cmd_header {
++ union {
++ struct {
++ unsigned long id:8;
++ unsigned long size:8;
++ unsigned long seq:16;
++ };
++ uint32_t val;
++ };
++};
++
++/* external function declare */
++/* lnc_topazinit.c */
++int lnc_topaz_init(struct drm_device *dev);
++int lnc_topaz_uninit(struct drm_device *dev);
++int lnc_topaz_reset(struct drm_psb_private *dev_priv);
++int topaz_init_fw(struct drm_device *dev);
++int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec);
++int topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value,
++ uint32_t enable);
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val);
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr);
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t addr);
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val);
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
++int lnc_topaz_save_mtx_state(struct drm_device *dev);
++int lnc_topaz_restore_mtx_state(struct drm_device *dev);
++
++/* lnc_topaz.c */
++void lnc_topaz_interrupt(struct drm_device *dev, uint32_t topaz_stat);
++
++int lnc_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++
++void lnc_topaz_flush_cmd_queue(struct drm_device *dev);
++void lnc_topaz_lockup(struct drm_psb_private *dev_priv, int *topaz_lockup,
++ int *topaz_idle);
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_cout);
++
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++
++/* macros to get/set CCB control data */
++#define WB_CCB_CTRL_RINDEX(dev_priv) (*((uint32_t *)dev_priv->topaz_ccb_wb))
++#define WB_CCB_CTRL_SEQ(dev_priv) (*((uint32_t *)dev_priv->topaz_ccb_wb+1))
++
++#define POLL_WB_RINDEX(dev_priv,value) \
++do { \
++ int i; \
++ for (i = 0; i < 10000; i++) { \
++ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \
++ break; \
++ else \
++ DRM_UDELAY(100); \
++ } \
++ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \
++ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \
++ ret = -EBUSY; \
++ } \
++} while (0)
++
++#define POLL_WB_SEQ(dev_priv,value) \
++do { \
++ int i; \
++ for (i = 0; i < 10000; i++) { \
++ if (WB_CCB_CTRL_SEQ(dev_priv) == value) \
++ break; \
++ else \
++ DRM_UDELAY(1000); \
++ } \
++ if (WB_CCB_CTRL_SEQ(dev_priv) != value) { \
++ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%04x(mtx) vs 0x%04x\n",\
++ WB_CCB_CTRL_SEQ(dev_priv), value); \
++ ret = -EBUSY; \
++ } \
++} while (0)
++
++#define CCB_CTRL_RINDEX(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_ROFF)
++
++#define CCB_CTRL_RINDEX(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_ROFF)
++
++#define CCB_CTRL_QP(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_QP)
++
++#define CCB_CTRL_SEQ(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_COMPLETE)
++
++#define CCB_CTRL_FRAMESKIP(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_FRAMESKIP)
++
++#define CCB_CTRL_SET_QP(dev_priv, qp) \
++ topaz_write_mtx_mem(dev_priv, \
++ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_QP, qp)
++
++#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \
++ topaz_write_mtx_mem(dev_priv, \
++ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP, qp)
++
++
++#define TOPAZ_BEGIN_CCB(dev_priv) \
++ topaz_write_mtx_mem_multiple_setup(dev_priv, \
++ dev_priv->topaz_ccb_buffer_addr + \
++ dev_priv->topaz_cmd_windex * 4)
++
++#define TOPAZ_OUT_CCB(dev_priv, cmd) \
++do { \
++ topaz_write_mtx_mem_multiple(dev_priv, cmd); \
++ dev_priv->topaz_cmd_windex++; \
++} while (0)
++
++#define TOPAZ_END_CCB(dev_priv,kick_count) \
++ topaz_mtx_kick(dev_priv, 1);
++
++static inline char *cmd_to_string(int cmd_id)
++{
++ switch (cmd_id) {
++ case MTX_CMDID_START_PIC:
++ return "MTX_CMDID_START_PIC";
++ case MTX_CMDID_END_PIC:
++ return "MTX_CMDID_END_PIC";
++ case MTX_CMDID_DO_HEADER:
++ return "MTX_CMDID_DO_HEADER";
++ case MTX_CMDID_ENCODE_SLICE:
++ return "MTX_CMDID_ENCODE_SLICE";
++ case MTX_CMDID_SYNC:
++ return "MTX_CMDID_SYNC";
++
++ default:
++ return "Undefined command";
++
++ }
++}
++
++static inline char *codec_to_string(int codec)
++{
++ switch (codec) {
++ case IMG_CODEC_H264_NO_RC:
++ return "H264_NO_RC";
++ case IMG_CODEC_H264_VBR:
++ return "H264_VBR";
++ case IMG_CODEC_H264_CBR:
++ return "H264_CBR";
++ case IMG_CODEC_H263_NO_RC:
++ return "H263_NO_RC";
++ case IMG_CODEC_H263_VBR:
++ return "H263_VBR";
++ case IMG_CODEC_H263_CBR:
++ return "H263_CBR";
++ case IMG_CODEC_MPEG4_NO_RC:
++ return "MPEG4_NO_RC";
++ case IMG_CODEC_MPEG4_VBR:
++ return "MPEG4_VBR";
++ case IMG_CODEC_MPEG4_CBR:
++ return "MPEG4_CBR";
++ default:
++ return "Undefined codec";
++ }
++}
++
++static inline void lnc_topaz_enableirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG;
++
++ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
++ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT));
++
++ PSB_WVDC32(ier, PSB_INT_ENABLE_R); /* essential */
++}
++
++static inline void lnc_topaz_disableirq(struct drm_device *dev)
++{
++
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG);
++
++ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
++ PSB_WVDC32(ier, PSB_INT_ENABLE_R); /* essential */
++}
++
++static inline void lnc_topaz_clearirq(struct drm_device *dev,
++ uint32_t clear_topaz)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
++ if (clear_topaz != 0)
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz);
++
++ PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R);
++}
++
++static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t val, iir, clear = 0;
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val);
++ iir = PSB_RVDC32(PSB_INT_IDENTITY_R);
++
++ if ((val == 0) && (iir == 0)) {/* no interrupt */
++ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
++ return 0;
++ }
++
++ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x,IIR=0%08x\n", val, iir);
++
++ if (val & (1<<31))
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ CCB_CTRL_SEQ(dev_priv),
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *(uint32_t *)dev_priv->topaz_sync_addr);
++ else
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ CCB_CTRL_SEQ(dev_priv),
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *(uint32_t *)dev_priv->topaz_sync_addr);
++
++ if (val & 0x8) {
++ uint32_t mmu_status, mmu_req;
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status);
++ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req);
++
++ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
++ "address=0x%08x,mem req=0x%08x\n",
++ mmu_status, mmu_req);
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
++ }
++
++ if (val & 0x4) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
++ }
++
++ if (val & 0x2) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
++ }
++
++ if (val & 0x1) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
++ }
++
++ return clear;
++}
++
++#endif /* _LNC_TOPAZ_H_ */
+diff -uNr a/drivers/gpu/drm/psb/lnc_topazinit.c b/drivers/gpu/drm/psb/lnc_topazinit.c
+--- a/drivers/gpu/drm/psb/lnc_topazinit.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/lnc_topazinit.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1896 @@
++/**
++ * file lnc_topazinit.c
++ * TOPAZ initialization and mtx-firmware upload
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++/* NOTE: (READ BEFORE REFINE CODE)
++ * 1. The FIRMWARE's SIZE is measured by byte, we have to pass the size
++ * measured by word to DMAC.
++ *
++ *
++ *
++ */
++
++/* include headers */
++
++/* #define DRM_DEBUG_CODE 2 */
++
++#include <linux/firmware.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "psb_drv.h"
++#include "lnc_topaz.h"
++
++/* WARNING: this define is very important */
++#define RAM_SIZE (1024 * 24)
++
++static int drm_psb_ospmxxx = 0x10;
++
++/* register default values
++ * THIS HEADER IS ONLY INCLUDE ONCE*/
++static unsigned long topaz_default_regs[183][3] = {
++ {MVEA_START, 0x00000000, 0x00000000},
++ {MVEA_START, 0x00000004, 0x00000400},
++ {MVEA_START, 0x00000008, 0x00000000},
++ {MVEA_START, 0x0000000C, 0x00000000},
++ {MVEA_START, 0x00000010, 0x00000000},
++ {MVEA_START, 0x00000014, 0x00000000},
++ {MVEA_START, 0x00000018, 0x00000000},
++ {MVEA_START, 0x0000001C, 0x00000000},
++ {MVEA_START, 0x00000020, 0x00000120},
++ {MVEA_START, 0x00000024, 0x00000000},
++ {MVEA_START, 0x00000028, 0x00000000},
++ {MVEA_START, 0x00000100, 0x00000000},
++ {MVEA_START, 0x00000104, 0x00000000},
++ {MVEA_START, 0x00000108, 0x00000000},
++ {MVEA_START, 0x0000010C, 0x00000000},
++ {MVEA_START, 0x0000011C, 0x00000001},
++ {MVEA_START, 0x0000012C, 0x00000000},
++ {MVEA_START, 0x00000180, 0x00000000},
++ {MVEA_START, 0x00000184, 0x00000000},
++ {MVEA_START, 0x00000188, 0x00000000},
++ {MVEA_START, 0x0000018C, 0x00000000},
++ {MVEA_START, 0x00000190, 0x00000000},
++ {MVEA_START, 0x00000194, 0x00000000},
++ {MVEA_START, 0x00000198, 0x00000000},
++ {MVEA_START, 0x0000019C, 0x00000000},
++ {MVEA_START, 0x000001A0, 0x00000000},
++ {MVEA_START, 0x000001A4, 0x00000000},
++ {MVEA_START, 0x000001A8, 0x00000000},
++ {MVEA_START, 0x000001AC, 0x00000000},
++ {MVEA_START, 0x000001B0, 0x00000000},
++ {MVEA_START, 0x000001B4, 0x00000000},
++ {MVEA_START, 0x000001B8, 0x00000000},
++ {MVEA_START, 0x000001BC, 0x00000000},
++ {MVEA_START, 0x000001F8, 0x00000000},
++ {MVEA_START, 0x000001FC, 0x00000000},
++ {MVEA_START, 0x00000200, 0x00000000},
++ {MVEA_START, 0x00000204, 0x00000000},
++ {MVEA_START, 0x00000208, 0x00000000},
++ {MVEA_START, 0x0000020C, 0x00000000},
++ {MVEA_START, 0x00000210, 0x00000000},
++ {MVEA_START, 0x00000220, 0x00000001},
++ {MVEA_START, 0x00000224, 0x0000001F},
++ {MVEA_START, 0x00000228, 0x00000100},
++ {MVEA_START, 0x0000022C, 0x00001F00},
++ {MVEA_START, 0x00000230, 0x00000101},
++ {MVEA_START, 0x00000234, 0x00001F1F},
++ {MVEA_START, 0x00000238, 0x00001F01},
++ {MVEA_START, 0x0000023C, 0x0000011F},
++ {MVEA_START, 0x00000240, 0x00000200},
++ {MVEA_START, 0x00000244, 0x00001E00},
++ {MVEA_START, 0x00000248, 0x00000002},
++ {MVEA_START, 0x0000024C, 0x0000001E},
++ {MVEA_START, 0x00000250, 0x00000003},
++ {MVEA_START, 0x00000254, 0x0000001D},
++ {MVEA_START, 0x00000258, 0x00001F02},
++ {MVEA_START, 0x0000025C, 0x00000102},
++ {MVEA_START, 0x00000260, 0x0000011E},
++ {MVEA_START, 0x00000264, 0x00000000},
++ {MVEA_START, 0x00000268, 0x00000000},
++ {MVEA_START, 0x0000026C, 0x00000000},
++ {MVEA_START, 0x00000270, 0x00000000},
++ {MVEA_START, 0x00000274, 0x00000000},
++ {MVEA_START, 0x00000278, 0x00000000},
++ {MVEA_START, 0x00000280, 0x00008000},
++ {MVEA_START, 0x00000284, 0x00000000},
++ {MVEA_START, 0x00000288, 0x00000000},
++ {MVEA_START, 0x0000028C, 0x00000000},
++ {MVEA_START, 0x00000314, 0x00000000},
++ {MVEA_START, 0x00000318, 0x00000000},
++ {MVEA_START, 0x0000031C, 0x00000000},
++ {MVEA_START, 0x00000320, 0x00000000},
++ {MVEA_START, 0x00000324, 0x00000000},
++ {MVEA_START, 0x00000348, 0x00000000},
++ {MVEA_START, 0x00000380, 0x00000000},
++ {MVEA_START, 0x00000384, 0x00000000},
++ {MVEA_START, 0x00000388, 0x00000000},
++ {MVEA_START, 0x0000038C, 0x00000000},
++ {MVEA_START, 0x00000390, 0x00000000},
++ {MVEA_START, 0x00000394, 0x00000000},
++ {MVEA_START, 0x00000398, 0x00000000},
++ {MVEA_START, 0x0000039C, 0x00000000},
++ {MVEA_START, 0x000003A0, 0x00000000},
++ {MVEA_START, 0x000003A4, 0x00000000},
++ {MVEA_START, 0x000003A8, 0x00000000},
++ {MVEA_START, 0x000003B0, 0x00000000},
++ {MVEA_START, 0x000003B4, 0x00000000},
++ {MVEA_START, 0x000003B8, 0x00000000},
++ {MVEA_START, 0x000003BC, 0x00000000},
++ {MVEA_START, 0x000003D4, 0x00000000},
++ {MVEA_START, 0x000003D8, 0x00000000},
++ {MVEA_START, 0x000003DC, 0x00000000},
++ {MVEA_START, 0x000003E0, 0x00000000},
++ {MVEA_START, 0x000003E4, 0x00000000},
++ {MVEA_START, 0x000003EC, 0x00000000},
++ {MVEA_START, 0x000002D0, 0x00000000},
++ {MVEA_START, 0x000002D4, 0x00000000},
++ {MVEA_START, 0x000002D8, 0x00000000},
++ {MVEA_START, 0x000002DC, 0x00000000},
++ {MVEA_START, 0x000002E0, 0x00000000},
++ {MVEA_START, 0x000002E4, 0x00000000},
++ {MVEA_START, 0x000002E8, 0x00000000},
++ {MVEA_START, 0x000002EC, 0x00000000},
++ {MVEA_START, 0x000002F0, 0x00000000},
++ {MVEA_START, 0x000002F4, 0x00000000},
++ {MVEA_START, 0x000002F8, 0x00000000},
++ {MVEA_START, 0x000002FC, 0x00000000},
++ {MVEA_START, 0x00000300, 0x00000000},
++ {MVEA_START, 0x00000304, 0x00000000},
++ {MVEA_START, 0x00000308, 0x00000000},
++ {MVEA_START, 0x0000030C, 0x00000000},
++ {MVEA_START, 0x00000290, 0x00000000},
++ {MVEA_START, 0x00000294, 0x00000000},
++ {MVEA_START, 0x00000298, 0x00000000},
++ {MVEA_START, 0x0000029C, 0x00000000},
++ {MVEA_START, 0x000002A0, 0x00000000},
++ {MVEA_START, 0x000002A4, 0x00000000},
++ {MVEA_START, 0x000002A8, 0x00000000},
++ {MVEA_START, 0x000002AC, 0x00000000},
++ {MVEA_START, 0x000002B0, 0x00000000},
++ {MVEA_START, 0x000002B4, 0x00000000},
++ {MVEA_START, 0x000002B8, 0x00000000},
++ {MVEA_START, 0x000002BC, 0x00000000},
++ {MVEA_START, 0x000002C0, 0x00000000},
++ {MVEA_START, 0x000002C4, 0x00000000},
++ {MVEA_START, 0x000002C8, 0x00000000},
++ {MVEA_START, 0x000002CC, 0x00000000},
++ {MVEA_START, 0x00000080, 0x00000000},
++ {MVEA_START, 0x00000084, 0x80705700},
++ {MVEA_START, 0x00000088, 0x00000000},
++ {MVEA_START, 0x0000008C, 0x00000000},
++ {MVEA_START, 0x00000090, 0x00000000},
++ {MVEA_START, 0x00000094, 0x00000000},
++ {MVEA_START, 0x00000098, 0x00000000},
++ {MVEA_START, 0x0000009C, 0x00000000},
++ {MVEA_START, 0x000000A0, 0x00000000},
++ {MVEA_START, 0x000000A4, 0x00000000},
++ {MVEA_START, 0x000000A8, 0x00000000},
++ {MVEA_START, 0x000000AC, 0x00000000},
++ {MVEA_START, 0x000000B0, 0x00000000},
++ {MVEA_START, 0x000000B4, 0x00000000},
++ {MVEA_START, 0x000000B8, 0x00000000},
++ {MVEA_START, 0x000000BC, 0x00000000},
++ {MVEA_START, 0x000000C0, 0x00000000},
++ {MVEA_START, 0x000000C4, 0x00000000},
++ {MVEA_START, 0x000000C8, 0x00000000},
++ {MVEA_START, 0x000000CC, 0x00000000},
++ {MVEA_START, 0x000000D0, 0x00000000},
++ {MVEA_START, 0x000000D4, 0x00000000},
++ {MVEA_START, 0x000000D8, 0x00000000},
++ {MVEA_START, 0x000000DC, 0x00000000},
++ {MVEA_START, 0x000000E0, 0x00000000},
++ {MVEA_START, 0x000000E4, 0x00000000},
++ {MVEA_START, 0x000000E8, 0x00000000},
++ {MVEA_START, 0x000000EC, 0x00000000},
++ {MVEA_START, 0x000000F0, 0x00000000},
++ {MVEA_START, 0x000000F4, 0x00000000},
++ {MVEA_START, 0x000000F8, 0x00000000},
++ {MVEA_START, 0x000000FC, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000004, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000008, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000020, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000024, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000002C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000034, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000038, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000003C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000040, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000044, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000048, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000068, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
++};
++
++#define FIRMWARE_NAME "topaz_fw.bin"
++
++/* define structure */
++/* firmware file's info head */
++struct topaz_fwinfo {
++ unsigned int ver:16;
++ unsigned int codec:16;
++
++ unsigned int text_size;
++ unsigned int data_size;
++ unsigned int data_location;
++};
++
++/* firmware data array define */
++struct topaz_codec_fw {
++ uint32_t ver;
++ uint32_t codec;
++
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++
++ struct ttm_buffer_object *text;
++ struct ttm_buffer_object *data;
++};
++
++
++
++/* static function define */
++static int topaz_upload_fw(struct drm_device *dev,
++ enum drm_lnc_topaz_codec codec);
++static inline void topaz_set_default_regs(struct drm_psb_private
++ *dev_priv);
++
++#define UPLOAD_FW_BY_DMA 1
++
++#if UPLOAD_FW_BY_DMA
++static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
++ uint32_t channel, uint32_t src_phy_addr,
++ uint32_t offset, uint32_t dst_addr,
++ uint32_t byte_num, uint32_t is_increment,
++ uint32_t is_write);
++#else
++static void topaz_mtx_upload_by_register(struct drm_device *dev,
++ uint32_t mtx_mem, uint32_t addr,
++ uint32_t size,
++ struct ttm_buffer_object *buf);
++#endif
++
++static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t reg, const uint32_t val);
++static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t reg, uint32_t *ret_val);
++static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
++static void release_mtx_control_from_dash(struct drm_psb_private
++ *dev_priv);
++static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
++static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr,
++ uint32_t size);
++static void mtx_dma_write(struct drm_device *dev);
++
++
++#if 0 /* DEBUG_FUNCTION */
++static int topaz_test_null(struct drm_device *dev, uint32_t seq);
++static void topaz_mmu_flush(struct drm_device *dev);
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
++#endif
++#if 0
++static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++#endif
++
++/* global variable definitions */
++struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM];
++
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t read_val;
++ uint32_t reg, bank_size, ram_bank_size, ram_id;
++
++ TOPAZ_READ32(0x3c, &reg);
++ reg = 0x0a0a0606;
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMR));
++
++ /* ?? poll this reg? */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
++
++ return read_val;
++}
++
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ TOPAZ_READ32(0x3c, &reg);
++
++ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
++
++ /* ?? poll this reg? */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ return;
++}
++
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ TOPAZ_READ32(0x3c, &reg);
++
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(1, MTX_MTX_MCMAI) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
++}
++
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val)
++{
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
++}
++
++
++int topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value, uint32_t mask)
++{
++ uint32_t tmp;
++ uint32_t count = 10000;
++
++ /* # poll topaz register for certain times */
++ while (count) {
++ /* #.# read */
++ MM_READ32(addr, 0, &tmp);
++
++ if (value == (tmp & mask))
++ return 0;
++
++ /* #.# delay and loop */
++ DRM_UDELAY(100);
++ --count;
++ }
++
++ /* # polling timed out; return -EBUSY to indicate failure */
++ /* XXX: testsuit means a timeout 10000 */
++
++ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
++ "actual 0x%08x (0x%08x & 0x%08x)\n",
++ addr, value, tmp & mask, tmp, mask);
++
++ return -EBUSY;
++
++}
++
++
++void lnc_topaz_reset_wq(struct work_struct *work)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(work, struct drm_psb_private, topaz_watchdog_wq);
++
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ mutex_lock(&dev_priv->topaz_mutex);
++ dev_priv->topaz_needs_reset = 1;
++ dev_priv->topaz_current_sequence++;
++ PSB_DEBUG_GENERAL
++ ("MSVDXFENCE: incremented topaz_current_sequence to :%d\n",
++ dev_priv->topaz_current_sequence);
++
++ psb_fence_error(scheduler->dev, LNC_ENGINE_ENCODE,
++ dev_priv->topaz_current_sequence, _PSB_FENCE_TYPE_EXE,
++ DRM_CMD_HANG);
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++
++ spin_lock_irqsave(&dev_priv->topaz_lock, irq_flags);
++
++ /* psb_msvdx_flush_cmd_queue(scheduler->dev); */
++
++ spin_unlock_irqrestore(&dev_priv->topaz_lock, irq_flags);
++
++ psb_schedule_watchdog(dev_priv);
++ mutex_unlock(&dev_priv->topaz_mutex);
++}
++
++
++/* this function finish the first part of initialization, the rest
++ * should be done in topaz_setup_fw
++ */
++int lnc_topaz_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ uint32_t core_id, core_rev;
++ void *topaz_bo_virt;
++ int ret = 0;
++ bool is_iomem;
++
++ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n");
++
++ /* # initialize command topaz queueing [msvdx_queue] */
++ INIT_LIST_HEAD(&dev_priv->topaz_queue);
++ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
++ mutex_init(&dev_priv->topaz_mutex);
++ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
++ spin_lock_init(&dev_priv->topaz_lock);
++
++ /* # topaz status init. [msvdx_busy] */
++ dev_priv->topaz_busy = 0;
++ dev_priv->topaz_cmd_seq = 0;
++ dev_priv->topaz_fw_loaded = 0;
++ dev_priv->topaz_cur_codec = 0;
++ dev_priv->topaz_mtx_data_mem = NULL;
++ dev_priv->cur_mtx_data_size = 0;
++
++ dev_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE,
++ GFP_KERNEL);
++ if (dev_priv->topaz_mtx_reg_state == NULL) {
++ DRM_ERROR("TOPAZ: failed to allocate space "
++ "for mtx register\n");
++ return -1;
++ }
++
++ /* # gain write back structure,we may only need 32+4=40DW */
++ if (!dev_priv->topaz_bo) {
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &(dev_priv->topaz_bo));
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
++ return ret;
++ }
++ }
++
++ ret = ttm_bo_kmap(dev_priv->topaz_bo, 0,
++ dev_priv->topaz_bo->num_pages,
++ &dev_priv->topaz_bo_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
++ ttm_bo_unref(&dev_priv->topaz_bo);
++ return ret;
++ }
++
++ topaz_bo_virt = ttm_kmap_obj_virtual(&dev_priv->topaz_bo_kmap,
++ &is_iomem);
++ dev_priv->topaz_ccb_wb = (void *) topaz_bo_virt;
++ dev_priv->topaz_wb_offset = dev_priv->topaz_bo->offset;
++ dev_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt + 2048);
++ dev_priv->topaz_sync_offset = dev_priv->topaz_wb_offset + 2048;
++ PSB_DEBUG_GENERAL("TOPAZ: allocated BO for WriteBack and SYNC command,"
++ "WB offset=0x%08x, SYNC offset=0x%08x\n",
++ dev_priv->topaz_wb_offset, dev_priv->topaz_sync_offset);
++
++ *(dev_priv->topaz_sync_addr) = ~0; /* reset sync seq */
++
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ /* # set up MMU */
++ topaz_mmu_hwsetup(dev_priv);
++
++ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading to the place"
++ "when receiving user space commands\n");
++
++#if 0 /* can't load FW here */
++ /* #.# load fw to driver */
++ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
++ ret = topaz_init_fw(dev);
++ if (ret != 0)
++ return -1;
++
++ topaz_setup_fw(dev, FW_H264_NO_RC);/* just for test */
++#endif
++ /* <msvdx does> # minimal clock */
++
++ /* <msvdx does> # return 0 */
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
++
++ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
++ core_id, core_rev);
++
++ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
++ psb_power_down_topaz(dev);
++
++ return 0;
++}
++
++int lnc_topaz_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* int n;*/
++
++ /* flush MMU */
++ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
++ /* topaz_mmu_flushcache (dev_priv); */
++
++ /* # reset TOPAZ chip */
++ lnc_topaz_reset(dev_priv);
++
++ /* release resources */
++ /* # release write back memory */
++ dev_priv->topaz_ccb_wb = NULL;
++
++ /* topaz_bo is unmapped and unreferenced once, at the end below */
++
++ /* release mtx register save space */
++ kfree(dev_priv->topaz_mtx_reg_state);
++
++ /* release mtx data memory save space */
++ if (dev_priv->topaz_mtx_data_mem)
++ ttm_bo_unref(&dev_priv->topaz_mtx_data_mem);
++
++ /* # release firmware */
++ /* XXX: but this handling should be reconsidered */
++ /* XXX: there is no jpeg firmware...... */
++#if 0 /* FIX WHEN FIRMWARE IS LOADED */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ ttm_bo_unref(&topaz_fw[n].text);
++ ttm_bo_unref(&topaz_fw[n].data);
++ }
++#endif
++ ttm_bo_kunmap(&dev_priv->topaz_bo_kmap);
++ ttm_bo_unref(&dev_priv->topaz_bo);
++
++ return 0;
++}
++
++int lnc_topaz_reset(struct drm_psb_private *dev_priv)
++{
++ return 0;
++#if 0
++ int ret = 0;
++ /* # software reset */
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ /* # call lnc_wait_for_register, wait reset finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ /* # if reset finished */
++ PSB_DEBUG_GENERAL("XXX: add condition judgement for topaz wait...\n");
++ /* #.# clear interrupt enable flag */
++
++ /* #.# clear pending interrupt flags */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT)
++ );
++ /* # destroy topaz mutex in drm_psb_private [msvdx_mutex] */
++
++ /* # return register value which is waited above */
++
++ PSB_DEBUG_GENERAL("called\n");
++ return 0;
++#endif
++}
++
++/* read firmware bin file and load all data into driver */
++int topaz_init_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ const struct firmware *raw = NULL;
++ unsigned char *ptr;
++ int ret = 0;
++ int n;
++ struct topaz_fwinfo *cur_fw;
++ int cur_size;
++ struct topaz_codec_fw *cur_codec;
++ struct ttm_buffer_object **cur_drm_obj;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++
++ dev_priv->stored_initial_qp = 0;
++
++ /* # get firmware */
++ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
++
++ if (raw && (raw->size < sizeof(struct topaz_fwinfo))) {
++ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
++ goto out;
++ }
++
++ ptr = (unsigned char *) raw->data;
++
++ if (!ptr) {
++ DRM_ERROR("TOPAZ: failed to load firmware.\n");
++ goto out;
++ }
++
++ /* # load fw from file */
++ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
++ cur_fw = NULL;
++ /* didn't use the first element */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ cur_fw = (struct topaz_fwinfo *) ptr;
++
++ cur_codec = &topaz_fw[cur_fw->codec];
++ cur_codec->ver = cur_fw->ver;
++ cur_codec->codec = cur_fw->codec;
++ cur_codec->text_size = cur_fw->text_size;
++ cur_codec->data_size = cur_fw->data_size;
++ cur_codec->data_location = cur_fw->data_location;
++
++ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
++ codec_to_string(cur_fw->codec));
++
++ /* #.# handle text section */
++ cur_codec->text = NULL;
++ ptr += sizeof(struct topaz_fwinfo);
++ cur_drm_obj = &cur_codec->text;
++ cur_size = cur_fw->text_size;
++
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, cur_size,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, cur_drm_obj);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# handle data section */
++ cur_codec->data = NULL;
++ ptr += cur_fw->text_size;
++ cur_drm_obj = &cur_codec->data;
++ cur_size = cur_fw->data_size;
++
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, cur_size,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, cur_drm_obj);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# validate firmware */
++
++ /* #.# update ptr */
++ ptr += cur_fw->data_size;
++ }
++
++ release_firmware(raw);
++
++ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
++
++ return 0;
++
++out:
++ if (raw) {
++ PSB_DEBUG_GENERAL("release firmware....\n");
++ release_firmware(raw);
++ }
++
++ return -1;
++}
++
++/* setup fw when start a new context */
++int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ uint32_t mem_size = RAM_SIZE; /* follow DDK */
++ uint32_t verify_pc;
++ int ret;
++
++#if 0
++ if (codec == dev_priv->topaz_current_codec) {
++ LNC_TRACEL("TOPAZ: reuse previous codec\n");
++ return 0;
++ }
++#endif
++
++ if (drm_psb_ospmxxx & ENABLE_TOPAZ_OSPM_D0IX)
++ psb_power_up_topaz(dev);
++
++ /* XXX: need to reset topaz? */
++ PSB_DEBUG_GENERAL("XXX: should reset topaz when context change?\n");
++
++ /* XXX: interrupt enable shouldn't be enable here,
++ * this function is called when interrupts are enabled,
++ * but here, we've no choice since we have to call setup_fw by
++ * manual */
++ /* # upload firmware, clear interrupts and start the firmware
++ * -- from hostutils.c in TestSuits*/
++
++ /* # reset MVEA */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++
++ topaz_mmu_hwsetup(dev_priv);
++
++#if !LNC_TOPAZ_NO_IRQ
++ lnc_topaz_disableirq(dev);
++#endif
++
++ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
++
++ topaz_set_default_regs(dev_priv);
++
++ /* # reset mtx */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
++
++ /* # upload fw by drm */
++ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
++
++ topaz_upload_fw(dev, codec);
++
++ /* allocate the space for context save & restore if needed */
++ if (dev_priv->topaz_mtx_data_mem == NULL) {
++ ret = ttm_buffer_object_create(bdev,
++ dev_priv->cur_mtx_data_size * 4,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL,
++ &dev_priv->topaz_mtx_data_mem);
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
++ "mtx data save\n");
++ return -1;
++ }
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
++
++ /* XXX: In power save mode, need to save the complete data memory
++ * and restore it. MTX_FWIF.c record the data size */
++ PSB_DEBUG_GENERAL("TOPAZ:in power save mode need to save memory?\n");
++
++ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n");
++ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS);
++
++ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n");
++
++ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc);
++
++ /* enable auto clock is essential for this driver */
++ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE));
++ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
++ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE));
++
++ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n",
++ verify_pc, PC_START_ADDRESS);
++
++ /* # turn on MTX */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ /* # poll on the interrupt which the firmware will generate */
++ topaz_wait_for_register(dev_priv,
++ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
++ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
++
++ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
++
++ /* # get ccb buffer addr -- file hostutils.c */
++ dev_priv->topaz_ccb_buffer_addr =
++ topaz_read_mtx_mem(dev_priv,
++ MTX_DATA_MEM_BASE + mem_size - 4);
++ dev_priv->topaz_ccb_ctrl_addr =
++ topaz_read_mtx_mem(dev_priv,
++ MTX_DATA_MEM_BASE + mem_size - 8);
++ dev_priv->topaz_ccb_size =
++ topaz_read_mtx_mem(dev_priv,
++ dev_priv->topaz_ccb_ctrl_addr +
++ MTX_CCBCTRL_CCBSIZE);
++
++ dev_priv->topaz_cmd_windex = 0;
++
++ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n",
++ dev_priv->topaz_ccb_buffer_addr,
++ dev_priv->topaz_ccb_ctrl_addr,
++ dev_priv->topaz_ccb_size);
++
++ /* # write back the initial QP Value */
++ topaz_write_mtx_mem(dev_priv,
++ dev_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP,
++ dev_priv->stored_initial_qp);
++
++ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n",
++ dev_priv->topaz_wb_offset);
++ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12,
++ dev_priv->topaz_wb_offset);
++
++ /* this kick is essential for mtx.... */
++ *((uint32_t *) dev_priv->topaz_ccb_wb) = 0x01020304;
++ topaz_mtx_kick(dev_priv, 1);
++ DRM_UDELAY(1000);
++ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory,"
++ " and here it is 0x%08x\n",
++ *((uint32_t *) dev_priv->topaz_ccb_wb));
++
++ *((uint32_t *) dev_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */
++ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
++
++ /* XXX: is there any need to record next cmd num??
++ * we use fence sequence number to record it
++ */
++ dev_priv->topaz_busy = 0;
++ dev_priv->topaz_cmd_seq = 0;
++
++#if !LNC_TOPAZ_NO_IRQ
++ lnc_topaz_enableirq(dev);
++#endif
++
++#if 0
++ /* test sync command */
++ {
++ uint32_t sync_cmd[3];
++ uint32_t *sync_p = (uint32_t *)dev_priv->topaz_sync_addr;
++ int count = 10000;
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = MTX_CMDID_SYNC | (3 << 8) |
++ (0x5b << 16);
++ sync_cmd[1] = dev_priv->topaz_sync_offset;
++ sync_cmd[2] = 0x3c;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ while (count && *sync_p != 0x3c) {
++ DRM_UDELAY(1000);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != 0x3c)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ 0x3c, *sync_p);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
++ }
++#endif
++#if 0
++ topaz_mmu_flush(dev);
++
++ topaz_test_null(dev, 0xe1e1);
++ topaz_test_null(dev, 0xe2e2);
++ topaz_mmu_test(dev, 0x12345678);
++ topaz_test_null(dev, 0xe3e3);
++ topaz_mmu_test(dev, 0x8764321);
++
++ topaz_test_null(dev, 0xe4e4);
++ topaz_test_null(dev, 0xf3f3);
++#endif
++
++ return 0;
++}
++
++#if UPLOAD_FW_BY_DMA
++int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++ uint32_t cur_mtx_data_size;
++
++ /* # refer HLD document */
++
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the firmware by DMA */
++ cur_codec_fw = &topaz_fw[codec];
++
++ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d"
++ " data location(%d)\n", codec_to_string(codec), codec,
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size / 4;
++
++ /* setup the MTX to start receiving data:
++ use a register for the transfer which will point to the source
++ (MTX_CR_MTX_SYSC_CDMAT) */
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(text_size, MTX_LENGTH));
++
++ /* #.# set DMAC access to host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
++
++ /* #.# wait dma finish */
++ topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* # return access to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size / 4;
++ data_location = cur_codec_fw->data_location;
++
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
++ 0x80900000 + data_location - 0x82880000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(data_size, MTX_LENGTH));
++
++ /* #.# set DMAC access to host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
++
++ /* #.# wait dma finish */
++ topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* # return access to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++
++ /* record this codec's mtx data size for
++ * context save & restore */
++ cur_mtx_data_size = RAM_SIZE - (data_location - 0x82880000);
++ if (dev_priv->cur_mtx_data_size != cur_mtx_data_size) {
++ dev_priv->cur_mtx_data_size = cur_mtx_data_size;
++ if (dev_priv->topaz_mtx_data_mem)
++ ttm_bo_unref(&dev_priv->topaz_mtx_data_mem);
++ dev_priv->topaz_mtx_data_mem = NULL;
++ }
++
++ return 0;
++}
++
++#else
++
++void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
++ uint32_t addr, uint32_t size,
++ struct ttm_buffer_object *buf)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t *buf_p;
++ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
++ uint32_t cur_ram_id, ram_addr , ram_id;
++ int map_ret, lp;
++ struct ttm_bo_kmap_obj bo_kmap;
++ bool is_iomem;
++ uint32_t cur_addr;
++
++ get_mtx_control_from_dash(dev_priv);
++
++ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
++ if (map_ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
++ return;
++ }
++ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
++
++
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
++ debug_reg = 0x0a0a0606;
++ bank_size = (debug_reg & 0xf0000) >> 16;
++ bank_ram_size = 1 << (bank_size + 2);
++
++ bank_count = (debug_reg & 0xf00) >> 8;
++
++ topaz_wait_for_register(dev_priv,
++ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
++
++ cur_ram_id = -1;
++ cur_addr = addr;
++ for (lp = 0; lp < size / 4; ++lp) {
++ ram_id = mtx_mem + (cur_addr / bank_ram_size);
++
++ if (cur_ram_id != ram_id) {
++ ram_addr = cur_addr >> 2;
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ F_ENCODE(ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMAI));
++
++ cur_ram_id = ram_id;
++ }
++ cur_addr += 4;
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
++ *(buf_p + lp));
++
++ topaz_wait_for_register(dev_priv,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
++ }
++
++ ttm_bo_kunmap(&bo_kmap);
++
++ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
++ return;
++}
++
++int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++
++ /* # refer HLD document */
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the firmware by DMA */
++ cur_codec_fw = &topaz_fw[codec];
++
++ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
++ " data location(0x%08x)\n", codec_to_string(codec),
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size;
++
++ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
++ PC_START_ADDRESS - MTX_MEMORY_BASE,
++ text_size, cur_codec_fw->text);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size;
++ data_location = cur_codec_fw->data_location;
++
++ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
++ data_location - 0x82880000, data_size,
++ cur_codec_fw->data);
++
++ return 0;
++}
++
++#endif /* UPLOAD_FW_BY_DMA */
++
++void
++topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
++ uint32_t src_phy_addr, uint32_t offset,
++ uint32_t soc_addr, uint32_t byte_num,
++ uint32_t is_increment, uint32_t is_write)
++{
++ uint32_t dmac_count;
++ uint32_t irq_stat;
++ uint32_t count;
++
++ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
++ /* # check that no transfer is currently in progress and no
++ interrupts are outstanding ?? (why care interrupt) */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
++ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
++ DRM_ERROR("TOPAZ: there is tranfer in progress\n");
++
++ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++ /* clear previous interrupts */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ /* check irq status */
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
++ /* assert(0 == irq_stat); */
++ if (0 != irq_stat)
++ DRM_ERROR("TOPAZ: there is hold up\n");
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
++ (src_phy_addr + offset));
++ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
++ is_write, DMAC_PWIDTH_32_BIT, byte_num);
++ /* generate an interrupt at the end of transfer */
++ count |= MASK_IMG_SOC_TRANSFER_IEN;
++ count |= F_ENCODE(is_write, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
++ is_increment, DMAC_BURST_2));
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with
++ * the enable bit set to kick off the transfer
++ */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
++
++ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
++
++ return;
++}
++
++void topaz_set_default_regs(struct drm_psb_private *dev_priv)
++{
++ int n;
++ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++
++ for (n = 0; n < count; n++)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ topaz_default_regs[n][2]);
++
++}
++
++void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
++ const uint32_t val)
++{
++ uint32_t tmp;
++ get_mtx_control_from_dash(dev_priv);
++
++ /* put data into MTX_RW_DATA */
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
++
++ /* request a write */
++ tmp = reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
++
++ /* wait for operation finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ release_mtx_control_from_dash(dev_priv);
++}
++
++void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
++ uint32_t *ret_val)
++{
++ uint32_t tmp;
++
++ get_mtx_control_from_dash(dev_priv);
++
++ /* request a write */
++ tmp = (reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
++
++ /* wait for operation finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ /* read */
++ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
++ ret_val);
++
++ release_mtx_control_from_dash(dev_priv);
++}
++
++void get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ int debug_reg_slave_val;
++
++ /* GetMTXControlFromDash */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
++ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT));
++ do {
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ &debug_reg_slave_val);
++ } while ((debug_reg_slave_val & 0x18) != 0);
++
++ /* save access control */
++ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ &dev_priv->topaz_dash_access_ctrl);
++}
++
++void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ /* restore access control */
++ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ dev_priv->topaz_dash_access_ctrl);
++
++ /* release bus */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE));
++}
++
++void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
++{
++ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
++
++ /* bypass all request while MMU is being configured */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
++ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS));
++
++ /* set MMU hardware at the page table directory */
++ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
++ "into MMU_DIR_LIST0/1\n", pd_addr);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0);
++
++ /* setup index register, all pointing to directory bank 0 */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0);
++
++ /* now enable MMU access for all requestors */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0);
++}
++
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
++{
++ uint32_t mmu_control;
++
++#if 0
++ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
++ " so flush using the master core\n");
++#endif
++ /* XXX: disable interrupt */
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);
++
++#if 0
++ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
++ "still operating afterwards even if not cleared,\n"
++ "but may want to replace with MMU_FLUSH?\n");
++#endif
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
++
++ /* clear it */
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
++}
++
++#if 0 /* DEBUG_FUNCTION */
++struct reg_pair {
++ uint32_t base;
++ uint32_t offset;
++};
++
++
++static int ccb_offset;
++
++static int topaz_test_null(struct drm_device *dev, uint32_t seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ /* XXX: here we finished firmware setup....
++ * using a NULL command to verify the
++ * correctness of firmware
++ */
++ uint32_t null_cmd;
++ uint32_t cmd_seq;
++
++ null_cmd = 0 | (1 << 8) | (seq) << 16;
++ topaz_write_mtx_mem(dev_priv,
++ dev_priv->topaz_ccb_buffer_addr + ccb_offset,
++ null_cmd);
++
++ topaz_mtx_kick(dev_priv, 1);
++
++ DRM_UDELAY(1000); /* wait to finish */
++
++ cmd_seq = topaz_read_mtx_mem(dev_priv,
++ dev_priv->topaz_ccb_ctrl_addr + 4);
++
++ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
++ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
++ seq, cmd_seq, WB_SEQ, WB_ROFF);
++
++ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
++
++ topaz_test_queryirq(dev);
++ topaz_test_clearirq(dev);
++
++ ccb_offset += 4;
++
++ return 0;
++}
++
++void topaz_mmu_flush(struct drm_psb_private *dev_priv)
++{
++ uint32_t val;
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &val);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
++ val | F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
++ wmb();
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
++ val & ~F_ENCODE(0, TOPAZ_CR_MMU_INVALDC));
++ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &val);
++}
++
++/*
++ * this function will test whether the mmu is correct:
++ * it get a drm_buffer_object and use CMD_SYNC to write
++ * certain value into this buffer.
++ */
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sync_cmd;
++ unsigned long real_pfn;
++ int ret;
++ uint32_t cmd_seq;
++
++ *((uint32_t *)dev_priv->topaz_sync_addr) = 0xeeeeeeee;
++
++ /* topaz_mmu_flush(dev); */
++
++ sync_cmd = MTX_CMDID_SYNC | (3 << 8) | (0xeeee) << 16;
++
++ topaz_write_mtx_mem_multiple_setup(dev_priv,
++ dev_priv->topaz_ccb_buffer_addr + ccb_offset);
++
++ topaz_write_mtx_mem_multiple(dev_priv, sync_cmd);
++ topaz_write_mtx_mem_multiple(dev_priv, dev_priv->topaz_sync_offset);
++ topaz_write_mtx_mem_multiple(dev_priv, sync_value);
++
++ topaz_mtx_kick(dev_priv, 1);
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ dev_priv->topaz_sync_offset, &real_pfn);
++ if (ret != 0) {
++ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n");
++ return;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
++ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
++ dev_priv->topaz_sync_offset, real_pfn, sync_value);
++
++ /* XXX: if we can use interrupt, we can wait this command finish */
++ /* topaz_wait_for_register (dev_priv,
++ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT, 0xf, 0xf); */
++ DRM_UDELAY(1000);
++
++ cmd_seq = topaz_read_mtx_mem(dev_priv,
++ dev_priv->topaz_ccb_ctrl_addr + 4);
++ PSB_DEBUG_GENERAL("Topaz: cmd_seq equals 0x%x, and expected 0x%x "
++ "(WB_seq=0x%08x,WB_roff=%d),synch value is 0x%x,"
++ "expected 0x%08x\n",
++ cmd_seq, 0xeeee, WB_SEQ, WB_ROFF,
++ *((uint32_t *)dev_priv->topaz_sync_addr), sync_value);
++
++ PSB_DEBUG_GENERAL("Topaz: after MMU test, query IRQ and clear it\n");
++ topaz_test_queryirq(dev);
++ topaz_test_clearirq(dev);
++
++ ccb_offset += 3*4; /* shift 3DWs */
++}
++
++#endif
++
++int lnc_topaz_restore_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t reg_val;
++ uint32_t *mtx_reg_state;
++ int i;
++
++ if (dev_priv->topaz_mtx_data_mem == NULL) {
++ DRM_ERROR("TOPAZ: try to restore context without "
++ "space allocated\n");
++ return -1;
++ }
++
++ /* turn on mtx clocks */
++ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
++
++ /* reset mtx */
++ /* FIXME: should use core_write??? */
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++ DRM_UDELAY(6000);
++
++ topaz_mmu_hwsetup(dev_priv);
++ /* upload code, restore mtx data */
++ mtx_dma_write(dev);
++
++ mtx_reg_state = dev_priv->topaz_mtx_reg_state;
++ /* restore register */
++	/* FIXME: consider putting read/write into one function */
++	/* Restores 8 registers of D0 Bank */
++	/* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x1 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++	/* Restores 8 registers of D1 Bank */
++	/* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x2 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++	/* Restores 4 registers of A0 Bank */
++	/* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, 0x3 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++	/* Restores 4 registers of A1 Bank */
++	/* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, 0x4 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++	/* Restores PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_write_core_reg(dev_priv, 0x5 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++	/* Restores 8 control registers */
++	/* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++	 * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x7 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* turn on MTX */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ return 0;
++}
++
++int lnc_topaz_save_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t *mtx_reg_state;
++ int i;
++ struct topaz_codec_fw *cur_codec_fw;
++
++ /* FIXME: make sure the topaz_mtx_data_mem is allocated */
++ if (dev_priv->topaz_mtx_data_mem == NULL) {
++ DRM_ERROR("TOPAZ: try to save context without space "
++ "allocated\n");
++ return -1;
++ }
++
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET,
++ TXRPT_WAITONKICK_VALUE,
++ 0xffffffff);
++
++ /* stop mtx */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK);
++
++ mtx_reg_state = dev_priv->topaz_mtx_reg_state;
++
++	/* FIXME: consider putting read/write into one function */
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x1 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x2 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, 0x3 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, 0x4 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_read_core_reg(dev_priv, 0x5 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x7 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* save mtx data memory */
++ cur_codec_fw = &topaz_fw[dev_priv->topaz_cur_codec];
++
++ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000,
++ dev_priv->cur_mtx_data_size);
++
++ /* turn off mtx clocks */
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE);
++
++ return 0;
++}
++
++void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct ttm_buffer_object *target;
++
++ /* setup mtx DMAC registers to do transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(1, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(size, MTX_LENGTH));
++
++ /* give the DMAC access to the host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ target = dev_priv->topaz_mtx_data_mem;
++	/* transfer the data */
++	/* FIXME: is size measured in bytes? */
++ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT,
++ size, 0, 1);
++
++ /* wait for it transfer */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++ /* give access back to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++}
++
++void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
++ uint32_t soc_addr, uint32_t bytes_num,
++ int increment, int rnw)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t count_reg;
++ uint32_t irq_state;
++
++ /* check no transfer is in progress */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
++ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
++ DRM_ERROR("TOPAZ: there's transfer in progress when wanna "
++ "save mtx data\n");
++ /* FIXME: how to handle this error */
++ return;
++ }
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++	/* clear irq state */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
++ if (0 != irq_state) {
++ DRM_ERROR("TOPAZ: there's irq cann't clear\n");
++ return;
++ }
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
++ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
++ DMAC_PWIDTH_32_BIT, rnw,
++ DMAC_PWIDTH_32_BIT, bytes_num);
++ /* generate an interrupt at end of transfer */
++ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
++ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
++ DMAC_BURST_2));
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with the enable
++ * bit set to kick off the transfer */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
++ count_reg | MASK_IMG_SOC_EN);
++}
++
++void mtx_dma_write(struct drm_device *dev)
++{
++ struct topaz_codec_fw *cur_codec_fw;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ cur_codec_fw = &topaz_fw[dev_priv->topaz_cur_codec];
++
++ /* upload code */
++	/* setup mtx DMAC registers to receive transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
++
++ /* give DMAC access to host memory */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* transfer code */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
++ 0, 0);
++ /* wait finished */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++	/* set up mtx to start receiving data */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
++ (cur_codec_fw->data_location) - 0x82880000);
++
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(dev_priv->cur_mtx_data_size, MTX_LENGTH));
++
++ /* give DMAC access to host memory */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* transfer data */
++ topaz_dma_transfer(dev_priv, 0, dev_priv->topaz_mtx_data_mem->offset,
++ 0, MTX_CR_MTX_SYSC_CDMAT,
++ dev_priv->cur_mtx_data_size,
++ 0, 0);
++ /* wait finished */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* give access back to Topaz Core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++}
++
++#if 0
++void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_READ32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ data);
++
++}
++
++void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ *data);
++
++}
++#endif
+diff -uNr a/drivers/gpu/drm/psb/Makefile b/drivers/gpu/drm/psb/Makefile
+--- a/drivers/gpu/drm/psb/Makefile 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/Makefile 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,18 @@
++#
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/psb
++
++psb-y := psb_drv.o psb_mmu.o psb_sgx.o psb_irq.o psb_fence.o \
++ psb_buffer.o psb_gtt.o psb_schedule.o psb_scene.o \
++ psb_reset.o psb_xhw.o psb_msvdx.o \
++ lnc_topaz.o lnc_topazinit.o \
++ psb_msvdxinit.o psb_ttm_glue.o psb_fb.o psb_setup.o \
++ ttm/ttm_object.o ttm/ttm_lock.o ttm/ttm_fence_user.o \
++ ttm/ttm_fence.o ttm/ttm_tt.o ttm/ttm_execbuf_util.o \
++ ttm/ttm_bo.o ttm/ttm_bo_util.o ttm/ttm_placement_user.o \
++ ttm/ttm_bo_vm.o ttm/ttm_pat_compat.o ttm/ttm_memory.o
++
++obj-$(CONFIG_DRM_PSB) += psb.o
++
+diff -uNr a/drivers/gpu/drm/psb/psb_buffer.c b/drivers/gpu/drm/psb/psb_buffer.c
+--- a/drivers/gpu/drm/psb/psb_buffer.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_buffer.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,504 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++#include "ttm/ttm_placement_common.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_fence_api.h"
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_schedule.h"
++
++#define DRM_MEM_TTM 26
++
++struct drm_psb_ttm_backend {
++ struct ttm_backend base;
++ struct page **pages;
++ unsigned int desired_tile_stride;
++ unsigned int hw_tile_stride;
++ int mem_type;
++ unsigned long offset;
++ unsigned long num_pages;
++};
++
++/*
++ * Poulsbo GPU virtual space looks like this
++ * (We currently use only one MMU context).
++ *
++ * gatt_start = Start of GATT aperture in bus space.
++ * stolen_end = End of GATT populated by stolen memory in bus space.
++ * gatt_end = End of GATT
++ * twod_end = MIN(gatt_start + 256_MEM, gatt_end)
++ *
++ * 0x00000000 -> 0x10000000 Temporary mapping space for tiling-
++ * and copy operations.
++ * This space is not managed and is protected by the
++ * temp_mem mutex.
++ *
++ * 0x10000000 -> 0x20000000 DRM_PSB_MEM_KERNEL For kernel buffers.
++ *
++ * 0x20000000 -> gatt_start DRM_PSB_MEM_MMU For generic MMU-only use.
++ *
++ * gatt_start -> stolen_end TTM_PL_VRAM Pre-populated GATT pages.
++ *
++ * stolen_end -> twod_end TTM_PL_TT GATT memory usable by 2D engine.
++ *
++ * twod_end -> gatt_end DRM_BO_MEM_APER GATT memory not
++ * usable by 2D engine.
++ *
++ * gatt_end -> 0xffffffff Currently unused.
++ */
++
++static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
++ struct ttm_mem_type_manager *man)
++{
++
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct psb_gtt *pg = dev_priv->pg;
++
++ switch (type) {
++ case TTM_PL_SYSTEM:
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_CACHED;
++ break;
++ case DRM_PSB_MEM_KERNEL:
++ man->io_offset = 0x00000000;
++ man->io_size = 0x00000000;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++ man->gpu_offset = PSB_MEM_KERNEL_START;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case DRM_PSB_MEM_MMU:
++ man->io_offset = 0x00000000;
++ man->io_size = 0x00000000;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++ man->gpu_offset = PSB_MEM_MMU_START;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case DRM_PSB_MEM_PDS:
++ man->io_offset = 0x00000000;
++ man->io_size = 0x00000000;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++ man->gpu_offset = PSB_MEM_PDS_START;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case DRM_PSB_MEM_RASTGEOM:
++ man->io_offset = 0x00000000;
++ man->io_size = 0x00000000;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++ man->gpu_offset = PSB_MEM_RASTGEOM_START;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case TTM_PL_VRAM:
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++#ifdef PSB_WORKING_HOST_MMU_ACCESS
++ man->io_offset = pg->gatt_start;
++ man->io_size = pg->gatt_pages << PAGE_SHIFT;
++#else
++ man->io_offset = pg->stolen_base;
++ man->io_size = pg->vram_stolen_size;
++#endif
++ man->gpu_offset = pg->gatt_start;
++ man->available_caching = TTM_PL_FLAG_UNCACHED |
++ TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case TTM_PL_CI:
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++ man->io_offset = dev_priv->ci_region_start;
++ man->io_size = pg->ci_stolen_size;
++ man->gpu_offset = pg->gatt_start - pg->ci_stolen_size;
++ man->available_caching = TTM_PL_FLAG_UNCACHED;
++ man->default_caching = TTM_PL_FLAG_UNCACHED;
++ break;
++ case TTM_PL_TT: /* Mappable GATT memory */
++ man->io_offset = pg->gatt_start;
++ man->io_size = pg->gatt_pages << PAGE_SHIFT;
++ man->io_addr = NULL;
++#ifdef PSB_WORKING_HOST_MMU_ACCESS
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++#else
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++#endif
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ man->gpu_offset = pg->gatt_start;
++ break;
++ case DRM_PSB_MEM_APER: /*MMU memory. Mappable. Not usable for 2D. */
++ man->io_offset = pg->gatt_start;
++ man->io_size = pg->gatt_pages << PAGE_SHIFT;
++ man->io_addr = NULL;
++#ifdef PSB_WORKING_HOST_MMU_ACCESS
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++#else
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++#endif
++ man->gpu_offset = pg->gatt_start;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ default:
++ DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
++{
++ uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
++
++
++ switch (bo->mem.mem_type) {
++ case TTM_PL_VRAM:
++ if (bo->mem.proposed_flags & TTM_PL_FLAG_TT)
++ return cur_placement | TTM_PL_FLAG_TT;
++ else
++ return cur_placement | TTM_PL_FLAG_SYSTEM;
++ default:
++ return cur_placement | TTM_PL_FLAG_SYSTEM;
++ }
++}
++
++static int psb_invalidate_caches(struct ttm_bo_device *bdev,
++ uint32_t placement)
++{
++ return 0;
++}
++
++static int psb_move_blit(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(bo->bdev, struct drm_psb_private, bdev);
++ struct drm_device *dev = dev_priv->dev;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_fence_object *fence;
++ int dir = 0;
++ int ret;
++
++ if ((old_mem->mem_type == new_mem->mem_type) &&
++ (new_mem->mm_node->start <
++ old_mem->mm_node->start + old_mem->mm_node->size)) {
++ dir = 1;
++ }
++
++ psb_emit_2d_copy_blit(dev,
++ old_mem->mm_node->start << PAGE_SHIFT,
++ new_mem->mm_node->start << PAGE_SHIFT,
++ new_mem->num_pages, dir);
++
++ ret = ttm_fence_object_create(&dev_priv->fdev, 0,
++ _PSB_FENCE_TYPE_EXE,
++ TTM_FENCE_FLAG_EMIT,
++ &fence);
++ if (unlikely(ret != 0)) {
++ psb_idle_2d(dev);
++ if (fence)
++ ttm_fence_object_unref(&fence);
++ }
++
++ ret = ttm_bo_move_accel_cleanup(bo, (void *) fence,
++ (void *) (unsigned long)
++ _PSB_FENCE_TYPE_EXE,
++ evict, no_wait, new_mem);
++ if (fence)
++ ttm_fence_object_unref(&fence);
++ return ret;
++}
++
++/*
++ * Flip destination ttm into GATT,
++ * then blit and subsequently move out again.
++ */
++
++static int psb_move_flip(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg tmp_mem;
++ int ret;
++
++ tmp_mem = *new_mem;
++ tmp_mem.mm_node = NULL;
++ tmp_mem.proposed_flags = TTM_PL_FLAG_TT;
++
++ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait);
++ if (ret)
++ return ret;
++ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
++ if (ret)
++ goto out_cleanup;
++ ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
++ if (ret)
++ goto out_cleanup;
++
++ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
++out_cleanup:
++ if (tmp_mem.mm_node) {
++ spin_lock(&bdev->lru_lock);
++ drm_mm_put_block(tmp_mem.mm_node);
++ tmp_mem.mm_node = NULL;
++ spin_unlock(&bdev->lru_lock);
++ }
++ return ret;
++}
++
++static int psb_move(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ if (old_mem->mem_type == TTM_PL_SYSTEM) {
++ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++ } else if (new_mem->mem_type == TTM_PL_SYSTEM) {
++ int ret = psb_move_flip(bo, evict, interruptible,
++ no_wait, new_mem);
++ if (unlikely(ret != 0)) {
++ if (ret == -ERESTART)
++ return ret;
++ else
++ return ttm_bo_move_memcpy(bo, evict, no_wait,
++ new_mem);
++ }
++ } else {
++ if (psb_move_blit(bo, evict, no_wait, new_mem))
++ return ttm_bo_move_memcpy(bo, evict, no_wait,
++ new_mem);
++ }
++ return 0;
++}
++
++static int drm_psb_tbe_populate(struct ttm_backend *backend,
++ unsigned long num_pages,
++ struct page **pages,
++ struct page *dummy_read_page)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ psb_be->pages = pages;
++ return 0;
++}
++
++static int drm_psb_tbe_unbind(struct ttm_backend *backend)
++{
++ struct ttm_bo_device *bdev = backend->bdev;
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
++ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type];
++
++ PSB_DEBUG_RENDER("MMU unbind.\n");
++
++ if (psb_be->mem_type == TTM_PL_TT) {
++ uint32_t gatt_p_offset =
++ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
++
++ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride);
++ }
++
++ psb_mmu_remove_pages(pd, psb_be->offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride);
++
++ return 0;
++}
++
++static int drm_psb_tbe_bind(struct ttm_backend *backend,
++ struct ttm_mem_reg *bo_mem)
++{
++ struct ttm_bo_device *bdev = backend->bdev;
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
++ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
++ int type;
++ int ret = 0;
++
++ psb_be->mem_type = bo_mem->mem_type;
++ psb_be->num_pages = bo_mem->num_pages;
++ psb_be->desired_tile_stride = 0;
++ psb_be->hw_tile_stride = 0;
++ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
++ man->gpu_offset;
++
++ type =
++ (bo_mem->
++ flags & TTM_PL_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
++
++ PSB_DEBUG_RENDER("MMU bind.\n");
++ if (psb_be->mem_type == TTM_PL_TT) {
++ uint32_t gatt_p_offset =
++ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
++
++ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
++ gatt_p_offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride, type);
++ }
++
++ ret = psb_mmu_insert_pages(pd, psb_be->pages,
++ psb_be->offset, psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride, type);
++ if (ret)
++ goto out_err;
++
++ return 0;
++out_err:
++ drm_psb_tbe_unbind(backend);
++ return ret;
++
++}
++
++static void drm_psb_tbe_clear(struct ttm_backend *backend)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ psb_be->pages = NULL;
++ return;
++}
++
++static void drm_psb_tbe_destroy(struct ttm_backend *backend)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ if (backend)
++ drm_free(psb_be, sizeof(*psb_be), DRM_MEM_TTM);
++}
++
++static struct ttm_backend_func psb_ttm_backend = {
++ .populate = drm_psb_tbe_populate,
++ .clear = drm_psb_tbe_clear,
++ .bind = drm_psb_tbe_bind,
++ .unbind = drm_psb_tbe_unbind,
++ .destroy = drm_psb_tbe_destroy,
++};
++
++static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
++{
++ struct drm_psb_ttm_backend *psb_be;
++
++ psb_be = drm_calloc(1, sizeof(*psb_be), DRM_MEM_TTM);
++ if (!psb_be)
++ return NULL;
++ psb_be->pages = NULL;
++ psb_be->base.func = &psb_ttm_backend;
++ psb_be->base.bdev = bdev;
++ return &psb_be->base;
++}
++
++/*
++ * Use this memory type priority if no eviction is needed.
++ */
++static uint32_t psb_mem_prios[] = {
++ TTM_PL_CI,
++ TTM_PL_VRAM,
++ TTM_PL_TT,
++ DRM_PSB_MEM_KERNEL,
++ DRM_PSB_MEM_MMU,
++ DRM_PSB_MEM_RASTGEOM,
++ DRM_PSB_MEM_PDS,
++ DRM_PSB_MEM_APER,
++ TTM_PL_SYSTEM
++};
++
++/*
++ * Use this memory type priority if need to evict.
++ */
++static uint32_t psb_busy_prios[] = {
++ TTM_PL_TT,
++ TTM_PL_VRAM,
++ TTM_PL_CI,
++ DRM_PSB_MEM_KERNEL,
++ DRM_PSB_MEM_MMU,
++ DRM_PSB_MEM_RASTGEOM,
++ DRM_PSB_MEM_PDS,
++ DRM_PSB_MEM_APER,
++ TTM_PL_SYSTEM
++};
++
++
++struct ttm_bo_driver psb_ttm_bo_driver = {
++ .mem_type_prio = psb_mem_prios,
++ .mem_busy_prio = psb_busy_prios,
++ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
++ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
++ .create_ttm_backend_entry = &drm_psb_tbe_init,
++ .invalidate_caches = &psb_invalidate_caches,
++ .init_mem_type = &psb_init_mem_type,
++ .evict_flags = &psb_evict_mask,
++ .move = &psb_move,
++ .verify_access = &psb_verify_access,
++ .sync_obj_signaled = &ttm_fence_sync_obj_signaled,
++ .sync_obj_wait = &ttm_fence_sync_obj_wait,
++ .sync_obj_flush = &ttm_fence_sync_obj_flush,
++ .sync_obj_unref = &ttm_fence_sync_obj_unref,
++ .sync_obj_ref = &ttm_fence_sync_obj_ref
++};
+diff -uNr a/drivers/gpu/drm/psb/psb_drm.h b/drivers/gpu/drm/psb/psb_drm.h
+--- a/drivers/gpu/drm/psb/psb_drm.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_drm.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,444 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++
++#ifndef _PSB_DRM_H_
++#define _PSB_DRM_H_
++
++#if defined(__linux__) && !defined(__KERNEL__)
++#include<stdint.h>
++#endif
++
++#include "ttm/ttm_fence_user.h"
++#include "ttm/ttm_placement_user.h"
++
++#define DRM_PSB_SAREA_MAJOR 0
++#define DRM_PSB_SAREA_MINOR 2
++#define PSB_FIXED_SHIFT 16
++
++#define DRM_PSB_FIRST_TA_USE_REG 3
++#define DRM_PSB_NUM_TA_USE_REG 6
++#define DRM_PSB_FIRST_RASTER_USE_REG 8
++#define DRM_PSB_NUM_RASTER_USE_REG 7
++
++/*
++ * Public memory types.
++ */
++
++#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
++#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
++#define DRM_PSB_MEM_PDS TTM_PL_PRIV2
++#define DRM_PSB_FLAG_MEM_PDS TTM_PL_FLAG_PRIV2
++#define DRM_PSB_MEM_APER TTM_PL_PRIV3
++#define DRM_PSB_FLAG_MEM_APER TTM_PL_FLAG_PRIV3
++#define DRM_PSB_MEM_RASTGEOM TTM_PL_PRIV4
++#define DRM_PSB_FLAG_MEM_RASTGEOM TTM_PL_FLAG_PRIV4
++#define PSB_MEM_RASTGEOM_START 0x30000000
++
++typedef int32_t psb_fixed;
++typedef uint32_t psb_ufixed;
++
++static inline int32_t psb_int_to_fixed(int a)
++{
++ return a * (1 << PSB_FIXED_SHIFT);
++}
++
++static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
++{
++ return a << PSB_FIXED_SHIFT;
++}
++
++/*Status of the command sent to the gfx device.*/
++typedef enum {
++ DRM_CMD_SUCCESS,
++ DRM_CMD_FAILED,
++ DRM_CMD_HANG
++} drm_cmd_status_t;
++
++struct drm_psb_scanout {
++ uint32_t buffer_id; /* DRM buffer object ID */
++ uint32_t rotation; /* Rotation as in RR_rotation definitions */
++ uint32_t stride; /* Buffer stride in bytes */
++ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
++ uint32_t width; /* Buffer width in pixels */
++ uint32_t height; /* Buffer height in lines */
++ int32_t transform[3][3]; /* Buffer composite transform */
++ /* (scaling, rot, reflect) */
++};
++
++#define DRM_PSB_SAREA_OWNERS 16
++#define DRM_PSB_SAREA_OWNER_2D 0
++#define DRM_PSB_SAREA_OWNER_3D 1
++
++#define DRM_PSB_SAREA_SCANOUTS 3
++
++struct drm_psb_sarea {
++ /* Track changes of this data structure */
++
++ uint32_t major;
++ uint32_t minor;
++
++ /* Last context to touch part of hw */
++ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
++
++ /* Definition of front- and rotated buffers */
++ uint32_t num_scanouts;
++ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
++
++ int planeA_x;
++ int planeA_y;
++ int planeA_w;
++ int planeA_h;
++ int planeB_x;
++ int planeB_y;
++ int planeB_w;
++ int planeB_h;
++ /* Number of active scanouts */
++ uint32_t num_active_scanouts;
++};
++
++#define PSB_RELOC_MAGIC 0x67676767
++#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
++#define PSB_RELOC_SHIFT_SHIFT 0
++#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
++#define PSB_RELOC_ALSHIFT_SHIFT 16
++
++#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
++ * buffer
++ */
++#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
++ * buffer, relative to 2D
++ * base address
++ */
++#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
++ * relative to PDS base address
++ */
++#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
++ * buffer (for tiling)
++ */
++#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
++ * relative to base reg
++ */
++#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
++
++struct drm_psb_reloc {
++ uint32_t reloc_op;
++ uint32_t where; /* offset in destination buffer */
++ uint32_t buffer; /* Buffer reloc applies to */
++ uint32_t mask; /* Destination format: */
++ uint32_t shift; /* Destination format: */
++ uint32_t pre_add; /* Destination format: */
++ uint32_t background; /* Destination add */
++ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
++ uint32_t arg0; /* Reloc-op dependant */
++ uint32_t arg1;
++};
++
++
++#define PSB_GPU_ACCESS_READ (1ULL << 32)
++#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
++#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
++
++#define PSB_BO_FLAG_TA (1ULL << 48)
++#define PSB_BO_FLAG_SCENE (1ULL << 49)
++#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
++#define PSB_BO_FLAG_USSE (1ULL << 51)
++#define PSB_BO_FLAG_COMMAND (1ULL << 52)
++
++#define PSB_ENGINE_2D 0
++#define PSB_ENGINE_VIDEO 1
++#define PSB_ENGINE_RASTERIZER 2
++#define PSB_ENGINE_TA 3
++#define PSB_ENGINE_HPRAST 4
++#define LNC_ENGINE_ENCODE 5
++
++#define PSB_DEVICE_SGX 0x1
++#define PSB_DEVICE_DISLAY 0x2
++#define PSB_DEVICE_MSVDX 0x4
++#define PSB_DEVICE_TOPAZ 0x8
++
++/*
++ * For this fence class we have a couple of
++ * fence types.
++ */
++
++#define _PSB_FENCE_EXE_SHIFT 0
++#define _PSB_FENCE_TA_DONE_SHIFT 1
++#define _PSB_FENCE_RASTER_DONE_SHIFT 2
++#define _PSB_FENCE_SCENE_DONE_SHIFT 3
++#define _PSB_FENCE_FEEDBACK_SHIFT 4
++
++#define _PSB_ENGINE_TA_FENCE_TYPES 5
++#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
++#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
++#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
++#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
++#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
++
++#define PSB_ENGINE_HPRAST 4
++#define PSB_NUM_ENGINES 6
++
++#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
++#define PSB_TA_FLAG_LASTPASS (1 << 1)
++
++#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
++
++struct drm_psb_extension_rep {
++ int32_t exists;
++ uint32_t driver_ioctl_offset;
++ uint32_t sarea_offset;
++ uint32_t major;
++ uint32_t minor;
++ uint32_t pl;
++};
++
++#define DRM_PSB_EXT_NAME_LEN 128
++
++union drm_psb_extension_arg {
++ char extension[DRM_PSB_EXT_NAME_LEN];
++ struct drm_psb_extension_rep rep;
++};
++
++struct psb_validate_req {
++ uint64_t set_flags;
++ uint64_t clear_flags;
++ uint64_t next;
++ uint64_t presumed_gpu_offset;
++ uint32_t buffer_handle;
++ uint32_t presumed_flags;
++ uint32_t group;
++ uint32_t pad64;
++};
++
++struct psb_validate_rep {
++ uint64_t gpu_offset;
++ uint32_t placement;
++ uint32_t fence_type_mask;
++};
++
++#define PSB_USE_PRESUMED (1 << 0)
++
++struct psb_validate_arg {
++ int handled;
++ int ret;
++ union {
++ struct psb_validate_req req;
++ struct psb_validate_rep rep;
++ } d;
++};
++
++struct drm_psb_scene {
++ int handle_valid;
++ uint32_t handle;
++ uint32_t w; /* also contains msaa info */
++ uint32_t h;
++ uint32_t num_buffers;
++};
++
++#define DRM_PSB_FENCE_NO_USER (1 << 0)
++
++struct psb_ttm_fence_rep {
++ uint32_t handle;
++ uint32_t fence_class;
++ uint32_t fence_type;
++ uint32_t signaled_types;
++ uint32_t error;
++};
++
++typedef struct drm_psb_cmdbuf_arg {
++ uint64_t buffer_list; /* List of buffers to validate */
++ uint64_t clip_rects; /* See i915 counterpart */
++ uint64_t scene_arg;
++ uint64_t fence_arg;
++
++ uint32_t ta_flags;
++
++ uint32_t ta_handle; /* TA reg-value pairs */
++ uint32_t ta_offset;
++ uint32_t ta_size;
++
++ uint32_t oom_handle;
++ uint32_t oom_offset;
++ uint32_t oom_size;
++
++ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
++ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
++ uint32_t cmdbuf_size;
++
++ uint32_t reloc_handle; /* Reloc buffer object */
++ uint32_t reloc_offset;
++ uint32_t num_relocs;
++
++ int32_t damage; /* Damage front buffer with cliprects */
++ /* Not implemented yet */
++ uint32_t fence_flags;
++ uint32_t engine;
++
++ /*
++ * Feedback;
++ */
++
++ uint32_t feedback_ops;
++ uint32_t feedback_handle;
++ uint32_t feedback_offset;
++ uint32_t feedback_breakpoints;
++ uint32_t feedback_size;
++}drm_psb_cmdbuf_arg_t;
++
++struct drm_psb_xhw_init_arg {
++ uint32_t operation;
++ uint32_t buffer_handle;
++};
++
++/*
++ * Feedback components:
++ */
++
++/*
++ * Vistest component. The number of these in the feedback buffer
++ * equals the number of vistest breakpoints + 1.
++ * This is currently the only feedback component.
++ */
++
++struct drm_psb_vistest {
++ uint32_t vt[8];
++};
++
++#define PSB_HW_COOKIE_SIZE 16
++#define PSB_HW_FEEDBACK_SIZE 8
++#define PSB_HW_OOM_CMD_SIZE (6 + DRM_PSB_NUM_RASTER_USE_REG * 2)
++
++struct drm_psb_xhw_arg {
++ uint32_t op;
++ int ret;
++ uint32_t irq_op;
++ uint32_t issue_irq;
++ uint32_t cookie[PSB_HW_COOKIE_SIZE];
++ union {
++ struct {
++ uint32_t w; /* also contains msaa info */
++ uint32_t h;
++ uint32_t size;
++ uint32_t clear_p_start;
++ uint32_t clear_num_pages;
++ } si;
++ struct {
++ uint32_t fire_flags;
++ uint32_t hw_context;
++ uint32_t offset;
++ uint32_t engine;
++ uint32_t flags;
++ uint32_t rca;
++ uint32_t num_oom_cmds;
++ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
++ } sb;
++ struct {
++ uint32_t pages;
++ uint32_t size;
++ uint32_t ta_min_size;
++ } bi;
++ struct {
++ uint32_t bca;
++ uint32_t rca;
++ uint32_t flags;
++ } oom;
++ struct {
++ uint32_t pt_offset;
++ uint32_t param_offset;
++ uint32_t flags;
++ } bl;
++ struct {
++ uint32_t value;
++ } cl;
++ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
++ } arg;
++};
++
++/* Controlling the kernel modesetting buffers */
++
++#define DRM_PSB_KMS_OFF 0x00
++#define DRM_PSB_KMS_ON 0x01
++#define DRM_PSB_VT_LEAVE 0x02
++#define DRM_PSB_VT_ENTER 0x03
++#define DRM_PSB_XHW_INIT 0x04
++#define DRM_PSB_XHW 0x05
++#define DRM_PSB_EXTENSION 0x06
++
++/*
++ * Xhw commands.
++ */
++
++#define PSB_XHW_INIT 0x00
++#define PSB_XHW_TAKEDOWN 0x01
++
++#define PSB_XHW_FIRE_RASTER 0x00
++#define PSB_XHW_SCENE_INFO 0x01
++#define PSB_XHW_SCENE_BIND_FIRE 0x02
++#define PSB_XHW_TA_MEM_INFO 0x03
++#define PSB_XHW_RESET_DPM 0x04
++#define PSB_XHW_OOM 0x05
++#define PSB_XHW_TERMINATE 0x06
++#define PSB_XHW_VISTEST 0x07
++#define PSB_XHW_RESUME 0x08
++#define PSB_XHW_TA_MEM_LOAD 0x09
++#define PSB_XHW_CHECK_LOCKUP 0x0a
++
++#define PSB_SCENE_FLAG_DIRTY (1 << 0)
++#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
++#define PSB_SCENE_FLAG_SETUP (1 << 2)
++#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
++#define PSB_SCENE_FLAG_CLEARED (1 << 4)
++
++#define PSB_TA_MEM_FLAG_TA (1 << 0)
++#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
++#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
++#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
++#define PSB_TA_MEM_FLAG_INIT (1 << 4)
++#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
++
++/*Raster fire will deallocate memory */
++#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
++/*Isp reset needed due to change in ZLS format */
++#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
++/*These are set by Xpsb. */
++#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
++/*The task has had at least one OOM and Xpsb will
++ send back messages on each fire. */
++#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
++
++#define PSB_SCENE_ENGINE_TA 0
++#define PSB_SCENE_ENGINE_RASTER 1
++#define PSB_SCENE_NUM_ENGINES 2
++
++#define PSB_LOCKUP_RASTER (1 << 0)
++#define PSB_LOCKUP_TA (1 << 1)
++
++struct drm_psb_dev_info_arg {
++ uint32_t num_use_attribute_registers;
++};
++#define DRM_PSB_DEVINFO 0x01
++
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_drv.c b/drivers/gpu/drm/psb/psb_drv.c
+--- a/drivers/gpu/drm/psb/psb_drv.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_drv.c 2009-04-07 13:31:58.000000000 -0700
+@@ -0,0 +1,1465 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include <drm/drm_pciids.h>
++#include "psb_scene.h"
++
++#include <linux/cpu.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++
++int drm_psb_debug;
++EXPORT_SYMBOL(drm_psb_debug);
++static int drm_psb_trap_pagefaults;
++static int drm_psb_clock_gating;
++static int drm_psb_ta_mem_size = 32 * 1024;
++
++int drm_psb_disable_vsync;
++int drm_psb_no_fb;
++int drm_psb_force_pipeb;
++int drm_idle_check_interval = 5;
++int drm_psb_ospm;
++
++MODULE_PARM_DESC(debug, "Enable debug output");
++MODULE_PARM_DESC(clock_gating, "clock gating");
++MODULE_PARM_DESC(no_fb, "Disable FBdev");
++MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
++MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
++MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
++MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
++MODULE_PARM_DESC(ospm, "switch for ospm support");
++module_param_named(debug, drm_psb_debug, int, 0600);
++module_param_named(clock_gating, drm_psb_clock_gating, int, 0600);
++module_param_named(no_fb, drm_psb_no_fb, int, 0600);
++module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
++module_param_named(disable_vsync, drm_psb_disable_vsync, int, 0600);
++module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
++module_param_named(ta_mem_size, drm_psb_ta_mem_size, int, 0600);
++module_param_named(ospm, drm_psb_ospm, int, 0600);
++
++#ifndef CONFIG_X86_PAT
++#warning "Don't build this driver without PAT support!!!"
++#endif
++
++#define psb_PCI_IDS \
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0, 0, 0}
++
++static struct pci_device_id pciidlist[] = {
++ psb_PCI_IDS
++};
++
++/*
++ * Standard IOCTLs.
++ */
++
++#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_VT_LEAVE DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_VT_ENTER DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_XHW_INIT DRM_IOW(DRM_PSB_XHW_INIT + DRM_COMMAND_BASE, \
++ struct drm_psb_xhw_init_arg)
++#define DRM_IOCTL_PSB_XHW DRM_IO(DRM_PSB_XHW + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
++ union drm_psb_extension_arg)
++/*
++ * TTM execbuf extension.
++ */
++
++#define DRM_PSB_CMDBUF (DRM_PSB_EXTENSION + 1)
++#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1)
++#define DRM_IOCTL_PSB_CMDBUF DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \
++ struct drm_psb_cmdbuf_arg)
++#define DRM_IOCTL_PSB_SCENE_UNREF DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
++ struct drm_psb_scene)
++#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_EXTENSION DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
++ union drm_psb_extension_arg)
++/*
++ * TTM placement user extension.
++ */
++
++#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1)
++
++#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
++
++/*
++ * TTM fence extension.
++ */
++
++#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1)
++#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
++#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
++#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
++
++#define DRM_IOCTL_PSB_TTM_PL_CREATE \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
++ union ttm_pl_create_arg)
++#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
++ union ttm_pl_reference_arg)
++#define DRM_IOCTL_PSB_TTM_PL_UNREF \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
++ struct ttm_pl_reference_req)
++#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
++ struct ttm_pl_synccpu_arg)
++#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
++ struct ttm_pl_waitidle_arg)
++#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
++ union ttm_pl_setstatus_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \
++ union ttm_fence_signaled_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \
++ union ttm_fence_finish_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \
++ struct ttm_fence_unref_arg)
++
++static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++#define PSB_IOCTL_DEF(ioctl, func, flags) \
++ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, func, flags}
++
++static struct drm_ioctl_desc psb_ioctls[] = {
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON, psbfb_kms_on_ioctl, DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER, psb_vt_enter_ioctl, DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW_INIT, psb_xhw_init_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_XHW, psb_xhw_ioctl, DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
++
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
++ DRM_AUTH),
++
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
++ psb_fence_signaled_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
++ DRM_AUTH)
++};
++
++static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
++
++static void get_ci_info(struct drm_psb_private *dev_priv)
++{
++ struct pci_dev *pdev;
++
++ pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
++ if (pdev == NULL) {
++ /* IF no pci_device we set size & addr to 0, no ci
++ * share buffer can be created */
++ dev_priv->ci_region_start = 0;
++ dev_priv->ci_region_size = 0;
++ printk(KERN_ERR "can't find CI device, no ci share buffer\n");
++ return;
++ }
++
++ dev_priv->ci_region_start = pci_resource_start(pdev, 1);
++ dev_priv->ci_region_size = pci_resource_len(pdev, 1);
++
++ printk(KERN_INFO "ci_region_start %x ci_region_size %d\n",
++ dev_priv->ci_region_start, dev_priv->ci_region_size);
++
++ pci_dev_put(pdev);
++
++ return;
++}
++
++static int dri_library_name(struct drm_device *dev, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "psb\n");
++}
++
++static void psb_set_uopt(struct drm_psb_uopt *uopt)
++{
++ uopt->clock_gating = drm_psb_clock_gating;
++}
++
++static void psb_lastclose(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ if (!dev->dev_private)
++ return;
++
++ if (dev_priv->ta_mem)
++ psb_ta_mem_unref(&dev_priv->ta_mem);
++ mutex_lock(&dev_priv->cmdbuf_mutex);
++ if (dev_priv->context.buffers) {
++ vfree(dev_priv->context.buffers);
++ dev_priv->context.buffers = NULL;
++ }
++ mutex_unlock(&dev_priv->cmdbuf_mutex);
++}
++
++static void psb_do_takedown(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++
++
++ if (dev_priv->have_mem_rastgeom) {
++ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_RASTGEOM);
++ dev_priv->have_mem_rastgeom = 0;
++ }
++ if (dev_priv->have_mem_mmu) {
++ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
++ dev_priv->have_mem_mmu = 0;
++ }
++ if (dev_priv->have_mem_aper) {
++ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_APER);
++ dev_priv->have_mem_aper = 0;
++ }
++ if (dev_priv->have_tt) {
++ ttm_bo_clean_mm(bdev, TTM_PL_TT);
++ dev_priv->have_tt = 0;
++ }
++ if (dev_priv->have_vram) {
++ ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
++ dev_priv->have_vram = 0;
++ }
++ if (dev_priv->have_camera) {
++ ttm_bo_clean_mm(bdev, TTM_PL_CI);
++ dev_priv->have_camera = 0;
++ }
++
++ if (dev_priv->has_msvdx)
++ psb_msvdx_uninit(dev);
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->has_topaz)
++ lnc_topaz_uninit(dev);
++ }
++
++ if (dev_priv->comm) {
++ kunmap(dev_priv->comm_page);
++ dev_priv->comm = NULL;
++ }
++ if (dev_priv->comm_page) {
++ __free_page(dev_priv->comm_page);
++ dev_priv->comm_page = NULL;
++ }
++}
++
++void psb_clockgating(struct drm_psb_private *dev_priv)
++{
++ uint32_t clock_gating;
++
++ if (dev_priv->uopt.clock_gating == 1) {
++ PSB_DEBUG_INIT("Disabling clock gating.\n");
++
++ clock_gating = (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
++ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
++ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
++ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
++ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
++ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
++ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
++
++ } else if (dev_priv->uopt.clock_gating == 2) {
++ PSB_DEBUG_INIT("Enabling clock gating.\n");
++
++ clock_gating = (_PSB_C_CLKGATECTL_CLKG_AUTO <<
++ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
++ _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
++ _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
++ _PSB_C_CLKGATECTL_TA_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
++ _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT) |
++ (_PSB_C_CLKGATECTL_CLKG_AUTO <<
++ _PSB_C_CLKGATECTL_USE_CLKG_SHIFT);
++ } else
++ clock_gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
++
++#ifdef FIX_TG_2D_CLOCKGATE
++ clock_gating &= ~_PSB_C_CLKGATECTL_2D_CLKG_MASK;
++ clock_gating |= (_PSB_C_CLKGATECTL_CLKG_DISABLED <<
++ _PSB_C_CLKGATECTL_2D_CLKG_SHIFT);
++#endif
++ PSB_WSGX32(clock_gating, PSB_CR_CLKGATECTL);
++ (void) PSB_RSGX32(PSB_CR_CLKGATECTL);
++}
++
++#define FB_REG06 0xD0810600
++#define FB_MIPI_DISABLE BIT11
++#define FB_REG09 0xD0810900
++#define FB_SKU_MASK (BIT12|BIT13|BIT14)
++#define FB_SKU_SHIFT 12
++#define FB_SKU_100 0
++#define FB_SKU_100L 1
++#define FB_SKU_83 2
++#if 1 /* FIXME remove it after PO */
++#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
++#define FB_GFX_CLK_DIVIDE_SHIFT 20
++#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
++#define FB_VED_CLK_DIVIDE_SHIFT 23
++#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
++#define FB_VEC_CLK_DIVIDE_SHIFT 25
++#endif /* FIXME remove it after PO */
++
++
++void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
++{
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++ uint32_t fuse_value = 0;
++ uint32_t fuse_value_tmp = 0;
++
++ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
++ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
++
++ DRM_INFO("internal display is %s\n",
++ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
++
++ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
++ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++ DRM_INFO("SKU values is 0x%x. \n", fuse_value);
++ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case FB_SKU_100:
++ DRM_INFO("SKU values is SKU_100. LNC core clock is 200MHz. \n");
++ dev_priv->sku_100 = true;
++ break;
++ case FB_SKU_100L:
++ DRM_INFO("SKU values is SKU_100L. LNC core clock is 100MHz. \n");
++ dev_priv->sku_100L = true;
++ break;
++ case FB_SKU_83:
++ DRM_INFO("SKU values is SKU_83. LNC core clock is 166MHz. \n");
++ dev_priv->sku_83 = true;
++ break;
++ default:
++ DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++#if 1 /* FIXME remove it after PO */
++ fuse_value_tmp = (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Gfx clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Gfx clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Gfx clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Gfx clk : core clk = 2:1. \n");
++ break;
++ case 5:
++ DRM_INFO("Gfx clk : core clk = 8:3. \n");
++ break;
++ case 6:
++ DRM_INFO("Gfx clk : core clk = 16:5. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++ fuse_value_tmp = (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Ved clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Ved clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Ved clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Ved clk : core clk = 2:1. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++ fuse_value_tmp = (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Vec clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Vec clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Vec clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Vec clk : core clk = 2:1. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++#endif /* FIXME remove it after PO */
++
++ return;
++}
++
++static int psb_do_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ struct psb_gtt *pg = dev_priv->pg;
++
++ uint32_t stolen_gtt;
++ uint32_t tt_start;
++ uint32_t tt_pages;
++
++ int ret = -ENOMEM;
++
++ dev_priv->ta_mem_pages =
++ PSB_ALIGN_TO(drm_psb_ta_mem_size * 1024,
++ PAGE_SIZE) >> PAGE_SHIFT;
++ dev_priv->comm_page = alloc_page(GFP_KERNEL);
++ if (!dev_priv->comm_page)
++ goto out_err;
++
++ dev_priv->comm = kmap(dev_priv->comm_page);
++ memset((void *) dev_priv->comm, 0, PAGE_SIZE);
++
++ set_pages_uc(dev_priv->comm_page, 1);
++
++ /*
++ * Initialize sequence numbers for the different command
++ * submission mechanisms.
++ */
++
++ dev_priv->sequence[PSB_ENGINE_2D] = 0;
++ dev_priv->sequence[PSB_ENGINE_RASTERIZER] = 0;
++ dev_priv->sequence[PSB_ENGINE_TA] = 0;
++ dev_priv->sequence[PSB_ENGINE_HPRAST] = 0;
++
++ if (pg->gatt_start & 0x0FFFFFFF) {
++ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
++ ret = -EINVAL;
++ goto out_err;
++ }
++
++ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
++ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ stolen_gtt =
++ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
++
++ dev_priv->gatt_free_offset = pg->gatt_start +
++ (stolen_gtt << PAGE_SHIFT) * 1024;
++
++ /*
++ * Insert a cache-coherent communications page in mmu space
++ * just after the stolen area. Will be used for fencing etc.
++ */
++
++ dev_priv->comm_mmu_offset = dev_priv->gatt_free_offset;
++ dev_priv->gatt_free_offset += PAGE_SIZE;
++
++ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
++ &dev_priv->comm_page,
++ dev_priv->comm_mmu_offset, 1, 0, 0, 0);
++
++ if (ret)
++ goto out_err;
++
++ if (1 || drm_debug) {
++ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
++ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
++ DRM_INFO("SGX core id = 0x%08x\n", core_id);
++ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
++ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
++ _PSB_CC_REVISION_MAJOR_SHIFT,
++ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
++ _PSB_CC_REVISION_MINOR_SHIFT);
++ DRM_INFO
++ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
++ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
++ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
++ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
++ _PSB_CC_REVISION_DESIGNER_SHIFT);
++ }
++
++ spin_lock_init(&dev_priv->irqmask_lock);
++ dev_priv->fence0_irq_on = 0;
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
++ tt_start = dev_priv->gatt_free_offset - pg->gatt_start;
++ tt_pages -= tt_start >> PAGE_SHIFT;
++
++ if (!ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0,
++ pg->vram_stolen_size >> PAGE_SHIFT)) {
++ dev_priv->have_vram = 1;
++ }
++
++ if (!ttm_bo_init_mm(bdev, TTM_PL_CI, 0,
++ dev_priv->ci_region_size >> PAGE_SHIFT)) {
++ dev_priv->have_camera = 1;
++ }
++
++ if (!ttm_bo_init_mm(bdev, TTM_PL_TT, tt_start >> PAGE_SHIFT,
++ tt_pages)) {
++ dev_priv->have_tt = 1;
++ }
++
++ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU, 0x00000000,
++ (pg->gatt_start - PSB_MEM_MMU_START -
++ pg->ci_stolen_size) >> PAGE_SHIFT)) {
++ dev_priv->have_mem_mmu = 1;
++ }
++
++ if (!ttm_bo_init_mm(bdev, DRM_PSB_MEM_RASTGEOM, 0x00000000,
++ (PSB_MEM_MMU_START -
++ PSB_MEM_RASTGEOM_START) >> PAGE_SHIFT)) {
++ dev_priv->have_mem_rastgeom = 1;
++ }
++#if 0
++ if (pg->gatt_pages > PSB_TT_PRIV0_PLIMIT) {
++ if (!ttm_bo_init_mm
++ (bdev, DRM_PSB_MEM_APER, PSB_TT_PRIV0_PLIMIT,
++ pg->gatt_pages - PSB_TT_PRIV0_PLIMIT, 1)) {
++ dev_priv->have_mem_aper = 1;
++ }
++ }
++#endif
++
++ PSB_DEBUG_INIT("Init MSVDX\n");
++ dev_priv->has_msvdx = 1;
++ if (psb_msvdx_init(dev))
++ dev_priv->has_msvdx = 0;
++
++ if (IS_MRST(dev)) {
++ PSB_DEBUG_INIT("Init Topaz\n");
++ dev_priv->has_topaz = 1;
++ if (lnc_topaz_init(dev))
++ dev_priv->has_topaz = 0;
++ }
++ return 0;
++out_err:
++ psb_do_takedown(dev);
++ return ret;
++}
++
++static int psb_driver_unload(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ if (drm_psb_no_fb == 0)
++ psb_modeset_cleanup(dev);
++
++ if (dev_priv) {
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++
++ psb_watchdog_takedown(dev_priv);
++ psb_do_takedown(dev);
++ psb_xhw_takedown(dev_priv);
++ psb_scheduler_takedown(&dev_priv->scheduler);
++
++ if (dev_priv->have_mem_pds) {
++ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_PDS);
++ dev_priv->have_mem_pds = 0;
++ }
++ if (dev_priv->have_mem_kernel) {
++ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_KERNEL);
++ dev_priv->have_mem_kernel = 0;
++ }
++
++ if (dev_priv->pf_pd) {
++ psb_mmu_free_pagedir(dev_priv->pf_pd);
++ dev_priv->pf_pd = NULL;
++ }
++ if (dev_priv->mmu) {
++ struct psb_gtt *pg = dev_priv->pg;
++
++ down_read(&pg->sem);
++ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->gatt_start,
++ pg->vram_stolen_size >>
++ PAGE_SHIFT);
++ psb_mmu_remove_pfn_sequence(psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->gatt_start - pg->ci_stolen_size,
++ pg->ci_stolen_size >>
++ PAGE_SHIFT);
++ up_read(&pg->sem);
++ psb_mmu_driver_takedown(dev_priv->mmu);
++ dev_priv->mmu = NULL;
++ }
++ psb_gtt_takedown(dev_priv->pg, 1);
++ if (dev_priv->scratch_page) {
++ __free_page(dev_priv->scratch_page);
++ dev_priv->scratch_page = NULL;
++ }
++ if (dev_priv->has_bo_device) {
++ ttm_bo_device_release(&dev_priv->bdev);
++ dev_priv->has_bo_device = 0;
++ }
++ if (dev_priv->has_fence_device) {
++ ttm_fence_device_release(&dev_priv->fdev);
++ dev_priv->has_fence_device = 0;
++ }
++ if (dev_priv->vdc_reg) {
++ iounmap(dev_priv->vdc_reg);
++ dev_priv->vdc_reg = NULL;
++ }
++ if (dev_priv->sgx_reg) {
++ iounmap(dev_priv->sgx_reg);
++ dev_priv->sgx_reg = NULL;
++ }
++ if (dev_priv->msvdx_reg) {
++ iounmap(dev_priv->msvdx_reg);
++ dev_priv->msvdx_reg = NULL;
++ }
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->topaz_reg) {
++ iounmap(dev_priv->topaz_reg);
++ dev_priv->topaz_reg = NULL;
++ }
++ }
++
++ if (dev_priv->tdev)
++ ttm_object_device_release(&dev_priv->tdev);
++
++ if (dev_priv->has_global)
++ psb_ttm_global_release(dev_priv);
++
++ drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++ dev->dev_private = NULL;
++ }
++ return 0;
++}
++
++
++static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++ struct drm_psb_private *dev_priv;
++ struct ttm_bo_device *bdev;
++ unsigned long resource_start;
++ struct psb_gtt *pg;
++ int ret = -ENOMEM;
++
++ if (IS_MRST(dev))
++ DRM_INFO("Run drivers on Moorestown platform!\n");
++ else
++ DRM_INFO("Run drivers on Poulsbo platform!\n");
++
++ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
++ if (dev_priv == NULL)
++ return -ENOMEM;
++
++ dev_priv->dev = dev;
++ bdev = &dev_priv->bdev;
++
++ ret = psb_ttm_global_init(dev_priv);
++ if (unlikely(ret != 0))
++ goto out_err;
++ dev_priv->has_global = 1;
++
++ dev_priv->tdev = ttm_object_device_init
++ (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
++ if (unlikely(dev_priv->tdev == NULL))
++ goto out_err;
++
++ mutex_init(&dev_priv->temp_mem);
++ mutex_init(&dev_priv->cmdbuf_mutex);
++ mutex_init(&dev_priv->reset_mutex);
++ INIT_LIST_HEAD(&dev_priv->context.validate_list);
++ INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
++ psb_init_disallowed();
++
++#ifdef FIX_TG_16
++ atomic_set(&dev_priv->lock_2d, 0);
++ atomic_set(&dev_priv->ta_wait_2d, 0);
++ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
++ atomic_set(&dev_priv->waiters_2d, 0);;
++ DRM_INIT_WAITQUEUE(&dev_priv->queue_2d);
++#else
++ mutex_init(&dev_priv->mutex_2d);
++#endif
++
++ spin_lock_init(&dev_priv->reloc_lock);
++
++ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
++ DRM_INIT_WAITQUEUE(&dev_priv->event_2d_queue);
++
++ dev->dev_private = (void *) dev_priv;
++ dev_priv->chipset = chipset;
++ psb_set_uopt(&dev_priv->uopt);
++
++ PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
++ psb_watchdog_init(dev_priv);
++ psb_scheduler_init(dev, &dev_priv->scheduler);
++
++
++ PSB_DEBUG_INIT("Mapping MMIO\n");
++ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++ if (IS_MRST(dev))
++ dev_priv->msvdx_reg =
++ ioremap(resource_start + MRST_MSVDX_OFFSET,
++ PSB_MSVDX_SIZE);
++ else
++ dev_priv->msvdx_reg =
++ ioremap(resource_start + PSB_MSVDX_OFFSET,
++ PSB_MSVDX_SIZE);
++
++ if (!dev_priv->msvdx_reg)
++ goto out_err;
++
++ if (IS_MRST(dev)) {
++ dev_priv->topaz_reg =
++ ioremap(resource_start + LNC_TOPAZ_OFFSET,
++ LNC_TOPAZ_SIZE);
++ if (!dev_priv->topaz_reg)
++ goto out_err;
++ }
++
++ dev_priv->vdc_reg =
++ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
++ if (!dev_priv->vdc_reg)
++ goto out_err;
++
++ if (IS_MRST(dev))
++ dev_priv->sgx_reg =
++ ioremap(resource_start + MRST_SGX_OFFSET,
++ PSB_SGX_SIZE);
++ else
++ dev_priv->sgx_reg =
++ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
++
++ if (!dev_priv->sgx_reg)
++ goto out_err;
++
++ if (IS_MRST(dev))
++ mrst_get_fuse_settings(dev_priv);
++
++ PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
++
++ get_ci_info(dev_priv);
++
++ psb_clockgating(dev_priv);
++
++ ret = psb_ttm_fence_device_init(&dev_priv->fdev);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ dev_priv->has_fence_device = 1;
++ ret = ttm_bo_device_init(bdev,
++ dev_priv->mem_global_ref.object,
++ &psb_ttm_bo_driver,
++ DRM_PSB_FILE_PAGE_OFFSET);
++ if (unlikely(ret != 0))
++ goto out_err;
++ dev_priv->has_bo_device = 1;
++ ttm_lock_init(&dev_priv->ttm_lock);
++
++ ret = -ENOMEM;
++
++ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
++ if (!dev_priv->scratch_page)
++ goto out_err;
++
++ set_pages_uc(dev_priv->scratch_page, 1);
++
++ dev_priv->pg = psb_gtt_alloc(dev);
++ if (!dev_priv->pg)
++ goto out_err;
++
++ ret = psb_gtt_init(dev_priv->pg, 0);
++ if (ret)
++ goto out_err;
++
++ dev_priv->mmu = psb_mmu_driver_init(dev_priv->sgx_reg,
++ drm_psb_trap_pagefaults, 0,
++ dev_priv);
++ if (!dev_priv->mmu)
++ goto out_err;
++
++ pg = dev_priv->pg;
++
++ /*
++ * Make sgx MMU aware of the stolen memory area we call VRAM.
++ */
++
++ down_read(&pg->sem);
++ ret =
++ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->stolen_base >> PAGE_SHIFT,
++ pg->gatt_start,
++ pg->vram_stolen_size >> PAGE_SHIFT, 0);
++ up_read(&pg->sem);
++ if (ret)
++ goto out_err;
++
++ /*
++ * Make sgx MMU aware of the stolen memory area we call VRAM.
++ */
++
++ down_read(&pg->sem);
++ ret =
++ psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ dev_priv->ci_region_start >> PAGE_SHIFT,
++ pg->gatt_start - pg->ci_stolen_size,
++ pg->ci_stolen_size >> PAGE_SHIFT, 0);
++ up_read(&pg->sem);
++ if (ret)
++ goto out_err;
++
++ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
++ if (!dev_priv->pf_pd)
++ goto out_err;
++
++ /*
++ * Make all presumably unused requestors page-fault by making them
++ * use context 1 which does not have any valid mappings.
++ */
++
++ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
++ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
++ PSB_RSGX32(PSB_CR_BIF_BANK1);
++
++ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
++ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
++ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
++
++ psb_init_2d(dev_priv);
++
++ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_KERNEL, 0x00000000,
++ (PSB_MEM_PDS_START - PSB_MEM_KERNEL_START)
++ >> PAGE_SHIFT);
++ if (ret)
++ goto out_err;
++ dev_priv->have_mem_kernel = 1;
++
++ ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_PDS, 0x00000000,
++ (PSB_MEM_RASTGEOM_START - PSB_MEM_PDS_START)
++ >> PAGE_SHIFT);
++ if (ret)
++ goto out_err;
++ dev_priv->have_mem_pds = 1;
++
++ PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
++
++ ret = psb_do_init(dev);
++ if (ret)
++ return ret;
++
++ ret = psb_xhw_init(dev);
++ if (ret)
++ return ret;
++
++ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
++ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
++
++ psb_init_ospm(dev_priv);
++
++ if (drm_psb_no_fb == 0) {
++ psb_modeset_init(dev);
++ drm_helper_initial_config(dev, false);
++ }
++
++ /*initialize the MSI for MRST*/
++ if (IS_MRST(dev)) {
++ if (pci_enable_msi(dev->pdev)) {
++ DRM_ERROR("Enable MSI for MRST failed!\n");
++ } else {
++ PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
++ dev->pdev->irq);
++ /* pci_write_config_word(pdev, 0x04, 0x07); */
++ }
++ }
++
++ /*set SGX in low power mode*/
++ if (drm_psb_ospm && IS_MRST(dev))
++ if (psb_try_power_down_sgx(dev))
++ PSB_DEBUG_PM("initialize SGX to low power failed\n");
++ return 0;
++out_err:
++ psb_driver_unload(dev);
++ return ret;
++}
++
++int psb_driver_device_is_agp(struct drm_device *dev)
++{
++ return 0;
++}
++
++static int psb_prepare_msvdx_suspend(struct drm_device *dev)
++{
++#ifdef PSB_FIXME
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[PSB_ENGINE_VIDEO];
++ struct ttm_fence_object *fence;
++ int ret = 0;
++ int signaled = 0;
++ int count = 0;
++ unsigned long _end = jiffies + 3 * DRM_HZ;
++
++ PSB_DEBUG_GENERAL
++ ("MSVDXACPI Entering psb_prepare_msvdx_suspend....\n");
++
++ /*set the msvdx-reset flag here.. */
++ dev_priv->msvdx_needs_reset = 1;
++
++ /*Ensure that all pending IRQs are serviced, */
++
++ /*
++ * Save the last MSVDX fence in dev_priv instead!!!
++ * Need to be fc->write_locked while accessing a fence from the ring.
++ */
++
++ list_for_each_entry(fence, &fc->ring, ring) {
++ count++;
++ do {
++ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
++ (signaled =
++ ttm_fence_object_signaled(fence,
++ DRM_FENCE_TYPE_EXE)));
++ if (signaled)
++ break;
++ if (time_after_eq(jiffies, _end))
++ PSB_DEBUG_GENERAL
++ ("MSVDXACPI: fence 0x%x didn't get"
++ " signaled for 3 secs; "
++ "we will suspend anyways\n",
++ (unsigned int) fence);
++ } while (ret == -EINTR);
++
++ }
++ PSB_DEBUG_GENERAL("MSVDXACPI: All MSVDX IRQs (%d) serviced...\n",
++ count);
++#endif
++ return 0;
++}
++
++static int psb_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ if (!down_write_trylock(&dev_priv->sgx_sem))
++ return -EBUSY;
++ if (dev_priv->graphics_state != PSB_PWR_STATE_D0i0);
++ PSB_DEBUG_PM("Not suspending from D0i0\n");
++ if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
++ goto exit;
++ if (drm_psb_no_fb == 0){
++ psbfb_suspend(dev);
++ psb_modeset_cleanup(dev);
++ }
++
++ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
++ (void) psb_idle_3d(dev);
++ (void) psb_idle_2d(dev);
++ flush_scheduled_work();
++
++ if (dev_priv->has_msvdx)
++ psb_prepare_msvdx_suspend(dev);
++
++ if (dev_priv->has_topaz)
++ lnc_prepare_topaz_suspend(dev);
++
++#ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
++ dev_priv->gfx_d0i0_time += jiffies - dev_priv->gfx_last_mode_change;
++ else if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
++ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
++ else
++ PSB_DEBUG_PM("suspend: illegal previous power state\n");
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_d3_cnt++;
++#endif
++
++ dev_priv->graphics_state = PSB_PWR_STATE_D3;
++ dev_priv->msvdx_state = PSB_PWR_STATE_D3;
++ dev_priv->topaz_power_state = LNC_TOPAZ_POWEROFF;
++ pci_save_state(pdev);
++ pci_disable_device(pdev);
++ pci_set_power_state(pdev, PCI_D3hot);
++ psb_down_island_power(dev, PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND
++ | PSB_VIDEO_DEC_ISLAND);
++exit:
++ up_write(&dev_priv->sgx_sem);
++ return 0;
++}
++
++static int psb_resume(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int ret;
++ if (dev_priv->graphics_state != PSB_PWR_STATE_D3)
++ return 0;
++
++ psb_up_island_power(dev, PSB_GRAPHICS_ISLAND | PSB_VIDEO_ENC_ISLAND
++ | PSB_VIDEO_DEC_ISLAND);
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++ ret = pci_enable_device(pdev);
++ if (ret)
++ return ret;
++
++ DRM_ERROR("FIXME: topaz's resume is not ready..\n");
++#ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_D3)
++ dev_priv->gfx_d3_time += jiffies - dev_priv->gfx_last_mode_change;
++ else
++ PSB_DEBUG_PM("resume :illegal previous power state\n");
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_d0i0_cnt++;
++#endif
++ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
++ dev_priv->msvdx_state = PSB_PWR_STATE_D0i0;
++ dev_priv->topaz_power_state = LNC_TOPAZ_POWERON;
++ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
++ dev_priv->msvdx_needs_reset = 1;
++
++ lnc_prepare_topaz_resume(dev);
++
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ pci_write_config_word(pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ /*
++ * Don't reinitialize the GTT as it is unnecessary. The gtt is
++ * stored in memory so it will automatically be restored. All
++ * we need to do is restore the PGETBL_CTL which we already do
++ * above.
++ */
++
++ //psb_gtt_init(dev_priv->pg, 1);
++
++ /*
++ * The SGX loses it's register contents.
++ * Restore BIF registers. The MMU page tables are
++ * "normal" pages, so their contents should be kept.
++ */
++
++ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
++ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
++ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
++ PSB_RSGX32(PSB_CR_BIF_BANK1);
++
++ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
++ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
++ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
++
++ /*
++ * 2D Base registers..
++ */
++ psb_init_2d(dev_priv);
++
++ /*
++ * Persistant 3D base registers and USSE base registers..
++ */
++
++ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
++ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
++ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
++ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
++ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
++
++ /*
++ * Now, re-initialize the 3D engine.
++ */
++
++ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
++
++ psb_scheduler_ta_mem_check(dev_priv);
++ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
++ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
++ PSB_TA_MEM_FLAG_TA |
++ PSB_TA_MEM_FLAG_RASTER |
++ PSB_TA_MEM_FLAG_HOSTA |
++ PSB_TA_MEM_FLAG_HOSTD |
++ PSB_TA_MEM_FLAG_INIT,
++ dev_priv->ta_mem->ta_memory->offset,
++ dev_priv->ta_mem->hw_data->offset,
++ dev_priv->ta_mem->hw_cookie);
++ }
++
++ if (drm_psb_no_fb == 0) {
++ psb_modeset_init(dev);
++ drm_helper_initial_config(dev, false);
++ psbfb_resume(dev);
++ }
++ return 0;
++}
++
++int psb_extension_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ union drm_psb_extension_arg *arg = data;
++ struct drm_psb_extension_rep *rep = &arg->rep;
++
++ if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++ if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++ if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ rep->exists = 0;
++ return 0;
++}
++
++static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ struct ttm_mem_type_manager *man;
++ int clean;
++ int ret;
++
++ ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
++ psb_fpriv(file_priv)->tfile);
++ if (unlikely(ret != 0))
++ return ret;
++
++ /*
++ * Clean VRAM and TT for fbdev.
++ */
++
++ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ man = &bdev->man[TTM_PL_VRAM];
++ spin_lock(&bdev->lru_lock);
++ clean = drm_mm_clean(&man->manager);
++ spin_unlock(&bdev->lru_lock);
++ if (unlikely(!clean))
++ DRM_INFO("Notice: VRAM was not clean after VT switch, if you are running fbdev please ignore.\n");
++
++ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ man = &bdev->man[TTM_PL_TT];
++ spin_lock(&bdev->lru_lock);
++ clean = drm_mm_clean(&man->manager);
++ spin_unlock(&bdev->lru_lock);
++ if (unlikely(!clean))
++ DRM_INFO("Warning: GATT was not clean after VT switch.\n");
++
++ ttm_bo_swapout_all(&dev_priv->bdev);
++
++ return 0;
++out_unlock:
++ (void) ttm_write_unlock(&dev_priv->ttm_lock,
++ psb_fpriv(file_priv)->tfile);
++ return ret;
++}
++
++static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ return ttm_write_unlock(&dev_priv->ttm_lock,
++ psb_fpriv(file_priv)->tfile);
++}
++
++/* always available as we are SIGIO'd */
++static unsigned int psb_poll(struct file *filp,
++ struct poll_table_struct *wait)
++{
++ return POLLIN | POLLRDNORM;
++}
++
++int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
++{
++ /*psb_check_power_state(dev, PSB_DEVICE_SGX);*/
++ return 0;
++}
++
++static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct drm_file *file_priv = filp->private_data;
++ struct drm_device *dev = file_priv->minor->dev;
++ unsigned int nr = DRM_IOCTL_NR(cmd);
++ long ret;
++
++ /*
++ * The driver private ioctls and TTM ioctls should be
++ * thread-safe.
++ */
++
++ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
++ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
++ struct drm_ioctl_desc *ioctl = &psb_ioctls[nr - DRM_COMMAND_BASE];
++
++ if (unlikely(ioctl->cmd != cmd)) {
++ DRM_ERROR("Invalid drm command %d\n",
++ nr - DRM_COMMAND_BASE);
++ return -EINVAL;
++ }
++
++ return drm_unlocked_ioctl(filp, cmd, arg);
++ }
++ /*
++ * Not all old drm ioctls are thread-safe.
++ */
++
++ lock_kernel();
++ ret = drm_unlocked_ioctl(filp, cmd, arg);
++ unlock_kernel();
++ return ret;
++}
++
++static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
++ int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ int len = 0;
++ unsigned long d0i0 = 0;
++ unsigned long d0i3 = 0;
++ unsigned long d3 = 0;
++ *start = &buf[offset];
++ *eof = 0;
++ DRM_PROC_PRINT("D0i3:%s ", drm_psb_ospm ? "enabled" : "disabled");
++ switch (dev_priv->graphics_state) {
++ case PSB_PWR_STATE_D0i0:
++ DRM_PROC_PRINT("GFX:%s\n", "D0i0");
++ break;
++ case PSB_PWR_STATE_D0i3:
++ DRM_PROC_PRINT("GFX:%s\n", "D0i3");
++ break;
++ case PSB_PWR_STATE_D3:
++ DRM_PROC_PRINT("GFX:%s\n", "D3");
++ break;
++ default:
++ DRM_PROC_PRINT("GFX:%s\n", "unkown");
++ }
++#ifdef OSPM_STAT
++ d0i0 = dev_priv->gfx_d0i0_time * 1000 / HZ;
++ d0i3 = dev_priv->gfx_d0i3_time * 1000 / HZ;
++ d3 = dev_priv->gfx_d3_time * 1000 / HZ;
++ switch (dev_priv->graphics_state) {
++ case PSB_PWR_STATE_D0i0:
++ d0i0 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ break;
++ case PSB_PWR_STATE_D0i3:
++ d0i3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ break;
++ case PSB_PWR_STATE_D3:
++ d3 += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ break;
++ }
++ DRM_PROC_PRINT("GFX(cnt/ms):\n");
++ DRM_PROC_PRINT("D0i0:%lu/%lu, D0i3:%lu/%lu, D3:%lu/%lu \n",
++ dev_priv->gfx_d0i0_cnt, d0i0, dev_priv->gfx_d0i3_cnt, d0i3,
++ dev_priv->gfx_d3_cnt, d3);
++#endif
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static int psb_proc_init(struct drm_minor *minor)
++{
++ struct proc_dir_entry *ent;
++ if (!minor->dev_root)
++ return 0;
++ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->dev_root,
++ psb_ospm_read, minor);
++ if (ent)
++ return 0;
++ else
++ return -1;
++}
++
++static void psb_proc_cleanup(struct drm_minor *minor)
++{
++ if (!minor->dev_root)
++ return;
++ remove_proc_entry(OSPM_PROC_ENTRY, minor->dev_root);
++ return;
++}
++
++static struct drm_driver driver = {
++ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++ .load = psb_driver_load,
++ .unload = psb_driver_unload,
++ .dri_library_name = dri_library_name,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .ioctls = psb_ioctls,
++ .device_is_agp = psb_driver_device_is_agp,
++ .irq_preinstall = psb_irq_preinstall,
++ .irq_postinstall = psb_irq_postinstall,
++ .irq_uninstall = psb_irq_uninstall,
++ .irq_handler = psb_irq_handler,
++ .firstopen = NULL,
++ .lastclose = psb_lastclose,
++ .open = psb_driver_open,
++ .proc_init = psb_proc_init,
++ .proc_cleanup = psb_proc_cleanup,
++ .fops = {
++ .owner = THIS_MODULE,
++ .open = psb_open,
++ .release = psb_release,
++ .unlocked_ioctl = psb_unlocked_ioctl,
++ .mmap = psb_mmap,
++ .poll = psb_poll,
++ .fasync = drm_fasync,
++ },
++ .pci_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pciidlist,
++ .resume = psb_resume,
++ .suspend = psb_suspend,
++ },
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = PSB_DRM_DRIVER_DATE,
++ .major = PSB_DRM_DRIVER_MAJOR,
++ .minor = PSB_DRM_DRIVER_MINOR,
++ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
++};
++
++static int __init psb_init(void)
++{
++ driver.num_ioctls = psb_max_ioctl;
++
++ return drm_init(&driver);
++}
++
++static void __exit psb_exit(void)
++{
++ drm_exit(&driver);
++}
++
++module_init(psb_init);
++module_exit(psb_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
+diff -uNr a/drivers/gpu/drm/psb/psb_drv.h b/drivers/gpu/drm/psb/psb_drv.h
+--- a/drivers/gpu/drm/psb/psb_drv.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_drv.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1129 @@
++/**************************************************************************
++ *Copyright (c) 2007-2008, Intel Corporation.
++ *All Rights Reserved.
++ *
++ *This program is free software; you can redistribute it and/or modify it
++ *under the terms and conditions of the GNU General Public License,
++ *version 2, as published by the Free Software Foundation.
++ *
++ *This program is distributed in the hope it will be useful, but WITHOUT
++ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ *more details.
++ *
++ *You should have received a copy of the GNU General Public License along with
++ *this program; if not, write to the Free Software Foundation, Inc.,
++ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ *develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++#ifndef _PSB_DRV_H_
++#define _PSB_DRV_H_
++
++#include <drm/drmP.h>
++#include "psb_drm.h"
++#include "psb_reg.h"
++#include "psb_schedule.h"
++#include "psb_intel_drv.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_fence_driver.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_lock.h"
++
++extern struct ttm_bo_driver psb_ttm_bo_driver;
++
++enum {
++ CHIP_PSB_8108 = 0,
++ CHIP_PSB_8109 = 1,
++ CHIP_MRST_4100 = 2
++};
++
++/*
++ *Hardware bugfixes
++ */
++
++#define FIX_TG_16
++#define FIX_TG_2D_CLOCKGATE
++#define OSPM_STAT
++
++#define DRIVER_NAME "psb"
++#define DRIVER_DESC "drm driver for the Intel GMA500"
++#define DRIVER_AUTHOR "Tungsten Graphics Inc."
++#define OSPM_PROC_ENTRY "ospm"
++
++#define PSB_DRM_DRIVER_DATE "2009-02-09"
++#define PSB_DRM_DRIVER_MAJOR 8
++#define PSB_DRM_DRIVER_MINOR 0
++#define PSB_DRM_DRIVER_PATCHLEVEL 0
++
++/*
++ *TTM driver private offsets.
++ */
++
++#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
++
++#define PSB_OBJECT_HASH_ORDER 13
++#define PSB_FILE_OBJECT_HASH_ORDER 12
++#define PSB_BO_HASH_ORDER 12
++
++#define PSB_VDC_OFFSET 0x00000000
++#define PSB_VDC_SIZE 0x000080000
++#define MRST_MMIO_SIZE 0x0000C0000
++#define PSB_SGX_SIZE 0x8000
++#define PSB_SGX_OFFSET 0x00040000
++#define MRST_SGX_OFFSET 0x00080000
++#define PSB_MMIO_RESOURCE 0
++#define PSB_GATT_RESOURCE 2
++#define PSB_GTT_RESOURCE 3
++#define PSB_GMCH_CTRL 0x52
++#define PSB_BSM 0x5C
++#define _PSB_GMCH_ENABLED 0x4
++#define PSB_PGETBL_CTL 0x2020
++#define _PSB_PGETBL_ENABLED 0x00000001
++#define PSB_SGX_2D_SLAVE_PORT 0x4000
++#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
++#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
++#define PSB_NUM_VALIDATE_BUFFERS 2048
++#define PSB_MEM_KERNEL_START 0x10000000
++#define PSB_MEM_PDS_START 0x20000000
++#define PSB_MEM_MMU_START 0x40000000
++
++#define DRM_PSB_MEM_KERNEL TTM_PL_PRIV0
++#define DRM_PSB_FLAG_MEM_KERNEL TTM_PL_FLAG_PRIV0
++
++/*
++ *Flags for external memory type field.
++ */
++
++#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */
++#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
++/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
++#define PSB_MSVDX_SIZE 0x10000
++
++#define LNC_TOPAZ_OFFSET 0xA0000
++#define LNC_TOPAZ_SIZE 0x10000
++
++#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
++#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
++#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
++
++/*
++ *PTE's and PDE's
++ */
++
++#define PSB_PDE_MASK 0x003FFFFF
++#define PSB_PDE_SHIFT 22
++#define PSB_PTE_SHIFT 12
++
++#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
++#define PSB_PTE_WO 0x0002 /* Write only */
++#define PSB_PTE_RO 0x0004 /* Read only */
++#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
++
++/*
++ *VDC registers and bits
++ */
++#define PSB_HWSTAM 0x2098
++#define PSB_INSTPM 0x20C0
++#define PSB_INT_IDENTITY_R 0x20A4
++#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
++#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
++#define _PSB_IRQ_SGX_FLAG (1<<18)
++#define _PSB_IRQ_MSVDX_FLAG (1<<19)
++#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
++#define PSB_INT_MASK_R 0x20A8
++#define PSB_INT_ENABLE_R 0x20A0
++#define PSB_PIPEASTAT 0x70024
++#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
++#define _PSB_VBLANK_CLEAR (1 << 1)
++#define PSB_PIPEBSTAT 0x71024
++
++#define _PSB_MMU_ER_MASK 0x0001FF00
++#define _PSB_MMU_ER_HOST (1 << 16)
++#define GPIOA 0x5010
++#define GPIOB 0x5014
++#define GPIOC 0x5018
++#define GPIOD 0x501c
++#define GPIOE 0x5020
++#define GPIOF 0x5024
++#define GPIOG 0x5028
++#define GPIOH 0x502c
++#define GPIO_CLOCK_DIR_MASK (1 << 0)
++#define GPIO_CLOCK_DIR_IN (0 << 1)
++#define GPIO_CLOCK_DIR_OUT (1 << 1)
++#define GPIO_CLOCK_VAL_MASK (1 << 2)
++#define GPIO_CLOCK_VAL_OUT (1 << 3)
++#define GPIO_CLOCK_VAL_IN (1 << 4)
++#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
++#define GPIO_DATA_DIR_MASK (1 << 8)
++#define GPIO_DATA_DIR_IN (0 << 9)
++#define GPIO_DATA_DIR_OUT (1 << 9)
++#define GPIO_DATA_VAL_MASK (1 << 10)
++#define GPIO_DATA_VAL_OUT (1 << 11)
++#define GPIO_DATA_VAL_IN (1 << 12)
++#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
++
++#define VCLK_DIVISOR_VGA0 0x6000
++#define VCLK_DIVISOR_VGA1 0x6004
++#define VCLK_POST_DIV 0x6010
++
++#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
++#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
++#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
++#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
++#define PSB_COMM_USER_IRQ (1024 >> 2)
++#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
++#define PSB_COMM_FW (2048 >> 2)
++
++#define PSB_UIRQ_VISTEST 1
++#define PSB_UIRQ_OOM_REPLY 2
++#define PSB_UIRQ_FIRE_TA_REPLY 3
++#define PSB_UIRQ_FIRE_RASTER_REPLY 4
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++
++#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
++#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
++#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
++#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
++#define PSB_COMM_FW (2048 >> 2)
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
++
++#define PSB_PWR_STATE_MASK 0x0F
++#define PSB_PWR_ACTION_MASK 0xF0
++#define PSB_PWR_STATE_D0i0 0x1
++#define PSB_PWR_STATE_D0i3 0x2
++#define PSB_PWR_STATE_D3 0x3
++#define PSB_PWR_ACTION_DOWN 0x10 /*Need to power down*/
++#define PSB_PWR_ACTION_UP 0x20/*Need to power up*/
++#define PSB_GRAPHICS_ISLAND 0x1
++#define PSB_VIDEO_ENC_ISLAND 0x2
++#define PSB_VIDEO_DEC_ISLAND 0x4
++#define LNC_TOPAZ_POWERON 0x1
++#define LNC_TOPAZ_POWEROFF 0x0
++
++/*
++ *User options.
++ */
++
++struct drm_psb_uopt {
++ int clock_gating;
++};
++
++/**
++ *struct psb_context
++ *
++ *@buffers: array of pre-allocated validate buffers.
++ *@used_buffers: number of buffers in @buffers array currently in use.
++ *@validate_buffer: buffers validated from user-space.
++ *@kern_validate_buffers : buffers validated from kernel-space.
++ *@fence_flags : Fence flags to be used for fence creation.
++ *
++ *This structure is used during execbuf validation.
++ */
++
++struct psb_context {
++ struct psb_validate_buffer *buffers;
++ uint32_t used_buffers;
++ struct list_head validate_list;
++ struct list_head kern_validate_list;
++ uint32_t fence_types;
++ uint32_t val_seq;
++};
++
++struct psb_gtt {
++ struct drm_device *dev;
++ int initialized;
++ uint32_t gatt_start;
++ uint32_t gtt_start;
++ uint32_t gtt_phys_start;
++ unsigned gtt_pages;
++ unsigned gatt_pages;
++ uint32_t stolen_base;
++ uint32_t pge_ctl;
++ u16 gmch_ctrl;
++ unsigned long stolen_size;
++ unsigned long vram_stolen_size;
++ unsigned long ci_stolen_size;
++ unsigned long rar_stolen_size;
++ uint32_t *gtt_map;
++ struct rw_semaphore sem;
++};
++
++struct psb_use_base {
++ struct list_head head;
++ struct ttm_fence_object *fence;
++ unsigned int reg;
++ unsigned long offset;
++ unsigned int dm;
++};
++
++struct psb_validate_buffer;
++
++struct psb_msvdx_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++
++struct drm_psb_private {
++
++ /*
++ *TTM Glue.
++ */
++
++ struct drm_global_reference mem_global_ref;
++ int has_global;
++
++ struct drm_device *dev;
++ struct ttm_object_device *tdev;
++ struct ttm_fence_device fdev;
++ struct ttm_bo_device bdev;
++ struct ttm_lock ttm_lock;
++ struct vm_operations_struct *ttm_vm_ops;
++ int has_fence_device;
++ int has_bo_device;
++
++ unsigned long chipset;
++
++ struct psb_xhw_buf resume_buf;
++ struct drm_psb_dev_info_arg dev_info;
++ struct drm_psb_uopt uopt;
++
++ struct psb_gtt *pg;
++
++ struct page *scratch_page;
++ struct page *comm_page;
++ /* Deleted volatile because it is not recommended to use. */
++ uint32_t *comm;
++ uint32_t comm_mmu_offset;
++ uint32_t mmu_2d_offset;
++ uint32_t sequence[PSB_NUM_ENGINES];
++ uint32_t last_sequence[PSB_NUM_ENGINES];
++ int idle[PSB_NUM_ENGINES];
++ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
++ int engine_lockup_2d;
++
++ struct psb_mmu_driver *mmu;
++ struct psb_mmu_pd *pf_pd;
++
++ uint8_t *sgx_reg;
++ uint8_t *vdc_reg;
++ uint32_t gatt_free_offset;
++
++ /*
++ *MSVDX
++ */
++ int has_msvdx;
++ uint8_t *msvdx_reg;
++ int msvdx_needs_reset;
++ atomic_t msvdx_mmu_invaldc;
++
++ /*
++ *TOPAZ
++ */
++ uint8_t *topaz_reg;
++
++ void *topaz_mtx_reg_state;
++ struct ttm_buffer_object *topaz_mtx_data_mem;
++ uint32_t topaz_cur_codec;
++ uint32_t cur_mtx_data_size;
++ int topaz_needs_reset;
++ int has_topaz;
++#define TOPAZ_MAX_IDELTIME (HZ*30)
++ int topaz_start_idle;
++ unsigned long topaz_idle_start_jiffies;
++ /* used by lnc_topaz_lockup */
++ uint32_t topaz_current_sequence;
++ uint32_t topaz_last_sequence;
++ uint32_t topaz_finished_sequence;
++
++ /*
++ *Fencing / irq.
++ */
++
++ uint32_t sgx_irq_mask;
++ uint32_t sgx2_irq_mask;
++ uint32_t vdc_irq_mask;
++
++ spinlock_t irqmask_lock;
++ spinlock_t sequence_lock;
++ int fence0_irq_on;
++ int irq_enabled;
++ unsigned int irqen_count_2d;
++ wait_queue_head_t event_2d_queue;
++
++#ifdef FIX_TG_16
++ wait_queue_head_t queue_2d;
++ atomic_t lock_2d;
++ atomic_t ta_wait_2d;
++ atomic_t ta_wait_2d_irq;
++ atomic_t waiters_2d;
++#else
++ struct mutex mutex_2d;
++#endif
++ uint32_t msvdx_current_sequence;
++ uint32_t msvdx_last_sequence;
++ int fence2_irq_on;
++
++ /*
++ *Modesetting
++ */
++ struct psb_intel_mode_device mode_dev;
++
++ /*
++ *MSVDX Rendec Memory
++ */
++ struct ttm_buffer_object *ccb0;
++ uint32_t base_addr0;
++ struct ttm_buffer_object *ccb1;
++ uint32_t base_addr1;
++
++ /*
++ * CI share buffer
++ */
++ unsigned int ci_region_start;
++ unsigned int ci_region_size;
++
++ /*
++ *Memory managers
++ */
++
++ int have_vram;
++ int have_camera;
++ int have_tt;
++ int have_mem_mmu;
++ int have_mem_aper;
++ int have_mem_kernel;
++ int have_mem_pds;
++ int have_mem_rastgeom;
++ struct mutex temp_mem;
++
++ /*
++ *Relocation buffer mapping.
++ */
++
++ spinlock_t reloc_lock;
++ unsigned int rel_mapped_pages;
++ wait_queue_head_t rel_mapped_queue;
++
++ /*
++ *SAREA
++ */
++ struct drm_psb_sarea *sarea_priv;
++
++ /*
++ *LVDS info
++ */
++ int backlight_duty_cycle; /* restore backlight to this value */
++ bool panel_wants_dither;
++ struct drm_display_mode *panel_fixed_mode;
++
++/* MRST private date start */
++/*FIXME JLIU7 need to revisit */
++ bool sku_83;
++ bool sku_100;
++ bool sku_100L;
++ bool sku_bypass;
++ uint32_t iLVDS_enable;
++
++ /* pipe config register value */
++ uint32_t pipeconf;
++
++ /* plane control register value */
++ uint32_t dspcntr;
++
++/* MRST_DSI private date start */
++ /*
++ *MRST DSI info
++ */
++ /* The DSI device ready */
++ bool dsi_device_ready;
++
++ /* The DPI panel power on */
++ bool dpi_panel_on;
++
++ /* The DBI panel power on */
++ bool dbi_panel_on;
++
++ /* The DPI display */
++ bool dpi;
++
++ /* status */
++ uint32_t videoModeFormat:2;
++ uint32_t laneCount:3;
++ uint32_t status_reserved:27;
++
++ /* dual display - DPI & DBI */
++ bool dual_display;
++
++ /* HS or LP transmission */
++ bool lp_transmission;
++
++ /* configuration phase */
++ bool config_phase;
++
++ /* DSI clock */
++ uint32_t RRate;
++ uint32_t DDR_Clock;
++ uint32_t DDR_Clock_Calculated;
++ uint32_t ClockBits;
++
++ /* DBI Buffer pointer */
++ u8 *p_DBI_commandBuffer_orig;
++ u8 *p_DBI_commandBuffer;
++ uint32_t DBI_CB_pointer;
++ u8 *p_DBI_dataBuffer_orig;
++ u8 *p_DBI_dataBuffer;
++ uint32_t DBI_DB_pointer;
++
++ /* DPI panel spec */
++ uint32_t pixelClock;
++ uint32_t HsyncWidth;
++ uint32_t HbackPorch;
++ uint32_t HfrontPorch;
++ uint32_t HactiveArea;
++ uint32_t VsyncWidth;
++ uint32_t VbackPorch;
++ uint32_t VfrontPorch;
++ uint32_t VactiveArea;
++ uint32_t bpp:5;
++ uint32_t Reserved:27;
++
++ /* DBI panel spec */
++ uint32_t dbi_pixelClock;
++ uint32_t dbi_HsyncWidth;
++ uint32_t dbi_HbackPorch;
++ uint32_t dbi_HfrontPorch;
++ uint32_t dbi_HactiveArea;
++ uint32_t dbi_VsyncWidth;
++ uint32_t dbi_VbackPorch;
++ uint32_t dbi_VfrontPorch;
++ uint32_t dbi_VactiveArea;
++ uint32_t dbi_bpp:5;
++ uint32_t dbi_Reserved:27;
++
++/* MRST_DSI private date end */
++
++ /*
++ *Register state
++ */
++ uint32_t saveDSPACNTR;
++ uint32_t saveDSPBCNTR;
++ uint32_t savePIPEACONF;
++ uint32_t savePIPEBCONF;
++ uint32_t savePIPEASRC;
++ uint32_t savePIPEBSRC;
++ uint32_t saveFPA0;
++ uint32_t saveFPA1;
++ uint32_t saveDPLL_A;
++ uint32_t saveDPLL_A_MD;
++ uint32_t saveHTOTAL_A;
++ uint32_t saveHBLANK_A;
++ uint32_t saveHSYNC_A;
++ uint32_t saveVTOTAL_A;
++ uint32_t saveVBLANK_A;
++ uint32_t saveVSYNC_A;
++ uint32_t saveDSPASTRIDE;
++ uint32_t saveDSPASIZE;
++ uint32_t saveDSPAPOS;
++ uint32_t saveDSPABASE;
++ uint32_t saveDSPASURF;
++ uint32_t saveFPB0;
++ uint32_t saveFPB1;
++ uint32_t saveDPLL_B;
++ uint32_t saveDPLL_B_MD;
++ uint32_t saveHTOTAL_B;
++ uint32_t saveHBLANK_B;
++ uint32_t saveHSYNC_B;
++ uint32_t saveVTOTAL_B;
++ uint32_t saveVBLANK_B;
++ uint32_t saveVSYNC_B;
++ uint32_t saveDSPBSTRIDE;
++ uint32_t saveDSPBSIZE;
++ uint32_t saveDSPBPOS;
++ uint32_t saveDSPBBASE;
++ uint32_t saveDSPBSURF;
++ uint32_t saveVCLK_DIVISOR_VGA0;
++ uint32_t saveVCLK_DIVISOR_VGA1;
++ uint32_t saveVCLK_POST_DIV;
++ uint32_t saveVGACNTRL;
++ uint32_t saveADPA;
++ uint32_t saveLVDS;
++ uint32_t saveDVOA;
++ uint32_t saveDVOB;
++ uint32_t saveDVOC;
++ uint32_t savePP_ON;
++ uint32_t savePP_OFF;
++ uint32_t savePP_CONTROL;
++ uint32_t savePP_CYCLE;
++ uint32_t savePFIT_CONTROL;
++ uint32_t savePaletteA[256];
++ uint32_t savePaletteB[256];
++ uint32_t saveBLC_PWM_CTL;
++ uint32_t saveCLOCKGATING;
++
++ /*
++ *Xhw
++ */
++
++ uint32_t *xhw;
++ struct ttm_buffer_object *xhw_bo;
++ struct ttm_bo_kmap_obj xhw_kmap;
++ struct list_head xhw_in;
++ spinlock_t xhw_lock;
++ atomic_t xhw_client;
++ struct drm_file *xhw_file;
++ wait_queue_head_t xhw_queue;
++ wait_queue_head_t xhw_caller_queue;
++ struct mutex xhw_mutex;
++ struct psb_xhw_buf *xhw_cur_buf;
++ int xhw_submit_ok;
++ int xhw_on;
++
++ /*
++ *Scheduling.
++ */
++
++ struct mutex reset_mutex;
++ struct psb_scheduler scheduler;
++ struct mutex cmdbuf_mutex;
++ uint32_t ta_mem_pages;
++ struct psb_ta_mem *ta_mem;
++ int force_ta_mem_load;
++ atomic_t val_seq;
++
++ /*
++ *TODO: change this to be per drm-context.
++ */
++
++ struct psb_context context;
++
++ /*
++ *Watchdog
++ */
++
++ spinlock_t watchdog_lock;
++ struct timer_list watchdog_timer;
++ struct work_struct watchdog_wq;
++ struct work_struct msvdx_watchdog_wq;
++ struct work_struct topaz_watchdog_wq;
++ int timer_available;
++
++ /*
++ *msvdx command queue
++ */
++ spinlock_t msvdx_lock;
++ struct mutex msvdx_mutex;
++ struct list_head msvdx_queue;
++ int msvdx_busy;
++ int msvdx_fw_loaded;
++ void *msvdx_fw;
++ int msvdx_fw_size;
++
++ /*
++ *topaz command queue
++ */
++ spinlock_t topaz_lock;
++ struct mutex topaz_mutex;
++ struct list_head topaz_queue;
++ int topaz_busy; /* 0 means topaz is free */
++ int topaz_fw_loaded;
++
++ /* topaz ccb data */
++ /* XXX: should the addr stored by 32 bits? more compatible way?? */
++ uint32_t topaz_ccb_buffer_addr;
++ uint32_t topaz_ccb_ctrl_addr;
++ uint32_t topaz_ccb_size;
++ uint32_t topaz_cmd_windex;
++ uint16_t topaz_cmd_seq;
++
++ uint32_t stored_initial_qp;
++ uint32_t topaz_dash_access_ctrl;
++
++ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
++ struct ttm_bo_kmap_obj topaz_bo_kmap;
++ void *topaz_ccb_wb;
++ uint32_t topaz_wb_offset;
++ uint32_t *topaz_sync_addr;
++ uint32_t topaz_sync_offset;
++ uint32_t topaz_sync_cmd_seq;
++
++ struct rw_semaphore sgx_sem; /*sgx is in used*/
++ struct semaphore pm_sem; /*pm action in process*/
++ unsigned char graphics_state;
++#ifdef OSPM_STAT
++ unsigned long gfx_d0i3_time;
++ unsigned long gfx_d0i0_time;
++ unsigned long gfx_d3_time;
++ unsigned long gfx_last_mode_change;
++ unsigned long gfx_d0i0_cnt;
++ unsigned long gfx_d0i3_cnt;
++ unsigned long gfx_d3_cnt;
++#endif
++
++ /* MSVDX OSPM */
++ unsigned char msvdx_state;
++ unsigned long msvdx_last_action;
++ uint32_t msvdx_clk_state;
++
++ /* TOPAZ OSPM */
++ unsigned char topaz_power_state;
++ unsigned long topaz_last_action;
++ uint32_t topaz_clk_state;
++};
++
++struct psb_fpriv {
++ struct ttm_object_file *tfile;
++};
++
++struct psb_mmu_driver;
++
++extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
++extern int drm_pick_crtcs(struct drm_device *dev);
++
++
++static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
++{
++ return (struct psb_fpriv *) file_priv->driver_priv;
++}
++
++static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
++{
++ return (struct drm_psb_private *) dev->dev_private;
++}
++
++/*
++ *TTM glue. psb_ttm_glue.c
++ */
++
++extern int psb_open(struct inode *inode, struct file *filp);
++extern int psb_release(struct inode *inode, struct file *filp);
++extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);
++
++extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_verify_access(struct ttm_buffer_object *bo,
++ struct file *filp);
++extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos);
++extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos);
++extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_extension_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
++extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
++/*
++ *MMU stuff.
++ */
++
++extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++ int trap_pagefaults,
++ int invalid_type,
++ struct drm_psb_private *dev_priv);
++extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
++extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
++ *driver);
++extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
++ uint32_t gtt_start, uint32_t gtt_pages);
++extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
++extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++ int trap_pagefaults,
++ int invalid_type);
++extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
++extern void psb_mmu_flush(struct psb_mmu_driver *driver);
++extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++ unsigned long address,
++ uint32_t num_pages);
++extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
++ uint32_t start_pfn,
++ unsigned long address,
++ uint32_t num_pages, int type);
++extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++ unsigned long *pfn);
++
++/*
++ *Enable / disable MMU for different requestors.
++ */
++
++extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
++ uint32_t mask);
++extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
++ uint32_t mask);
++extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
++extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride, int type);
++extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride);
++/*
++ *psb_sgx.c
++ */
++
++extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
++ uint32_t sequence);
++extern void psb_init_2d(struct drm_psb_private *dev_priv);
++extern int psb_idle_2d(struct drm_device *dev);
++extern int psb_idle_3d(struct drm_device *dev);
++extern int psb_emit_2d_copy_blit(struct drm_device *dev,
++ uint32_t src_offset,
++ uint32_t dst_offset, uint32_t pages,
++ int direction);
++extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_reg_submit(struct drm_psb_private *dev_priv,
++ uint32_t *regs, unsigned int cmds);
++extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size, int engine,
++ uint32_t *copy_buffer);
++
++extern void psb_init_disallowed(void);
++extern void psb_fence_or_sync(struct drm_file *file_priv,
++ uint32_t engine,
++ uint32_t fence_types,
++ uint32_t fence_flags,
++ struct list_head *list,
++ struct psb_ttm_fence_rep *fence_arg,
++ struct ttm_fence_object **fence_p);
++extern int psb_validate_kernel_buffer(struct psb_context *context,
++ struct ttm_buffer_object *bo,
++ uint32_t fence_class,
++ uint64_t set_flags,
++ uint64_t clr_flags);
++extern void psb_init_ospm(struct drm_psb_private *dev_priv);
++extern void psb_check_power_state(struct drm_device *dev, int devices);
++extern void psb_down_island_power(struct drm_device *dev, int islands);
++extern void psb_up_island_power(struct drm_device *dev, int islands);
++extern int psb_try_power_down_sgx(struct drm_device *dev);
++
++/*
++ *psb_irq.c
++ */
++
++extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
++extern void psb_irq_preinstall(struct drm_device *dev);
++extern int psb_irq_postinstall(struct drm_device *dev);
++extern void psb_irq_uninstall(struct drm_device *dev);
++extern int psb_vblank_wait2(struct drm_device *dev,
++ unsigned int *sequence);
++extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
++
++/*
++ *psb_fence.c
++ */
++
++extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
++extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
++extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
++extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
++ uint32_t class);
++extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags, uint32_t *sequence,
++ unsigned long *timeout_jiffies);
++extern void psb_fence_error(struct drm_device *dev,
++ uint32_t class,
++ uint32_t sequence, uint32_t type, int error);
++extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
++
++/*MSVDX stuff*/
++extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
++extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
++
++/*
++ *psb_gtt.c
++ */
++extern int psb_gtt_init(struct psb_gtt *pg, int resume);
++extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
++ unsigned offset_pages, unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride, int type);
++extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
++ unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride);
++
++extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
++extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
++
++/*
++ *psb_fb.c
++ */
++extern int psbfb_probed(struct drm_device *dev);
++extern int psbfb_remove(struct drm_device *dev,
++ struct drm_framebuffer *fb);
++extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern void psbfb_suspend(struct drm_device *dev);
++extern void psbfb_resume(struct drm_device *dev);
++
++/*
++ *psb_reset.c
++ */
++
++extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
++extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
++extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
++extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
++extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
++
++/*
++ *psb_xhw.c
++ */
++
++extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_xhw_init(struct drm_device *dev);
++extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
++extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
++ struct drm_file *file_priv, int closing);
++extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t fire_flags,
++ uint32_t hw_context,
++ uint32_t *cookie,
++ uint32_t *oom_cmds,
++ uint32_t num_oom_cmds,
++ uint32_t offset,
++ uint32_t engine, uint32_t flags);
++extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t fire_flags);
++extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf, uint32_t w,
++ uint32_t h, uint32_t *hw_cookie,
++ uint32_t *bo_size, uint32_t *clear_p_start,
++ uint32_t *clear_num_pages);
++
++extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf);
++extern int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf, uint32_t *value);
++extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t pages,
++ uint32_t * hw_cookie,
++ uint32_t * size,
++ uint32_t * ta_min_size);
++extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf, uint32_t *cookie);
++extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t *cookie,
++ uint32_t *bca,
++ uint32_t *rca, uint32_t *flags);
++extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf);
++extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
++extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf);
++extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf, uint32_t *cookie);
++extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t flags,
++ uint32_t param_offset,
++ uint32_t pt_offset, uint32_t *hw_cookie);
++extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf);
++
++/*
++ *psb_schedule.c: HW bug fixing.
++ */
++
++#ifdef FIX_TG_16
++
++extern void psb_2d_unlock(struct drm_psb_private *dev_priv);
++extern void psb_2d_lock(struct drm_psb_private *dev_priv);
++extern int psb_2d_trylock(struct drm_psb_private *dev_priv);
++extern void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv);
++extern int psb_2d_trylock(struct drm_psb_private *dev_priv);
++extern void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
++#else
++
++#define psb_2d_lock(_dev_priv) mutex_lock(&(_dev_priv)->mutex_2d)
++#define psb_2d_unlock(_dev_priv) mutex_unlock(&(_dev_priv)->mutex_2d)
++
++#endif
++
++/* modesetting */
++extern void psb_modeset_init(struct drm_device *dev);
++extern void psb_modeset_cleanup(struct drm_device *dev);
++
++
++/*
++ *Utilities
++ */
++#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
++
++static inline u32 MSG_READ32(uint port, uint offset)
++{
++ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
++ outl(0x800000D0, 0xCF8);
++ outl(mcr, 0xCFC);
++ outl(0x800000D4, 0xCF8);
++ return inl(0xcfc);
++}
++static inline void MSG_WRITE32(uint port, uint offset, u32 value)
++{
++ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
++ outl(0x800000D4, 0xCF8);
++ outl(value, 0xcfc);
++ outl(0x800000D0, 0xCF8);
++ outl(mcr, 0xCFC);
++}
++
++static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ return ioread32(dev_priv->vdc_reg + (reg));
++}
++
++#define REG_READ(reg) REGISTER_READ(dev, (reg))
++static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
++ uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite32((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
++
++static inline void REGISTER_WRITE16(struct drm_device *dev,
++ uint32_t reg, uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite16((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
++
++static inline void REGISTER_WRITE8(struct drm_device *dev,
++ uint32_t reg, uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite8((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
++
++#define PSB_ALIGN_TO(_val, _align) \
++ (((_val) + ((_align) - 1)) & ~((_align) - 1))
++#define PSB_WVDC32(_val, _offs) \
++ iowrite32(_val, dev_priv->vdc_reg + (_offs))
++#define PSB_RVDC32(_offs) \
++ ioread32(dev_priv->vdc_reg + (_offs))
++#define PSB_WSGX32(_val, _offs) \
++ iowrite32(_val, dev_priv->sgx_reg + (_offs))
++#define PSB_RSGX32(_offs) \
++ ioread32(dev_priv->sgx_reg + (_offs))
++#define PSB_WMSVDX32(_val, _offs) \
++ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
++#define PSB_RMSVDX32(_offs) \
++ ioread32(dev_priv->msvdx_reg + (_offs))
++
++#define PSB_ALPL(_val, _base) \
++ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
++#define PSB_ALPLM(_val, _base) \
++ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
++
++#define PSB_D_RENDER (1 << 16)
++
++#define PSB_D_GENERAL (1 << 0)
++#define PSB_D_INIT (1 << 1)
++#define PSB_D_IRQ (1 << 2)
++#define PSB_D_FW (1 << 3)
++#define PSB_D_PERF (1 << 4)
++#define PSB_D_TMP (1 << 5)
++#define PSB_D_PM (1 << 6)
++
++extern int drm_psb_debug;
++extern int drm_psb_no_fb;
++extern int drm_psb_disable_vsync;
++extern int drm_idle_check_interval;
++extern int drm_psb_ospm;
++
++#define PSB_DEBUG_FW(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
++#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
++#define PSB_DEBUG_INIT(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
++#define PSB_DEBUG_IRQ(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
++#define PSB_DEBUG_RENDER(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
++#define PSB_DEBUG_PERF(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
++#define PSB_DEBUG_TMP(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
++#define PSB_DEBUG_PM(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
++
++#if DRM_DEBUG_CODE
++#define PSB_DEBUG(_flag, _fmt, _arg...) \
++ do { \
++ if (unlikely((_flag) & drm_psb_debug)) \
++ printk(KERN_DEBUG \
++ "[psb:0x%02x:%s] " _fmt , _flag, \
++ __func__ , ##_arg); \
++ } while (0)
++#else
++#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
++#endif
++
++#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
++ ((dev)->pci_device == 0x8109))
++
++#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_fb.c b/drivers/gpu/drm/psb/psb_fb.c
+--- a/drivers/gpu/drm/psb/psb_fb.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_fb.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1687 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/console.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "ttm/ttm_userobj_api.h"
++#include "psb_fb.h"
++#include "psb_sgx.h"
++
++static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth)
++{
++ switch (depth) {
++ case 8:
++ var->red.offset = 0;
++ var->green.offset = 0;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 15:
++ var->red.offset = 10;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 5;
++ var->blue.length = 5;
++ var->transp.length = 1;
++ var->transp.offset = 15;
++ break;
++ case 16:
++ var->red.offset = 11;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 6;
++ var->blue.length = 5;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 24:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 32:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 8;
++ var->transp.offset = 24;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle);
++
++static const struct drm_framebuffer_funcs psb_fb_funcs = {
++ .destroy = psb_user_framebuffer_destroy,
++ .create_handle = psb_user_framebuffer_create_handle,
++};
++
++struct psbfb_par {
++ struct drm_device *dev;
++ struct psb_framebuffer *psbfb;
++
++ int dpms_state;
++
++ int crtc_count;
++ /* crtc currently bound to this */
++ uint32_t crtc_ids[2];
++};
++
++#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
++
++static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++ unsigned blue, unsigned transp,
++ struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_framebuffer *fb = &par->psbfb->base;
++ uint32_t v;
++
++ if (!fb)
++ return -ENOMEM;
++
++ if (regno > 255)
++ return 1;
++
++#if 0 /* JB: not drop, check that this works */
++ if (fb->bits_per_pixel == 8) {
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (crtc->funcs->gamma_set)
++ crtc->funcs->gamma_set(crtc, red, green,
++ blue, regno);
++ }
++ return 0;
++ }
++#endif
++
++ red = CMAP_TOHW(red, info->var.red.length);
++ blue = CMAP_TOHW(blue, info->var.blue.length);
++ green = CMAP_TOHW(green, info->var.green.length);
++ transp = CMAP_TOHW(transp, info->var.transp.length);
++
++ v = (red << info->var.red.offset) |
++ (green << info->var.green.offset) |
++ (blue << info->var.blue.offset) |
++ (transp << info->var.transp.offset);
++
++ if (regno < 16) {
++ switch (fb->bits_per_pixel) {
++ case 16:
++ ((uint32_t *) info->pseudo_palette)[regno] = v;
++ break;
++ case 24:
++ case 32:
++ ((uint32_t *) info->pseudo_palette)[regno] = v;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static struct drm_display_mode *psbfb_find_first_mode(struct
++ fb_var_screeninfo
++ *var,
++ struct fb_info *info,
++ struct drm_crtc
++ *crtc)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_display_mode *drm_mode;
++ struct drm_display_mode *last_mode = NULL;
++ struct drm_connector *connector;
++ int found;
++
++ found = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder && connector->encoder->crtc == crtc) {
++ found = 1;
++ break;
++ }
++ }
++
++ /* found no connector, bail */
++ if (!found)
++ return NULL;
++
++ found = 0;
++ list_for_each_entry(drm_mode, &connector->modes, head) {
++ if (drm_mode->hdisplay == var->xres &&
++ drm_mode->vdisplay == var->yres
++ && drm_mode->clock != 0) {
++ found = 1;
++ last_mode = drm_mode;
++ }
++ }
++
++ /* No mode matching mode found */
++ if (!found)
++ return NULL;
++
++ return last_mode;
++}
++
++static int psbfb_check_var(struct fb_var_screeninfo *var,
++ struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_device *dev = par->dev;
++ int ret;
++ int depth;
++ int pitch;
++ int bpp = var->bits_per_pixel;
++
++ if (!psbfb)
++ return -ENOMEM;
++
++ if (!var->pixclock)
++ return -EINVAL;
++
++ /* don't support virtuals for now */
++ if (var->xres_virtual > var->xres)
++ return -EINVAL;
++
++ if (var->yres_virtual > var->yres)
++ return -EINVAL;
++
++ switch (bpp) {
++#if 0 /* JB: for now only support true color */
++ case 8:
++ depth = 8;
++ break;
++#endif
++ case 16:
++ depth = (var->green.length == 6) ? 16 : 15;
++ break;
++ case 24: /* assume this is 32bpp / depth 24 */
++ bpp = 32;
++ /* fallthrough */
++ case 32:
++ depth = (var->transp.length > 0) ? 32 : 24;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
++
++ /* Check that we can resize */
++ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
++#if 1
++ /* Need to resize the fb object.
++ * But the generic fbdev code doesn't really understand
++ * that we can do this. So disable for now.
++ */
++ DRM_INFO("Can't support requested size, too big!\n");
++ return -EINVAL;
++#else
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ struct ttm_buffer_object *fbo = NULL;
++ struct ttm_bo_kmap_obj tmp_kmap;
++
++ /* a temporary BO to check if we could resize in setpar.
++ * Therefore no need to set NO_EVICT.
++ */
++ ret = ttm_buffer_object_create(bdev,
++ pitch * var->yres,
++ ttm_bo_type_kernel,
++ TTM_PL_FLAG_TT |
++ TTM_PL_FLAG_VRAM |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, &fbo);
++ if (ret || !fbo)
++ return -ENOMEM;
++
++ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
++ if (ret) {
++ ttm_bo_usage_deref_unlocked(&fbo);
++ return -EINVAL;
++ }
++
++ ttm_bo_kunmap(&tmp_kmap);
++ /* destroy our current fbo! */
++ ttm_bo_usage_deref_unlocked(&fbo);
++#endif
++ }
++
++ ret = fill_fb_bitfield(var, depth);
++ if (ret)
++ return ret;
++
++#if 1
++ /* Here we walk the output mode list and look for modes. If we haven't
++ * got it, then bail. Not very nice, so this is disabled.
++ * In the set_par code, we create our mode based on the incoming
++ * parameters. Nicer, but may not be desired by some.
++ */
++ {
++ struct drm_crtc *crtc;
++ int i;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (psb_intel_crtc->mode_set.num_connectors == 0)
++ continue;
++
++ if (!psbfb_find_first_mode(&info->var, info, crtc))
++ return -EINVAL;
++ }
++ }
++#else
++ (void) i;
++ (void) dev; /* silence warnings */
++ (void) crtc;
++ (void) drm_mode;
++ (void) connector;
++#endif
++
++ return 0;
++}
++
++/* this will let fbcon do the mode init */
++static int psbfb_set_par(struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_device *dev = par->dev;
++ struct fb_var_screeninfo *var = &info->var;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_display_mode *drm_mode;
++ int pitch;
++ int depth;
++ int bpp = var->bits_per_pixel;
++
++ if (!fb)
++ return -ENOMEM;
++
++ switch (bpp) {
++ case 8:
++ depth = 8;
++ break;
++ case 16:
++ depth = (var->green.length == 6) ? 16 : 15;
++ break;
++ case 24: /* assume this is 32bpp / depth 24 */
++ bpp = 32;
++ /* fallthrough */
++ case 32:
++ depth = (var->transp.length > 0) ? 32 : 24;
++ break;
++ default:
++ DRM_ERROR("Illegal BPP\n");
++ return -EINVAL;
++ }
++
++ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
++
++ if ((pitch * var->yres) > (psbfb->bo->num_pages << PAGE_SHIFT)) {
++#if 1
++ /* Need to resize the fb object.
++ * But the generic fbdev code doesn't really understand
++ * that we can do this. So disable for now.
++ */
++ DRM_INFO("Can't support requested size, too big!\n");
++ return -EINVAL;
++#else
++ int ret;
++ struct ttm_buffer_object *fbo = NULL, *tfbo;
++ struct ttm_bo_kmap_obj tmp_kmap, tkmap;
++
++ ret = ttm_buffer_object_create(bdev,
++ pitch * var->yres,
++ ttm_bo_type_kernel,
++ TTM_PL_FLAG_MEM_TT |
++ TTM_PL_FLAG_MEM_VRAM |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, &fbo);
++ if (ret || !fbo) {
++ DRM_ERROR
++ ("failed to allocate new resized framebuffer\n");
++ return -ENOMEM;
++ }
++
++ ret = ttm_bo_kmap(fbo, 0, fbo->num_pages, &tmp_kmap);
++ if (ret) {
++ DRM_ERROR("failed to kmap framebuffer.\n");
++ ttm_bo_usage_deref_unlocked(&fbo);
++ return -EINVAL;
++ }
++
++ DRM_DEBUG("allocated %dx%d fb: 0x%08lx, bo %p\n",
++ fb->width, fb->height, fb->offset, fbo);
++
++ /* set new screen base */
++ info->screen_base = tmp_kmap.virtual;
++
++ tkmap = fb->kmap;
++ fb->kmap = tmp_kmap;
++ ttm_bo_kunmap(&tkmap);
++
++ tfbo = fb->bo;
++ fb->bo = fbo;
++ ttm_bo_usage_deref_unlocked(&tfbo);
++#endif
++ }
++
++ psbfb->offset = psbfb->bo->offset - dev_priv->pg->gatt_start;
++ fb->width = var->xres;
++ fb->height = var->yres;
++ fb->bits_per_pixel = bpp;
++ fb->pitch = pitch;
++ fb->depth = depth;
++
++ info->fix.line_length = psbfb->base.pitch;
++ info->fix.visual =
++ (psbfb->base.depth ==
++ 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
++
++ /* some fbdev's apps don't want these to change */
++ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset;
++
++#if 0
++ /* relates to resize - disable */
++ info->fix.smem_len = info->fix.line_length * var->yres;
++ info->screen_size = info->fix.smem_len; /* ??? */
++#endif
++
++ /* Should we walk the output's modelist or just create our own ???
++ * For now, we create and destroy a mode based on the incoming
++ * parameters. But there's commented out code below which scans
++ * the output list too.
++ */
++#if 1
++ /* This code is now in the for loop further down. */
++#endif
++
++ {
++ struct drm_crtc *crtc;
++ int ret;
++ int i;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (psb_intel_crtc->mode_set.num_connectors == 0)
++ continue;
++
++#if 1
++ drm_mode =
++ psbfb_find_first_mode(&info->var, info, crtc);
++ if (!drm_mode)
++ DRM_ERROR("No matching mode found\n");
++ psb_intel_crtc->mode_set.mode = drm_mode;
++#endif
++
++#if 0 /* FIXME: TH */
++ if (crtc->fb == psb_intel_crtc->mode_set.fb) {
++#endif
++ DRM_DEBUG
++ ("setting mode on crtc %p with id %u\n",
++ crtc, crtc->base.id);
++ ret =
++ crtc->funcs->
++ set_config(&psb_intel_crtc->mode_set);
++ if (ret) {
++ DRM_ERROR("Failed setting mode\n");
++ return ret;
++ }
++#if 0
++ }
++#endif
++ }
++ DRM_DEBUG("Set par returned OK.\n");
++ return 0;
++ }
++
++ return 0;
++}
++
++static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
++ unsigned size)
++{
++ int ret = 0;
++ int i;
++ unsigned submit_size;
++
++ while (size > 0) {
++ submit_size = (size < 0x60) ? size : 0x60;
++ size -= submit_size;
++ ret = psb_2d_wait_available(dev_priv, submit_size);
++ if (ret)
++ return ret;
++
++ submit_size <<= 2;
++ for (i = 0; i < submit_size; i += 4) {
++ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
++ }
++ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
++ }
++ return 0;
++}
++
++static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
++ uint32_t dst_offset, uint32_t dst_stride,
++ uint32_t dst_format, uint16_t dst_x,
++ uint16_t dst_y, uint16_t size_x,
++ uint16_t size_y, uint32_t fill)
++{
++ uint32_t buffer[10];
++ uint32_t *buf;
++
++ buf = buffer;
++
++ *buf++ = PSB_2D_FENCE_BH;
++
++ *buf++ =
++ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
++ PSB_2D_DST_STRIDE_SHIFT);
++ *buf++ = dst_offset;
++
++ *buf++ =
++ PSB_2D_BLIT_BH |
++ PSB_2D_ROT_NONE |
++ PSB_2D_COPYORDER_TL2BR |
++ PSB_2D_DSTCK_DISABLE |
++ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
++
++ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
++ *buf++ =
++ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
++ PSB_2D_DST_YSTART_SHIFT);
++ *buf++ =
++ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
++ PSB_2D_DST_YSIZE_SHIFT);
++ *buf++ = PSB_2D_FLUSH_BH;
++
++ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
++}
++
++static void psbfb_fillrect_accel(struct fb_info *info,
++ const struct fb_fillrect *r)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_psb_private *dev_priv = par->dev->dev_private;
++ uint32_t offset;
++ uint32_t stride;
++ uint32_t format;
++
++ if (!fb)
++ return;
++
++ offset = psbfb->offset;
++ stride = fb->pitch;
++
++ switch (fb->depth) {
++ case 8:
++ format = PSB_2D_DST_332RGB;
++ break;
++ case 15:
++ format = PSB_2D_DST_555RGB;
++ break;
++ case 16:
++ format = PSB_2D_DST_565RGB;
++ break;
++ case 24:
++ case 32:
++ /* this is wrong but since we don't do blending it's okay */
++ format = PSB_2D_DST_8888ARGB;
++ break;
++ default:
++ /* software fallback */
++ cfb_fillrect(info, r);
++ return;
++ }
++
++ psb_accel_2d_fillrect(dev_priv,
++ offset, stride, format,
++ r->dx, r->dy, r->width, r->height, r->color);
++}
++
++static void psbfb_fillrect(struct fb_info *info,
++ const struct fb_fillrect *rect)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ if (info->flags & FBINFO_HWACCEL_DISABLED)
++ return cfb_fillrect(info, rect);
++
++ if (psb_2d_trylock(dev_priv)) {
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ psbfb_fillrect_accel(info, rect);
++ psb_2d_unlock(dev_priv);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ } else
++ cfb_fillrect(info, rect);
++}
++
++uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
++{
++ if (xdir < 0)
++ return (ydir <
++ 0) ? PSB_2D_COPYORDER_BR2TL :
++ PSB_2D_COPYORDER_TR2BL;
++ else
++ return (ydir <
++ 0) ? PSB_2D_COPYORDER_BL2TR :
++ PSB_2D_COPYORDER_TL2BR;
++}
++
++/*
++ * @srcOffset in bytes
++ * @srcStride in bytes
++ * @srcFormat psb 2D format defines
++ * @dstOffset in bytes
++ * @dstStride in bytes
++ * @dstFormat psb 2D format defines
++ * @srcX offset in pixels
++ * @srcY offset in pixels
++ * @dstX offset in pixels
++ * @dstY offset in pixels
++ * @sizeX of the copied area
++ * @sizeY of the copied area
++ */
++static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
++ uint32_t src_offset, uint32_t src_stride,
++ uint32_t src_format, uint32_t dst_offset,
++ uint32_t dst_stride, uint32_t dst_format,
++ uint16_t src_x, uint16_t src_y,
++ uint16_t dst_x, uint16_t dst_y,
++ uint16_t size_x, uint16_t size_y)
++{
++ uint32_t blit_cmd;
++ uint32_t buffer[10];
++ uint32_t *buf;
++ uint32_t direction;
++
++ buf = buffer;
++
++ direction =
++ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
++
++ if (direction == PSB_2D_COPYORDER_BR2TL ||
++ direction == PSB_2D_COPYORDER_TR2BL) {
++ src_x += size_x - 1;
++ dst_x += size_x - 1;
++ }
++ if (direction == PSB_2D_COPYORDER_BR2TL ||
++ direction == PSB_2D_COPYORDER_BL2TR) {
++ src_y += size_y - 1;
++ dst_y += size_y - 1;
++ }
++
++ blit_cmd =
++ PSB_2D_BLIT_BH |
++ PSB_2D_ROT_NONE |
++ PSB_2D_DSTCK_DISABLE |
++ PSB_2D_SRCCK_DISABLE |
++ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
++
++ *buf++ = PSB_2D_FENCE_BH;
++ *buf++ =
++ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
++ PSB_2D_DST_STRIDE_SHIFT);
++ *buf++ = dst_offset;
++ *buf++ =
++ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
++ PSB_2D_SRC_STRIDE_SHIFT);
++ *buf++ = src_offset;
++ *buf++ =
++ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
++ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
++ *buf++ = blit_cmd;
++ *buf++ =
++ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
++ PSB_2D_DST_YSTART_SHIFT);
++ *buf++ =
++ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
++ PSB_2D_DST_YSIZE_SHIFT);
++ *buf++ = PSB_2D_FLUSH_BH;
++
++ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
++}
++
++static void psbfb_copyarea_accel(struct fb_info *info,
++ const struct fb_copyarea *a)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_psb_private *dev_priv = par->dev->dev_private;
++ uint32_t offset;
++ uint32_t stride;
++ uint32_t src_format;
++ uint32_t dst_format;
++
++ if (!fb)
++ return;
++
++ offset = psbfb->offset;
++ stride = fb->pitch;
++
++ switch (fb->depth) {
++ case 8:
++ src_format = PSB_2D_SRC_332RGB;
++ dst_format = PSB_2D_DST_332RGB;
++ break;
++ case 15:
++ src_format = PSB_2D_SRC_555RGB;
++ dst_format = PSB_2D_DST_555RGB;
++ break;
++ case 16:
++ src_format = PSB_2D_SRC_565RGB;
++ dst_format = PSB_2D_DST_565RGB;
++ break;
++ case 24:
++ case 32:
++ /* this is wrong but since we don't do blending it's okay */
++ src_format = PSB_2D_SRC_8888ARGB;
++ dst_format = PSB_2D_DST_8888ARGB;
++ break;
++ default:
++ /* software fallback */
++ cfb_copyarea(info, a);
++ return;
++ }
++
++ psb_accel_2d_copy(dev_priv,
++ offset, stride, src_format,
++ offset, stride, dst_format,
++ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
++}
++
++static void psbfb_copyarea(struct fb_info *info,
++ const struct fb_copyarea *region)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ if (info->flags & FBINFO_HWACCEL_DISABLED)
++ return cfb_copyarea(info, region);
++
++ if (psb_2d_trylock(dev_priv)) {
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ psbfb_copyarea_accel(info, region);
++ psb_2d_unlock(dev_priv);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ } else
++ cfb_copyarea(info, region);
++}
++
++void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ cfb_imageblit(info, image);
++}
++
++static void psbfb_onoff(struct fb_info *info, int dpms_mode)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_crtc *crtc;
++ struct drm_encoder *encoder;
++ int i;
++
++ /*
++ * For each CRTC in this fb, find all associated encoders
++ * and turn them off, then turn off the CRTC.
++ */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (dpms_mode == DRM_MODE_DPMS_ON)
++ crtc_funcs->dpms(crtc, dpms_mode);
++
++ /* Found a CRTC on this fb, now find encoders */
++ list_for_each_entry(encoder,
++ &dev->mode_config.encoder_list, head) {
++ if (encoder->crtc == crtc) {
++ struct drm_encoder_helper_funcs
++ *encoder_funcs;
++ encoder_funcs = encoder->helper_private;
++ encoder_funcs->dpms(encoder, dpms_mode);
++ }
++ }
++
++ if (dpms_mode == DRM_MODE_DPMS_OFF)
++ crtc_funcs->dpms(crtc, dpms_mode);
++ }
++}
++
++static int psbfb_blank(int blank_mode, struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++
++ par->dpms_state = blank_mode;
++ PSB_DEBUG_PM("psbfb_blank \n");
++ switch (blank_mode) {
++ case FB_BLANK_UNBLANK:
++ psbfb_onoff(info, DRM_MODE_DPMS_ON);
++ break;
++ case FB_BLANK_NORMAL:
++ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
++ break;
++ case FB_BLANK_HSYNC_SUSPEND:
++ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
++ break;
++ case FB_BLANK_VSYNC_SUSPEND:
++ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND);
++ break;
++ case FB_BLANK_POWERDOWN:
++ psbfb_onoff(info, DRM_MODE_DPMS_OFF);
++ break;
++ }
++
++ return 0;
++}
++
++
++static int psbfb_kms_off(struct drm_device *dev, int suspend)
++{
++ struct drm_framebuffer *fb = 0;
++ DRM_DEBUG("psbfb_kms_off_ioctl\n");
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
++ struct fb_info *info = fb->fbdev;
++
++ if (suspend)
++ fb_set_suspend(info, 1);
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++}
++
++int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ if (drm_psb_no_fb)
++ return 0;
++ acquire_console_sem();
++ ret = psbfb_kms_off(dev, 0);
++ release_console_sem();
++
++ return ret;
++}
++
++static int psbfb_kms_on(struct drm_device *dev, int resume)
++{
++ struct drm_framebuffer *fb = 0;
++
++ DRM_DEBUG("psbfb_kms_on_ioctl\n");
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
++ struct fb_info *info = fb->fbdev;
++
++ if (resume)
++ fb_set_suspend(info, 0);
++
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++}
++
++int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ if (drm_psb_no_fb)
++ return 0;
++ acquire_console_sem();
++ ret = psbfb_kms_on(dev, 0);
++ release_console_sem();
++ drm_helper_disable_unused_functions(dev);
++ return ret;
++}
++
++void psbfb_suspend(struct drm_device *dev)
++{
++ acquire_console_sem();
++ psbfb_kms_off(dev, 1);
++ release_console_sem();
++}
++
++void psbfb_resume(struct drm_device *dev)
++{
++ acquire_console_sem();
++ psbfb_kms_on(dev, 1);
++ release_console_sem();
++ drm_helper_disable_unused_functions(dev);
++}
++
++static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct ttm_buffer_object *bo = psbfb->bo;
++ unsigned long size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long offset = vma->vm_pgoff;
++
++ if (vma->vm_pgoff != 0)
++ return -EINVAL;
++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++ return -EINVAL;
++ if (offset + size > bo->num_pages)
++ return -EINVAL;
++
++ mutex_lock(&bo->mutex);
++ if (!psbfb->addr_space)
++ psbfb->addr_space = vma->vm_file->f_mapping;
++ mutex_unlock(&bo->mutex);
++
++ return ttm_fbdev_mmap(vma, bo);
++}
++
++int psbfb_sync(struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_psb_private *dev_priv = par->dev->dev_private;
++
++ if (psb_2d_trylock(dev_priv)) {
++ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
++ psb_idle_2d(par->dev);
++ psb_2d_unlock(dev_priv);
++ } else
++ udelay(5);
++
++ return 0;
++}
++
++static struct fb_ops psbfb_ops = {
++ .owner = THIS_MODULE,
++ .fb_check_var = psbfb_check_var,
++ .fb_set_par = psbfb_set_par,
++ .fb_setcolreg = psbfb_setcolreg,
++ .fb_fillrect = psbfb_fillrect,
++ .fb_copyarea = psbfb_copyarea,
++ .fb_imageblit = psbfb_imageblit,
++ .fb_mmap = psbfb_mmap,
++ .fb_sync = psbfb_sync,
++ .fb_blank = psbfb_blank,
++};
++
++static struct drm_mode_set panic_mode;
++
++int psbfb_panic(struct notifier_block *n, unsigned long ununsed,
++ void *panic_str)
++{
++ DRM_ERROR("panic occurred, switching back to text console\n");
++ drm_crtc_helper_set_config(&panic_mode);
++
++ return 0;
++}
++EXPORT_SYMBOL(psbfb_panic);
++
++static struct notifier_block paniced = {
++ .notifier_call = psbfb_panic,
++};
++
++
++static struct drm_framebuffer *psb_framebuffer_create
++ (struct drm_device *dev, struct drm_mode_fb_cmd *r,
++ void *mm_private)
++{
++ struct psb_framebuffer *fb;
++ int ret;
++
++ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
++ if (!fb)
++ return NULL;
++
++ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
++
++ if (ret)
++ goto err;
++
++ drm_helper_mode_fill_fb_struct(&fb->base, r);
++
++ fb->bo = mm_private;
++
++ return &fb->base;
++
++err:
++ kfree(fb);
++ return NULL;
++}
++
++static struct drm_framebuffer *psb_user_framebuffer_create
++ (struct drm_device *dev, struct drm_file *filp,
++ struct drm_mode_fb_cmd *r)
++{
++ struct ttm_buffer_object *bo = NULL;
++ uint64_t size;
++
++ bo = ttm_buffer_object_lookup(psb_fpriv(filp)->tfile, r->handle);
++ if (!bo)
++ return NULL;
++
++ /* JB: TODO not drop, make smarter */
++ size = ((uint64_t) bo->num_pages) << PAGE_SHIFT;
++ if (size < r->width * r->height * 4)
++ return NULL;
++
++ /* JB: TODO not drop, refcount buffer */
++ return psb_framebuffer_create(dev, r, bo);
++}
++
++int psbfb_create(struct drm_device *dev, uint32_t fb_width,
++ uint32_t fb_height, uint32_t surface_width,
++ uint32_t surface_height, struct psb_framebuffer **psbfb_p)
++{
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_framebuffer *fb;
++ struct psb_framebuffer *psbfb;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ struct drm_mode_fb_cmd mode_cmd;
++ struct device *device = &dev->pdev->dev;
++ struct ttm_bo_device *bdev = &psb_priv(dev)->bdev;
++ int size, aligned_size, ret;
++ struct ttm_buffer_object *fbo = NULL;
++ bool is_iomem;
++
++ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */
++ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */
++
++ mode_cmd.bpp = 32;
++ mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8);
++ mode_cmd.depth = 24;
++
++ size = mode_cmd.pitch * mode_cmd.height;
++ aligned_size = ALIGN(size, PAGE_SIZE);
++ ret = ttm_buffer_object_create(bdev,
++ aligned_size,
++ ttm_bo_type_kernel,
++ TTM_PL_FLAG_TT |
++ TTM_PL_FLAG_VRAM |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &fbo);
++
++ if (unlikely(ret != 0)) {
++ DRM_ERROR("failed to allocate framebuffer.\n");
++ return -ENOMEM;
++ }
++
++ mutex_lock(&dev->struct_mutex);
++ fb = psb_framebuffer_create(dev, &mode_cmd, fbo);
++ if (!fb) {
++ DRM_ERROR("failed to allocate fb.\n");
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++ psbfb = to_psb_fb(fb);
++ psbfb->bo = fbo;
++
++ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
++ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
++ if (!info) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++
++ par = info->par;
++ par->psbfb = psbfb;
++
++ strcpy(info->fix.id, "psbfb");
++ info->fix.type = FB_TYPE_PACKED_PIXELS;
++ info->fix.visual = FB_VISUAL_TRUECOLOR;
++ info->fix.type_aux = 0;
++ info->fix.xpanstep = 1; /* doing it in hw */
++ info->fix.ypanstep = 1; /* doing it in hw */
++ info->fix.ywrapstep = 0;
++ info->fix.accel = FB_ACCEL_I830;
++ info->fix.type_aux = 0;
++
++ info->flags = FBINFO_DEFAULT;
++
++ info->fbops = &psbfb_ops;
++
++ info->fix.line_length = fb->pitch;
++ info->fix.smem_start =
++ dev->mode_config.fb_base + psbfb->bo->offset;
++ info->fix.smem_len = size;
++
++ info->flags = FBINFO_DEFAULT;
++
++ ret = ttm_bo_kmap(psbfb->bo, 0, psbfb->bo->num_pages, &tmp_kmap);
++ if (ret) {
++ DRM_ERROR("error mapping fb: %d\n", ret);
++ goto out_err2;
++ }
++
++
++ info->screen_base = ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem);
++ info->screen_size = size;
++
++ if (is_iomem)
++ memset_io(info->screen_base, 0, size);
++ else
++ memset(info->screen_base, 0, size);
++
++ info->pseudo_palette = fb->pseudo_palette;
++ info->var.xres_virtual = fb->width;
++ info->var.yres_virtual = fb->height;
++ info->var.bits_per_pixel = fb->bits_per_pixel;
++ info->var.xoffset = 0;
++ info->var.yoffset = 0;
++ info->var.activate = FB_ACTIVATE_NOW;
++ info->var.height = -1;
++ info->var.width = -1;
++
++ info->var.xres = fb_width;
++ info->var.yres = fb_height;
++
++ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++ info->pixmap.size = 64 * 1024;
++ info->pixmap.buf_align = 8;
++ info->pixmap.access_align = 32;
++ info->pixmap.flags = FB_PIXMAP_SYSTEM;
++ info->pixmap.scan_align = 1;
++
++ DRM_DEBUG("fb depth is %d\n", fb->depth);
++ DRM_DEBUG(" pitch is %d\n", fb->pitch);
++ fill_fb_bitfield(&info->var, fb->depth);
++
++ fb->fbdev = info;
++
++ par->dev = dev;
++
++ /* To allow resizing without swapping buffers */
++ printk(KERN_INFO"allocated %dx%d fb: 0x%08lx, bo %p\n",
++ psbfb->base.width,
++ psbfb->base.height, psbfb->bo->offset, psbfb->bo);
++
++ if (psbfb_p)
++ *psbfb_p = psbfb;
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++out_err2:
++ unregister_framebuffer(info);
++out_err1:
++ fb->funcs->destroy(fb);
++out_err0:
++ mutex_unlock(&dev->struct_mutex);
++ ttm_bo_unref(&fbo);
++ return ret;
++}
++
++static int psbfb_multi_fb_probe_crtc(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct drm_framebuffer *fb = crtc->fb;
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct drm_connector *connector;
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_mode_set *modeset;
++ unsigned int width, height;
++ int new_fb = 0;
++ int ret, i, conn_count;
++
++ if (!drm_helper_crtc_in_use(crtc))
++ return 0;
++
++ if (!crtc->desired_mode)
++ return 0;
++
++ width = crtc->desired_mode->hdisplay;
++ height = crtc->desired_mode->vdisplay;
++
++ /* is there an fb bound to this crtc already */
++ if (!psb_intel_crtc->mode_set.fb) {
++ ret =
++ psbfb_create(dev, width, height, width, height,
++ &psbfb);
++ if (ret)
++ return -EINVAL;
++ new_fb = 1;
++ } else {
++ fb = psb_intel_crtc->mode_set.fb;
++ if ((fb->width < width) || (fb->height < height))
++ return -EINVAL;
++ }
++
++ info = fb->fbdev;
++ par = info->par;
++
++ modeset = &psb_intel_crtc->mode_set;
++ modeset->fb = fb;
++ conn_count = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder)
++ if (connector->encoder->crtc == modeset->crtc) {
++ modeset->connectors[conn_count] =
++ connector;
++ conn_count++;
++ if (conn_count > INTELFB_CONN_LIMIT)
++ BUG();
++ }
++ }
++
++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
++ modeset->connectors[i] = NULL;
++
++ par->crtc_ids[0] = crtc->base.id;
++
++ modeset->num_connectors = conn_count;
++ if (modeset->mode != modeset->crtc->desired_mode)
++ modeset->mode = modeset->crtc->desired_mode;
++
++ par->crtc_count = 1;
++
++ if (new_fb) {
++ info->var.pixclock = -1;
++ if (register_framebuffer(info) < 0)
++ return -EINVAL;
++ } else
++ psbfb_set_par(info);
++
++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
++ info->fix.id);
++
++ /* Switch back to kernel console on panic */
++ panic_mode = *modeset;
++ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
++ printk(KERN_INFO "registered panic notifier\n");
++
++ return 0;
++}
++
++static int psbfb_multi_fb_probe(struct drm_device *dev)
++{
++
++ struct drm_crtc *crtc;
++ int ret = 0;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ ret = psbfb_multi_fb_probe_crtc(dev, crtc);
++ if (ret)
++ return ret;
++ }
++ return ret;
++}
++
++static int psbfb_single_fb_probe(struct drm_device *dev)
++{
++ struct drm_crtc *crtc;
++ struct drm_connector *connector;
++ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1;
++ unsigned int surface_width = 0, surface_height = 0;
++ int new_fb = 0;
++ int crtc_count = 0;
++ int ret, i, conn_count = 0;
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_mode_set *modeset = NULL;
++ struct drm_framebuffer *fb = NULL;
++ struct psb_framebuffer *psbfb = NULL;
++
++ /* first up get a count of crtcs now in use and
++ * new min/maxes width/heights */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if (drm_helper_crtc_in_use(crtc)) {
++ if (crtc->desired_mode) {
++ fb = crtc->fb;
++ if (crtc->desired_mode->hdisplay <
++ fb_width)
++ fb_width =
++ crtc->desired_mode->hdisplay;
++
++ if (crtc->desired_mode->vdisplay <
++ fb_height)
++ fb_height =
++ crtc->desired_mode->vdisplay;
++
++ if (crtc->desired_mode->hdisplay >
++ surface_width)
++ surface_width =
++ crtc->desired_mode->hdisplay;
++
++ if (crtc->desired_mode->vdisplay >
++ surface_height)
++ surface_height =
++ crtc->desired_mode->vdisplay;
++
++ }
++ crtc_count++;
++ }
++ }
++
++ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
++ /* hmm everyone went away - assume VGA cable just fell out
++ and will come back later. */
++ return 0;
++ }
++
++ /* do we have an fb already? */
++ if (list_empty(&dev->mode_config.fb_kernel_list)) {
++ /* create an fb if we don't have one */
++ ret =
++ psbfb_create(dev, fb_width, fb_height, surface_width,
++ surface_height, &psbfb);
++ if (ret)
++ return -EINVAL;
++ new_fb = 1;
++ fb = &psbfb->base;
++ } else {
++ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
++ struct drm_framebuffer, filp_head);
++
++ /* if someone hotplugs something bigger than we have already
++ * allocated, we are pwned. As really we can't resize an
++ * fbdev that is in the wild currently due to fbdev not really
++ * being designed for the lower layers moving stuff around
++ * under it. - so in the grand style of things - punt. */
++ if ((fb->width < surface_width)
++ || (fb->height < surface_height)) {
++ DRM_ERROR
++ ("Framebuffer not large enough to scale"
++ " console onto.\n");
++ return -EINVAL;
++ }
++ }
++
++ info = fb->fbdev;
++ par = info->par;
++
++ crtc_count = 0;
++ /* okay we need to setup new connector sets in the crtcs */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ modeset = &psb_intel_crtc->mode_set;
++ modeset->fb = fb;
++ conn_count = 0;
++ list_for_each_entry(connector,
++ &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder)
++ if (connector->encoder->crtc ==
++ modeset->crtc) {
++ modeset->connectors[conn_count] =
++ connector;
++ conn_count++;
++ if (conn_count >
++ INTELFB_CONN_LIMIT)
++ BUG();
++ }
++ }
++
++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
++ modeset->connectors[i] = NULL;
++
++ par->crtc_ids[crtc_count++] = crtc->base.id;
++
++ modeset->num_connectors = conn_count;
++ if (modeset->mode != modeset->crtc->desired_mode)
++ modeset->mode = modeset->crtc->desired_mode;
++ }
++ par->crtc_count = crtc_count;
++
++ if (new_fb) {
++ info->var.pixclock = -1;
++ if (register_framebuffer(info) < 0)
++ return -EINVAL;
++ } else
++ psbfb_set_par(info);
++
++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
++ info->fix.id);
++
++ /* Switch back to kernel console on panic */
++ panic_mode = *modeset;
++ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
++ printk(KERN_INFO "registered panic notifier\n");
++
++ return 0;
++}
++
++int psbfb_probe(struct drm_device *dev)
++{
++ int ret = 0;
++
++ DRM_DEBUG("\n");
++
++ /* something has changed in the lower levels of hell - deal with it
++ here */
++
++ /* two modes : a) 1 fb to rule all crtcs.
++ b) one fb per crtc.
++ two actions 1) new connected device
++ 2) device removed.
++ case a/1 : if the fb surface isn't big enough -
++ resize the surface fb.
++ if the fb size isn't big enough - resize fb into surface.
++ if everything big enough configure the new crtc/etc.
++ case a/2 : undo the configuration
++ possibly resize down the fb to fit the new configuration.
++ case b/1 : see if it is on a new crtc - setup a new fb and add it.
++ case b/2 : teardown the new fb.
++ */
++
++ /* mode a first */
++ /* search for an fb */
++ if (0 /*i915_fbpercrtc == 1 */)
++ ret = psbfb_multi_fb_probe(dev);
++ else
++ ret = psbfb_single_fb_probe(dev);
++
++ return ret;
++}
++EXPORT_SYMBOL(psbfb_probe);
++
++int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
++{
++ struct fb_info *info;
++ struct psb_framebuffer *psbfb = to_psb_fb(fb);
++
++ if (drm_psb_no_fb)
++ return 0;
++
++ info = fb->fbdev;
++
++ if (info) {
++ unregister_framebuffer(info);
++ ttm_bo_kunmap(&psbfb->kmap);
++ ttm_bo_unref(&psbfb->bo);
++ framebuffer_release(info);
++ }
++
++ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
++ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
++ return 0;
++}
++EXPORT_SYMBOL(psbfb_remove);
++
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ /* JB: TODO currently we can't go from a bo to a handle with ttm */
++ (void) file_priv;
++ *handle = 0;
++ return 0;
++}
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct drm_device *dev = fb->dev;
++ if (fb->fbdev)
++ psbfb_remove(dev, fb);
++
++ /* JB: TODO not drop, refcount buffer */
++ drm_framebuffer_cleanup(fb);
++
++ kfree(fb);
++}
++
++static const struct drm_mode_config_funcs psb_mode_funcs = {
++ .fb_create = psb_user_framebuffer_create,
++ .fb_changed = psbfb_probe,
++};
++
++static void psb_setup_outputs(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct drm_connector *connector;
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->iLVDS_enable)
++ /* Set up integrated LVDS for MRST */
++ mrst_lvds_init(dev, &dev_priv->mode_dev);
++ else {
++ /* Set up integrated MIPI for MRST */
++ mrst_dsi_init(dev, &dev_priv->mode_dev);
++ }
++ } else {
++ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
++ /* psb_intel_sdvo_init(dev, SDVOB); */
++ }
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ int crtc_mask = 0, clone_mask = 0;
++
++ /* valid crtcs */
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_SDVO:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = (1 << INTEL_OUTPUT_SDVO);
++ break;
++ case INTEL_OUTPUT_LVDS:
++ if (IS_MRST(dev))
++ crtc_mask = (1 << 0);
++ else
++ crtc_mask = (1 << 1);
++
++ clone_mask = (1 << INTEL_OUTPUT_LVDS);
++ break;
++ case INTEL_OUTPUT_MIPI:
++ crtc_mask = (1 << 0);
++ clone_mask = (1 << INTEL_OUTPUT_MIPI);
++ break;
++ }
++ encoder->possible_crtcs = crtc_mask;
++ encoder->possible_clones =
++ psb_intel_connector_clones(dev, clone_mask);
++ }
++}
++
++static void *psb_bo_from_handle(struct drm_device *dev,
++ struct drm_file *file_priv,
++ unsigned int handle)
++{
++ return ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
++ handle);
++}
++
++static size_t psb_bo_size(struct drm_device *dev, void *bof)
++{
++ struct ttm_buffer_object *bo = bof;
++ return bo->num_pages << PAGE_SHIFT;
++}
++
++static size_t psb_bo_offset(struct drm_device *dev, void *bof)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_buffer_object *bo = bof;
++
++ size_t offset = bo->offset - dev_priv->pg->gatt_start;
++ DRM_DEBUG("Offset %u\n", offset);
++ return offset;
++}
++
++static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
++{
++#if 0 /* JB: Not used for the drop */
++ struct ttm_buffer_object *bo = bof;
++ We should do things like check if
++ the buffer is in a scanout : able
++ place.And make sure that its pinned.
++#endif
++ return 0;
++ }
++
++ static int psb_bo_unpin_for_scanout(struct drm_device *dev,
++ void *bo) {
++#if 0 /* JB: Not used for the drop */
++ struct ttm_buffer_object *bo = bof;
++#endif
++ return 0;
++ }
++
++ void psb_modeset_init(struct drm_device *dev)
++ {
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++ int i;
++ int num_pipe;
++
++ /* Init mm functions */
++ mode_dev->bo_from_handle = psb_bo_from_handle;
++ mode_dev->bo_size = psb_bo_size;
++ mode_dev->bo_offset = psb_bo_offset;
++ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
++ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
++
++ drm_mode_config_init(dev);
++
++ dev->mode_config.min_width = 0;
++ dev->mode_config.min_height = 0;
++
++ dev->mode_config.funcs = (void *) &psb_mode_funcs;
++
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++
++ /* set memory base */
++ dev->mode_config.fb_base =
++ pci_resource_start(dev->pdev, 0);
++
++ if (IS_MRST(dev))
++ num_pipe = 1;
++ else
++ num_pipe = 2;
++
++
++ for (i = 0; i < num_pipe; i++)
++ psb_intel_crtc_init(dev, i, mode_dev);
++
++ psb_setup_outputs(dev);
++
++ /* setup fbs */
++ /* drm_initial_config(dev, false); */
++ }
++
++ void psb_modeset_cleanup(struct drm_device *dev)
++ {
++ drm_mode_config_cleanup(dev);
++ }
+diff -uNr a/drivers/gpu/drm/psb/psb_fb.h b/drivers/gpu/drm/psb/psb_fb.h
+--- a/drivers/gpu/drm/psb/psb_fb.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_fb.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,47 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ **/
++
++#ifndef _PSB_FB_H_
++#define _PSB_FB_H_
++
++struct psb_framebuffer {
++ struct drm_framebuffer base;
++ struct address_space *addr_space;
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_kmap_obj kmap;
++ uint64_t offset;
++};
++
++#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
++
++
++extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
++
++extern int psb_2d_submit(struct drm_psb_private *, uint32_t *, uint32_t);
++
++#endif
++
+diff -uNr a/drivers/gpu/drm/psb/psb_fence.c b/drivers/gpu/drm/psb/psb_fence.c
+--- a/drivers/gpu/drm/psb/psb_fence.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_fence.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,343 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++
++static void psb_print_ta_fence_status(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct psb_scheduler_seq *seq = dev_priv->scheduler.seq;
++ int i;
++
++ for (i=0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i) {
++ DRM_INFO("Type 0x%02x, sequence %lu, reported %d\n",
++ (1 << i),
++ (unsigned long) seq->sequence,
++ seq->reported);
++ seq++;
++ }
++}
++
++static void psb_poll_ta(struct ttm_fence_device *fdev,
++ uint32_t waiting_types)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ uint32_t cur_flag = 1;
++ uint32_t flags = 0;
++ uint32_t sequence = 0;
++ uint32_t remaining = 0xFFFFFFFF;
++ uint32_t diff;
++
++ struct psb_scheduler *scheduler;
++ struct psb_scheduler_seq *seq;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[PSB_ENGINE_TA];
++
++ scheduler = &dev_priv->scheduler;
++ seq = scheduler->seq;
++
++ while (likely(waiting_types & remaining)) {
++ if (!(waiting_types & cur_flag))
++ goto skip;
++ if (seq->reported)
++ goto skip;
++ if (flags == 0)
++ sequence = seq->sequence;
++ else if (sequence != seq->sequence) {
++ ttm_fence_handler(fdev, PSB_ENGINE_TA,
++ sequence, flags, 0);
++ sequence = seq->sequence;
++ flags = 0;
++ }
++ flags |= cur_flag;
++
++ /*
++ * Sequence may not have ended up on the ring yet.
++ * In that case, report it but don't mark it as
++ * reported. A subsequent poll will report it again.
++ */
++
++ diff = (fc->latest_queued_sequence - sequence) &
++ fc->sequence_mask;
++ if (diff < fc->wrap_diff)
++ seq->reported = 1;
++
++skip:
++ cur_flag <<= 1;
++ remaining <<= 1;
++ seq++;
++ }
++
++ if (flags)
++ ttm_fence_handler(fdev, PSB_ENGINE_TA, sequence, flags, 0);
++
++}
++
++static void psb_poll_other(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t waiting_types)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++ uint32_t sequence;
++
++ if (unlikely(!dev_priv))
++ return;
++
++ if (waiting_types) {
++ switch (fence_class) {
++ case PSB_ENGINE_VIDEO:
++ sequence = dev_priv->msvdx_current_sequence;
++ break;
++ case LNC_ENGINE_ENCODE:
++ sequence = dev_priv->topaz_current_sequence;
++ break;
++ default:
++ sequence = dev_priv->comm[fence_class << 4];
++ break;
++ }
++
++ ttm_fence_handler(fdev, fence_class, sequence,
++ _PSB_FENCE_TYPE_EXE, 0);
++
++ switch (fence_class) {
++ case PSB_ENGINE_2D:
++ if (dev_priv->fence0_irq_on && !fc->waiting_types) {
++ psb_2D_irq_off(dev_priv);
++ dev_priv->fence0_irq_on = 0;
++ } else if (!dev_priv->fence0_irq_on
++ && fc->waiting_types) {
++ psb_2D_irq_on(dev_priv);
++ dev_priv->fence0_irq_on = 1;
++ }
++ break;
++#if 0
++ /*
++ * FIXME: MSVDX irq switching
++ */
++
++ case PSB_ENGINE_VIDEO:
++ if (dev_priv->fence2_irq_on && !fc->waiting_types) {
++ psb_msvdx_irq_off(dev_priv);
++ dev_priv->fence2_irq_on = 0;
++ } else if (!dev_priv->fence2_irq_on
++ && fc->pending_exe_flush) {
++ psb_msvdx_irq_on(dev_priv);
++ dev_priv->fence2_irq_on = 1;
++ }
++ break;
++#endif
++ default:
++ return;
++ }
++ }
++}
++
++static void psb_fence_poll(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t waiting_types)
++{
++ if (unlikely((PSB_D_PM & drm_psb_debug) && (fence_class == 0)))
++ PSB_DEBUG_PM("psb_fence_poll: %d\n", fence_class);
++ switch (fence_class) {
++ case PSB_ENGINE_TA:
++ psb_poll_ta(fdev, waiting_types);
++ break;
++ default:
++ psb_poll_other(fdev, fence_class, waiting_types);
++ break;
++ }
++}
++
++void psb_fence_error(struct drm_device *dev,
++ uint32_t fence_class,
++ uint32_t sequence, uint32_t type, int error)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ unsigned long irq_flags;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++
++ BUG_ON(fence_class >= PSB_NUM_ENGINES);
++ write_lock_irqsave(&fc->lock, irq_flags);
++ ttm_fence_handler(fdev, fence_class, sequence, type, error);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags, uint32_t *sequence,
++ unsigned long *timeout_jiffies)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ uint32_t seq = 0;
++ int ret;
++
++ if (!dev_priv)
++ return -EINVAL;
++
++ if (fence_class >= PSB_NUM_ENGINES)
++ return -EINVAL;
++
++ switch (fence_class) {
++ case PSB_ENGINE_2D:
++ spin_lock(&dev_priv->sequence_lock);
++ seq = ++dev_priv->sequence[fence_class];
++ spin_unlock(&dev_priv->sequence_lock);
++ ret = psb_blit_sequence(dev_priv, seq);
++ if (ret)
++ return ret;
++ break;
++ case PSB_ENGINE_VIDEO:
++ spin_lock(&dev_priv->sequence_lock);
++ seq = dev_priv->sequence[fence_class]++;
++ spin_unlock(&dev_priv->sequence_lock);
++ break;
++ case LNC_ENGINE_ENCODE:
++ spin_lock(&dev_priv->sequence_lock);
++ seq = dev_priv->sequence[fence_class]++;
++ spin_unlock(&dev_priv->sequence_lock);
++ break;
++ default:
++ spin_lock(&dev_priv->sequence_lock);
++ seq = dev_priv->sequence[fence_class];
++ spin_unlock(&dev_priv->sequence_lock);
++ }
++
++ *sequence = seq;
++
++ if (fence_class == PSB_ENGINE_TA)
++ *timeout_jiffies = jiffies + DRM_HZ / 2;
++ else
++ *timeout_jiffies = jiffies + DRM_HZ * 3;
++
++ return 0;
++}
++
++uint32_t psb_fence_advance_sequence(struct drm_device *dev,
++ uint32_t fence_class)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ uint32_t sequence;
++
++ spin_lock(&dev_priv->sequence_lock);
++ sequence = ++dev_priv->sequence[fence_class];
++ spin_unlock(&dev_priv->sequence_lock);
++
++ return sequence;
++}
++
++static void psb_fence_lockup(struct ttm_fence_object *fence,
++ uint32_t fence_types)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ if (fence->fence_class == PSB_ENGINE_TA) {
++
++ /*
++ * The 3D engine has its own lockup detection.
++ * Just extend the fence expiry time.
++ */
++
++ DRM_INFO("Extending 3D fence timeout.\n");
++ write_lock(&fc->lock);
++
++ DRM_INFO("Sequence %lu, types 0x%08x signaled 0x%08x\n",
++ (unsigned long) fence->sequence, fence_types,
++ fence->info.signaled_types);
++
++ if (time_after_eq(jiffies, fence->timeout_jiffies))
++ fence->timeout_jiffies = jiffies + DRM_HZ / 2;
++
++ psb_print_ta_fence_status(fence->fdev);
++ write_unlock(&fc->lock);
++ } else {
++ DRM_ERROR
++ ("GPU timeout (probable lockup) detected on engine %u "
++ "fence type 0x%08x\n",
++ (unsigned int) fence->fence_class,
++ (unsigned int) fence_types);
++ write_lock(&fc->lock);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, fence_types, -EBUSY);
++ write_unlock(&fc->lock);
++ }
++}
++
++void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++ unsigned long irq_flags;
++
++#ifdef FIX_TG_16
++ if (fence_class == PSB_ENGINE_2D) {
++
++ if ((atomic_read(&dev_priv->ta_wait_2d_irq) == 1) &&
++ (PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
++ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
++ _PSB_C2B_STATUS_BUSY) == 0))
++ psb_resume_ta_2d_idle(dev_priv);
++ }
++#endif
++ write_lock_irqsave(&fc->lock, irq_flags);
++ psb_fence_poll(fdev, fence_class, fc->waiting_types);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++
++static struct ttm_fence_driver psb_ttm_fence_driver = {
++ .has_irq = NULL,
++ .emit = psb_fence_emit_sequence,
++ .flush = NULL,
++ .poll = psb_fence_poll,
++ .needed_flush = NULL,
++ .wait = NULL,
++ .signaled = NULL,
++ .lockup = psb_fence_lockup,
++};
++
++int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
++ .flush_diff = (1 << 29),
++ .sequence_mask = 0xFFFFFFFF
++ };
++
++ return ttm_fence_device_init(PSB_NUM_ENGINES,
++ dev_priv->mem_global_ref.object,
++ fdev, &fci, 1,
++ &psb_ttm_fence_driver);
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_gtt.c b/drivers/gpu/drm/psb/psb_gtt.c
+--- a/drivers/gpu/drm/psb/psb_gtt.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_gtt.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,257 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++#include <drm/drmP.h>
++#include "psb_drv.h"
++
++static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
++{
++ uint32_t mask = PSB_PTE_VALID;
++
++ if (type & PSB_MMU_CACHED_MEMORY)
++ mask |= PSB_PTE_CACHED;
++ if (type & PSB_MMU_RO_MEMORY)
++ mask |= PSB_PTE_RO;
++ if (type & PSB_MMU_WO_MEMORY)
++ mask |= PSB_PTE_WO;
++
++ return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
++{
++ struct psb_gtt *tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_DRIVER);
++
++ if (!tmp)
++ return NULL;
++
++ init_rwsem(&tmp->sem);
++ tmp->dev = dev;
++
++ return tmp;
++}
++
++void psb_gtt_takedown(struct psb_gtt *pg, int free)
++{
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++
++ if (!pg)
++ return;
++
++ if (pg->gtt_map) {
++ iounmap(pg->gtt_map);
++ pg->gtt_map = NULL;
++ }
++ if (pg->initialized) {
++ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl);
++ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
++ (void) PSB_RVDC32(PSB_PGETBL_CTL);
++ }
++ if (free)
++ drm_free(pg, sizeof(*pg), DRM_MEM_DRIVER);
++}
++
++int psb_gtt_init(struct psb_gtt *pg, int resume)
++{
++ struct drm_device *dev = pg->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned gtt_pages;
++ unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
++ unsigned i, num_pages;
++ unsigned pfn_base;
++
++ int ret = 0;
++ uint32_t pte;
++
++ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
++ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ (void) PSB_RVDC32(PSB_PGETBL_CTL);
++
++ pg->initialized = 1;
++
++ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
++
++ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
++ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
++ gtt_pages =
++ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
++ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
++ >> PAGE_SHIFT;
++
++ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
++ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
++
++ ci_stolen_size = dev_priv->ci_region_size;
++ /* add CI & RAR share buffer space to stolen_size */
++ /* stolen_size = vram_stolen_size + ci_stolen_size; */
++ stolen_size = vram_stolen_size;
++
++ PSB_DEBUG_INIT("GTT phys start: 0x%08x.\n", pg->gtt_phys_start);
++ PSB_DEBUG_INIT("GTT start: 0x%08x.\n", pg->gtt_start);
++ PSB_DEBUG_INIT("GATT start: 0x%08x.\n", pg->gatt_start);
++ PSB_DEBUG_INIT("GTT pages: %u\n", gtt_pages);
++ PSB_DEBUG_INIT("Stolen size: %lu kiB\n", stolen_size / 1024);
++
++ if (resume && (gtt_pages != pg->gtt_pages) &&
++ (stolen_size != pg->stolen_size)) {
++ DRM_ERROR("GTT resume error.\n");
++ ret = -EINVAL;
++ goto out_err;
++ }
++
++ pg->gtt_pages = gtt_pages;
++ pg->stolen_size = stolen_size;
++ pg->vram_stolen_size = vram_stolen_size;
++ pg->ci_stolen_size = ci_stolen_size;
++ pg->gtt_map =
++ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
++ if (!pg->gtt_map) {
++ DRM_ERROR("Failure to map gtt.\n");
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ /*
++ * insert vram stolen pages.
++ */
++
++ pfn_base = pg->stolen_base >> PAGE_SHIFT;
++ num_pages = vram_stolen_size >> PAGE_SHIFT;
++ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
++ num_pages, pfn_base);
++ for (i = 0; i < num_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i, 0);
++ iowrite32(pte, pg->gtt_map + i);
++ }
++#if 0
++ /*
++ * insert CI stolen pages
++ */
++
++ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
++ num_pages = ci_stolen_size >> PAGE_SHIFT;
++ PSB_DEBUG_INIT("Set up %d stolen pages starting at 0x%08x\n",
++ num_pages, pfn_base);
++ for (; i < num_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i, 0);
++ iowrite32(pte, pg->gtt_map + i);
++ }
++#endif
++ /*
++ * Init rest of gtt.
++ */
++
++ pfn_base = page_to_pfn(dev_priv->scratch_page);
++ pte = psb_gtt_mask_pte(pfn_base, 0);
++ PSB_DEBUG_INIT("Initializing the rest of a total "
++ "of %d gtt pages.\n", pg->gatt_pages);
++
++ for (; i < pg->gatt_pages; ++i)
++ iowrite32(pte, pg->gtt_map + i);
++ (void) ioread32(pg->gtt_map + i - 1);
++
++ return 0;
++
++out_err:
++ psb_gtt_takedown(pg, 0);
++ return ret;
++}
++
++int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
++ unsigned offset_pages, unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride, int type)
++{
++ unsigned rows = 1;
++ unsigned add;
++ unsigned row_add;
++ unsigned i;
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ uint32_t pte;
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride;
++ row_add = hw_tile_stride;
++
++ down_read(&pg->sem);
++ for (i = 0; i < rows; ++i) {
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < desired_tile_stride; ++j) {
++ pte =
++ psb_gtt_mask_pte(page_to_pfn(*pages++), type);
++ iowrite32(pte, cur_page++);
++ }
++ offset_pages += add;
++ }
++ (void) ioread32(cur_page - 1);
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
++ unsigned num_pages, unsigned desired_tile_stride,
++ unsigned hw_tile_stride)
++{
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++ unsigned rows = 1;
++ unsigned add;
++ unsigned row_add;
++ unsigned i;
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
++ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride;
++ row_add = hw_tile_stride;
++
++ down_read(&pg->sem);
++ for (i = 0; i < rows; ++i) {
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < desired_tile_stride; ++j)
++ iowrite32(pte, cur_page++);
++
++ offset_pages += add;
++ }
++ (void) ioread32(cur_page - 1);
++ up_read(&pg->sem);
++
++ return 0;
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_display.c b/drivers/gpu/drm/psb/psb_intel_display.c
+--- a/drivers/gpu/drm/psb/psb_intel_display.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_display.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,2435 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++
++#include <drm/drm_crtc_helper.h>
++#include "psb_fb.h"
++#include "psb_intel_display.h"
++
++
++struct psb_intel_clock_t {
++ /* given values */
++ int n;
++ int m1, m2;
++ int p1, p2;
++ /* derived values */
++ int dot;
++ int vco;
++ int m;
++ int p;
++};
++
++struct psb_intel_range_t {
++ int min, max;
++};
++
++struct psb_intel_p2_t {
++ int dot_limit;
++ int p2_slow, p2_fast;
++};
++
++#define INTEL_P2_NUM 2
++
++struct psb_intel_limit_t {
++ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
++ struct psb_intel_p2_t p2;
++};
++
++#define I8XX_DOT_MIN 25000
++#define I8XX_DOT_MAX 350000
++#define I8XX_VCO_MIN 930000
++#define I8XX_VCO_MAX 1400000
++#define I8XX_N_MIN 3
++#define I8XX_N_MAX 16
++#define I8XX_M_MIN 96
++#define I8XX_M_MAX 140
++#define I8XX_M1_MIN 18
++#define I8XX_M1_MAX 26
++#define I8XX_M2_MIN 6
++#define I8XX_M2_MAX 16
++#define I8XX_P_MIN 4
++#define I8XX_P_MAX 128
++#define I8XX_P1_MIN 2
++#define I8XX_P1_MAX 33
++#define I8XX_P1_LVDS_MIN 1
++#define I8XX_P1_LVDS_MAX 6
++#define I8XX_P2_SLOW 4
++#define I8XX_P2_FAST 2
++#define I8XX_P2_LVDS_SLOW 14
++#define I8XX_P2_LVDS_FAST 14 /* No fast option */
++#define I8XX_P2_SLOW_LIMIT 165000
++
++#define I9XX_DOT_MIN 20000
++#define I9XX_DOT_MAX 400000
++#define I9XX_VCO_MIN 1400000
++#define I9XX_VCO_MAX 2800000
++#define I9XX_N_MIN 3
++#define I9XX_N_MAX 8
++#define I9XX_M_MIN 70
++#define I9XX_M_MAX 120
++#define I9XX_M1_MIN 10
++#define I9XX_M1_MAX 20
++#define I9XX_M2_MIN 5
++#define I9XX_M2_MAX 9
++#define I9XX_P_SDVO_DAC_MIN 5
++#define I9XX_P_SDVO_DAC_MAX 80
++#define I9XX_P_LVDS_MIN 7
++#define I9XX_P_LVDS_MAX 98
++#define I9XX_P1_MIN 1
++#define I9XX_P1_MAX 8
++#define I9XX_P2_SDVO_DAC_SLOW 10
++#define I9XX_P2_SDVO_DAC_FAST 5
++#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
++#define I9XX_P2_LVDS_SLOW 14
++#define I9XX_P2_LVDS_FAST 7
++#define I9XX_P2_LVDS_SLOW_LIMIT 112000
++
++#define INTEL_LIMIT_I8XX_DVO_DAC 0
++#define INTEL_LIMIT_I8XX_LVDS 1
++#define INTEL_LIMIT_I9XX_SDVO_DAC 2
++#define INTEL_LIMIT_I9XX_LVDS 3
++
++static const struct psb_intel_limit_t psb_intel_limits[] = {
++ { /* INTEL_LIMIT_I8XX_DVO_DAC */
++ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
++ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
++ },
++ { /* INTEL_LIMIT_I8XX_LVDS */
++ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
++ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
++ },
++ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
++ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
++ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
++ I9XX_P2_SDVO_DAC_FAST},
++ },
++ { /* INTEL_LIMIT_I9XX_LVDS */
++ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
++ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++ /* The single-channel range is 25-112Mhz, and dual-channel
++ * is 80-224Mhz. Prefer single channel as much as possible.
++ */
++ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
++ },
++};
++
++static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ const struct psb_intel_limit_t *limit;
++
++ if (IS_I9XX(dev)) {
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
++ else
++ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
++ } else {
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS];
++ else
++ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
++ }
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++
++static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / (clock->n + 2);
++ clock->dot = clock->vco / clock->p;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
++
++static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / (clock->n + 2);
++ clock->dot = clock->vco / clock->p;
++}
++
++static void psb_intel_clock(struct drm_device *dev, int refclk,
++ struct psb_intel_clock_t *clock)
++{
++ if (IS_I9XX(dev))
++ return i9xx_clock(refclk, clock);
++ else
++ return i8xx_clock(refclk, clock);
++}
++
++/**
++ * Returns whether any output on the specified pipe is of the specified type
++ */
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *l_entry;
++
++ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
++ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(l_entry);
++ if (psb_intel_output->type == type)
++ return true;
++ }
++ }
++ return false;
++}
++
++#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
++/**
++ * Returns whether the given set of divisors are valid for a given refclk with
++ * the given connectors.
++ */
++
++static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
++ struct psb_intel_clock_t *clock)
++{
++ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++
++ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
++ INTELPllInvalid("p1 out of range\n");
++ if (clock->p < limit->p.min || limit->p.max < clock->p)
++ INTELPllInvalid("p out of range\n");
++ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
++ INTELPllInvalid("m2 out of range\n");
++ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
++ INTELPllInvalid("m1 out of range\n");
++ if (clock->m1 <= clock->m2)
++ INTELPllInvalid("m1 <= m2\n");
++ if (clock->m < limit->m.min || limit->m.max < clock->m)
++ INTELPllInvalid("m out of range\n");
++ if (clock->n < limit->n.min || limit->n.max < clock->n)
++ INTELPllInvalid("n out of range\n");
++ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
++ INTELPllInvalid("vco out of range\n");
++ /* XXX: We may need to be checking "Dot clock"
++ * depending on the multiplier, connector, etc.,
++ * rather than just a single range.
++ */
++ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
++ INTELPllInvalid("dot out of range\n");
++
++ return true;
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given
++ * refclk, or FALSE. The returned values represent the clock equation:
++ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
++ */
++static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
++ int refclk,
++ struct psb_intel_clock_t *best_clock)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_clock_t clock;
++ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++ int err = target;
++
++ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
++ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
++ /*
++ * For LVDS, if the panel is on, just rely on its current
++ * settings for dual-channel. We haven't figured out how to
++ * reliably set up different single/dual channel state, if we
++ * even can.
++ */
++ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
++ LVDS_CLKB_POWER_UP)
++ clock.p2 = limit->p2.p2_fast;
++ else
++ clock.p2 = limit->p2.p2_slow;
++ } else {
++ if (target < limit->p2.dot_limit)
++ clock.p2 = limit->p2.p2_slow;
++ else
++ clock.p2 = limit->p2.p2_fast;
++ }
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
++ clock.m1++) {
++ for (clock.m2 = limit->m2.min;
++ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
++ clock.m2++) {
++ for (clock.n = limit->n.min;
++ clock.n <= limit->n.max; clock.n++) {
++ for (clock.p1 = limit->p1.min;
++ clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ psb_intel_clock(dev, refclk, &clock);
++
++ if (!psb_intel_PLL_is_valid
++ (crtc, &clock))
++ continue;
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ }
++ }
++
++ return err != target;
++}
++
++void psb_intel_wait_for_vblank(struct drm_device *dev)
++{
++ /* Wait for 20ms, i.e. one cycle at 50hz. */
++ udelay(20000);
++}
++
++int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ unsigned long Start, Offset;
++ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
++ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
++ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ u32 dspcntr;
++
++ /* no fb bound */
++ if (!crtc->fb) {
++ DRM_DEBUG("No FB bound\n");
++ return 0;
++ }
++
++ if (IS_MRST(dev) && (pipe == 0))
++ dspbase = MRST_DSPABASE;
++
++ Start = mode_dev->bo_offset(dev, psbfb->bo);
++ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
++
++ REG_WRITE(dspstride, crtc->fb->pitch);
++
++ dspcntr = REG_READ(dspcntr_reg);
++ switch (crtc->fb->bits_per_pixel) {
++ case 8:
++ dspcntr |= DISPPLANE_8BPP;
++ break;
++ case 16:
++ if (crtc->fb->depth == 15)
++ dspcntr |= DISPPLANE_15_16BPP;
++ else
++ dspcntr |= DISPPLANE_16BPP;
++ break;
++ case 24:
++ case 32:
++ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++ break;
++ default:
++ DRM_ERROR("Unknown color depth\n");
++ return -EINVAL;
++ }
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
++ if (IS_I965G(dev) || IS_MRST(dev)) {
++ REG_WRITE(dspbase, Offset);
++ REG_READ(dspbase);
++ REG_WRITE(dspsurf, Start);
++ REG_READ(dspsurf);
++ } else {
++ REG_WRITE(dspbase, Start + Offset);
++ REG_READ(dspbase);
++ }
++
++ if (!dev->primary->master)
++ return 0;
++
++#if 0 /* JB: Enable sarea later */
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return 0;
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_x = x;
++ master_priv->sarea_priv->planeA_y = y;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_x = x;
++ master_priv->sarea_priv->planeB_y = y;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++ /* Fix: function is declared int but fell off the end (undefined return
++ * value when a master is present). Return success explicitly. */
++ return 0;
++}
++
++
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ /* struct drm_i915_private *dev_priv = dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 temp;
++ bool enabled;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
++
++ /* Wait for vblank for the disable to take effect. */
++ psb_intel_wait_for_vblank(dev);
++
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) != 0) {
++ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ }
++
++ /* Wait for the clocks to turn off. */
++ udelay(150);
++ break;
++ }
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++ psb_intel_crtc->dpms_mode = mode;
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return 0;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return 0;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++}
++
++static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
++{
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++}
++
++static void psb_intel_crtc_commit(struct drm_crtc *crtc)
++{
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++void psb_intel_encoder_prepare(struct drm_encoder *encoder)
++{
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ /* lvds has its own version of prepare see psb_intel_lvds_prepare */
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
++}
++
++void psb_intel_encoder_commit(struct drm_encoder *encoder)
++{
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ /* lvds has its own version of commit see psb_intel_lvds_commit */
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
++}
++
++static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ return true;
++}
++
++
++/** Returns the core display clock speed for i830 - i945 */
++static int psb_intel_get_core_clock_speed(struct drm_device *dev)
++{
++#if 0 /* JB: Look into this more */
++ /* Core clock values taken from the published datasheets.
++ * The 830 may go up to 166 Mhz, which we should check.
++ */
++ if (IS_I945G(dev))
++ return 400000;
++ else if (IS_I915G(dev))
++ return 333000;
++ else if (IS_I945GM(dev) || IS_845G(dev))
++ return 200000;
++ else if (IS_I915GM(dev)) {
++ u16 gcfgc = 0;
++
++ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
++
++ if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
++ return 133000;
++ else {
++ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
++ case GC_DISPLAY_CLOCK_333_MHZ:
++ return 333000;
++ default:
++ case GC_DISPLAY_CLOCK_190_200_MHZ:
++ return 190000;
++ }
++ }
++ } else if (IS_I865G(dev))
++ return 266000;
++ else if (IS_I855(dev)) {
++#if 0
++ PCITAG bridge = pciTag(0, 0, 0);
++ /* This is always the host bridge */
++ u16 hpllcc = pciReadWord(bridge, HPLLCC);
++
++#endif
++ u16 hpllcc = 0;
++ /* Assume that the hardware is in the high speed state. This
++ * should be the default.
++ */
++ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
++ case GC_CLOCK_133_200:
++ case GC_CLOCK_100_200:
++ return 200000;
++ case GC_CLOCK_166_250:
++ return 250000;
++ case GC_CLOCK_100_133:
++ return 133000;
++ }
++ } else /* 852, 830 */
++ return 133000;
++#endif
++ return 0; /* Silence gcc warning */
++}
++
++
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
++{
++ u32 pfit_control;
++
++ /* i830 doesn't have a panel fitter */
++ if (IS_I830(dev))
++ return -1;
++
++ pfit_control = REG_READ(PFIT_CONTROL);
++
++ /* See if the panel fitter is in use */
++ if ((pfit_control & PFIT_ENABLE) == 0)
++ return -1;
++
++ /* 965 can place panel fitter on either pipe */
++ if (IS_I965G(dev) || IS_MRST(dev))
++ return (pfit_control >> 29) & 0x3;
++
++ /* older chips can only use pipe 1 */
++ return 1;
++}
++
++static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
++ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ int refclk;
++ struct psb_intel_clock_t clock;
++ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
++ bool ok, is_sdvo = false, is_dvo = false;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (!connector->encoder
++ || connector->encoder->crtc != crtc)
++ continue;
++
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_SDVO:
++ is_sdvo = true;
++ break;
++ case INTEL_OUTPUT_DVO:
++ is_dvo = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ }
++ }
++
++ if (IS_I9XX(dev))
++ refclk = 96000;
++ else
++ refclk = 48000;
++
++ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
++ &clock);
++ if (!ok) {
++ DRM_ERROR("Couldn't find PLL settings for mode!\n");
++ return 0;
++ }
++
++ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
++
++ dpll = DPLL_VGA_MODE_DIS;
++ if (IS_I9XX(dev)) {
++ if (is_lvds) {
++ dpll |= DPLLB_MODE_LVDS;
++ if (IS_POULSBO(dev))
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ } else
++ dpll |= DPLLB_MODE_DAC_SERIAL;
++ if (is_sdvo) {
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ if (IS_I945G(dev) || IS_I945GM(dev)) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++ dpll |=
++ (sdvo_pixel_multiply -
++ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ }
++ }
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 1)) << 16;
++ switch (clock.p2) {
++ case 5:
++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
++ break;
++ case 7:
++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
++ break;
++ case 10:
++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
++ break;
++ case 14:
++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
++ break;
++ }
++ if (IS_I965G(dev))
++ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
++ } else {
++ if (is_lvds) {
++ dpll |=
++ (1 << (clock.p1 - 1)) <<
++ DPLL_FPA01_P1_POST_DIV_SHIFT;
++ } else {
++ if (clock.p1 == 2)
++ dpll |= PLL_P1_DIVIDE_BY_TWO;
++ else
++ dpll |=
++ (clock.p1 -
++ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
++ if (clock.p2 == 4)
++ dpll |= PLL_P2_DIVIDE_BY_4;
++ }
++ }
++
++ if (is_tv) {
++ /* XXX: just matching BIOS for now */
++/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
++ dpll |= 3;
++ }
++#if 0
++ else if (is_lvds)
++ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
++#endif
++ else
++ dpll |= PLL_REF_INPUT_DREFCLK;
++
++ /* setup pipeconf */
++ pipeconf = REG_READ(pipeconf_reg);
++
++ /* Set up the display plane register */
++ dspcntr = DISPPLANE_GAMMA_ENABLE;
++
++ if (pipe == 0)
++ dspcntr |= DISPPLANE_SEL_PIPE_A;
++ else
++ dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++ if (pipe == 0 && !IS_I965G(dev)) {
++ /* Enable pixel doubling when the dot clock is > 90%
++ * of the (display) core speed.
++ *
++ * XXX: No double-wide on 915GM pipe B.
++ * Is that the only reason for the
++ * pipe == 0 check?
++ */
++ if (mode->clock > psb_intel_get_core_clock_speed(dev) * 9 / 10)
++ pipeconf |= PIPEACONF_DOUBLE_WIDE;
++ else
++ pipeconf &= ~PIPEACONF_DOUBLE_WIDE;
++ }
++
++ dspcntr |= DISPLAY_PLANE_ENABLE;
++ pipeconf |= PIPEACONF_ENABLE;
++ dpll |= DPLL_VCO_ENABLE;
++
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
++ drm_mode_debug_printmodeline(mode);
++
++#if 0
++ if (!xf86ModesEqual(mode, adjusted_mode)) {
++ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
++ "Adjusted mode for pipe %c:\n",
++ pipe == 0 ? 'A' : 'B');
++ xf86PrintModeline(pScrn->scrnIndex, mode);
++ }
++ i830PrintPll("chosen", &clock);
++#endif
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ udelay(150);
++ }
++
++ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
++ * This is an exception to the general rule that mode_set doesn't turn
++ * things on.
++ */
++ if (is_lvds) {
++ u32 lvds = REG_READ(LVDS);
++
++ lvds |=
++ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
++ LVDS_PIPEB_SELECT;
++ /* Set the B0-B3 data pairs corresponding to
++ * whether we're going to
++ * set the DPLLs for dual-channel mode or not.
++ */
++ if (clock.p2 == 7)
++ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
++ else
++ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
++
++ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
++ * appropriately here, but we need to look more
++ * thoroughly into how panels behave in the two modes.
++ */
++
++ REG_WRITE(LVDS, lvds);
++ REG_READ(LVDS);
++ }
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ if (IS_I965G(dev)) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++ REG_WRITE(dpll_md_reg,
++ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
++ ((sdvo_pixel_multiply -
++ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
++ } else {
++ /* write it again -- the BIOS does, after all */
++ REG_WRITE(dpll_reg, dpll);
++ }
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ /* pipesrc and dspsize control the size that is scaled from,
++ * which should always be the user's requested size.
++ */
++ REG_WRITE(dspsize_reg,
++ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
++ REG_WRITE(dsppos_reg, 0);
++ REG_WRITE(pipesrc_reg,
++ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ psb_intel_wait_for_vblank(dev);
++
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ psb_intel_wait_for_vblank(dev);
++
++ return 0;
++}
++
++/** Loads the palette/gamma unit for the CRTC with the prepared values */
++void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
++ int i;
++
++ /* The clocks have to be on to load the palette. */
++ if (!crtc->enabled)
++ return;
++
++ for (i = 0; i < 256; i++) {
++ REG_WRITE(palreg + 4 * i,
++ (psb_intel_crtc->lut_r[i] << 16) |
++ (psb_intel_crtc->lut_g[i] << 8) |
++ psb_intel_crtc->lut_b[i]);
++ }
++}
++
++static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
++ struct drm_file *file_priv,
++ uint32_t handle,
++ uint32_t width, uint32_t height)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
++ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
++ uint32_t temp;
++ size_t addr = 0;
++ size_t size;
++ void *bo;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ /* if we want to turn off the cursor ignore width and height */
++ if (!handle) {
++ DRM_DEBUG("cursor off\n");
++ /* turn off the cursor */
++ temp = 0;
++ temp |= CURSOR_MODE_DISABLE;
++
++ REG_WRITE(control, temp);
++ REG_WRITE(base, 0);
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo) {
++ mode_dev->bo_unpin_for_scanout(dev,
++ psb_intel_crtc->
++ cursor_bo);
++ psb_intel_crtc->cursor_bo = NULL;
++ }
++
++ return 0;
++ }
++
++ /* Currently we only support 64x64 cursors */
++ if (width != 64 || height != 64) {
++ DRM_ERROR("we currently only support 64x64 cursors\n");
++ return -EINVAL;
++ }
++
++ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
++ if (!bo)
++ return -ENOENT;
++
++ ret = mode_dev->bo_pin_for_scanout(dev, bo);
++ if (ret)
++ return ret;
++
++ size = mode_dev->bo_size(dev, bo);
++ if (size < width * height * 4) {
++ DRM_ERROR("buffer is too small\n");
++ /* don't leak the pin we just took on this bo */
++ mode_dev->bo_unpin_for_scanout(dev, bo);
++ return -ENOMEM;
++ }
++
++ /* program the bo's scanout offset, not its size, as the cursor base */
++ addr = mode_dev->bo_offset(dev, bo);
++ if (mode_dev->cursor_needs_physical)
++ addr = dev->agp->base + addr;
++
++ psb_intel_crtc->cursor_addr = addr;
++ temp = 0;
++ /* set the pipe for the cursor */
++ temp |= (pipe << 28);
++ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
++
++ REG_WRITE(control, temp);
++ REG_WRITE(base, addr);
++
++ /* unpin the old bo and always remember the new one */
++ if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo)
++ mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
++ psb_intel_crtc->cursor_bo = bo;
++
++ return 0;
++}
++
++static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t temp = 0;
++ uint32_t adder;
++
++ if (x < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++ x = -x;
++ }
++ if (y < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++ y = -y;
++ }
++
++ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++ adder = psb_intel_crtc->cursor_addr;
++ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
++ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
++
++ return 0;
++}
++
++/** Sets the color ramps on behalf of RandR */
++void psb_intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
++ u16 blue, int regno)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++ psb_intel_crtc->lut_r[regno] = red >> 8;
++ psb_intel_crtc->lut_g[regno] = green >> 8;
++ psb_intel_crtc->lut_b[regno] = blue >> 8;
++}
++
++static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
++ u16 *green, u16 *blue, uint32_t size)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int i;
++
++ if (size != 256)
++ return;
++
++ for (i = 0; i < 256; i++) {
++ psb_intel_crtc->lut_r[i] = red[i] >> 8;
++ psb_intel_crtc->lut_g[i] = green[i] >> 8;
++ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++}
++
++/**
++ * Get a pipe with a simple mode set on it for doing load-based monitor
++ * detection.
++ *
++ * It will be up to the load-detect code to adjust the pipe as appropriate for
++ * its requirements. The pipe will be connected to no other outputs.
++ *
++ * Currently this code will only succeed if there is a pipe with no outputs
++ * configured for it. In the future, it could choose to temporarily disable
++ * some outputs to free up a pipe for its use.
++ *
++ * \return crtc, or NULL if no pipes are available.
++ */
++
++/* VESA 640x480x72Hz mode to set on the pipe */
++static struct drm_display_mode load_detect_mode = {
++ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
++ 704, 832, 0, 480, 489, 491, 520, 0,
++ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
++};
++
++struct drm_crtc *psb_intel_get_load_detect_pipe(struct psb_intel_output
++ *psb_intel_output,
++ struct drm_display_mode *mode,
++ int *dpms_mode)
++{
++ struct psb_intel_crtc *psb_intel_crtc;
++ struct drm_crtc *possible_crtc;
++ struct drm_crtc *supported_crtc = NULL;
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ struct drm_crtc *crtc = NULL;
++ struct drm_device *dev = encoder->dev;
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ struct drm_crtc_helper_funcs *crtc_funcs;
++ int i = -1;
++
++ /*
++ * Algorithm gets a little messy:
++ * - if the connector already has an assigned crtc, use it (but make
++ * sure it's on first)
++ * - try to find the first unused crtc that can drive this connector,
++ * and use that if we find one
++ * - if there are no unused crtcs available, try to use the first
++ * one we found that supports the connector
++ */
++
++ /* See if we already have a CRTC for this connector */
++ if (encoder->crtc) {
++ crtc = encoder->crtc;
++ /* Make sure the crtc and connector are running */
++ psb_intel_crtc = to_psb_intel_crtc(crtc);
++ *dpms_mode = psb_intel_crtc->dpms_mode;
++ if (psb_intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
++ crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
++ }
++ return crtc;
++ }
++
++ /* Find an unused one (if possible) */
++ list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list,
++ head) {
++ i++;
++ if (!(encoder->possible_crtcs & (1 << i)))
++ continue;
++ if (!possible_crtc->enabled) {
++ crtc = possible_crtc;
++ break;
++ }
++ if (!supported_crtc)
++ supported_crtc = possible_crtc;
++ }
++
++ /*
++ * If we didn't find an unused CRTC, don't use any.
++ */
++ if (!crtc)
++ return NULL;
++
++ encoder->crtc = crtc;
++ psb_intel_output->load_detect_temp = true;
++
++ psb_intel_crtc = to_psb_intel_crtc(crtc);
++ *dpms_mode = psb_intel_crtc->dpms_mode;
++
++ if (!crtc->enabled) {
++ if (!mode)
++ mode = &load_detect_mode;
++ drm_crtc_helper_set_mode(crtc, mode, 0, 0, crtc->fb);
++ } else {
++ if (psb_intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
++ crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++ }
++
++ /* Add this connector to the crtc */
++ encoder_funcs->mode_set(encoder, &crtc->mode, &crtc->mode);
++ encoder_funcs->commit(encoder);
++ }
++ /* let the connector get through one full cycle before testing */
++ psb_intel_wait_for_vblank(dev);
++
++ return crtc;
++}
++
++void psb_intel_release_load_detect_pipe(struct psb_intel_output *psb_intel_output,
++ int dpms_mode)
++{
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ struct drm_device *dev = encoder->dev;
++ struct drm_crtc *crtc = encoder->crtc;
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++
++ if (psb_intel_output->load_detect_temp) {
++ encoder->crtc = NULL;
++ psb_intel_output->load_detect_temp = false;
++ crtc->enabled = drm_helper_crtc_in_use(crtc);
++ drm_helper_disable_unused_functions(dev);
++ }
++
++ /* Switch crtc and output back off if necessary */
++ if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
++ if (encoder->crtc == crtc)
++ encoder_funcs->dpms(encoder, dpms_mode);
++ crtc_funcs->dpms(crtc, dpms_mode);
++ }
++}
++
++/* Returns the clock of the currently programmed mode of the given pipe. */
++static int psb_intel_crtc_clock_get(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ u32 dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
++ u32 fp;
++ struct psb_intel_clock_t clock;
++
++ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
++ else
++ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
++
++ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
++ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
++ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
++ if (IS_I9XX(dev)) {
++ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT);
++
++ switch (dpll & DPLL_MODE_MASK) {
++ case DPLLB_MODE_DAC_SERIAL:
++ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
++ 5 : 10;
++ break;
++ case DPLLB_MODE_LVDS:
++ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
++ 7 : 14;
++ break;
++ default:
++ DRM_DEBUG("Unknown DPLL mode %08x in programmed "
++ "mode\n", (int) (dpll & DPLL_MODE_MASK));
++ return 0;
++ }
++
++ /* XXX: Handle the 100Mhz refclk */
++ i9xx_clock(96000, &clock);
++ } else {
++ bool is_lvds = (pipe == 1)
++ && (REG_READ(LVDS) & LVDS_PORT_EN);
++
++ if (is_lvds) {
++ clock.p1 =
++ ffs((dpll &
++ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT);
++ clock.p2 = 14;
++
++ if ((dpll & PLL_REF_INPUT_MASK) ==
++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++ /* XXX: might not be 66MHz */
++ i8xx_clock(66000, &clock);
++ } else
++ i8xx_clock(48000, &clock);
++ } else {
++ if (dpll & PLL_P1_DIVIDE_BY_TWO)
++ clock.p1 = 2;
++ else {
++ clock.p1 =
++ ((dpll &
++ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
++ }
++ if (dpll & PLL_P2_DIVIDE_BY_4)
++ clock.p2 = 4;
++ else
++ clock.p2 = 2;
++
++ i8xx_clock(48000, &clock);
++ }
++ }
++
++ /* XXX: It would be nice to validate the clocks, but we can't reuse
++ * i830PllIsValid() because it relies on the xf86_config connector
++ * configuration being accurate, which it isn't necessarily.
++ */
++
++ return clock.dot;
++}
++
++/** Returns the currently programmed mode of the given pipe. */
++struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ struct drm_display_mode *mode;
++ int htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
++ int hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
++ int vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
++ int vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
++ mode->hdisplay = (htot & 0xffff) + 1;
++ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
++ mode->hsync_start = (hsync & 0xffff) + 1;
++ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
++ mode->vdisplay = (vtot & 0xffff) + 1;
++ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
++ mode->vsync_start = (vsync & 0xffff) + 1;
++ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++ drm_crtc_cleanup(crtc);
++ kfree(psb_intel_crtc);
++}
++
++static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
++ .dpms = psb_intel_crtc_dpms,
++ .mode_fixup = psb_intel_crtc_mode_fixup,
++ .mode_set = psb_intel_crtc_mode_set,
++ .mode_set_base = psb_intel_pipe_set_base,
++ .prepare = psb_intel_crtc_prepare,
++ .commit = psb_intel_crtc_commit,
++};
++
++static const struct drm_crtc_helper_funcs mrst_helper_funcs;
++
++const struct drm_crtc_funcs psb_intel_crtc_funcs = {
++ .cursor_set = psb_intel_crtc_cursor_set,
++ .cursor_move = psb_intel_crtc_cursor_move,
++ .gamma_set = psb_intel_crtc_gamma_set,
++ .set_config = drm_crtc_helper_set_config,
++ .destroy = psb_intel_crtc_destroy,
++};
++
++
++void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_crtc *psb_intel_crtc;
++ int i;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_crtc_init \n");
++#endif /* PRINT_JLIU7 */
++
++ /* We allocate a extra array of drm_connector pointers
++ * for fbdev after the crtc */
++ psb_intel_crtc =
++ kzalloc(sizeof(struct psb_intel_crtc) +
++ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
++ GFP_KERNEL);
++ if (psb_intel_crtc == NULL)
++ return;
++
++ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
++
++ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
++ psb_intel_crtc->pipe = pipe;
++ for (i = 0; i < 256; i++) {
++ psb_intel_crtc->lut_r[i] = i;
++ psb_intel_crtc->lut_g[i] = i;
++ psb_intel_crtc->lut_b[i] = i;
++ }
++
++ psb_intel_crtc->mode_dev = mode_dev;
++ psb_intel_crtc->cursor_addr = 0;
++ psb_intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
++
++ if (IS_MRST(dev)) {
++ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs);
++ } else {
++ drm_crtc_helper_add(&psb_intel_crtc->base,
++ &psb_intel_helper_funcs);
++ }
++
++ /* Setup the array of drm_connector pointer array */
++ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
++ psb_intel_crtc->mode_set.connectors =
++ (struct drm_connector **) (psb_intel_crtc + 1);
++ psb_intel_crtc->mode_set.num_connectors = 0;
++
++#if 0 /* JB: not drop, What should go in here? */
++ if (i915_fbpercrtc)
++#endif
++}
++
++struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
++{
++ struct drm_crtc *crtc = NULL;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ if (psb_intel_crtc->pipe == pipe)
++ break;
++ }
++ return crtc;
++}
++
++int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
++{
++ int index_mask = 0;
++ struct drm_connector *connector;
++ int entry = 0;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ if (type_mask & (1 << psb_intel_output->type))
++ index_mask |= (1 << entry);
++ entry++;
++ }
++ return index_mask;
++}
++
++#if 0 /* JB: Should be per device */
++static void psb_intel_setup_outputs(struct drm_device *dev)
++{
++ struct drm_connector *connector;
++
++ psb_intel_crt_init(dev);
++
++ /* Set up integrated LVDS */
++ if (IS_MOBILE(dev) && !IS_I830(dev))
++ psb_intel_lvds_init(dev);
++
++ if (IS_I9XX(dev)) {
++ psb_intel_sdvo_init(dev, SDVOB);
++ psb_intel_sdvo_init(dev, SDVOC);
++ } else
++ psb_intel_dvo_init(dev);
++
++ if (IS_I9XX(dev) && !IS_I915G(dev))
++ psb_intel_tv_init(dev);
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ int crtc_mask = 0, clone_mask = 0;
++
++ /* valid crtcs */
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_DVO:
++ case INTEL_OUTPUT_SDVO:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
++ (1 << INTEL_OUTPUT_DVO) |
++ (1 << INTEL_OUTPUT_SDVO));
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
++ (1 << INTEL_OUTPUT_DVO) |
++ (1 << INTEL_OUTPUT_SDVO));
++ break;
++ case INTEL_OUTPUT_LVDS:
++ crtc_mask = (1 << 1);
++ clone_mask = (1 << INTEL_OUTPUT_LVDS);
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
++ break;
++ }
++ encoder->possible_crtcs = crtc_mask;
++ encoder->possible_clones =
++ psb_intel_connector_clones(dev, clone_mask);
++ }
++}
++#endif
++
++#if 0 /* JB: Rework framebuffer code into something none device specific */
++static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct psb_intel_framebuffer *psb_intel_fb = to_psb_intel_framebuffer(fb);
++ struct drm_device *dev = fb->dev;
++
++ if (fb->fbdev)
++ intelfb_remove(dev, fb);
++
++ drm_framebuffer_cleanup(fb);
++ drm_gem_object_unreference(fb->mm_private);
++
++ kfree(psb_intel_fb);
++}
++
++static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ struct drm_gem_object *object = fb->mm_private;
++
++ return drm_gem_handle_create(file_priv, object, handle);
++}
++
++static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
++ .destroy = psb_intel_user_framebuffer_destroy,
++ .create_handle = psb_intel_user_framebuffer_create_handle,
++};
++
++struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev,
++ struct drm_mode_fb_cmd
++ *mode_cmd,
++ void *mm_private)
++{
++ struct psb_intel_framebuffer *psb_intel_fb;
++
++ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
++ if (!psb_intel_fb)
++ return NULL;
++
++ if (!drm_framebuffer_init(dev, &psb_intel_fb->base, &psb_intel_fb_funcs))
++ return NULL;
++
++ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
++
++ return &psb_intel_fb->base;
++}
++
++
++static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
++ drm_device
++ *dev,
++ struct
++ drm_file
++ *filp,
++ struct
++ drm_mode_fb_cmd
++ *mode_cmd)
++{
++ struct drm_gem_object *obj;
++
++ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
++ if (!obj)
++ return NULL;
++
++ return psb_intel_framebuffer_create(dev, mode_cmd, obj);
++}
++
++static int psb_intel_insert_new_fb(struct drm_device *dev,
++ struct drm_file *file_priv,
++ struct drm_framebuffer *fb,
++ struct drm_mode_fb_cmd *mode_cmd)
++{
++ struct psb_intel_framebuffer *psb_intel_fb;
++ struct drm_gem_object *obj;
++ struct drm_crtc *crtc;
++
++ psb_intel_fb = to_psb_intel_framebuffer(fb);
++
++ mutex_lock(&dev->struct_mutex);
++ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
++
++ if (!obj) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
++ }
++ drm_gem_object_unreference(psb_intel_fb->base.mm_private);
++ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc->fb == fb) {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
++ }
++ }
++ return 0;
++}
++
++static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
++ .resize_fb = psb_intel_insert_new_fb,
++ .fb_create = psb_intel_user_framebuffer_create,
++ .fb_changed = intelfb_probe,
++};
++#endif
++
++#if 0 /* Should be per device */
++void psb_intel_modeset_init(struct drm_device *dev)
++{
++ int num_pipe;
++ int i;
++
++ drm_mode_config_init(dev);
++
++ dev->mode_config.min_width = 0;
++ dev->mode_config.min_height = 0;
++
++ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs;
++
++ if (IS_I965G(dev)) {
++ dev->mode_config.max_width = 8192;
++ dev->mode_config.max_height = 8192;
++ } else {
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++ }
++
++ /* set memory base */
++ if (IS_I9XX(dev))
++ dev->mode_config.fb_base =
++ pci_resource_start(dev->pdev, 2);
++ else
++ dev->mode_config.fb_base =
++ pci_resource_start(dev->pdev, 0);
++
++ if (IS_MOBILE(dev) || IS_I9XX(dev))
++ num_pipe = 2;
++ else
++ num_pipe = 1;
++ DRM_DEBUG("%d display pipe%s available.\n",
++ num_pipe, num_pipe > 1 ? "s" : "");
++
++ for (i = 0; i < num_pipe; i++)
++ psb_intel_crtc_init(dev, i);
++
++ psb_intel_setup_outputs(dev);
++
++ /* setup fbs */
++ /* drm_initial_config(dev, false); */
++}
++#endif
++
++void psb_intel_modeset_cleanup(struct drm_device *dev)
++{
++ drm_mode_config_cleanup(dev);
++}
++
++
++/* current intel driver doesn't take advantage of encoders
++ always give back the encoder for the connector
++*/
++struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++
++ return &psb_intel_output->enc;
++}
++
++/* MRST_PLATFORM start */
++
++#if DUMP_REGISTER
++void dump_dc_registers(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ unsigned int i = 0;
++
++ DRM_INFO("jliu7 dump_dc_registers\n");
++
++
++ if (0x80000000 & REG_READ(0x70008)) {
++ for (i = 0x20a0; i < 0x20af; i += 4) {
++ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n", i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0xf014; i < 0xf047; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe A dpll register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x60000; i < 0x6005f; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe A timing register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61140; i < 0x61143; i += 4) {
++ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61180; i < 0x6123F; i += 4) {
++ DRM_INFO
++ ("jliu7 LVDS PORT register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61254; i < 0x612AB; i += 4) {
++ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x70000; i < 0x70047; i += 4) {
++ DRM_INFO
++ ("jliu7 PIPE A control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x70180; i < 0x7020b; i += 4) {
++ DRM_INFO("jliu7 display A control register=0x%x,"
++ "value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71400; i < 0x71403; i += 4) {
++ DRM_INFO
++ ("jliu7 VGA Display Plane Control register=0x%x,"
++ "value=%x\n", i, (unsigned int) REG_READ(i));
++ }
++ }
++
++ if (0x80000000 & REG_READ(0x71008)) {
++ for (i = 0x61000; i < 0x6105f; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe B timing register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71000; i < 0x71047; i += 4) {
++ DRM_INFO
++ ("jliu7 PIPE B control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71180; i < 0x7120b; i += 4) {
++ DRM_INFO("jliu7 display B control register=0x%x,"
++ "value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++ }
++#if 0
++ for (i = 0x70080; i < 0x700df; i += 4) {
++ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++#endif
++
++}
++
++void dump_dsi_registers(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ unsigned int i = 0;
++
++ DRM_INFO("jliu7 dump_dsi_registers\n");
++
++ for (i = 0xb000; i < 0xb064; i += 4) {
++ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++
++ i = 0xb104;
++ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++}
++#endif /* DUMP_REGISTER */
++
++
++struct mrst_limit_t {
++ struct psb_intel_range_t dot, m, p1;
++};
++
++struct mrst_clock_t {
++ /* derived values */
++ int dot;
++ int m;
++ int p1;
++};
++
++#define MRST_LIMIT_LVDS_100L 0
++#define MRST_LIMIT_LVDS_83 1
++#define MRST_LIMIT_LVDS_100 2
++
++#define MRST_DOT_MIN 19750
++#define MRST_DOT_MAX 120000
++#define MRST_M_MIN_100L 20
++#define MRST_M_MIN_100 10
++#define MRST_M_MIN_83 12
++#define MRST_M_MAX_100L 34
++#define MRST_M_MAX_100 17
++#define MRST_M_MAX_83 20
++#define MRST_P1_MIN 2
++#define MRST_P1_MAX_0 7
++#define MRST_P1_MAX_1 8
++
++static const struct mrst_limit_t mrst_limits[] = {
++ { /* MRST_LIMIT_LVDS_100L */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++ },
++ { /* MRST_LIMIT_LVDS_83L */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
++ },
++ { /* MRST_LIMIT_LVDS_100 */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++ },
++};
++
++#define MRST_M_MIN 10
++static const u32 mrst_m_converts[] = {
++ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
++ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
++ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
++};
++
++#define COUNT_MAX 0x10000000
++void mrstWaitForPipeDisable(struct drm_device *dev)
++{
++ int count, temp;
++
++ /* FIXME JLIU7_PO */
++ psb_intel_wait_for_vblank(dev);
++ return;
++
++ /* Wait for for the pipe disable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(PIPEACONF);
++ if ((temp & PIPEACONF_PIPE_STATE) == 0)
++ break;
++ }
++
++ if (count == COUNT_MAX) {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeDisable time out. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeDisable cout = %d. \n",
++ count);
++#endif /* PRINT_JLIU7 */
++ }
++}
++
++void mrstWaitForPipeEnable(struct drm_device *dev)
++{
++ int count, temp;
++
++ /* FIXME JLIU7_PO */
++ psb_intel_wait_for_vblank(dev);
++ return;
++
++ /* Wait for for the pipe disable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(PIPEACONF);
++ if ((temp & PIPEACONF_PIPE_STATE) == 1)
++ break;
++ }
++
++ if (count == COUNT_MAX) {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeEnable time out. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeEnable cout = %d. \n",
++ count);
++#endif /* PRINT_JLIU7 */
++ }
++}
++
++static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
++{
++ const struct mrst_limit_t *limit;
++ struct drm_device *dev = crtc->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
++ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
++ if (dev_priv->sku_100L)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
++ if (dev_priv->sku_83)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
++ if (dev_priv->sku_100)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
++ } else {
++ limit = NULL;
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 jliu7 mrst_limit Wrong display type. \n");
++#endif /* PRINT_JLIU7 */
++ }
++
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++static void mrst_clock(int refclk, struct mrst_clock_t *clock)
++{
++ clock->dot = (refclk * clock->m) / (14 * clock->p1);
++}
++
++void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
++{
++#if PRINT_JLIU7
++ DRM_INFO
++ ("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d. \n",
++ prefix, clock->dot, clock->m, clock->p1);
++#endif /* PRINT_JLIU7 */
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given refclk,
++ * or FALSE. Divisor values are the actual divisors for
++ */
++static bool
++mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
++ struct mrst_clock_t *best_clock)
++{
++ struct mrst_clock_t clock;
++ const struct mrst_limit_t *limit = mrst_limit(crtc);
++ int err = target;
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
++ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ mrst_clock(refclk, &clock);
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
++
++ return err != target;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ /* struct drm_i915_private *dev_priv = dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 temp;
++ bool enabled;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d \n",
++ mode, pipe);
++#endif /* PRINT_JLIU7 */
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
++
++ /* Wait for for the pipe disable to take effect. */
++ mrstWaitForPipeDisable(dev);
++
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) != 0) {
++ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ }
++
++ /* Wait for the clocks to turn off. */
++ udelay(150);
++ break;
++ }
++
++#if DUMP_REGISTER
++ dump_dc_registers(dev);
++#endif /* DUMP_REGISTER */
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++ psb_intel_crtc->dpms_mode = mode;
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++}
++
++static int mrst_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
++ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ int refclk = 0;
++ struct mrst_clock_t clock;
++ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport;
++ bool ok, is_sdvo = false;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ bool is_mipi = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++ struct psb_intel_output *psb_intel_output;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n");
++#endif /* PRINT_JLIU7 */
++
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ psb_intel_output = to_psb_intel_output(connector);
++
++ if (!connector->encoder
++ || connector->encoder->crtc != crtc)
++ continue;
++
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_SDVO:
++ is_sdvo = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ case INTEL_OUTPUT_MIPI:
++ is_mipi = true;
++ break;
++ }
++ }
++
++ if (is_lvds | is_mipi) {
++ /*FIXME JLIU7 Get panel power delay parameters from
++ config data */
++ REG_WRITE(0x61208, 0x25807d0);
++ REG_WRITE(0x6120c, 0x1f407d0);
++ REG_WRITE(0x61210, 0x270f04);
++ }
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
++ drm_mode_debug_printmodeline(mode);
++
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ /* pipesrc and dspsize control the size that is scaled from,
++ * which should always be the user's requested size.
++ */
++ REG_WRITE(dspsize_reg,
++ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
++ REG_WRITE(pipesrc_reg,
++ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ /* setup pipeconf */
++ pipeconf = REG_READ(pipeconf_reg);
++
++ /* Set up the display plane register */
++ dspcntr = REG_READ(dspcntr_reg);
++ dspcntr |= DISPPLANE_GAMMA_ENABLE;
++
++ if (pipe == 0)
++ dspcntr |= DISPPLANE_SEL_PIPE_A;
++ else
++ dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
++ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
++
++ if (is_mipi)
++ return 0;
++
++ if (dev_priv->sku_100L)
++ refclk = 100000;
++ else if (dev_priv->sku_83)
++ refclk = 166000;
++ else if (dev_priv->sku_100)
++ refclk = 200000;
++
++ dpll = 0; /*BIT16 = 0 for 100MHz reference */
++
++ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
++
++ if (!ok) {
++#if 0 /* FIXME JLIU7 */
++ DRM_ERROR("Couldn't find PLL settings for mode!\n");
++ return;
++#endif /* FIXME JLIU7 */
++#if PRINT_JLIU7
++ DRM_INFO
++ ("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d,"
++ "m = %x, p1 = %x. \n", clock.dot, clock.m,
++ clock.p1);
++#endif /* PRINT_JLIU7 */
++ }
++
++ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
++
++ dpll |= DPLL_VGA_MODE_DIS;
++
++
++ dpll |= DPLL_VCO_ENABLE;
++
++ if (is_lvds)
++ dpll |= DPLLA_MODE_LVDS;
++ else
++ dpll |= DPLLB_MODE_DAC_SERIAL;
++
++ if (is_sdvo) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ dpll |=
++ (sdvo_pixel_multiply -
++ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ }
++
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 2)) << 17;
++
++ dpll |= DPLL_VCO_ENABLE;
++
++#if PRINT_JLIU7
++ mrstPrintPll("chosen", &clock);
++#endif /* PRINT_JLIU7 */
++
++#if 0
++ if (!xf86ModesEqual(mode, adjusted_mode)) {
++ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
++ "Adjusted mode for pipe %c:\n",
++ pipe == 0 ? 'A' : 'B');
++ xf86PrintModeline(pScrn->scrnIndex, mode);
++ }
++ i830PrintPll("chosen", &clock);
++#endif
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
++ udelay(150);
++ }
++
++ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
++ * This is an exception to the general rule that mode_set doesn't turn
++ * things on.
++ */
++ if (is_lvds) {
++
++ /* FIXME JLIU7 need to support 24bit panel */
++#if MRST_24BIT_LVDS
++ lvdsport =
++ (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN
++ | LVDS_A3_POWER_UP | LVDS_A0A2_CLKA_POWER_UP;
++
++#if MRST_24BIT_DOT_1
++ lvdsport |= MRST_PANEL_24_DOT_1_FORMAT;
++#endif /* MRST_24BIT_DOT_1 */
++
++#else /* MRST_24BIT_LVDS */
++ lvdsport =
++ (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN;
++#endif /* MRST_24BIT_LVDS */
++
++#if MRST_24BIT_WA
++ lvdsport = 0x80300340;
++#else /* MRST_24BIT_DOT_WA */
++ lvdsport = 0x82300300;
++#endif /* MRST_24BIT_DOT_WA */
++
++ REG_WRITE(LVDS, lvdsport);
++ REG_READ(LVDS);
++ }
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ /* write it again -- the BIOS does, after all */
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ /* Wait for for the pipe enable to take effect. */
++ mrstWaitForPipeEnable(dev);
++
++ REG_WRITE(dspcntr_reg, dspcntr);
++ psb_intel_wait_for_vblank(dev);
++
++ return 0;
++}
++
++
++static const struct drm_crtc_helper_funcs mrst_helper_funcs = {
++ .dpms = mrst_crtc_dpms,
++ .mode_fixup = psb_intel_crtc_mode_fixup,
++ .mode_set = mrst_crtc_mode_set,
++ .mode_set_base = psb_intel_pipe_set_base,
++ .prepare = psb_intel_crtc_prepare,
++ .commit = psb_intel_crtc_commit,
++};
++
++/* MRST_PLATFORM end */
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_display.h b/drivers/gpu/drm/psb/psb_intel_display.h
+--- a/drivers/gpu/drm/psb/psb_intel_display.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_display.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,31 @@
++
++/* copyright (c) 2008, Intel Corporation
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#ifndef _INTEL_DISPLAY_H_
++#define _INTEL_DISPLAY_H_
++
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_drv.h b/drivers/gpu/drm/psb/psb_intel_drv.h
+--- a/drivers/gpu/drm/psb/psb_intel_drv.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_drv.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,192 @@
++/*
++ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
++ * Copyright (c) 2007 Intel Corporation
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++#ifndef __INTEL_DRV_H__
++#define __INTEL_DRV_H__
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++#include <drm/drm_crtc.h>
++
++#include <drm/drm_crtc_helper.h>
++
++/*
++ * MOORESTOWN defines
++ */
++#define MRST_I2C 0
++
++#define DUMP_REGISTER 0
++#define MRST_24BIT_LVDS 1
++#define MRST_24BIT_DOT_1 0
++#define MRST_24BIT_WA 0
++
++#define PRINT_JLIU7 0
++#define DELAY_TIME1 80 /* 1000 = 1ms */
++
++/*
++ * Display related stuff
++ */
++
++/* store information about an Ixxx DVO */
++/* The i830->i865 use multiple DVOs with multiple i2cs */
++/* the i915, i945 have a single sDVO i2c bus - which is different */
++#define MAX_OUTPUTS 6
++/* maximum connectors per crtcs in the mode set */
++#define INTELFB_CONN_LIMIT 4
++
++#define INTEL_I2C_BUS_DVO 1
++#define INTEL_I2C_BUS_SDVO 2
++
++/* these are outputs from the chip - integrated only
++ * external chips are via DVO or SDVO output */
++#define INTEL_OUTPUT_UNUSED 0
++#define INTEL_OUTPUT_ANALOG 1
++#define INTEL_OUTPUT_DVO 2
++#define INTEL_OUTPUT_SDVO 3
++#define INTEL_OUTPUT_LVDS 4
++#define INTEL_OUTPUT_TVOUT 5
++#define INTEL_OUTPUT_MIPI 6
++
++#define INTEL_DVO_CHIP_NONE 0
++#define INTEL_DVO_CHIP_LVDS 1
++#define INTEL_DVO_CHIP_TMDS 2
++#define INTEL_DVO_CHIP_TVOUT 4
++
++/**
++ * Hold information useally put on the device driver privates here,
++ * since it needs to be shared across multiple of devices drivers privates.
++ */
++struct psb_intel_mode_device {
++
++ /*
++ * Abstracted memory manager operations
++ */
++ void *(*bo_from_handle) (struct drm_device *dev,
++ struct drm_file *file_priv,
++ unsigned int handle);
++ size_t(*bo_size) (struct drm_device *dev, void *bo);
++ size_t(*bo_offset) (struct drm_device *dev, void *bo);
++ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
++ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
++
++ /*
++ * Cursor
++ */
++ int cursor_needs_physical;
++
++ /*
++ * LVDS info
++ */
++ int backlight_duty_cycle; /* restore backlight to this value */
++ bool panel_wants_dither;
++ struct drm_display_mode *panel_fixed_mode;
++ struct drm_display_mode *vbt_mode; /* if any */
++
++ uint32_t saveBLC_PWM_CTL;
++};
++
++struct psb_intel_i2c_chan {
++ /* for getting at dev. private (mmio etc.) */
++ struct drm_device *drm_dev;
++ u32 reg; /* GPIO reg */
++ struct i2c_adapter adapter;
++ struct i2c_algo_bit_data algo;
++ u8 slave_addr;
++};
++
++struct psb_intel_output {
++ struct drm_connector base;
++
++ struct drm_encoder enc;
++ int type;
++ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
++ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
++ bool load_detect_temp;
++ void *dev_priv;
++
++ struct psb_intel_mode_device *mode_dev;
++
++};
++
++struct psb_intel_crtc {
++ struct drm_crtc base;
++ int pipe;
++ int plane;
++ uint32_t cursor_addr;
++ u8 lut_r[256], lut_g[256], lut_b[256];
++ int dpms_mode;
++ struct psb_intel_framebuffer *fbdev_fb;
++ /* a mode_set for fbdev users on this crtc */
++ struct drm_mode_set mode_set;
++
++ /* current bo we scanout from */
++ void *scanout_bo;
++
++ /* current bo we cursor from */
++ void *cursor_bo;
++
++ struct psb_intel_mode_device *mode_dev;
++};
++
++#define to_psb_intel_crtc(x) container_of(x, struct psb_intel_crtc, base)
++#define to_psb_intel_output(x) container_of(x, struct psb_intel_output, base)
++#define enc_to_psb_intel_output(x) container_of(x, struct psb_intel_output, enc)
++#define to_psb_intel_framebuffer(x) container_of(x, struct psb_intel_framebuffer, base)
++
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++ const u32 reg, const char *name);
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
++int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
++extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
++
++extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++ struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_crt_init(struct drm_device *dev);
++extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
++extern void psb_intel_dvo_init(struct drm_device *dev);
++extern void psb_intel_tv_init(struct drm_device *dev);
++extern void psb_intel_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void mrst_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++
++extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
++extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
++extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
++
++extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
++ *connector);
++
++extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++ struct drm_crtc *crtc);
++extern void psb_intel_wait_for_vblank(struct drm_device *dev);
++extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
++ int pipe);
++extern struct drm_crtc *psb_intel_get_load_detect_pipe
++ (struct psb_intel_output *psb_intel_output,
++ struct drm_display_mode *mode, int *dpms_mode);
++extern void psb_intel_release_load_detect_pipe(struct psb_intel_output
++ *psb_intel_output, int dpms_mode);
++
++extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
++ int sdvoB);
++extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
++extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
++ int enable);
++extern int intelfb_probe(struct drm_device *dev);
++extern int intelfb_remove(struct drm_device *dev,
++ struct drm_framebuffer *fb);
++extern void psb_intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red,
++ u16 green, u16 blue, int regno);
++
++extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
++ *dev, struct
++ drm_mode_fb_cmd
++ *mode_cmd,
++ void *mm_private);
++#endif /* __INTEL_DRV_H__ */
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_dsi.c b/drivers/gpu/drm/psb/psb_intel_dsi.c
+--- a/drivers/gpu/drm/psb/psb_intel_dsi.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_dsi.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1644 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++#include <linux/backlight.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++
++#define DRM_MODE_ENCODER_MIPI 5
++#define DRM_MODE_CONNECTOR_MIPI 13
++
++#if DUMP_REGISTER
++extern void dump_dsi_registers(struct drm_device *dev);
++#endif /* DUMP_REGISTER */
++
++int dsi_backlight; /* restore backlight to this value */
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
++{
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n");
++#endif /* PRINT_JLIU7 */
++
++ return BRIGHTNESS_MAX_LEVEL;
++
++/* FIXME jliu7 need to revisit */
++}
++
++/**
++ * Sets the backlight level.
++ *
++ * \param level backlight level, from 0 to psb_intel_dsi_get_max_backlight().
++ */
++static void mrst_dsi_set_backlight(struct drm_device *dev, int level)
++{
++ u32 blc_pwm_ctl;
++ u32 max_pwm_blc;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_set_backlight \n");
++#endif /* PRINT_JLIU7 */
++
++#if 1 /* FIXME JLIU7 */
++ return;
++#endif /* FIXME JLIU7 */
++
++ /* Prevent LVDS going to total black */
++ if (level < 20)
++ level = 20;
++
++ max_pwm_blc = mrst_lvds_get_PWM_ctrl_freq(dev);
++
++ if (max_pwm_blc ==0)
++ {
++ return;
++ }
++
++ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ if (blc_pol == BLC_POLARITY_INVERSE) {
++ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
++ }
++
++ REG_WRITE(BLC_PWM_CTL,
++ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
++ blc_pwm_ctl);
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_dsi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 pp_status;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_set_power \n");
++#endif /* PRINT_JLIU7 */
++ /*
++ * The DSI device must be ready before we can change power state.
++ */
++ if (!dev_priv->dsi_device_ready)
++ {
++ return;
++ }
++
++ /*
++ * We don't support dual DSI yet. May be in POR in the future.
++ */
++ if (dev_priv->dual_display)
++ {
++ return;
++ }
++
++ if (on) {
++ if (dev_priv->dpi & (!dev_priv->dpi_panel_on))
++ {
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = on \n");
++#endif /* PRINT_JLIU7 */
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
++#if 0 /*FIXME JLIU7 */
++ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_ON_DATA);
++ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_ON);
++#endif /*FIXME JLIU7 */
++
++ dev_priv->dpi_panel_on = true;
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
++ }
++ else if ((!dev_priv->dpi) & (!dev_priv->dbi_panel_on))
++ {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = on \n");
++#endif /* PRINT_JLIU7 */
++
++ dev_priv->DBI_CB_pointer = 0;
++ /* exit sleep mode */
++ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = exit_sleep_mode;
++
++#if 0 /*FIXME JLIU7 */
++ /* Check MIPI Adapter command registers */
++ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
++#endif /*FIXME JLIU7 */
++
++ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
++
++ /* The host processor must wait five milliseconds after sending exit_sleep_mode command before sending another
++ command. This delay allows the supply voltages and clock circuits to stabilize */
++ udelay(5000);
++
++ dev_priv->DBI_CB_pointer = 0;
++
++ /* set display on */
++ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = set_display_on ;
++
++#if 0 /*FIXME JLIU7 */
++ /* Check MIPI Adapter command registers */
++ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
++#endif /*FIXME JLIU7 */
++
++ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
++
++ dev_priv->dbi_panel_on = true;
++ }
++/*FIXME JLIU7 */
++/* Need to figure out how to control the MIPI panel power on sequence*/
++
++ mrst_dsi_set_backlight(dev, dsi_backlight);
++ }
++ else
++ {
++ mrst_dsi_set_backlight(dev, 0);
++/*FIXME JLIU7 */
++/* Need to figure out how to control the MIPI panel power down sequence*/
++ /*
++ * Only save the current backlight value if we're going from
++ * on to off.
++ */
++ if (dev_priv->dpi & dev_priv->dpi_panel_on)
++ {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrst_dsi_set_power dpi = off \n");
++#endif /* PRINT_JLIU7 */
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++
++#if 0 /*FIXME JLIU7 */
++ REG_WRITE(DPI_DATA_REG, DPI_BACK_LIGHT_OFF_DATA);
++ REG_WRITE(DPI_CONTROL_REG, DPI_BACK_LIGHT_OFF);
++#endif /*FIXME JLIU7 */
++ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
++ dev_priv->dpi_panel_on = false;
++ }
++ else if ((!dev_priv->dpi) & dev_priv->dbi_panel_on)
++ {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrst_dsi_set_power dbi = off \n");
++#endif /* PRINT_JLIU7 */
++ dev_priv->DBI_CB_pointer = 0;
++ /* enter sleep mode */
++ *(dev_priv->p_DBI_commandBuffer + dev_priv->DBI_CB_pointer++) = enter_sleep_mode;
++
++ /* Check MIPI Adapter command registers */
++ while (REG_READ(MIPI_COMMAND_ADDRESS_REG) & BIT0);
++
++ /* FIXME_jliu7 mapVitualToPhysical(dev_priv->p_DBI_commandBuffer);*/
++ REG_WRITE(MIPI_COMMAND_LENGTH_REG, 1);
++ REG_WRITE(MIPI_COMMAND_ADDRESS_REG, (u32)dev_priv->p_DBI_commandBuffer | BIT0);
++ dev_priv->dbi_panel_on = false;
++ }
++ }
++}
++
++static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_dpms \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_dsi_set_power(dev, output, true);
++ else
++ mrst_dsi_set_power(dev, output, false);
++
++ /* XXX: We never power down the DSI pairs. */
++}
++
++static void mrst_dsi_save(struct drm_connector *connector)
++{
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_save \n");
++#endif /* PRINT_JLIU7 */
++
++ dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
++ dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
++ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++ dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
++ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ /*
++ * make backlight to full brightness
++ */
++ dsi_backlight = mrst_dsi_get_max_backlight(dev);
++#endif
++}
++
++static void mrst_dsi_restore(struct drm_connector *connector)
++{
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_restore \n");
++#endif /* PRINT_JLIU7 */
++
++ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
++ REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
++ REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
++ REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
++ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
++ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
++ mrst_dsi_set_power(dev, true);
++ else
++ mrst_dsi_set_power(dev, false);
++#endif
++}
++
++static void mrst_dsi_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_prepare \n");
++#endif /* PRINT_JLIU7 */
++
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ mrst_dsi_set_power(dev, output, false);
++}
++
++static void mrst_dsi_commit( struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_commit \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ mrst_dsi_get_max_backlight(dev);
++
++ mrst_dsi_set_power(dev, output, true);
++
++#if DUMP_REGISTER
++ dump_dsi_registers(dev);
++#endif /* DUMP_REGISTER */
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHS_TX_timeoutCount
++
++DESCRIPTION: In burst mode, value greater than one DPI line Time in byte clock
++ (txbyteclkhs). To timeout this timer 1+ of the above said value is recommended.
++
++ In non-burst mode, Value greater than one DPI frame time in byte clock(txbyteclkhs).
++ To timeout this timer 1+ of the above said value is recommended.
++
++\* ************************************************************************* */
++static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++
++ u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
++
++ /* Total pixels need to be transfer per line*/
++ HTotalPixel = (dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch) * dev_priv->laneCount + dev_priv->HactiveArea;
++
++ /* byte count = (pixel count * bits per pixel) / 8 */
++ HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
++
++ if (dev_priv->videoModeFormat == BURST_MODE)
++ {
++ timeoutCount = HTOT_count + 1;
++#if 1 /*FIXME remove it after power-on */
++ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
++ + dev_priv->VsyncWidth;
++ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
++ timeoutCount = (HTOT_count * VTOT_count) + 1;
++#endif
++ }
++ else
++ {
++ VTOT_count = dev_priv->VactiveArea + dev_priv->VbackPorch + dev_priv->VfrontPorch
++ + dev_priv->VsyncWidth;
++ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
++ timeoutCount = (HTOT_count * VTOT_count) + 1;
++ }
++
++ return timeoutCount & 0xFFFF;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetLP_RX_timeoutCount
++
++DESCRIPTION: The timeout value is protocol specific. Time out value is calculated
++ from txclkesc(50ns).
++
++ Minimum value =
++ Time to send one Trigger message = 4 X txclkesc [Escape mode entry sequence)
++ + 8-bit trigger message (2x8xtxclkesc)
++ +1 txclksesc [stop_state]
++ = 21 X txclkesc [ 15h]
++
++ Maximum Value =
++ Time to send a long packet with maximum payload data
++ = 4 X txclkesc [Escape mode entry sequence)
++ + 8-bit Low power data transmission Command (2x8xtxclkesc)
++ + packet header [ 4X8X2X txclkesc]
++ +payload [ nX8X2Xtxclkesc]
++ +CRC[2X8X2txclkesc]
++ +1 txclksesc [stop_state]
++ = 117 txclkesc +n[payload in terms of bytes]X16txclkesc.
++
++\* ************************************************************************* */
++static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++
++ u32 timeoutCount = 0;
++
++ if (dev_priv->config_phase)
++ {
++ /* Assuming 256 byte DDB data.*/
++ timeoutCount = 117 + 256 * 16;
++ }
++ else
++ {
++ /* For DPI video only mode use the minimum value.*/
++ timeoutCount = 0x15;
++#if 1 /*FIXME remove it after power-on */
++ /* Assuming 256 byte DDB data.*/
++ timeoutCount = 117 + 256 * 16;
++#endif
++ }
++
++ return timeoutCount;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHSA_Count
++
++DESCRIPTION: Shows the horizontal sync value in terms of byte clock
++ (txbyteclkhs)
++ Minimum HSA period should be sufficient to transmit a hsync start short
++ packet(4 bytes)
++ i) For Non-burst Mode with sync pulse, Min value – 4 in decimal [plus
++ an optional 6 bytes for a zero payload blanking packet]. But if
++ the value is less than 10 but more than 4, then this count will
++ be added to the HBP’s count for one lane.
++ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA, so you
++ can program this to zero. If you program this register, these
++ byte values will be added to HBP.
++ iii) For Burst mode of operation, normally the values programmed in
++ terms of byte clock are based on the principle - time for transferring
++ HSA in Burst mode is the same as in non-burst mode.
++\* ************************************************************************* */
++static u32 GetHSA_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HSA_count;
++ u32 HSA_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE)
++ {
++ HSA_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
++ }
++
++ HSA_count = HSA_countX8 / 8;
++
++ return HSA_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHBP_Count
++
++DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
++ Minimum HBP period should be sufficient to transmit a “hsync end short
++ packet(4 bytes) + Blanking packet overhead(6 bytes) + RGB packet header(4 bytes)”
++ For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transferring HBP
++ in Burst mode is the same as in non-burst mode.
++
++ Min value – 14 in decimal [ accounted with zero payload for blanking packet] for one lane.
++ Max value – any value greater than 14 based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHBP_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HBP_count;
++ u32 HBP_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE)
++ {
++ HBP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
++ }
++
++ HBP_count = HBP_countX8 / 8;
++
++ return HBP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHFP_Count
++
++DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
++ Minimum HFP period should be sufficient to transmit “RGB Data packet
++ footer(2 bytes) + Blanking packet overhead(6 bytes)” for non burst mode.
++
++ For burst mode, Minimum HFP period should be sufficient to transmit
++ Blanking packet overhead(6 bytes)”
++
++ For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transferring HFP
++ in Burst mode is the same as in non-burst mode.
++
++ Min value – 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++ Min value – 6 in decimal for burst mode for one lane.
++
++ Max value – any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHFP_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HFP_count;
++ u32 HFP_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE)
++ {
++ HFP_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
++ }
++
++ HFP_count = HFP_countX8 / 8;
++
++ return HFP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHAdr_Count
++
++DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
++ In Non Burst Mode, Count equal to RGB word count value
++
++ In Burst Mode, RGB pixel packets are time-compressed, leaving more time
++ during a scan line for LP mode (saving power) or for multiplexing
++ other transmissions onto the DSI link. Hence, the count equals the
++ time in txbyteclkhs for sending time compressed RGB pixels plus
++ the time needed for moving to power save mode or the time needed
++ for secondary channel to use the DSI link.
++
++ But if the left out time for moving to low power mode is less than
++ 8 txbyteclkhs [2txbyteclkhs for RGB data packet footer and
++ 6txbyteclkhs for a blanking packet with zero payload], then
++ this count will be added to the HFP's count for one lane.
++
++ Min value – 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++ Min value – 6 in decimal for burst mode for one lane.
++
++ Max value – any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHAdr_Count(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HAdr_count;
++ u32 HAdr_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE)
++ {
++ HAdr_countX8 *= dev_priv->DDR_Clock / dev_priv->DDR_Clock_Calculated;
++ }
++
++ HAdr_count = HAdr_countX8 / 8;
++
++ return HAdr_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHighLowSwitchCount
++
++DESCRIPTION: High speed to low power or Low power to high speed switching time
++ in terms byte clock (txbyteclkhs). This value is based on the
++ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
++
++ Typical value - Number of byte clocks required to switch from low power mode
++ to high speed mode after "txrequesths" is asserted.
++
++ The worst count value among the low to high or high to low switching time
++ in terms of txbyteclkhs has to be programmed in this register.
++
++ Useful Formulae:
++ DDR clock period = 2 times UI
++ txbyteclkhs clock = 8 times UI
++ Tlpx = 1 / txclkesc
++ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
++ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
++ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
++ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
++\* ************************************************************************* */
++static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
++
++/* ************************************************************************* *\
++ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE (from Standard D-PHY spec)
++ Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
++
++ Tlpx = 50 ns, Using max txclkesc (20MHz)
++
++ txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
++ UI_period = 500 / dev_priv->DDR_Clock; in ns
++
++ HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
++ = 9000 / dev_priv->DDR_Clock + 200;
++
++ HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
++ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
++ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
++
++\* ************************************************************************* */
++ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
++
++/* ************************************************************************* *\
++ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE (from Standard D-PHY spec)
++ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] + 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
++
++ LP_to_HS = 10 * UI_period + 5 * Tlpx =
++ = 5000 / dev_priv->DDR_Clock + 250;
++
++ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
++ = (5000 / dev_priv->DDR_Clock + 250) / (4000 / dev_priv->DDR_Clock)
++ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
++
++\* ************************************************************************* */
++ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
++
++ if (HighToLowSwitchCount > LowToHighSwitchCount)
++ {
++ HighLowSwitchCount = HighToLowSwitchCount;
++ }
++ else
++ {
++ HighLowSwitchCount = LowToHighSwitchCount;
++ }
++
++
++ /* FIXME jliu need to fine tune the above formulae and remove the following after power on */
++ if (HighLowSwitchCount < 0x1f)
++ HighLowSwitchCount = 0x1f;
++
++ return HighLowSwitchCount;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_gen_long_write
++
++DESCRIPTION:
++
++\* ************************************************************************* */
++static void mrst_gen_long_write(struct drm_device *dev, u32 *data, u16 wc,u8 vc)
++{
++ u32 gen_data_reg = HS_GEN_DATA_REG;
++ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 date_full_bit = HS_DATA_FIFO_FULL;
++ u32 control_full_bit = HS_CTRL_FIFO_FULL;
++ u16 wc_saved = wc;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
++#endif /* PRINT_JLIU7 */
++
++ /* sanity check */
++ if (vc > 4)
++ {
++ DRM_ERROR(KERN_ERR "MIPI Virtual channel Can't greater than 4. \n");
++ return;
++ }
++
++
++ if (0) /* FIXME JLIU7 check if it is in LP*/
++ {
++ gen_data_reg = LP_GEN_DATA_REG;
++ gen_ctrl_reg = LP_GEN_CTRL_REG;
++ date_full_bit = LP_DATA_FIFO_FULL;
++ control_full_bit = LP_CTRL_FIFO_FULL;
++ }
++
++ while (wc >= 4)
++ {
++ /* Check if MIPI IP generic data fifo is not full */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit) == date_full_bit);
++
++ /* write to data buffer */
++ REG_WRITE(gen_data_reg, *data);
++
++ wc -= 4;
++ data ++;
++ }
++
++ switch (wc)
++ {
++ case 1:
++ REG_WRITE8(gen_data_reg, *((u8 *)data));
++ break;
++ case 2:
++ REG_WRITE16(gen_data_reg, *((u16 *)data));
++ break;
++ case 3:
++ REG_WRITE16(gen_data_reg, *((u16 *)data));
++ data = (u32*)((u8*) data + 2);
++ REG_WRITE8(gen_data_reg, *((u8 *)data));
++ break;
++ }
++
++ /* Check if MIPI IP generic control fifo is not full */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit) == control_full_bit);
++ /* write to control buffer */
++ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6));
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_init_HIMAX_MIPI_bridge
++
++DESCRIPTION:
++
++\* ************************************************************************* */
++static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev)
++{
++ u32 gen_data[2];
++ u16 wc = 0;
++ u8 vc =0;
++ u32 gen_data_intel = 0x200105;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n");
++#endif /* PRINT_JLIU7 */
++
++ /* exit sleep mode */
++ wc = 0x5;
++ gen_data[0] = gen_data_intel | (0x11 << 24);
++ gen_data[1] = 0;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_pixel_format */
++ gen_data[0] = gen_data_intel | (0x3A << 24);
++ gen_data[1] = 0x77;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* Set resolution for (800X480) */
++ wc = 0x8;
++ gen_data[0] = gen_data_intel | (0x2A << 24);
++ gen_data[1] = 0x1F030000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[0] = gen_data_intel | (0x2B << 24);
++ gen_data[1] = 0xDF010000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* System control */
++ wc = 0x6;
++ gen_data[0] = gen_data_intel | (0xEE << 24);
++ gen_data[1] = 0x10FA;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* INPUT TIMING FOR TEST PATTERN(800X480) */
++ /* H-size */
++ gen_data[1] = 0x2000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0301;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-size */
++ gen_data[1] = 0xE002;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0103;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-total */
++ gen_data[1] = 0x2004;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0405;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-total */
++ gen_data[1] = 0x0d06;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0207;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-blank */
++ gen_data[1] = 0x0308;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0009;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-blank */
++ gen_data[1] = 0x030A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-start */
++ gen_data[1] = 0xD80C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000D;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-start */
++ gen_data[1] = 0x230E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* RGB domain */
++ gen_data[1] = 0x0027;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* INP_FORM Setting */
++ /* set_1 */
++ gen_data[1] = 0x1C10;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_2 */
++ gen_data[1] = 0x0711;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_3 */
++ gen_data[1] = 0x0012;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_4 */
++ gen_data[1] = 0x0013;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_5 */
++ gen_data[1] = 0x2314;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_6 */
++ gen_data[1] = 0x0015;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_7 */
++ gen_data[1] = 0x2316;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_8 */
++ gen_data[1] = 0x0017;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_1 */
++ gen_data[1] = 0x0330;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC Setting */
++ /* FRC_set_2 */
++ gen_data[1] = 0x237A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_3 */
++ gen_data[1] = 0x4C7B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_4 */
++ gen_data[1] = 0x037C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_5 */
++ gen_data[1] = 0x3482;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_7 */
++ gen_data[1] = 0x1785;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++#if 0
++ /* FRC_set_8 */
++ gen_data[1] = 0xD08F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++#endif
++
++ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */
++ /* out_htotal */
++ gen_data[1] = 0x2090;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0491;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hsync */
++ gen_data[1] = 0x0392;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0093;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hstart */
++ gen_data[1] = 0xD894;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0095;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hsize */
++ gen_data[1] = 0x2096;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0397;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vtotal */
++ gen_data[1] = 0x0D98;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0299;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vsync */
++ gen_data[1] = 0x039A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x009B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vstart */
++ gen_data[1] = 0x239C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x009D;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vsize */
++ gen_data[1] = 0xE09E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x019F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_6 */
++ gen_data[1] = 0x9084;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* Other setting */
++ gen_data[1] = 0x0526;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* RBG domain */
++ gen_data[1] = 0x1177;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* rgbw */
++ /* set_1 */
++ gen_data[1] = 0xD28F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_2 */
++ gen_data[1] = 0x02D0;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_3 */
++ gen_data[1] = 0x08D1;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_4 */
++ gen_data[1] = 0x05D2;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_5 */
++ gen_data[1] = 0x24D4;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_6 */
++ gen_data[1] = 0x00D5;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x02D7;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x00D8;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ gen_data[1] = 0x48F3;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0xD4F2;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x3D8E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x60FD;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x00B5;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x48F4;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* inside pattern */
++ gen_data[1] = 0x0060;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_init_NSC_MIPI_bridge
++ `
++DESCRIPTION:
++
++\* ************************************************************************* */
++static void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
++{
++
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_init_NSC_MIPI_bridge.\n");
++#endif /* PRINT_JLIU7 */
++ /* Program MIPI IP to 50MHz DSI, Non-Burst mode with sync event,
++ 1 or 2 Data Lanes */
++
++ udelay(DELAY_TIME1);
++ /* enable RGB24*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
++
++ udelay(DELAY_TIME1);
++ /* enable all error reporting*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
++ udelay(DELAY_TIME1);
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
++
++ udelay(DELAY_TIME1);
++ /* enable 2 data lane; video shaping & error reporting */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
++
++ udelay(DELAY_TIME1);
++ /* HS timeout */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
++
++ udelay(DELAY_TIME1);
++ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
++
++ /* enable all virtual channels */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
++
++ /* set output strength to low-drive */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
++
++ if (dev_priv->sku_83)
++ {
++ /* set escape clock to divide by 8 */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
++ }
++ else if(dev_priv->sku_100L)
++ {
++ /* set escape clock to divide by 16 */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
++ }
++ else if(dev_priv->sku_100)
++ {
++ /* set escape clock to divide by 32*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);
++
++ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);
++ }
++
++ /* CFG_VALID=1; RGB_CLK_EN=1. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
++
++}
++
++static void mrst_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 pfit_control;
++ u32 dsiFuncPrgValue = 0;
++ u32 SupportedFormat = 0;
++ u32 channelNumber = 0;
++ u32 DBI_dataWidth = 0;
++ u32 resolution = 0;
++ u32 mipiport = 0;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_mode_set \n");
++#endif /* PRINT_JLIU7 */
++
++ switch (dev_priv->bpp)
++ {
++ case 16:
++ SupportedFormat = RGB_565_FMT;
++ break;
++ case 18:
++ SupportedFormat = RGB_666_FMT;
++ break;
++ case 24:
++ SupportedFormat = RGB_888_FMT;
++ break;
++ default:
++ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
++ break;
++ }
++
++ resolution = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS);
++
++ if (dev_priv->dpi)
++ {
++ /* Enable automatic panel scaling for non-native modes so that they fill
++ * the screen. Should be enabled before the pipe is enabled, according to
++ * register description and PRM.
++ */
++ /*FIXME JLIU7, enable Auto-scale only */
++ /*
++ * Enable automatic panel scaling so that non-native modes fill the
++ * screen. Should be enabled before the pipe is enabled, according to
++ * register description and PRM.
++ */
++#if 0 /*JLIU7_PO */
++ if (mode->hdisplay != adjusted_mode->hdisplay ||
++ mode->vdisplay != adjusted_mode->vdisplay)
++ {
++ pfit_control = PFIT_ENABLE;
++ }
++ else
++#endif /*JLIU7_PO */
++ {
++ pfit_control = 0;
++ }
++ REG_WRITE(PFIT_CONTROL, pfit_control);
++
++ /* Enable MIPI Port */
++ mipiport = MIPI_PORT_EN;
++ REG_WRITE(MIPI, mipiport);
++
++ /* JLIU7_FIXME set MIPI clock ratio to 1:1 for NSC init */
++ REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
++
++ /* Enable all the error interrupt */
++ REG_WRITE(INTR_EN_REG, 0xffffffff);
++ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000F);
++ REG_WRITE(DEVICE_RESET_REG, 0x000000ff); /* old value = 0x00000015 may depends on the DSI RX device*/
++ REG_WRITE(INIT_COUNT_REG, 0x00000fff); /* Minimum value = 0x000007d0 */
++
++ SupportedFormat <<= FMT_DPI_POS;
++ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
++ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
++
++ REG_WRITE(DPI_RESOLUTION_REG, resolution);
++ REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);
++
++ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth);
++ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch);
++ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch);
++
++#if 1 /*JLIU7_PO hard coded for NSC PO */
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, 0x1e);
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, 0x18);
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, 0x8);
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, 0x4b0);
++#else /*JLIU7_PO hard coded for NSC PO */
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, GetHSA_Count(dev_priv));
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, GetHBP_Count(dev_priv));
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, GetHFP_Count(dev_priv));
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, GetHAdr_Count(dev_priv));
++#endif /*JLIU7_PO hard coded for NSC PO */
++ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
++ }
++ else
++ {
++ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or VIRTUAL_CHANNEL_NUMBER_0*/
++ channelNumber = VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS;
++ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS;
++ dsiFuncPrgValue = dev_priv->laneCount | channelNumber | DBI_dataWidth;
++ /* JLIU7 FIXME */
++ SupportedFormat <<= FMT_DBI_POS;
++ dsiFuncPrgValue |= SupportedFormat;
++ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
++
++ REG_WRITE(DPI_RESOLUTION_REG, 0x00000000);
++ REG_WRITE(DBI_RESOLUTION_REG, resolution);
++ }
++
++#if 1 /*JLIU7_PO hard code for NSC PO */
++ REG_WRITE(HS_TX_TIMEOUT_REG, 0xffff);
++ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
++
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
++#else /*JLIU7_PO hard code for NSC PO */
++ REG_WRITE(HS_TX_TIMEOUT_REG, GetHS_TX_timeoutCount(dev_priv));
++ REG_WRITE(LP_RX_TIMEOUT_REG, GetLP_RX_timeoutCount(dev_priv));
++
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, GetHighLowSwitchCount(dev_priv));
++#endif /*JLIU7_PO hard code for NSC PO */
++
++
++ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
++
++ /* FIXME JLIU7 for NSC PO */
++ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
++
++ REG_WRITE(DEVICE_READY_REG, 0x00000001);
++ REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */
++
++ dev_priv->dsi_device_ready = true;
++
++#if 0 /*JLIU7_PO */
++ mrst_init_HIMAX_MIPI_bridge(dev);
++#endif /*JLIU7_PO */
++ mrst_init_NSC_MIPI_bridge(dev);
++
++ if (dev_priv->sku_100L)
++ /* Set DSI link to 100MHz; 2:1 clock ratio */
++ REG_WRITE(MIPI_CONTROL_REG, 0x00000009);
++
++ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
++ REG_READ(PIPEACONF);
++
++ /* Wait for 20ms for the pipe enable to take effect. */
++ udelay(20000);
++
++ /* JLIU7_PO hard code for NSC PO Program the display FIFO watermarks */
++ REG_WRITE(DSPARB, 0x00001d9c);
++ REG_WRITE(DSPFW1, 0xfc0f0f18);
++ REG_WRITE(DSPFW5, 0x04140404);
++ REG_WRITE(DSPFW6, 0x000001f0);
++
++ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ udelay(20000);
++}
++
++/**
++ * Detect the MIPI connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the MIPI was actually connected anyway.
++ */
++static enum drm_connector_status mrst_dsi_detect(struct drm_connector
++ *connector)
++{
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_detect \n");
++#endif /* PRINT_JLIU7 */
++
++ return connector_status_connected;
++}
++
++/**
++ * Return the list of MIPI DDB modes if available.
++ */
++static int mrst_dsi_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++
++/* FIXME get the MIPI DDB modes */
++
++ /* Didn't get an DDB, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
++ .dpms = mrst_dsi_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = mrst_dsi_prepare,
++ .mode_set = mrst_dsi_mode_set,
++ .commit = mrst_dsi_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ mrst_dsi_connector_helper_funcs = {
++ .get_modes = mrst_dsi_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
++ .save = mrst_dsi_save,
++ .restore = mrst_dsi_restore,
++ .detect = mrst_dsi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revisit it. */
++struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
++{
++ struct drm_display_mode *mode;
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++#if 1 /*FIXME jliu7 remove it later */
++ /* copy from SV - hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 808;
++ mode->hsync_end = 848;
++ mode->htotal = 880;
++ mode->vsync_start = 482;
++ mode->vsync_end = 483;
++ mode->vtotal = 486;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 836;
++ mode->hsync_end = 846;
++ mode->htotal = 1056;
++ mode->vsync_start = 489;
++ mode->vsync_end = 491;
++ mode->vtotal = 525;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 800x480 */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 801;
++ mode->hsync_end = 802;
++ mode->htotal = 1024;
++ mode->vsync_start = 481;
++ mode->vsync_end = 482;
++ mode->vtotal = 525;
++ mode->clock = 30994;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1072;
++ mode->hsync_end = 1104;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1104;
++ mode->hsync_end = 1136;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1124;
++ mode->hsync_end = 1204;
++ mode->htotal = 1312;
++ mode->vsync_start = 607;
++ mode->vsync_end = 610;
++ mode->vtotal = 621;
++ mode->clock = 48885;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1024x768 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1048;
++ mode->hsync_end = 1184;
++ mode->htotal = 1344;
++ mode->vsync_start = 771;
++ mode->vsync_end = 777;
++ mode->vtotal = 806;
++ mode->clock = 65000;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1366x768 */
++ mode->hdisplay = 1366;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1430;
++ mode->hsync_end = 1558;
++ mode->htotal = 1664;
++ mode->vsync_start = 769;
++ mode->vsync_end = 770;
++ mode->vtotal = 776;
++ mode->clock = 77500;
++#endif /*FIXME jliu7 remove it later */
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrstDSI_clockInit
++ `
++DESCRIPTION:
++
++\* ************************************************************************* */
++static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
++static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
++static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
++#define MIPI_2XCLK_COUNT 0x04
++
++static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
++ u32 i = 0;
++ u32 *p_mipi_2xclk = NULL;
++
++ (void)GetHS_TX_timeoutCount;
++ (void)GetLP_RX_timeoutCount;
++ (void)GetHSA_Count;
++ (void)GetHBP_Count;
++ (void)GetHFP_Count;
++ (void)GetHAdr_Count;
++ (void)GetHighLowSwitchCount;
++ (void)mrst_init_HIMAX_MIPI_bridge;
++
++#if 0 /* JLIU7_PO old values */
++ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->pixelClock = 33264; /*KHz*/
++ dev_priv->HsyncWidth = 10;
++ dev_priv->HbackPorch = 210;
++ dev_priv->HfrontPorch = 36;
++ dev_priv->HactiveArea = 800;
++ dev_priv->VsyncWidth = 2;
++ dev_priv->VbackPorch = 34;
++ dev_priv->VfrontPorch = 9;
++ dev_priv->VactiveArea = 480;
++ dev_priv->bpp = 24;
++
++ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->dbi_pixelClock = 33264; /*KHz*/
++ dev_priv->dbi_HsyncWidth = 10;
++ dev_priv->dbi_HbackPorch = 210;
++ dev_priv->dbi_HfrontPorch = 36;
++ dev_priv->dbi_HactiveArea = 800;
++ dev_priv->dbi_VsyncWidth = 2;
++ dev_priv->dbi_VbackPorch = 34;
++ dev_priv->dbi_VfrontPorch = 9;
++ dev_priv->dbi_VactiveArea = 480;
++ dev_priv->dbi_bpp = 24;
++#else /* JLIU7_PO old values */
++ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
++ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */
++ dev_priv->pixelClock = 33264; /*KHz*/
++ dev_priv->HsyncWidth = 10;
++ dev_priv->HbackPorch = 8;
++ dev_priv->HfrontPorch = 3;
++ dev_priv->HactiveArea = 800;
++ dev_priv->VsyncWidth = 2;
++ dev_priv->VbackPorch = 3;
++ dev_priv->VfrontPorch = 2;
++ dev_priv->VactiveArea = 480;
++ dev_priv->bpp = 24;
++
++ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->dbi_pixelClock = 33264; /*KHz*/
++ dev_priv->dbi_HsyncWidth = 10;
++ dev_priv->dbi_HbackPorch = 8;
++ dev_priv->dbi_HfrontPorch = 3;
++ dev_priv->dbi_HactiveArea = 800;
++ dev_priv->dbi_VsyncWidth = 2;
++ dev_priv->dbi_VbackPorch = 3;
++ dev_priv->dbi_VfrontPorch = 2;
++ dev_priv->dbi_VactiveArea = 480;
++ dev_priv->dbi_bpp = 24;
++#endif /* JLIU7_PO old values */
++
++ Htotal = dev_priv->HsyncWidth + dev_priv->HbackPorch + dev_priv->HfrontPorch + dev_priv->HactiveArea;
++ Vtotal = dev_priv->VsyncWidth + dev_priv->VbackPorch + dev_priv->VfrontPorch + dev_priv->VactiveArea;
++
++ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
++
++ dev_priv->RRate = RRate;
++
++ /* ddr clock frequency = (pixel clock frequency * bits per pixel)/2*/
++ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) / dev_priv->laneCount; /* KHz */
++ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
++
++ DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n", RRate, mipi_2xclk);
++
++ if (dev_priv->sku_100)
++ {
++ p_mipi_2xclk = sku_100_mipi_2xclk;
++ }
++ else if (dev_priv->sku_100L)
++ {
++ p_mipi_2xclk = sku_100L_mipi_2xclk;
++ }
++ else
++ {
++ p_mipi_2xclk = sku_83_mipi_2xclk;
++ }
++
++ for (; i < MIPI_2XCLK_COUNT; i++)
++ {
++ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
++ break;
++ }
++
++ if (i == MIPI_2XCLK_COUNT)
++ {
++ DRM_DEBUG("mrstDSI_clockInit the DDR clock is too big, DDR_Clock_Calculated is = %d\n", dev_priv->DDR_Clock_Calculated);
++ return false;
++ }
++
++ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
++ dev_priv->ClockBits = i;
++
++#if 0 /*JLIU7_PO */
++#if 0 /* FIXME remove it after power on*/
++ mipiControlReg = REG_READ(MIPI_CONTROL_REG) & (~MIPI_2X_CLOCK_BITS);
++ mipiControlReg |= i;
++ REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
++#else /* FIXME remove it after power on*/
++ mipiControlReg |= i;
++ REG_WRITE(MIPI_CONTROL_REG, mipiControlReg);
++#endif /* FIXME remove it after power on*/
++#endif /*JLIU7_PO */
++
++#if 1 /* FIXME remove it after power on*/
++ DRM_DEBUG("mrstDSI_clockInit, mipi_2x_clock_divider = 0x%x, DDR_Clock_Calculated is = %d\n", i, dev_priv->DDR_Clock_Calculated);
++#endif /* FIXME remove it after power on*/
++
++ return true;
++}
++
++/**
++ * mrst_dsi_init - setup MIPI connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, try to figure out what
++ * modes we can display on the MIPI panel (if present).
++ */
++void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_init \n");
++#endif /* PRINT_JLIU7 */
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &mrst_dsi_connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_MIPI;
++
++ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
++ drm_connector_helper_add(connector,
++ &mrst_dsi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
++ blc_pol = BLC_POLARITY_INVERSE;
++ blc_freq = 0xc8;
++
++ /*
++ * MIPI discovery:
++ * 1) check for DDB data
++ * 2) check for VBT data
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* FIXME jliu7 we only support DPI */
++ dev_priv->dpi = true;
++
++ /* FIXME hard coded 4 lanes for Himax HX8858-A, 2 lanes for NSC LM2550 */
++ dev_priv->laneCount = 2;
++
++ /* FIXME hard coded for NSC PO. */
++ /* We only support BURST_MODE */
++ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS; /* BURST_MODE */
++ /* FIXME change it to true if GET_DDB works */
++ dev_priv->config_phase = false;
++
++ if (!mrstDSI_clockInit(dev_priv))
++ {
++ DRM_DEBUG("Can't iniitialize MRST DSI clock.\n");
++#if 0 /* FIXME JLIU7 */
++ goto failed_find;
++#endif /* FIXME JLIU7 */
++ }
++
++ /*
++ * If we didn't get DDB data, try getting panel timing
++ * from configuration data
++ */
++ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
++
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No MIIP modes found, disabling.\n");
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_i2c.c b/drivers/gpu/drm/psb/psb_intel_i2c.c
+--- a/drivers/gpu/drm/psb/psb_intel_i2c.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_i2c.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,179 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++/*
++ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++
++/*
++ * Intel GPIO access functions
++ */
++
++#define I2C_RISEFALL_TIME 20
++
++static int get_clock(void *data)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 val;
++
++ val = REG_READ(chan->reg);
++ return (val & GPIO_CLOCK_VAL_IN) != 0;
++}
++
++static int get_data(void *data)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 val;
++
++ val = REG_READ(chan->reg);
++ return (val & GPIO_DATA_VAL_IN) != 0;
++}
++
++static void set_clock(void *data, int state_high)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 reserved = 0, clock_bits;
++
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved =
++ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ if (state_high)
++ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
++ else
++ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
++ GPIO_CLOCK_VAL_MASK;
++ REG_WRITE(chan->reg, reserved | clock_bits);
++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++}
++
++static void set_data(void *data, int state_high)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 reserved = 0, data_bits;
++
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved =
++ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ if (state_high)
++ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
++ else
++ data_bits =
++ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
++ GPIO_DATA_VAL_MASK;
++
++ REG_WRITE(chan->reg, reserved | data_bits);
++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++}
++
++/**
++ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
++ * @dev: DRM device
++ * @output: driver specific output device
++ * @reg: GPIO reg to use
++ * @name: name for this bus
++ *
++ * Creates and registers a new i2c bus with the Linux i2c layer, for use
++ * in output probing and control (e.g. DDC or SDVO control functions).
++ *
++ * Possible values for @reg include:
++ * %GPIOA
++ * %GPIOB
++ * %GPIOC
++ * %GPIOD
++ * %GPIOE
++ * %GPIOF
++ * %GPIOG
++ * %GPIOH
++ * see PRM for details on how these different busses are used.
++ */
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++ const u32 reg, const char *name)
++{
++ struct psb_intel_i2c_chan *chan;
++
++ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
++ if (!chan)
++ goto out_free;
++
++ chan->drm_dev = dev;
++ chan->reg = reg;
++ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
++ chan->adapter.owner = THIS_MODULE;
++ chan->adapter.algo_data = &chan->algo;
++ chan->adapter.dev.parent = &dev->pdev->dev;
++ chan->algo.setsda = set_data;
++ chan->algo.setscl = set_clock;
++ chan->algo.getsda = get_data;
++ chan->algo.getscl = get_clock;
++ chan->algo.udelay = 20;
++ chan->algo.timeout = usecs_to_jiffies(2200);
++ chan->algo.data = chan;
++
++ i2c_set_adapdata(&chan->adapter, chan);
++
++ if (i2c_bit_add_bus(&chan->adapter))
++ goto out_free;
++
++ /* JJJ: raise SCL and SDA? */
++ set_data(chan, 1);
++ set_clock(chan, 1);
++ udelay(20);
++
++ return chan;
++
++out_free:
++ kfree(chan);
++ return NULL;
++}
++
++/**
++ * psb_intel_i2c_destroy - unregister and free i2c bus resources
++ * @output: channel to free
++ *
++ * Unregister the adapter from the i2c layer, then free the structure.
++ */
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
++{
++ if (!chan)
++ return;
++
++ i2c_del_adapter(&chan->adapter);
++ kfree(chan);
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_lvds.c b/drivers/gpu/drm/psb/psb_intel_lvds.c
+--- a/drivers/gpu/drm/psb/psb_intel_lvds.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_lvds.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1015 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ * Dave Airlie <airlied@linux.ie>
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++/* MRST defines start */
++uint8_t blc_type;
++uint8_t blc_pol;
++uint8_t blc_freq;
++uint8_t blc_minbrightness;
++uint8_t blc_i2caddr;
++uint8_t blc_brightnesscmd;
++int lvds_backlight; /* restore backlight to this value */
++
++u32 CoreClock;
++u32 PWMControlRegFreq;
++/* MRST defines end */
++
++/**
++ * Sets the backlight level.
++ *
++ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
++ */
++static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
++{
++ u32 blc_pwm_ctl;
++
++ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
++ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
++ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
++}
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
++{
++ return ((REG_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
++ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void psb_intel_lvds_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ u32 pp_status;
++
++ if (on) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++
++ psb_intel_lvds_set_backlight(dev,
++ output->
++ mode_dev->backlight_duty_cycle);
++ } else {
++ psb_intel_lvds_set_backlight(dev, 0);
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++}
++
++static void psb_intel_lvds_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++ if (mode == DRM_MODE_DPMS_ON)
++ psb_intel_lvds_set_power(dev, output, true);
++ else
++ psb_intel_lvds_set_power(dev, output, false);
++
++ /* XXX: We never power down the LVDS pairs. */
++}
++
++static void psb_intel_lvds_save(struct drm_connector *connector)
++{
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++
++ dev_priv->savePP_ON = REG_READ(PP_ON_DELAYS);
++ dev_priv->savePP_OFF = REG_READ(PP_OFF_DELAYS);
++ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++ dev_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);
++ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ /*
++ * If the light is off at server startup, just make it full brightness
++ */
++ if (dev_priv->backlight_duty_cycle == 0)
++ dev_priv->backlight_duty_cycle =
++ psb_intel_lvds_get_max_backlight(dev);
++#endif
++}
++
++static void psb_intel_lvds_restore(struct drm_connector *connector)
++{
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++
++ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
++ REG_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON);
++ REG_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF);
++ REG_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
++ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
++ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
++ psb_intel_lvds_set_power(dev, true);
++ else
++ psb_intel_lvds_set_power(dev, false);
++#endif
++}
++
++static int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct drm_display_mode *fixed_mode =
++ psb_intel_output->mode_dev->panel_fixed_mode;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n");
++#endif /* PRINT_JLIU7 */
++
++ if (fixed_mode) {
++ if (mode->hdisplay > fixed_mode->hdisplay)
++ return MODE_PANEL;
++ if (mode->vdisplay > fixed_mode->vdisplay)
++ return MODE_PANEL;
++ }
++ return MODE_OK;
++}
++
++static bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
++ struct drm_encoder *tmp_encoder;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n");
++#endif /* PRINT_JLIU7 */
++
++ /* Should never happen!! */
++ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
++ printk(KERN_ERR
++ "Can't support LVDS/MIPI on pipe B on MRST\n");
++ return false;
++ } else if (!IS_MRST(dev) && !IS_I965G(dev)
++ && psb_intel_crtc->pipe == 0) {
++ printk(KERN_ERR "Can't support LVDS on pipe A\n");
++ return false;
++ }
++ /* Should never happen!! */
++ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
++ head) {
++ if (tmp_encoder != encoder
++ && tmp_encoder->crtc == encoder->crtc) {
++ printk(KERN_ERR "Can't enable LVDS and another "
++ "encoder on the same pipe\n");
++ return false;
++ }
++ }
++
++ /*
++ * If we have timings from the BIOS for the panel, put them in
++ * to the adjusted mode. The CRTC will be set up for this mode,
++ * with the panel scaling set up to source from the H/VDisplay
++ * of the original mode.
++ */
++ if (mode_dev->panel_fixed_mode != NULL) {
++ adjusted_mode->hdisplay =
++ mode_dev->panel_fixed_mode->hdisplay;
++ adjusted_mode->hsync_start =
++ mode_dev->panel_fixed_mode->hsync_start;
++ adjusted_mode->hsync_end =
++ mode_dev->panel_fixed_mode->hsync_end;
++ adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal;
++ adjusted_mode->vdisplay =
++ mode_dev->panel_fixed_mode->vdisplay;
++ adjusted_mode->vsync_start =
++ mode_dev->panel_fixed_mode->vsync_start;
++ adjusted_mode->vsync_end =
++ mode_dev->panel_fixed_mode->vsync_end;
++ adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal;
++ adjusted_mode->clock = mode_dev->panel_fixed_mode->clock;
++ drm_mode_set_crtcinfo(adjusted_mode,
++ CRTC_INTERLACE_HALVE_V);
++ }
++
++ /*
++ * XXX: It would be nice to support lower refresh rates on the
++ * panels to reduce power consumption, and perhaps match the
++ * user's requested refresh rate.
++ */
++
++ return true;
++}
++
++static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n");
++#endif /* PRINT_JLIU7 */
++
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ psb_intel_lvds_set_power(dev, output, false);
++}
++
++static void psb_intel_lvds_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ psb_intel_lvds_get_max_backlight(dev);
++
++ psb_intel_lvds_set_power(dev, output, true);
++}
++
++static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(encoder->crtc);
++ u32 pfit_control;
++
++ /*
++ * The LVDS pin pair will already have been turned on in the
++ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++ * settings.
++ */
++
++ /*
++ * Enable automatic panel scaling so that non-native modes fill the
++ * screen. Should be enabled before the pipe is enabled, according to
++ * register description and PRM.
++ */
++ if (mode->hdisplay != adjusted_mode->hdisplay ||
++ mode->vdisplay != adjusted_mode->vdisplay)
++ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
++ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
++ HORIZ_INTERP_BILINEAR);
++ else
++ pfit_control = 0;
++
++ if (!IS_I965G(dev)) {
++ if (mode_dev->panel_wants_dither)
++ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
++ } else
++ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
++
++ REG_WRITE(PFIT_CONTROL, pfit_control);
++}
++
++/**
++ * Detect the LVDS connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the LVDS was actually connected anyway.
++ */
++static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
++ *connector)
++{
++ return connector_status_connected;
++}
++
++/**
++ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
++ */
++static int psb_intel_lvds_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++ int ret = 0;
++
++ if (!IS_MRST(dev))
++ ret = psb_intel_ddc_get_modes(psb_intel_output);
++
++ if (ret)
++ return ret;
++
++ /* Didn't get an EDID, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++/**
++ * psb_intel_lvds_destroy - unregister and free LVDS structures
++ * @connector: connector to free
++ *
++ * Unregister the DDC bus for this connector then free the driver private
++ * structure.
++ */
++static void psb_intel_lvds_destroy(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
++ .dpms = psb_intel_lvds_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = psb_intel_lvds_prepare,
++ .mode_set = psb_intel_lvds_mode_set,
++ .commit = psb_intel_lvds_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ psb_intel_lvds_connector_helper_funcs = {
++ .get_modes = psb_intel_lvds_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
++ .save = psb_intel_lvds_save,
++ .restore = psb_intel_lvds_restore,
++ .detect = psb_intel_lvds_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++
++static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
++{
++ drm_encoder_cleanup(encoder);
++}
++
++static const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
++ .destroy = psb_intel_lvds_enc_destroy,
++};
++
++
++
++/**
++ * psb_intel_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void psb_intel_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct drm_display_mode *scan; /* *modes, *bios_mode; */
++ struct drm_crtc *crtc;
++ u32 lvds;
++ int pipe;
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &psb_intel_lvds_connector_funcs,
++ DRM_MODE_CONNECTOR_LVDS);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_LVDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_LVDS;
++
++ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
++ drm_connector_helper_add(connector,
++ &psb_intel_lvds_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++
++ /*
++ * LVDS discovery:
++ * 1) check for EDID on DDC
++ * 2) check for VBT data
++ * 3) check to see if LVDS is already on
++ * if none of the above, no panel
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* Set up the DDC bus. */
++ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
++ if (!psb_intel_output->ddc_bus) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "DDC bus registration " "failed.\n");
++ goto failed_ddc;
++ }
++
++ /*
++ * Attempt to get the fixed panel mode from DDC. Assume that the
++ * preferred mode is the right one.
++ */
++ psb_intel_ddc_get_modes(psb_intel_output);
++ list_for_each_entry(scan, &connector->probed_modes, head) {
++ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, scan);
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /* Failed to get EDID, what about VBT? */
++ if (mode_dev->vbt_mode)
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, mode_dev->vbt_mode);
++
++ /*
++ * If we didn't get EDID, try checking if the panel is already turned
++ * on. If so, assume that whatever is currently programmed is the
++ * correct mode.
++ */
++ lvds = REG_READ(LVDS);
++ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
++ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
++
++ if (crtc && (lvds & LVDS_PORT_EN)) {
++ mode_dev->panel_fixed_mode =
++ psb_intel_crtc_mode_get(dev, crtc);
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ /* FIXME: detect aopen & mac mini type stuff automatically? */
++ /*
++ * Blacklist machines with BIOSes that list an LVDS panel without
++ * actually having one.
++ */
++ if (IS_I945GM(dev)) {
++ /* aopen mini pc */
++ if (dev->pdev->subsystem_vendor == 0xa0a0) {
++ DRM_DEBUG
++ ("Suspected AOpen Mini PC, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ if ((dev->pdev->subsystem_vendor == 0x8086) &&
++ (dev->pdev->subsystem_device == 0x7270)) {
++ /* It's a Mac Mini or Macbook Pro. */
++
++ if (mode_dev->panel_fixed_mode != NULL &&
++ mode_dev->panel_fixed_mode->hdisplay == 800 &&
++ mode_dev->panel_fixed_mode->vdisplay == 600) {
++ DRM_DEBUG
++ ("Suspected Mac Mini, ignoring the LVDS\n");
++ goto failed_find;
++ }
++ }
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++
++#if PRINT_JLIU7
++ DRM_INFO("PRINT_JLIU7 hdisplay = %d\n",
++ mode_dev->panel_fixed_mode->hdisplay);
++ DRM_INFO("PRINT_JLIU7 vdisplay = %d\n",
++ mode_dev->panel_fixed_mode->vdisplay);
++ DRM_INFO("PRINT_JLIU7 hsync_start = %d\n",
++ mode_dev->panel_fixed_mode->hsync_start);
++ DRM_INFO("PRINT_JLIU7 hsync_end = %d\n",
++ mode_dev->panel_fixed_mode->hsync_end);
++ DRM_INFO("PRINT_JLIU7 htotal = %d\n",
++ mode_dev->panel_fixed_mode->htotal);
++ DRM_INFO("PRINT_JLIU7 vsync_start = %d\n",
++ mode_dev->panel_fixed_mode->vsync_start);
++ DRM_INFO("PRINT_JLIU7 vsync_end = %d\n",
++ mode_dev->panel_fixed_mode->vsync_end);
++ DRM_INFO("PRINT_JLIU7 vtotal = %d\n",
++ mode_dev->panel_fixed_mode->vtotal);
++ DRM_INFO("PRINT_JLIU7 clock = %d\n",
++ mode_dev->panel_fixed_mode->clock);
++#endif /* PRINT_JLIU7 */
++ return;
++
++failed_find:
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++failed_ddc:
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++/* MRST platform start */
++
++/*
++ * FIXME: these definitions should move to a register definition header file
++ */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
++
++/* The max/min PWM frequency in BPCR[31:17] - */
++/* The smallest number is 1 (not 0) that can fit in the
++ * 15-bit field of BPCR, which is then */
++/* shifted to the left by one bit to get the actual 16-bit
++ * value that the 15 bits correspond to. */
++#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
++
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++/**
++ * Calculate PWM control register value.
++ */
++static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
++{
++ unsigned long value = 0;
++ if (blc_freq == 0) {
++ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
++ * Frequency Requested is 0.\n"); */
++ return false;
++ }
++
++ value = (CoreClock * MHz);
++ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
++ value = (value * BLC_PWM_PRECISION_FACTOR);
++ value = (value / blc_freq);
++ value = (value / BLC_PWM_PRECISION_FACTOR);
++
++ if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) {
++ return 0;
++ } else {
++ PWMControlRegFreq = (u32) value;
++ return 1;
++ }
++}
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 mrst_lvds_get_PWM_ctrl_freq(struct drm_device *dev)
++{
++ u32 max_pwm_blc = 0;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_get_PWM_ctrl_freq \n");
++#endif /* PRINT_JLIU7 */
++
++/*FIXME JLIU7 get the PWM frequency from configuration */
++
++ max_pwm_blc =
++ (REG_READ(BLC_PWM_CTL) & MRST_BACKLIGHT_MODULATION_FREQ_MASK)
++ >> MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
++
++
++ if (!max_pwm_blc) {
++ if (mrstLVDSCalculatePWMCtrlRegFreq(dev))
++ max_pwm_blc = PWMControlRegFreq;
++ }
++
++ return max_pwm_blc;
++}
++
++/**
++ * Sets the backlight level.
++ *
++ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
++ */
++static void mrst_lvds_set_backlight(struct drm_device *dev, int level)
++{
++ u32 blc_pwm_ctl;
++ u32 max_pwm_blc;
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_set_backlight \n");
++#endif /* PRINT_JLIU7 */
++
++#if 1 /* FIXME JLIU7 */
++ return;
++#endif /* FIXME JLIU7 */
++
++	/* Prevent the LVDS panel from going totally black */
++ if (level < 20)
++ level = 20;
++
++ max_pwm_blc = mrst_lvds_get_PWM_ctrl_freq(dev);
++
++ if (max_pwm_blc == 0)
++ return;
++
++ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ if (blc_pol == BLC_POLARITY_INVERSE)
++ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
++
++ REG_WRITE(BLC_PWM_CTL,
++ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
++ blc_pwm_ctl);
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_lvds_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ u32 pp_status;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_set_power \n");
++#endif /* PRINT_JLIU7 */
++
++ if (on) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
++
++ mrst_lvds_set_backlight(dev, lvds_backlight);
++ } else {
++ mrst_lvds_set_backlight(dev, 0);
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++}
++
++static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_dpms \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_lvds_set_power(dev, output, true);
++ else
++ mrst_lvds_set_power(dev, output, false);
++
++ /* XXX: We never power down the LVDS pairs. */
++}
++
++static void mrst_lvds_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ u32 pfit_control;
++ u32 lvds_port;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n");
++#endif /* PRINT_JLIU7 */
++
++ /*
++ * The LVDS pin pair will already have been turned on in the
++ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++ * settings.
++ */
++ /*FIXME JLIU7 Get panel power delay parameters from config data */
++ REG_WRITE(0x61208, 0x25807d0);
++ REG_WRITE(0x6120c, 0x1f407d0);
++ REG_WRITE(0x61210, 0x270f04);
++
++ lvds_port = (REG_READ(LVDS) & (~LVDS_PIPEB_SELECT)) | LVDS_PORT_EN;
++
++ if (mode_dev->panel_wants_dither)
++ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
++
++ REG_WRITE(LVDS, lvds_port);
++
++ /*
++ * Enable automatic panel scaling so that non-native modes fill the
++ * screen. Should be enabled before the pipe is enabled, according to
++ * register description and PRM.
++ */
++ if (mode->hdisplay != adjusted_mode->hdisplay ||
++ mode->vdisplay != adjusted_mode->vdisplay)
++ pfit_control = PFIT_ENABLE;
++ else
++ pfit_control = 0;
++
++ REG_WRITE(PFIT_CONTROL, pfit_control);
++}
++
++
++static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
++ .dpms = mrst_lvds_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = psb_intel_lvds_prepare,
++ .mode_set = mrst_lvds_mode_set,
++ .commit = psb_intel_lvds_commit,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revisit it. */
++struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device
++ *dev)
++{
++ struct drm_display_mode *mode;
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 836;
++ mode->hsync_end = 846;
++ mode->htotal = 1056;
++ mode->vsync_start = 489;
++ mode->vsync_end = 491;
++ mode->vtotal = 525;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 800x480 */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 801;
++ mode->hsync_end = 802;
++ mode->htotal = 1024;
++ mode->vsync_start = 481;
++ mode->vsync_end = 482;
++ mode->vtotal = 525;
++ mode->clock = 30994;
++#endif /*FIXME jliu7 remove it later */
++
++#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1072;
++ mode->hsync_end = 1104;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1104;
++ mode->hsync_end = 1136;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1124;
++ mode->hsync_end = 1204;
++ mode->htotal = 1312;
++ mode->vsync_start = 607;
++ mode->vsync_end = 610;
++ mode->vtotal = 621;
++ mode->clock = 48885;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1024x768 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1048;
++ mode->hsync_end = 1184;
++ mode->htotal = 1344;
++ mode->vsync_start = 771;
++ mode->vsync_end = 777;
++ mode->vtotal = 806;
++ mode->clock = 65000;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1366x768 */
++ mode->hdisplay = 1366;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1430;
++ mode->hsync_end = 1558;
++ mode->htotal = 1664;
++ mode->vsync_start = 769;
++ mode->vsync_end = 770;
++ mode->vtotal = 776;
++ mode->clock = 77500;
++#endif /*FIXME jliu7 remove it later */
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/**
++ * mrst_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void mrst_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++#if MRST_I2C
++ struct drm_display_mode *scan; /* *modes, *bios_mode; */
++#endif
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_init \n");
++#endif /* PRINT_JLIU7 */
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &psb_intel_lvds_connector_funcs,
++ DRM_MODE_CONNECTOR_LVDS);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_LVDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_LVDS;
++
++ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
++ drm_connector_helper_add(connector,
++ &psb_intel_lvds_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
++
++ /*
++ * LVDS discovery:
++ * 1) check for EDID on DDC
++ * 2) check for VBT data
++ * 3) check to see if LVDS is already on
++ * if none of the above, no panel
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++#if MRST_I2C
++ /* Set up the DDC bus. */
++ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
++ if (!psb_intel_output->ddc_bus) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "DDC bus registration " "failed.\n");
++ goto failed_ddc;
++ }
++
++ /*
++ * Attempt to get the fixed panel mode from DDC. Assume that the
++ * preferred mode is the right one.
++ */
++ psb_intel_ddc_get_modes(psb_intel_output);
++ list_for_each_entry(scan, &connector->probed_modes, head) {
++ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, scan);
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++#endif /* MRST_I2C */
++
++ /*
++	 * If we didn't get EDID, try getting the panel timing
++ * from configuration data
++ */
++ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
++
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No LVDS modes found, disabling.\n");
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++#if MRST_I2C
++failed_ddc:
++#endif
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++/* MRST platform end */
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_modes.c b/drivers/gpu/drm/psb/psb_intel_modes.c
+--- a/drivers/gpu/drm/psb/psb_intel_modes.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_modes.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,64 @@
++/*
++ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
++ * Copyright (c) 2007 Intel Corporation
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <linux/fb.h>
++#include <drm/drmP.h>
++#include "psb_intel_drv.h"
++
++/**
++ * psb_intel_ddc_probe
++ *
++ */
++bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
++{
++ u8 out_buf[] = { 0x0, 0x0 };
++ u8 buf[2];
++ int ret;
++ struct i2c_msg msgs[] = {
++ {
++ .addr = 0x50,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = 0x50,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = buf,
++ }
++ };
++
++ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
++ if (ret == 2)
++ return true;
++
++ return false;
++}
++
++/**
++ * psb_intel_ddc_get_modes - get modelist from monitor
++ * @connector: DRM connector device to use
++ *
++ * Fetch the EDID information from @connector using the DDC bus.
++ */
++int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
++{
++ struct edid *edid;
++ int ret = 0;
++
++ edid =
++ drm_get_edid(&psb_intel_output->base,
++ &psb_intel_output->ddc_bus->adapter);
++ if (edid) {
++ drm_mode_connector_update_edid_property(&psb_intel_output->
++ base, edid);
++ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
++ kfree(edid);
++ }
++ return ret;
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_reg.h b/drivers/gpu/drm/psb/psb_intel_reg.h
+--- a/drivers/gpu/drm/psb/psb_intel_reg.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_reg.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,972 @@
++#define BLC_PWM_CTL 0x61254
++#define BLC_PWM_CTL2 0x61250
++#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
++/**
++ * This is the most significant 15 bits of the number of backlight cycles in a
++ * complete cycle of the modulated backlight control.
++ *
++ * The actual value is this field multiplied by two.
++ */
++#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
++#define BLM_LEGACY_MODE (1 << 16)
++/**
++ * This is the number of cycles out of the backlight modulation cycle for which
++ * the backlight is on.
++ *
++ * This field must be no greater than the number of cycles in the complete
++ * backlight modulation cycle.
++ */
++#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
++#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
++
++#define I915_GCFGC 0xf0
++#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
++#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
++#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
++#define I915_DISPLAY_CLOCK_MASK (7 << 4)
++
++#define I855_HPLLCC 0xc0
++#define I855_CLOCK_CONTROL_MASK (3 << 0)
++#define I855_CLOCK_133_200 (0 << 0)
++#define I855_CLOCK_100_200 (1 << 0)
++#define I855_CLOCK_100_133 (2 << 0)
++#define I855_CLOCK_166_250 (3 << 0)
++
++/* I830 CRTC registers */
++#define HTOTAL_A 0x60000
++#define HBLANK_A 0x60004
++#define HSYNC_A 0x60008
++#define VTOTAL_A 0x6000c
++#define VBLANK_A 0x60010
++#define VSYNC_A 0x60014
++#define PIPEASRC 0x6001c
++#define BCLRPAT_A 0x60020
++#define VSYNCSHIFT_A 0x60028
++
++#define HTOTAL_B 0x61000
++#define HBLANK_B 0x61004
++#define HSYNC_B 0x61008
++#define VTOTAL_B 0x6100c
++#define VBLANK_B 0x61010
++#define VSYNC_B 0x61014
++#define PIPEBSRC 0x6101c
++#define BCLRPAT_B 0x61020
++#define VSYNCSHIFT_B 0x61028
++
++#define PP_STATUS 0x61200
++# define PP_ON (1 << 31)
++/**
++ * Indicates that all dependencies of the panel are on:
++ *
++ * - PLL enabled
++ * - pipe enabled
++ * - LVDS/DVOB/DVOC on
++ */
++# define PP_READY (1 << 30)
++# define PP_SEQUENCE_NONE (0 << 28)
++# define PP_SEQUENCE_ON (1 << 28)
++# define PP_SEQUENCE_OFF (2 << 28)
++# define PP_SEQUENCE_MASK 0x30000000
++#define PP_CONTROL 0x61204
++# define POWER_TARGET_ON (1 << 0)
++
++#define LVDSPP_ON 0x61208
++#define LVDSPP_OFF 0x6120c
++#define PP_CYCLE 0x61210
++
++#define PFIT_CONTROL 0x61230
++# define PFIT_ENABLE (1 << 31)
++# define PFIT_PIPE_MASK (3 << 29)
++# define PFIT_PIPE_SHIFT 29
++# define VERT_INTERP_DISABLE (0 << 10)
++# define VERT_INTERP_BILINEAR (1 << 10)
++# define VERT_INTERP_MASK (3 << 10)
++# define VERT_AUTO_SCALE (1 << 9)
++# define HORIZ_INTERP_DISABLE (0 << 6)
++# define HORIZ_INTERP_BILINEAR (1 << 6)
++# define HORIZ_INTERP_MASK (3 << 6)
++# define HORIZ_AUTO_SCALE (1 << 5)
++# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
++
++#define PFIT_PGM_RATIOS 0x61234
++# define PFIT_VERT_SCALE_MASK 0xfff00000
++# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
++
++#define PFIT_AUTO_RATIOS 0x61238
++
++
++#define DPLL_A 0x06014
++#define DPLL_B 0x06018
++# define DPLL_VCO_ENABLE (1 << 31)
++# define DPLL_DVO_HIGH_SPEED (1 << 30)
++# define DPLL_SYNCLOCK_ENABLE (1 << 29)
++# define DPLL_VGA_MODE_DIS (1 << 28)
++# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
++# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
++# define DPLL_MODE_MASK (3 << 26)
++# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
++# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
++# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
++# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
++# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
++# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
++/**
++ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
++ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
++/**
++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
++ * this field (only one bit may be set).
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
++# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
++# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
++ * in DVO non-gang */
++# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
++# define PLL_REF_INPUT_DREFCLK (0 << 13)
++# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
++# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
++ * TVCLKIN */
++# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
++# define PLL_REF_INPUT_MASK (3 << 13)
++# define PLL_LOAD_PULSE_PHASE_SHIFT 9
++/*
++ * Parallel to Serial Load Pulse phase selection.
++ * Selects the phase for the 10X DPLL clock for the PCIe
++ * digital display port. The range is 4 to 13; 10 or more
++ * is just a flip delay. The default is 6
++ */
++# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
++# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
++
++/**
++ * SDVO multiplier for 945G/GM. Not used on 965.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++# define SDVO_MULTIPLIER_MASK 0x000000ff
++# define SDVO_MULTIPLIER_SHIFT_HIRES 4
++# define SDVO_MULTIPLIER_SHIFT_VGA 0
++
++/** @defgroup DPLL_MD
++ * @{
++ */
++/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_A_MD 0x0601c
++/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_B_MD 0x06020
++/**
++ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
++ *
++ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
++ */
++# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
++# define DPLL_MD_UDI_DIVIDER_SHIFT 24
++/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
++# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
++# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
++/**
++ * SDVO/UDI pixel multiplier.
++ *
++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
++ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
++ * dummy bytes in the datastream at an increased clock rate, with both sides of
++ * the link knowing how many bytes are fill.
++ *
++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
++ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
++ * through an SDVO command.
++ *
++ * This register field has values of multiplication factor minus 1, with
++ * a maximum multiplier of 5 for SDVO.
++ */
++# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
++# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
++/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
++ * This best be set to the default value (3) or the CRT won't work. No,
++ * I don't entirely understand what this does...
++ */
++# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
++# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
++/** @} */
++
++#define DPLL_TEST 0x606c
++# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
++# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
++# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
++# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
++# define DPLLB_TEST_N_BYPASS (1 << 19)
++# define DPLLB_TEST_M_BYPASS (1 << 18)
++# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
++# define DPLLA_TEST_N_BYPASS (1 << 3)
++# define DPLLA_TEST_M_BYPASS (1 << 2)
++# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
++
++#define ADPA 0x61100
++#define ADPA_DAC_ENABLE (1<<31)
++#define ADPA_DAC_DISABLE 0
++#define ADPA_PIPE_SELECT_MASK (1<<30)
++#define ADPA_PIPE_A_SELECT 0
++#define ADPA_PIPE_B_SELECT (1<<30)
++#define ADPA_USE_VGA_HVPOLARITY (1<<15)
++#define ADPA_SETS_HVPOLARITY 0
++#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
++#define ADPA_VSYNC_CNTL_ENABLE 0
++#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
++#define ADPA_HSYNC_CNTL_ENABLE 0
++#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
++#define ADPA_VSYNC_ACTIVE_LOW 0
++#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
++#define ADPA_HSYNC_ACTIVE_LOW 0
++
++#define FPA0 0x06040
++#define FPA1 0x06044
++#define FPB0 0x06048
++#define FPB1 0x0604c
++# define FP_N_DIV_MASK 0x003f0000
++# define FP_N_DIV_SHIFT 16
++# define FP_M1_DIV_MASK 0x00003f00
++# define FP_M1_DIV_SHIFT 8
++# define FP_M2_DIV_MASK 0x0000003f
++# define FP_M2_DIV_SHIFT 0
++
++
++#define PORT_HOTPLUG_EN 0x61110
++# define SDVOB_HOTPLUG_INT_EN (1 << 26)
++# define SDVOC_HOTPLUG_INT_EN (1 << 25)
++# define TV_HOTPLUG_INT_EN (1 << 18)
++# define CRT_HOTPLUG_INT_EN (1 << 9)
++# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
++
++#define PORT_HOTPLUG_STAT 0x61114
++# define CRT_HOTPLUG_INT_STATUS (1 << 11)
++# define TV_HOTPLUG_INT_STATUS (1 << 10)
++# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
++# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
++# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
++# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
++# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
++# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
++
++#define SDVOB 0x61140
++#define SDVOC 0x61160
++#define SDVO_ENABLE (1 << 31)
++#define SDVO_PIPE_B_SELECT (1 << 30)
++#define SDVO_STALL_SELECT (1 << 29)
++#define SDVO_INTERRUPT_ENABLE (1 << 26)
++/**
++ * 915G/GM SDVO pixel multiplier.
++ *
++ * Programmed value is multiplier - 1, up to 5x.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
++#define SDVO_PORT_MULTIPLY_SHIFT 23
++#define SDVO_PHASE_SELECT_MASK (15 << 19)
++#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
++#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
++#define SDVOC_GANG_MODE (1 << 16)
++#define SDVO_BORDER_ENABLE (1 << 7)
++#define SDVOB_PCIE_CONCURRENCY (1 << 3)
++#define SDVO_DETECTED (1 << 2)
++/* Bits to be preserved when writing */
++#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
++#define SDVOC_PRESERVE_MASK (1 << 17)
++
++/** @defgroup LVDS
++ * @{
++ */
++/**
++ * This register controls the LVDS output enable, pipe selection, and data
++ * format selection.
++ *
++ * All of the clock/data pairs are force powered down by power sequencing.
++ */
++#define LVDS 0x61180
++/**
++ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
++ * the DPLL semantics change when the LVDS is assigned to that pipe.
++ */
++# define LVDS_PORT_EN (1 << 31)
++/** Selects pipe B for LVDS data. Must be set on pre-965. */
++# define LVDS_PIPEB_SELECT (1 << 30)
++
++/**
++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
++ * pixel.
++ */
++# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
++# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
++# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
++/**
++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
++ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
++ * on.
++ */
++# define LVDS_A3_POWER_MASK (3 << 6)
++# define LVDS_A3_POWER_DOWN (0 << 6)
++# define LVDS_A3_POWER_UP (3 << 6)
++/**
++ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
++ * is set.
++ */
++# define LVDS_CLKB_POWER_MASK (3 << 4)
++# define LVDS_CLKB_POWER_DOWN (0 << 4)
++# define LVDS_CLKB_POWER_UP (3 << 4)
++
++/**
++ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
++ * setting for whether we are in dual-channel mode. The B3 pair will
++ * additionally only be powered up when LVDS_A3_POWER_UP is set.
++ */
++# define LVDS_B0B3_POWER_MASK (3 << 2)
++# define LVDS_B0B3_POWER_DOWN (0 << 2)
++# define LVDS_B0B3_POWER_UP (3 << 2)
++
++#define PIPEACONF 0x70008
++#define PIPEACONF_ENABLE (1<<31)
++#define PIPEACONF_DISABLE 0
++#define PIPEACONF_DOUBLE_WIDE (1<<30)
++#define I965_PIPECONF_ACTIVE (1<<30)
++#define PIPEACONF_SINGLE_WIDE 0
++#define PIPEACONF_PIPE_UNLOCKED 0
++#define PIPEACONF_PIPE_LOCKED (1<<25)
++#define PIPEACONF_PALETTE 0
++#define PIPEACONF_GAMMA (1<<24)
++#define PIPECONF_FORCE_BORDER (1<<25)
++#define PIPECONF_PROGRESSIVE (0 << 21)
++#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
++#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
++
++#define PIPEBCONF 0x71008
++#define PIPEBCONF_ENABLE (1<<31)
++#define PIPEBCONF_DISABLE 0
++#define PIPEBCONF_DOUBLE_WIDE (1<<30)
++#define PIPEBCONF_DISABLE 0
++#define PIPEBCONF_GAMMA (1<<24)
++#define PIPEBCONF_PALETTE 0
++
++#define PIPEBGCMAXRED 0x71010
++#define PIPEBGCMAXGREEN 0x71014
++#define PIPEBGCMAXBLUE 0x71018
++#define PIPEBSTAT 0x71024
++#define PIPEBFRAMEHIGH 0x71040
++#define PIPEBFRAMEPIXEL 0x71044
++
++#define DSPARB 0x70030
++#define DSPFW1 0x70034
++#define DSPFW2 0x70038
++#define DSPFW3 0x7003c
++#define DSPFW4 0x70050
++#define DSPFW5 0x70054
++#define DSPFW6 0x70058
++
++#define DSPACNTR 0x70180
++#define DSPBCNTR 0x71180
++#define DISPLAY_PLANE_ENABLE (1<<31)
++#define DISPLAY_PLANE_DISABLE 0
++#define DISPPLANE_GAMMA_ENABLE (1<<30)
++#define DISPPLANE_GAMMA_DISABLE 0
++#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
++#define DISPPLANE_8BPP (0x2<<26)
++#define DISPPLANE_15_16BPP (0x4<<26)
++#define DISPPLANE_16BPP (0x5<<26)
++#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
++#define DISPPLANE_32BPP (0x7<<26)
++#define DISPPLANE_STEREO_ENABLE (1<<25)
++#define DISPPLANE_STEREO_DISABLE 0
++#define DISPPLANE_SEL_PIPE_MASK (1<<24)
++#define DISPPLANE_SEL_PIPE_A 0
++#define DISPPLANE_SEL_PIPE_B (1<<24)
++#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
++#define DISPPLANE_SRC_KEY_DISABLE 0
++#define DISPPLANE_LINE_DOUBLE (1<<20)
++#define DISPPLANE_NO_LINE_DOUBLE 0
++#define DISPPLANE_STEREO_POLARITY_FIRST 0
++#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
++/* plane B only */
++#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
++#define DISPPLANE_ALPHA_TRANS_DISABLE 0
++#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
++#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
++
++#define DSPABASE 0x70184
++#define DSPASTRIDE 0x70188
++
++#define DSPBBASE 0x71184
++#define DSPBADDR DSPBBASE
++#define DSPBSTRIDE 0x71188
++
++#define DSPAKEYVAL 0x70194
++#define DSPAKEYMASK 0x70198
++
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#define DSPBPOS 0x7118C
++#define DSPBSIZE 0x71190
++
++#define DSPASURF 0x7019C
++#define DSPATILEOFF 0x701A4
++
++#define DSPBSURF 0x7119C
++#define DSPBTILEOFF 0x711A4
++
++#define VGACNTRL 0x71400
++# define VGA_DISP_DISABLE (1 << 31)
++# define VGA_2X_MODE (1 << 30)
++# define VGA_PIPE_B_SELECT (1 << 29)
++
++/*
++ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
++ * of video memory available to the BIOS in SWF1.
++ */
++
++#define SWF0 0x71410
++#define SWF1 0x71414
++#define SWF2 0x71418
++#define SWF3 0x7141c
++#define SWF4 0x71420
++#define SWF5 0x71424
++#define SWF6 0x71428
++
++/*
++ * 855 scratch registers.
++ */
++#define SWF00 0x70410
++#define SWF01 0x70414
++#define SWF02 0x70418
++#define SWF03 0x7041c
++#define SWF04 0x70420
++#define SWF05 0x70424
++#define SWF06 0x70428
++
++#define SWF10 SWF0
++#define SWF11 SWF1
++#define SWF12 SWF2
++#define SWF13 SWF3
++#define SWF14 SWF4
++#define SWF15 SWF5
++#define SWF16 SWF6
++
++#define SWF30 0x72414
++#define SWF31 0x72418
++#define SWF32 0x7241c
++
++
++/*
++ * Palette registers
++ */
++#define PALETTE_A 0x0a000
++#define PALETTE_B 0x0a800
++
++#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
++#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
++#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
++#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
++#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
++
++
++/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
++#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
++#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
++#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
++#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
++
++#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
++ (dev)->pci_device == 0x2982 || \
++ (dev)->pci_device == 0x2992 || \
++ (dev)->pci_device == 0x29A2 || \
++ (dev)->pci_device == 0x2A02 || \
++ (dev)->pci_device == 0x2A12)
++
++#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
++
++#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
++ (dev)->pci_device == 0x29B2 || \
++ (dev)->pci_device == 0x29D2)
++
++#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
++ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
++ IS_MRST(dev))
++
++#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
++ IS_I945GM(dev) || IS_I965GM(dev) || \
++ IS_POULSBO(dev) || IS_MRST(dev))
++
++/* Cursor A & B regs */
++#define CURACNTR 0x70080
++#define CURSOR_MODE_DISABLE 0x00
++#define CURSOR_MODE_64_32B_AX 0x07
++#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define MCURSOR_GAMMA_ENABLE (1 << 26)
++#define CURABASE 0x70084
++#define CURAPOS 0x70088
++#define CURSOR_POS_MASK 0x007FF
++#define CURSOR_POS_SIGN 0x8000
++#define CURSOR_X_SHIFT 0
++#define CURSOR_Y_SHIFT 16
++#define CURBCNTR 0x700c0
++#define CURBBASE 0x700c4
++#define CURBPOS 0x700c8
++
++/*
++ * MOORESTOWN delta registers
++ */
++#define MRST_DPLL_A 0x0f014
++#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
++#define MRST_FPA0 0x0f040
++#define MRST_FPA1 0x0f044
++
++/* #define LVDS 0x61180 */
++# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
++# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
++# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
++
++#define MIPI 0x61190
++# define MIPI_PORT_EN (1 << 31)
++
++/* #define PP_CONTROL 0x61204 */
++# define POWER_DOWN_ON_RESET (1 << 1)
++
++/* #define PFIT_CONTROL 0x61230 */
++# define PFIT_PIPE_SELECT (3 << 29)
++# define PFIT_PIPE_SELECT_SHIFT (29)
++
++/* #define BLC_PWM_CTL 0x61254 */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
++
++/* #define PIPEACONF 0x70008 */
++#define PIPEACONF_PIPE_STATE (1<<30)
++/* #define DSPACNTR 0x70180 */
++#if 0 /*FIXME JLIU7 need to define the following */
++1000 = 32 - bit RGBX(10 : 10 : 10 : 2)
++pixel format.Ignore alpha.1010 = BGRX 10 : 10 : 10 : 2 1100 = 64 - bit RGBX
++(16 : 16 : 16 : 16) 16 bit floating point pixel format.
++Ignore alpha.1110 = 32 - bit RGBX(8 : 8 : 8 : 8) pixel format.
++ Ignore
++ alpha.
++#endif /*FIXME JLIU7 need to define the following */
++
++#define MRST_DSPABASE 0x7019c
++
++/*
++ * MOORESTOWN reserved registers
++ */
++#if 0
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#endif
++/*
++ * Moorestown registers.
++ */
++/*===========================================================================
++; General Constants
++;--------------------------------------------------------------------------*/
++#define BIT0 0x00000001
++#define BIT1 0x00000002
++#define BIT2 0x00000004
++#define BIT3 0x00000008
++#define BIT4 0x00000010
++#define BIT5 0x00000020
++#define BIT6 0x00000040
++#define BIT7 0x00000080
++#define BIT8 0x00000100
++#define BIT9 0x00000200
++#define BIT10 0x00000400
++#define BIT11 0x00000800
++#define BIT12 0x00001000
++#define BIT13 0x00002000
++#define BIT14 0x00004000
++#define BIT15 0x00008000
++#define BIT16 0x00010000
++#define BIT17 0x00020000
++#define BIT18 0x00040000
++#define BIT19 0x00080000
++#define BIT20 0x00100000
++#define BIT21 0x00200000
++#define BIT22 0x00400000
++#define BIT23 0x00800000
++#define BIT24 0x01000000
++#define BIT25 0x02000000
++#define BIT26 0x04000000
++#define BIT27 0x08000000
++#define BIT28 0x10000000
++#define BIT29 0x20000000
++#define BIT30 0x40000000
++#define BIT31 0x80000000
++/*===========================================================================
++; MIPI IP registers
++;--------------------------------------------------------------------------*/
++#define DEVICE_READY_REG 0xb000
++#define INTR_STAT_REG 0xb004
++#define RX_SOT_ERROR BIT0
++#define RX_SOT_SYNC_ERROR BIT1
++#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
++#define RX_LP_TX_SYNC_ERROR BIT4
++#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
++#define RX_FALSE_CONTROL_ERROR BIT6
++#define RX_ECC_SINGLE_BIT_ERROR BIT7
++#define RX_ECC_MULTI_BIT_ERROR BIT8
++#define RX_CHECKSUM_ERROR BIT9
++#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
++#define RX_DSI_VC_ID_INVALID BIT11
++#define TX_FALSE_CONTROL_ERROR BIT12
++#define TX_ECC_SINGLE_BIT_ERROR BIT13
++#define TX_ECC_MULTI_BIT_ERROR BIT14
++#define TX_CHECKSUM_ERROR BIT15
++#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
++#define TX_DSI_VC_ID_INVALID BIT17
++#define HIGH_CONTENTION BIT18
++#define LOW_CONTENTION BIT19
++#define DPI_FIFO_UNDER_RUN BIT20
++#define HS_TX_TIMEOUT BIT21
++#define LP_RX_TIMEOUT BIT22
++#define TURN_AROUND_ACK_TIMEOUT BIT23
++#define ACK_WITH_NO_ERROR BIT24
++#define INTR_EN_REG 0xb008
++#define DSI_FUNC_PRG_REG 0xb00c
++#define DPI_CHANNEL_NUMBER_POS 0x03
++#define DBI_CHANNEL_NUMBER_POS 0x05
++#define FMT_DPI_POS 0x07
++#define FMT_DBI_POS 0x0A
++#define DBI_DATA_WIDTH_POS 0x0D
++#define HS_TX_TIMEOUT_REG 0xb010
++#define LP_RX_TIMEOUT_REG 0xb014
++#define TURN_AROUND_TIMEOUT_REG 0xb018
++#define DEVICE_RESET_REG 0xb01C
++#define DPI_RESOLUTION_REG 0xb020
++#define RES_V_POS 0x10
++#define DBI_RESOLUTION_REG 0xb024
++#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
++#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
++#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
++#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
++#define VERT_SYNC_PAD_COUNT_REG 0xb038
++#define VERT_BACK_PORCH_COUNT_REG 0xb03c
++#define VERT_FRONT_PORCH_COUNT_REG 0xb040
++#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
++#define DPI_CONTROL_REG 0xb048
++#define DPI_SHUT_DOWN BIT0
++#define DPI_TURN_ON BIT1
++#define DPI_COLOR_MODE_ON BIT2
++#define DPI_COLOR_MODE_OFF BIT3
++#define DPI_BACK_LIGHT_ON BIT4
++#define DPI_BACK_LIGHT_OFF BIT5
++#define DPI_LP BIT6
++#define DPI_DATA_REG 0xb04c
++#define DPI_BACK_LIGHT_ON_DATA 0x07
++#define DPI_BACK_LIGHT_OFF_DATA 0x17
++#define INIT_COUNT_REG 0xb050
++#define MAX_RET_PAK_REG 0xb054
++#define VIDEO_FMT_REG 0xb058
++#define EOT_DISABLE_REG 0xb05c
++#define LP_BYTECLK_REG 0xb060
++#define LP_GEN_DATA_REG 0xb064
++#define HS_GEN_DATA_REG 0xb068
++#define LP_GEN_CTRL_REG 0xb06C
++#define HS_GEN_CTRL_REG 0xb070
++#define GEN_FIFO_STAT_REG 0xb074
++#define HS_DATA_FIFO_FULL BIT0
++#define HS_DATA_FIFO_HALF_EMPTY BIT1
++#define HS_DATA_FIFO_EMPTY BIT2
++#define LP_DATA_FIFO_FULL BIT8
++#define LP_DATA_FIFO_HALF_EMPTY BIT9
++#define LP_DATA_FIFO_EMPTY BIT10
++#define HS_CTRL_FIFO_FULL BIT16
++#define HS_CTRL_FIFO_HALF_EMPTY BIT17
++#define HS_CTRL_FIFO_EMPTY BIT18
++#define LP_CTRL_FIFO_FULL BIT24
++#define LP_CTRL_FIFO_HALF_EMPTY BIT25
++#define LP_CTRL_FIFO_EMPTY BIT26
++/*===========================================================================
++; MIPI Adapter registers
++;--------------------------------------------------------------------------*/
++#define MIPI_CONTROL_REG 0xb104
++#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
++#define MIPI_DATA_ADDRESS_REG 0xb108
++#define MIPI_DATA_LENGTH_REG 0xb10C
++#define MIPI_COMMAND_ADDRESS_REG 0xb110
++#define MIPI_COMMAND_LENGTH_REG 0xb114
++#define MIPI_READ_DATA_RETURN_REG0 0xb118
++#define MIPI_READ_DATA_RETURN_REG1 0xb11C
++#define MIPI_READ_DATA_RETURN_REG2 0xb120
++#define MIPI_READ_DATA_RETURN_REG3 0xb124
++#define MIPI_READ_DATA_RETURN_REG4 0xb128
++#define MIPI_READ_DATA_RETURN_REG5 0xb12C
++#define MIPI_READ_DATA_RETURN_REG6 0xb130
++#define MIPI_READ_DATA_RETURN_REG7 0xb134
++#define MIPI_READ_DATA_VALID_REG 0xb138
++/* DBI COMMANDS */
++#define soft_reset 0x01
++/* ************************************************************************* *\
++The display module performs a software reset.
++Registers are written with their SW Reset default values.
++\* ************************************************************************* */
++#define get_power_mode 0x0a
++/* ************************************************************************* *\
++The display module returns the current power mode
++\* ************************************************************************* */
++#define get_address_mode 0x0b
++/* ************************************************************************* *\
++The display module returns the current status.
++\* ************************************************************************* */
++#define get_pixel_format 0x0c
++/* ************************************************************************* *\
++This command gets the pixel format for the RGB image data
++used by the interface.
++\* ************************************************************************* */
++#define get_display_mode 0x0d
++/* ************************************************************************* *\
++The display module returns the Display Image Mode status.
++\* ************************************************************************* */
++#define get_signal_mode 0x0e
++/* ************************************************************************* *\
++The display module returns the Display Signal Mode.
++\* ************************************************************************* */
++#define get_diagnostic_result 0x0f
++/* ************************************************************************* *\
++The display module returns the self-diagnostic results following
++a Sleep Out command.
++\* ************************************************************************* */
++#define enter_sleep_mode 0x10
++/* ************************************************************************* *\
++This command causes the display module to enter the Sleep mode.
++In this mode, all unnecessary blocks inside the display module are disabled
++except interface communication. This is the lowest power mode
++the display module supports.
++\* ************************************************************************* */
++#define exit_sleep_mode 0x11
++/* ************************************************************************* *\
++This command causes the display module to exit Sleep mode.
++All blocks inside the display module are enabled.
++\* ************************************************************************* */
++#define enter_partial_mode 0x12
++/* ************************************************************************* *\
++This command causes the display module to enter the Partial Display Mode.
++The Partial Display Mode window is described by the set_partial_area command.
++\* ************************************************************************* */
++#define enter_normal_mode 0x13
++/* ************************************************************************* *\
++This command causes the display module to enter the Normal mode.
++Normal Mode is defined as Partial Display mode and Scroll mode are off
++\* ************************************************************************* */
++#define exit_invert_mode 0x20
++/* ************************************************************************* *\
++This command causes the display module to stop inverting the image data on
++the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define enter_invert_mode 0x21
++/* ************************************************************************* *\
++This command causes the display module to invert the image data only on
++the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_gamma_curve 0x26
++/* ************************************************************************* *\
++This command selects the desired gamma curve for the display device.
++Four fixed gamma curves are defined in section DCS spec.
++\* ************************************************************************* */
++#define set_display_off 0x28
++/* ************************************************************************* *\
++This command causes the display module to stop displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_display_on 0x29
++/* ************************************************************************* *\
++This command causes the display module to start displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_column_address 0x2a
++/* ************************************************************************* *\
++This command defines the column extent of the frame memory accessed by the
++host processor with the read_memory_continue and write_memory_continue commands.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_page_address 0x2b
++/* ************************************************************************* *\
++This command defines the page extent of the frame memory accessed by the host
++processor with the write_memory_continue and read_memory_continue command.
++No status bits are changed.
++\* ************************************************************************* */
++#define write_mem_start 0x2c
++/* ************************************************************************* *\
++This command transfers image data from the host processor to the display
++module's frame memory starting at the pixel location specified by
++preceding set_column_address and set_page_address commands.
++\* ************************************************************************* */
++#define set_partial_area 0x30
++/* ************************************************************************* *\
++This command defines the Partial Display mode's display area.
++There are two parameters associated with
++this command, the first defines the Start Row (SR) and the second the End Row
++(ER). SR and ER refer to the Frame Memory Line Pointer.
++\* ************************************************************************* */
++#define set_scroll_area 0x33
++/* ************************************************************************* *\
++This command defines the display module's Vertical Scrolling Area.
++\* ************************************************************************* */
++#define set_tear_off 0x34
++/* ************************************************************************* *\
++This command turns off the display module's Tearing Effect output signal on
++the TE signal line.
++\* ************************************************************************* */
++#define set_tear_on 0x35
++/* ************************************************************************* *\
++This command turns on the display module's Tearing Effect output signal
++on the TE signal line.
++\* ************************************************************************* */
++#define set_address_mode 0x36
++/* ************************************************************************* *\
++This command sets the data order for transfers from the host processor to
++display module's frame memory, bits B[7:5] and B3, and from the display
++module's frame memory to the display device, bits B[2:0] and B4.
++\* ************************************************************************* */
++#define set_scroll_start 0x37
++/* ************************************************************************* *\
++This command sets the start of the vertical scrolling area in the frame memory.
++The vertical scrolling area is fully defined when this command is used with
++the set_scroll_area command. The set_scroll_start command has one parameter,
++the Vertical Scroll Pointer. The VSP defines the line in the frame memory
++that is written to the display device as the first line of the vertical
++scroll area.
++\* ************************************************************************* */
++#define exit_idle_mode 0x38
++/* ************************************************************************* *\
++This command causes the display module to exit Idle mode.
++\* ************************************************************************* */
++#define enter_idle_mode 0x39
++/* ************************************************************************* *\
++This command causes the display module to enter Idle Mode.
++In Idle Mode, color expression is reduced. Colors are shown on the display
++device using the MSB of each of the R, G and B color components in the frame
++memory
++\* ************************************************************************* */
++#define set_pixel_format 0x3a
++/* ************************************************************************* *\
++This command sets the pixel format for the RGB image data used by the interface.
++Bits D[6:4] DPI Pixel Format Definition
++Bits D[2:0] DBI Pixel Format Definition
++Bits D7 and D3 are not used.
++\* ************************************************************************* */
++#define write_mem_cont 0x3c
++/* ************************************************************************* *\
++This command transfers image data from the host processor to the display
++module's frame memory continuing from the pixel location following the
++previous write_memory_continue or write_memory_start command.
++\* ************************************************************************* */
++#define set_tear_scanline 0x44
++/* ************************************************************************* *\
++This command turns on the display module's Tearing Effect output signal on the
++TE signal line when the display module reaches line N.
++\* ************************************************************************* */
++#define get_scanline 0x45
++/* ************************************************************************* *\
++The display module returns the current scanline, N, used to update the
++display device. The total number of scanlines on a display device is
++defined as VSYNC + VBP + VACT + VFP.The first scanline is defined as
++the first line of V Sync and is denoted as Line 0.
++When in Sleep Mode, the value returned by get_scanline is undefined.
++\* ************************************************************************* */
++/* DCS Interface Pixel Formats */
++#define DCS_PIXEL_FORMAT_3BPP 0x1
++#define DCS_PIXEL_FORMAT_8BPP 0x2
++#define DCS_PIXEL_FORMAT_12BPP 0x3
++#define DCS_PIXEL_FORMAT_16BPP 0x5
++#define DCS_PIXEL_FORMAT_18BPP 0x6
++#define DCS_PIXEL_FORMAT_24BPP 0x7
++/* ONE PARAMETER READ DATA */
++#define addr_mode_data 0xfc
++#define diag_res_data 0x00
++#define disp_mode_data 0x23
++#define pxl_fmt_data 0x77
++#define pwr_mode_data 0x74
++#define sig_mode_data 0x00
++/* TWO PARAMETERS READ DATA */
++#define scanline_data1 0xff
++#define scanline_data2 0xff
++/* DPI PIXEL FORMATS */
++#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
++#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
++#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
++ * 666 FORMAT
++ */
++#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
++#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
++ * with Sync Pulse
++ */
++#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
++ * with Sync events
++ */
++#define BURST_MODE 0x03 /* Burst Mode */
++#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
++#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
++#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
++#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
++#define DBI_NOT_SUPPORTED 0x00 /* command mode
++ * is not supported
++ */
++#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
++#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
++#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
++#define DBI_COMMAND_BUFFER_SIZE 0x120 /* Allocate at least
++ * 0x100 Byte with 32
++ * byte alignment
++ */
++#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
++ * 0x100 Byte with 32
++ * byte alignment
++ */
++#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
++#define SKU_83 0x01
++#define SKU_100 0x02
++#define SKU_100L 0x04
++#define SKU_BYPASS 0x08
++#if 0
++/* ************************************************************************* *\
++DSI command data structure
++\* ************************************************************************* */
++union DSI_LONG_PACKET_HEADER {
++ u32 DSI_longPacketHeader;
++ struct {
++ u8 dataID;
++ u16 wordCount;
++ u8 ECC;
++ };
++#if 0 /*FIXME JLIU7 */
++ struct {
++ u8 DT:6;
++ u8 VC:2;
++ };
++#endif /*FIXME JLIU7 */
++};
++
++union MIPI_ADPT_CMD_LNG_REG {
++ u32 commnadLengthReg;
++ struct {
++ u8 command0;
++ u8 command1;
++ u8 command2;
++ u8 command3;
++ };
++};
++
++struct SET_COLUMN_ADDRESS_DATA {
++ u8 command;
++ u16 SC; /* Start Column */
++ u16 EC; /* End Column */
++};
++
++struct SET_PAGE_ADDRESS_DATA {
++ u8 command;
++ u16 SP; /* Start Page */
++ u16 EP; /* End Page */
++};
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_sdvo.c b/drivers/gpu/drm/psb/psb_intel_sdvo.c
+--- a/drivers/gpu/drm/psb/psb_intel_sdvo.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_sdvo.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1232 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++/*
++ * Copyright 2006 Dave Airlie <airlied@linux.ie>
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <linux/delay.h>
++#include <drm/drm_crtc.h>
++#include "psb_intel_sdvo_regs.h"
++
++struct psb_intel_sdvo_priv {
++ struct psb_intel_i2c_chan *i2c_bus;
++ int slaveaddr;
++ int output_device;
++
++ u16 active_outputs;
++
++ struct psb_intel_sdvo_caps caps;
++ int pixel_clock_min, pixel_clock_max;
++
++ int save_sdvo_mult;
++ u16 save_active_outputs;
++ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
++ struct psb_intel_sdvo_dtd save_output_dtd[16];
++ u32 save_SDVOX;
++};
++
++/**
++ * Writes the SDVOB or SDVOC with the given value, but always writes both
++ * SDVOB and SDVOC to work around apparent hardware issues (according to
++ * comments in the BIOS).
++ */
++void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output, u32 val)
++{
++ struct drm_device *dev = psb_intel_output->base.dev;
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u32 bval = val, cval = val;
++ int i;
++
++ if (sdvo_priv->output_device == SDVOB)
++ cval = REG_READ(SDVOC);
++ else
++ bval = REG_READ(SDVOB);
++ /*
++ * Write the registers twice for luck. Sometimes,
++ * writing them only once doesn't appear to 'stick'.
++ * The BIOS does this too. Yay, magic
++ */
++ for (i = 0; i < 2; i++) {
++ REG_WRITE(SDVOB, bval);
++ REG_READ(SDVOB);
++ REG_WRITE(SDVOC, cval);
++ REG_READ(SDVOC);
++ }
++}
++
++static bool psb_intel_sdvo_read_byte(struct psb_intel_output *psb_intel_output,
++ u8 addr, u8 *ch)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u8 out_buf[2];
++ u8 buf[2];
++ int ret;
++
++ struct i2c_msg msgs[] = {
++ {
++ .addr = sdvo_priv->i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = sdvo_priv->i2c_bus->slave_addr,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = 0;
++
++ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
++ if (ret == 2) {
++ /* DRM_DEBUG("got back from addr %02X = %02x\n",
++ * out_buf[0], buf[0]);
++ */
++ *ch = buf[0];
++ return true;
++ }
++
++ DRM_DEBUG("i2c transfer returned %d\n", ret);
++ return false;
++}
++
++static bool psb_intel_sdvo_write_byte(struct psb_intel_output *psb_intel_output,
++ int addr, u8 ch)
++{
++ u8 out_buf[2];
++ struct i2c_msg msgs[] = {
++ {
++ .addr = psb_intel_output->i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = ch;
++
++ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
++ return true;
++ return false;
++}
++
++#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
++/** Mapping of command numbers to names, for debug output */
++const static struct _sdvo_cmd_name {
++ u8 cmd;
++ char *name;
++} sdvo_cmd_names[] = {
++SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
++
++#define SDVO_NAME(dev_priv) \
++ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
++#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
++
++static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output, u8 cmd,
++ void *args, int args_len)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int i;
++
++ if (1) {
++ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
++ for (i = 0; i < args_len; i++)
++ printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
++ for (; i < 8; i++)
++ printk(" ");
++ for (i = 0;
++ i <
++ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
++ i++) {
++ if (cmd == sdvo_cmd_names[i].cmd) {
++ printk("(%s)", sdvo_cmd_names[i].name);
++ break;
++ }
++ }
++ if (i ==
++ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
++ printk("(%02X)", cmd);
++ printk("\n");
++ }
++
++ for (i = 0; i < args_len; i++) {
++ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_ARG_0 - i,
++ ((u8 *) args)[i]);
++ }
++
++ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
++}
++
++static const char *cmd_status_names[] = {
++ "Power on",
++ "Success",
++ "Not supported",
++ "Invalid arg",
++ "Pending",
++ "Target not specified",
++ "Scaling not supported"
++};
++
++static u8 psb_intel_sdvo_read_response(struct psb_intel_output *psb_intel_output,
++ void *response, int response_len)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int i;
++ u8 status;
++ u8 retry = 50;
++
++ while (retry--) {
++ /* Read the command response */
++ for (i = 0; i < response_len; i++) {
++ psb_intel_sdvo_read_byte(psb_intel_output,
++ SDVO_I2C_RETURN_0 + i,
++ &((u8 *) response)[i]);
++ }
++
++ /* read the return status */
++ psb_intel_sdvo_read_byte(psb_intel_output, SDVO_I2C_CMD_STATUS,
++ &status);
++
++ if (1) {
++ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
++ for (i = 0; i < response_len; i++)
++ printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
++ for (; i < 8; i++)
++ printk(" ");
++ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
++ printk(KERN_INFO"(%s)",
++ cmd_status_names[status]);
++ else
++ printk(KERN_INFO"(??? %d)", status);
++ printk("\n");
++ }
++
++ if (status != SDVO_CMD_STATUS_PENDING)
++ return status;
++
++ mdelay(50);
++ }
++
++ return status;
++}
++
++int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
++{
++ if (mode->clock >= 100000)
++ return 1;
++ else if (mode->clock >= 50000)
++ return 2;
++ else
++ return 4;
++}
++
++/**
++ * Don't check status code from this as it switches the bus back to the
++ * SDVO chips which defeats the purpose of doing a bus switch in the first
++ * place.
++ */
++void psb_intel_sdvo_set_control_bus_switch(struct psb_intel_output *psb_intel_output,
++ u8 target)
++{
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
++ &target, 1);
++}
++
++static bool psb_intel_sdvo_set_target_input(struct psb_intel_output *psb_intel_output,
++ bool target_0, bool target_1)
++{
++ struct psb_intel_sdvo_set_target_input_args targets = { 0 };
++ u8 status;
++
++ if (target_0 && target_1)
++ return SDVO_CMD_STATUS_NOTSUPP;
++
++ if (target_1)
++ targets.target_1 = 1;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
++ &targets, sizeof(targets));
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++/**
++ * Return whether each input is trained.
++ *
++ * This function is making an assumption about the layout of the response,
++ * which should be checked against the docs.
++ */
++static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
++ *psb_intel_output, bool *input_1,
++ bool *input_2)
++{
++ struct psb_intel_sdvo_get_trained_inputs_response response;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &response,
++ sizeof(response));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ *input_1 = response.input0_trained;
++ *input_2 = response.input1_trained;
++ return true;
++}
++
++static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
++ *psb_intel_output, u16 *outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, outputs,
++ sizeof(*outputs));
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
++ *psb_intel_output, u16 outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
++ &outputs, sizeof(outputs));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
++ *psb_intel_output, int mode)
++{
++ u8 status, state = SDVO_ENCODER_STATE_ON;
++
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ state = SDVO_ENCODER_STATE_ON;
++ break;
++ case DRM_MODE_DPMS_STANDBY:
++ state = SDVO_ENCODER_STATE_STANDBY;
++ break;
++ case DRM_MODE_DPMS_SUSPEND:
++ state = SDVO_ENCODER_STATE_SUSPEND;
++ break;
++ case DRM_MODE_DPMS_OFF:
++ state = SDVO_ENCODER_STATE_OFF;
++ break;
++ }
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
++ sizeof(state));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
++ *psb_intel_output,
++ int *clock_min,
++ int *clock_max)
++{
++ struct psb_intel_sdvo_pixel_clock_range clocks;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
++ 0);
++
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &clocks,
++ sizeof(clocks));
++
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ /* Convert the values from units of 10 kHz to kHz. */
++ *clock_min = clocks.min * 10;
++ *clock_max = clocks.max * 10;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_set_target_output(struct psb_intel_output *psb_intel_output,
++ u16 outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
++ &outputs, sizeof(outputs));
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
++ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
++ sizeof(dtd->part1));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
++ sizeof(dtd->part2));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_get_input_timing(struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_get_timing(psb_intel_output,
++ SDVO_CMD_GET_INPUT_TIMINGS_PART1,
++ dtd);
++}
++
++static bool psb_intel_sdvo_get_output_timing(struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_get_timing(psb_intel_output,
++ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1,
++ dtd);
++}
++
++static bool psb_intel_sdvo_set_timing(struct psb_intel_output *psb_intel_output,
++ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
++ sizeof(dtd->part1));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
++ sizeof(dtd->part2));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_set_input_timing(struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_set_timing(psb_intel_output,
++ SDVO_CMD_SET_INPUT_TIMINGS_PART1,
++ dtd);
++}
++
++static bool psb_intel_sdvo_set_output_timing(struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_set_timing(psb_intel_output,
++ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
++ dtd);
++}
++
++#if 0
++static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output
++ *psb_intel_output,
++ struct psb_intel_sdvo_dtd
++ *dtd)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
++ NULL, 0);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
++ sizeof(dtd->part1));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
++ sizeof(dtd->part2));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++#endif
++
++static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
++ *psb_intel_output)
++{
++ u8 response, status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT,
++ NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
++
++ if (status != SDVO_CMD_STATUS_SUCCESS) {
++ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
++ return SDVO_CLOCK_RATE_MULT_1X;
++ } else {
++ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
++ }
++
++ return response;
++}
++
++static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
++ *psb_intel_output, u8 val)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT,
++ &val, 1);
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
++ * device will be told of the multiplier during mode_set.
++ */
++ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
++ return true;
++}
++
++static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct drm_crtc *crtc = encoder->crtc;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u16 width, height;
++ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
++ u16 h_sync_offset, v_sync_offset;
++ u32 sdvox;
++ struct psb_intel_sdvo_dtd output_dtd;
++ int sdvo_pixel_multiply;
++
++ if (!mode)
++ return;
++
++ width = mode->crtc_hdisplay;
++ height = mode->crtc_vdisplay;
++
++ /* do some mode translations */
++ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
++ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
++
++ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
++ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
++
++ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
++ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
++
++ output_dtd.part1.clock = mode->clock / 10;
++ output_dtd.part1.h_active = width & 0xff;
++ output_dtd.part1.h_blank = h_blank_len & 0xff;
++ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
++ ((h_blank_len >> 8) & 0xf);
++ output_dtd.part1.v_active = height & 0xff;
++ output_dtd.part1.v_blank = v_blank_len & 0xff;
++ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
++ ((v_blank_len >> 8) & 0xf);
++
++ output_dtd.part2.h_sync_off = h_sync_offset;
++ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
++ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
++ (v_sync_len & 0xf);
++ output_dtd.part2.sync_off_width_high =
++ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
++ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
++
++ output_dtd.part2.dtd_flags = 0x18;
++ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
++ output_dtd.part2.dtd_flags |= 0x2;
++ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
++ output_dtd.part2.dtd_flags |= 0x4;
++
++ output_dtd.part2.sdvo_flags = 0;
++ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
++ output_dtd.part2.reserved = 0;
++
++ /* Set the output timing to the screen */
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ sdvo_priv->active_outputs);
++ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
++
++ /* Set the input timing to the screen. Assume always input 0. */
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++
++ /* We would like to use i830_sdvo_create_preferred_input_timing() to
++ * provide the device with a timing it can support, if it supports that
++ * feature. However, presumably we would need to adjust the CRTC to
++ * output the preferred timing, and we don't support that currently.
++ */
++#if 0
++ success =
++ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output, clock,
++ width, height);
++ if (success) {
++ struct psb_intel_sdvo_dtd *input_dtd;
++
++ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
++ &input_dtd);
++ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
++ }
++#else
++ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
++#endif
++
++ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
++ case 1:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_1X);
++ break;
++ case 2:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_2X);
++ break;
++ case 4:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_4X);
++ break;
++ }
++
++ /* Set the SDVO control regs. */
++ if (0 /*IS_I965GM(dev) */) {
++ sdvox = SDVO_BORDER_ENABLE;
++ } else {
++ sdvox = REG_READ(sdvo_priv->output_device);
++ switch (sdvo_priv->output_device) {
++ case SDVOB:
++ sdvox &= SDVOB_PRESERVE_MASK;
++ break;
++ case SDVOC:
++ sdvox &= SDVOC_PRESERVE_MASK;
++ break;
++ }
++ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
++ }
++ if (psb_intel_crtc->pipe == 1)
++ sdvox |= SDVO_PIPE_B_SELECT;
++
++ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
++ if (IS_I965G(dev)) {
++ /* done in crtc_mode_set as the dpll_md reg must be written
++ * early */
++ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
++ /* done in crtc_mode_set as it lives inside the
++ * dpll register */
++ } else {
++ sdvox |=
++ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
++ }
++
++ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
++}
++
++static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *psb_intel_output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u32 temp;
++
++ if (mode != DRM_MODE_DPMS_ON) {
++ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
++ if (0)
++ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
++ mode);
++
++ if (mode == DRM_MODE_DPMS_OFF) {
++ temp = REG_READ(sdvo_priv->output_device);
++ if ((temp & SDVO_ENABLE) != 0) {
++ psb_intel_sdvo_write_sdvox(psb_intel_output,
++ temp &
++ ~SDVO_ENABLE);
++ }
++ }
++ } else {
++ bool input1, input2;
++ int i;
++ u8 status;
++
++ temp = REG_READ(sdvo_priv->output_device);
++ if ((temp & SDVO_ENABLE) == 0)
++ psb_intel_sdvo_write_sdvox(psb_intel_output,
++ temp | SDVO_ENABLE);
++ for (i = 0; i < 2; i++)
++ psb_intel_wait_for_vblank(dev);
++
++ status =
++ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
++ &input2);
++
++
++ /* Warn if the device reported failure to sync.
++ * A lot of SDVO devices fail to notify of sync, but it's
++ * a given it the status is a success, we succeeded.
++ */
++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
++ DRM_DEBUG
++ ("First %s output reported failure to sync\n",
++ SDVO_NAME(sdvo_priv));
++ }
++
++ if (0)
++ psb_intel_sdvo_set_encoder_power_state(psb_intel_output,
++ mode);
++ psb_intel_sdvo_set_active_outputs(psb_intel_output,
++ sdvo_priv->active_outputs);
++ }
++ return;
++}
++
++static void psb_intel_sdvo_save(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int o;
++
++ sdvo_priv->save_sdvo_mult =
++ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
++ psb_intel_sdvo_get_active_outputs(psb_intel_output,
++ &sdvo_priv->save_active_outputs);
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++ psb_intel_sdvo_get_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_1);
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
++ psb_intel_sdvo_get_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_2);
++ }
++
++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
++ u16 this_output = (1 << o);
++ if (sdvo_priv->caps.output_flags & this_output) {
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ this_output);
++ psb_intel_sdvo_get_output_timing(psb_intel_output,
++ &sdvo_priv->
++ save_output_dtd[o]);
++ }
++ }
++
++ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
++}
++
++static void psb_intel_sdvo_restore(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int o;
++ int i;
++ bool input1, input2;
++ u8 status;
++
++ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
++
++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
++ u16 this_output = (1 << o);
++ if (sdvo_priv->caps.output_flags & this_output) {
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ this_output);
++ psb_intel_sdvo_set_output_timing(psb_intel_output,
++ &sdvo_priv->
++ save_output_dtd[o]);
++ }
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++ psb_intel_sdvo_set_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_1);
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
++ psb_intel_sdvo_set_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_2);
++ }
++
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ sdvo_priv->save_sdvo_mult);
++
++ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
++
++ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
++ for (i = 0; i < 2; i++)
++ psb_intel_wait_for_vblank(dev);
++ status =
++ psb_intel_sdvo_get_trained_inputs(psb_intel_output, &input1,
++ &input2);
++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
++ DRM_DEBUG
++ ("First %s output reported failure to sync\n",
++ SDVO_NAME(sdvo_priv));
++ }
++
++ psb_intel_sdvo_set_active_outputs(psb_intel_output,
++ sdvo_priv->save_active_outputs);
++}
++
++static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ if (sdvo_priv->pixel_clock_min > mode->clock)
++ return MODE_CLOCK_LOW;
++
++ if (sdvo_priv->pixel_clock_max < mode->clock)
++ return MODE_CLOCK_HIGH;
++
++ return MODE_OK;
++}
++
++static bool psb_intel_sdvo_get_capabilities(struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_caps *caps)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL,
++ 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, caps, sizeof(*caps));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
++{
++ struct drm_connector *connector = NULL;
++ struct psb_intel_output *iout = NULL;
++ struct psb_intel_sdvo_priv *sdvo;
++
++ /* find the sdvo connector */
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ iout = to_psb_intel_output(connector);
++
++ if (iout->type != INTEL_OUTPUT_SDVO)
++ continue;
++
++ sdvo = iout->dev_priv;
++
++ if (sdvo->output_device == SDVOB && sdvoB)
++ return connector;
++
++ if (sdvo->output_device == SDVOC && !sdvoB)
++ return connector;
++
++ }
++
++ return NULL;
++}
++
++int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output;
++ DRM_DEBUG("\n");
++
++ if (!connector)
++ return 0;
++
++ psb_intel_output = to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
++ NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ if (response[0] != 0)
++ return 1;
++
++ return 0;
++}
++
++void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
++ NULL, 0);
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ if (on) {
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
++ 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
++ &response, 2);
++ } else {
++ response[0] = 0;
++ response[1] = 0;
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
++ &response, 2);
++ }
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
++ NULL, 0);
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++}
++
++static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
++ *connector)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ATTACHED_DISPLAYS,
++ NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
++ if ((response[0] != 0) || (response[1] != 0))
++ return connector_status_connected;
++ else
++ return connector_status_disconnected;
++}
++
++static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++
++ /* set the bus switch and get the modes */
++ psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
++ SDVO_CONTROL_BUS_DDC2);
++ psb_intel_ddc_get_modes(psb_intel_output);
++
++ if (list_empty(&connector->probed_modes))
++ return 0;
++ return 1;
++#if 0
++ /* Mac mini hack. On this device, I get DDC through the analog, which
++ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
++ * but it does load-detect as connected. So, just steal the DDC bits
++ * from analog when we fail at finding it the right way.
++ */
++ /* TODO */
++ return NULL;
++
++ return NULL;
++#endif
++}
++
++static void psb_intel_sdvo_destroy(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++
++ if (psb_intel_output->i2c_bus)
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++ kfree(psb_intel_output);
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
++ .dpms = psb_intel_sdvo_dpms,
++ .mode_fixup = psb_intel_sdvo_mode_fixup,
++ .prepare = psb_intel_encoder_prepare,
++ .mode_set = psb_intel_sdvo_mode_set,
++ .commit = psb_intel_encoder_commit,
++};
++
++static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
++ .save = psb_intel_sdvo_save,
++ .restore = psb_intel_sdvo_restore,
++ .detect = psb_intel_sdvo_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .destroy = psb_intel_sdvo_destroy,
++};
++
++static const struct drm_connector_helper_funcs
++ psb_intel_sdvo_connector_helper_funcs = {
++ .get_modes = psb_intel_sdvo_get_modes,
++ .mode_valid = psb_intel_sdvo_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
++{
++ drm_encoder_cleanup(encoder);
++}
++
++static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
++ .destroy = psb_intel_sdvo_enc_destroy,
++};
++
++
++void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
++{
++ struct drm_connector *connector;
++ struct psb_intel_output *psb_intel_output;
++ struct psb_intel_sdvo_priv *sdvo_priv;
++ struct psb_intel_i2c_chan *i2cbus = NULL;
++ int connector_type;
++ u8 ch[0x40];
++ int i;
++ int encoder_type, output_id;
++
++ psb_intel_output =
++ kcalloc(sizeof(struct psb_intel_output) +
++ sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ connector = &psb_intel_output->base;
++
++ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
++ DRM_MODE_CONNECTOR_Unknown);
++ drm_connector_helper_add(connector,
++ &psb_intel_sdvo_connector_helper_funcs);
++ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
++ psb_intel_output->type = INTEL_OUTPUT_SDVO;
++
++ connector->interlace_allowed = 0;
++ connector->doublescan_allowed = 0;
++
++ /* setup the DDC bus. */
++ if (output_device == SDVOB)
++ i2cbus =
++ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
++ else
++ i2cbus =
++ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
++
++ if (!i2cbus)
++ goto err_connector;
++
++ sdvo_priv->i2c_bus = i2cbus;
++
++ if (output_device == SDVOB) {
++ output_id = 1;
++ sdvo_priv->i2c_bus->slave_addr = 0x38;
++ } else {
++ output_id = 2;
++ sdvo_priv->i2c_bus->slave_addr = 0x39;
++ }
++
++ sdvo_priv->output_device = output_device;
++ psb_intel_output->i2c_bus = i2cbus;
++ psb_intel_output->dev_priv = sdvo_priv;
++
++
++ /* Read the regs to test if we can talk to the device */
++ for (i = 0; i < 0x40; i++) {
++ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
++ DRM_DEBUG("No SDVO device found on SDVO%c\n",
++ output_device == SDVOB ? 'B' : 'C');
++ goto err_i2c;
++ }
++ }
++
++ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
++
++ memset(&sdvo_priv->active_outputs, 0,
++ sizeof(sdvo_priv->active_outputs));
++
++ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
++ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_DAC;
++ connector_type = DRM_MODE_CONNECTOR_VGA;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_DAC;
++ connector_type = DRM_MODE_CONNECTOR_VGA;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_TMDS;
++ connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_TMDS;
++ connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else {
++ unsigned char bytes[2];
++
++ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
++ DRM_DEBUG
++ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
++ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
++ goto err_i2c;
++ }
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
++ encoder_type);
++ drm_encoder_helper_add(&psb_intel_output->enc,
++ &psb_intel_sdvo_helper_funcs);
++ connector->connector_type = connector_type;
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ drm_sysfs_connector_add(connector);
++
++ /* Set the input timing to the screen. Assume always input 0. */
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++
++ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
++ &sdvo_priv->pixel_clock_min,
++ &sdvo_priv->
++ pixel_clock_max);
++
++
++ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
++ "clock range %dMHz - %dMHz, "
++ "input 1: %c, input 2: %c, "
++ "output 1: %c, output 2: %c\n",
++ SDVO_NAME(sdvo_priv),
++ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
++ sdvo_priv->caps.device_rev_id,
++ sdvo_priv->pixel_clock_min / 1000,
++ sdvo_priv->pixel_clock_max / 1000,
++ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
++ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
++ /* check currently supported outputs */
++ sdvo_priv->caps.output_flags &
++ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
++ sdvo_priv->caps.output_flags &
++ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
++
++ psb_intel_output->ddc_bus = i2cbus;
++
++ return;
++
++err_i2c:
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++err_connector:
++ drm_connector_cleanup(connector);
++ kfree(psb_intel_output);
++
++ return;
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h
+--- a/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_intel_sdvo_regs.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,328 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++/**
++ * @file SDVO command definitions and structures.
++ */
++
++#define SDVO_OUTPUT_FIRST (0)
++#define SDVO_OUTPUT_TMDS0 (1 << 0)
++#define SDVO_OUTPUT_RGB0 (1 << 1)
++#define SDVO_OUTPUT_CVBS0 (1 << 2)
++#define SDVO_OUTPUT_SVID0 (1 << 3)
++#define SDVO_OUTPUT_YPRPB0 (1 << 4)
++#define SDVO_OUTPUT_SCART0 (1 << 5)
++#define SDVO_OUTPUT_LVDS0 (1 << 6)
++#define SDVO_OUTPUT_TMDS1 (1 << 8)
++#define SDVO_OUTPUT_RGB1 (1 << 9)
++#define SDVO_OUTPUT_CVBS1 (1 << 10)
++#define SDVO_OUTPUT_SVID1 (1 << 11)
++#define SDVO_OUTPUT_YPRPB1 (1 << 12)
++#define SDVO_OUTPUT_SCART1 (1 << 13)
++#define SDVO_OUTPUT_LVDS1 (1 << 14)
++#define SDVO_OUTPUT_LAST (14)
++
++struct psb_intel_sdvo_caps {
++ u8 vendor_id;
++ u8 device_id;
++ u8 device_rev_id;
++ u8 sdvo_version_major;
++ u8 sdvo_version_minor;
++ unsigned int sdvo_inputs_mask:2;
++ unsigned int smooth_scaling:1;
++ unsigned int sharp_scaling:1;
++ unsigned int up_scaling:1;
++ unsigned int down_scaling:1;
++ unsigned int stall_support:1;
++ unsigned int pad:1;
++ u16 output_flags;
++} __attribute__ ((packed));
++
++/** This matches the EDID DTD structure, more or less */
++struct psb_intel_sdvo_dtd {
++ struct {
++ u16 clock; /**< pixel clock, in 10kHz units */
++ u8 h_active; /**< lower 8 bits (pixels) */
++ u8 h_blank; /**< lower 8 bits (pixels) */
++ u8 h_high; /**< upper 4 bits each h_active, h_blank */
++ u8 v_active; /**< lower 8 bits (lines) */
++ u8 v_blank; /**< lower 8 bits (lines) */
++ u8 v_high; /**< upper 4 bits each v_active, v_blank */
++ } part1;
++
++ struct {
++ u8 h_sync_off;
++ /**< lower 8 bits, from hblank start */
++ u8 h_sync_width;/**< lower 8 bits (pixels) */
++ /** lower 4 bits each vsync offset, vsync width */
++ u8 v_sync_off_width;
++ /**
++ * 2 high bits of hsync offset, 2 high bits of hsync width,
++ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
++ */
++ u8 sync_off_width_high;
++ u8 dtd_flags;
++ u8 sdvo_flags;
++ /** bits 6-7 of vsync offset at bits 6-7 */
++ u8 v_sync_off_high;
++ u8 reserved;
++ } part2;
++} __attribute__ ((packed));
++
++struct psb_intel_sdvo_pixel_clock_range {
++ u16 min; /**< pixel clock, in 10kHz units */
++ u16 max; /**< pixel clock, in 10kHz units */
++} __attribute__ ((packed));
++
++struct psb_intel_sdvo_preferred_input_timing_args {
++ u16 clock;
++ u16 width;
++ u16 height;
++} __attribute__ ((packed));
++
++/* I2C registers for SDVO */
++#define SDVO_I2C_ARG_0 0x07
++#define SDVO_I2C_ARG_1 0x06
++#define SDVO_I2C_ARG_2 0x05
++#define SDVO_I2C_ARG_3 0x04
++#define SDVO_I2C_ARG_4 0x03
++#define SDVO_I2C_ARG_5 0x02
++#define SDVO_I2C_ARG_6 0x01
++#define SDVO_I2C_ARG_7 0x00
++#define SDVO_I2C_OPCODE 0x08
++#define SDVO_I2C_CMD_STATUS 0x09
++#define SDVO_I2C_RETURN_0 0x0a
++#define SDVO_I2C_RETURN_1 0x0b
++#define SDVO_I2C_RETURN_2 0x0c
++#define SDVO_I2C_RETURN_3 0x0d
++#define SDVO_I2C_RETURN_4 0x0e
++#define SDVO_I2C_RETURN_5 0x0f
++#define SDVO_I2C_RETURN_6 0x10
++#define SDVO_I2C_RETURN_7 0x11
++#define SDVO_I2C_VENDOR_BEGIN 0x20
++
++/* Status results */
++#define SDVO_CMD_STATUS_POWER_ON 0x0
++#define SDVO_CMD_STATUS_SUCCESS 0x1
++#define SDVO_CMD_STATUS_NOTSUPP 0x2
++#define SDVO_CMD_STATUS_INVALID_ARG 0x3
++#define SDVO_CMD_STATUS_PENDING 0x4
++#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
++#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
++
++/* SDVO commands, argument/result registers */
++
++#define SDVO_CMD_RESET 0x01
++
++/** Returns a struct psb_intel_sdvo_caps */
++#define SDVO_CMD_GET_DEVICE_CAPS 0x02
++
++#define SDVO_CMD_GET_FIRMWARE_REV 0x86
++# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
++# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
++# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
++
++/**
++ * Reports which inputs are trained (managed to sync).
++ *
++ * Devices must have trained within 2 vsyncs of a mode change.
++ */
++#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
++struct psb_intel_sdvo_get_trained_inputs_response {
++ unsigned int input0_trained:1;
++ unsigned int input1_trained:1;
++ unsigned int pad:6;
++} __attribute__ ((packed));
++
++/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
++#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
++
++/**
++ * Sets the current set of active outputs.
++ *
++ * Takes a struct psb_intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
++ * on multi-output devices.
++ */
++#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
++
++/**
++ * Returns the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Returns two struct psb_intel_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_GET_IN_OUT_MAP 0x06
++
++/**
++ * Sets the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Takes two struct psb_intel_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_SET_IN_OUT_MAP 0x07
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
++ */
++#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
++ */
++#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
++
++/**
++ * Takes a struct psb_intel_sdvo_output_flags.
++ */
++#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
++ * interrupts enabled.
++ */
++#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
++
++#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
++struct psb_intel_sdvo_get_interrupt_event_source_response {
++ u16 interrupt_status;
++ unsigned int ambient_light_interrupt:1;
++ unsigned int pad:7;
++} __attribute__ ((packed));
++
++/**
++ * Selects which input is affected by future input commands.
++ *
++ * Commands affected include SET_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
++ */
++#define SDVO_CMD_SET_TARGET_INPUT 0x10
++struct psb_intel_sdvo_set_target_input_args {
++ unsigned int target_1:1;
++ unsigned int pad:7;
++} __attribute__ ((packed));
++
++/**
++ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
++ * future output commands.
++ *
++ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
++ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
++ */
++#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
++
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
++/* Part 1 */
++# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
++# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
++# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
++# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
++# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
++# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
++# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
++# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
++/* Part 2 */
++# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
++# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
++# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
++# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
++# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
++# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
++# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
++# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
++# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
++# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
++# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
++# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
++# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
++# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
++# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
++
++/**
++ * Generates a DTD based on the given width, height, and flags.
++ *
++ * This will be supported by any device supporting scaling or interlaced
++ * modes.
++ */
++#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
++
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
++
++/** Returns a struct psb_intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
++/** Returns a struct psb_intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
++
++/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
++#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
++
++/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
++/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
++# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
++# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
++# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
++
++#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
++
++#define SDVO_CMD_GET_TV_FORMAT 0x28
++
++#define SDVO_CMD_SET_TV_FORMAT 0x29
++
++#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
++#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
++#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
++# define SDVO_ENCODER_STATE_ON (1 << 0)
++# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
++# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
++# define SDVO_ENCODER_STATE_OFF (1 << 3)
++
++#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
++
++#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
++# define SDVO_CONTROL_BUS_PROM 0x0
++# define SDVO_CONTROL_BUS_DDC1 0x1
++# define SDVO_CONTROL_BUS_DDC2 0x2
++# define SDVO_CONTROL_BUS_DDC3 0x3
+diff -uNr a/drivers/gpu/drm/psb/psb_irq.c b/drivers/gpu/drm/psb/psb_irq.c
+--- a/drivers/gpu/drm/psb/psb_irq.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_irq.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,420 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++
++/*
++ * Video display controller interrupt.
++ */
++
++static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ int wake = 0;
++
++ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)) {
++#ifdef PSB_FIXME
++ atomic_inc(&dev->vbl_received);
++#endif
++ wake = 1;
++ PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
++ _PSB_VBLANK_CLEAR, PSB_PIPEASTAT);
++ }
++
++ if (!drm_psb_disable_vsync && (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)) {
++#ifdef PSB_FIXME
++ atomic_inc(&dev->vbl_received2);
++#endif
++ wake = 1;
++ PSB_WVDC32(_PSB_VBLANK_INTERRUPT_ENABLE |
++ _PSB_VBLANK_CLEAR, PSB_PIPEBSTAT);
++ }
++
++ PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
++ (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
++ DRM_READMEMORYBARRIER();
++
++#ifdef PSB_FIXME
++ if (wake) {
++ DRM_WAKEUP(&dev->vbl_queue);
++ drm_vbl_send_signals(dev);
++ }
++#endif
++}
++
++/*
++ * SGX interrupt source 1.
++ */
++
++static void psb_sgx_interrupt(struct drm_device *dev, uint32_t sgx_stat,
++ uint32_t sgx_stat2)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ if (sgx_stat & _PSB_CE_TWOD_COMPLETE) {
++ DRM_WAKEUP(&dev_priv->event_2d_queue);
++ psb_fence_handler(dev, PSB_ENGINE_2D);
++ }
++
++ if (unlikely(sgx_stat2 & _PSB_CE2_BIF_REQUESTER_FAULT))
++ psb_print_pagefault(dev_priv);
++
++ psb_scheduler_handler(dev_priv, sgx_stat);
++}
++
++/*
++ * MSVDX interrupt.
++ */
++static void psb_msvdx_interrupt(struct drm_device *dev,
++ uint32_t msvdx_stat)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
++ /* Ideally we should never get to this */
++ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x fence2_irq_on=%d\n",
++ msvdx_stat, dev_priv->fence2_irq_on);
++
++ /* Pause MMU */
++ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
++ MSVDX_MMU_CONTROL0);
++ DRM_WRITEMEMORYBARRIER();
++
++ /* Clear this interrupt bit only */
++ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
++ MSVDX_INTERRUPT_CLEAR);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
++ DRM_READMEMORYBARRIER();
++
++ dev_priv->msvdx_needs_reset = 1;
++ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
++ PSB_DEBUG_IRQ
++ ("MSVDX: msvdx_stat: 0x%x fence2_irq_on=%d(MTX)\n",
++ msvdx_stat, dev_priv->fence2_irq_on);
++
++ /* Clear all interrupt bits */
++ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
++ DRM_READMEMORYBARRIER();
++
++ psb_msvdx_mtx_interrupt(dev);
++ }
++}
++
++irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
++{
++ struct drm_device *dev = (struct drm_device *) arg;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ uint32_t vdc_stat,msvdx_int = 0, topaz_int = 0;
++ uint32_t sgx_stat = 0;
++ uint32_t sgx_stat2 = 0;
++ uint32_t sgx_int = 0;
++ int handled = 0;
++
++ spin_lock(&dev_priv->irqmask_lock);
++
++ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++
++ if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
++ PSB_DEBUG_IRQ("Got SGX interrupt\n");
++ sgx_int = 1;
++ }
++ if (vdc_stat & _PSB_IRQ_MSVDX_FLAG) {
++ PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
++ msvdx_int = 1;
++ }
++
++ if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG) {
++ PSB_DEBUG_IRQ("Got TOPAX interrupt\n");
++ topaz_int = 1;
++ }
++ if (sgx_int && (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)) {
++ sgx_stat = PSB_RSGX32(PSB_CR_EVENT_STATUS);
++ sgx_stat2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
++
++ sgx_stat2 &= dev_priv->sgx2_irq_mask;
++ sgx_stat &= dev_priv->sgx_irq_mask;
++ PSB_WSGX32(sgx_stat2, PSB_CR_EVENT_HOST_CLEAR2);
++ PSB_WSGX32(sgx_stat, PSB_CR_EVENT_HOST_CLEAR);
++ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
++ } else if (unlikely(PSB_D_PM & drm_psb_debug)) {
++ if (sgx_int)
++ PSB_DEBUG_PM("sgx int in down mode\n");
++ }
++ vdc_stat &= dev_priv->vdc_irq_mask;
++ spin_unlock(&dev_priv->irqmask_lock);
++
++ if (msvdx_int) {
++ uint32_t msvdx_stat = 0;
++
++ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ psb_msvdx_interrupt(dev, msvdx_stat);
++ handled = 1;
++ }
++
++ if (IS_MRST(dev) && topaz_int) {
++ uint32_t topaz_stat = 0;
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT,&topaz_stat);
++ lnc_topaz_interrupt (dev, topaz_stat);
++ handled = 1;
++ }
++
++ if (vdc_stat) {
++ /* MSVDX IRQ status is part of vdc_irq_mask */
++ psb_vdc_interrupt(dev, vdc_stat);
++ handled = 1;
++ }
++
++ if (sgx_stat || sgx_stat2) {
++
++ psb_sgx_interrupt(dev, sgx_stat, sgx_stat2);
++ handled = 1;
++ }
++
++ if (!handled)
++ return IRQ_NONE;
++
++
++ return IRQ_HANDLED;
++}
++
++void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv)
++{
++ unsigned long mtx_int = 0;
++ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
++
++ /* Clear MTX interrupt */
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++}
++
++void psb_irq_preinstall(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long mtx_int = 0;
++ unsigned long irqflags;
++ PSB_DEBUG_PM("psb_irq_preinstall\n");
++
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
++ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
++ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
++ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
++
++ dev_priv->sgx_irq_mask = _PSB_CE_PIXELBE_END_RENDER |
++ _PSB_CE_DPM_3D_MEM_FREE |
++ _PSB_CE_TA_FINISHED |
++ _PSB_CE_DPM_REACHED_MEM_THRESH |
++ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
++ _PSB_CE_DPM_OUT_OF_MEMORY_MT |
++ _PSB_CE_TA_TERMINATE | _PSB_CE_SW_EVENT;
++
++ dev_priv->sgx2_irq_mask = _PSB_CE2_BIF_REQUESTER_FAULT;
++
++ dev_priv->vdc_irq_mask = _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG;
++
++ if (!drm_psb_disable_vsync)
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG |
++ _PSB_VSYNC_PIPEB_FLAG;
++
++ /* Clear MTX interrupt */
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS,
++ CR_MTX_IRQ, 1);
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ up_read(&dev_priv->sgx_sem);
++}
++
++void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv)
++{
++ /* Enable Mtx Interrupt to host */
++ unsigned long enables = 0;
++ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
++ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
++}
++
++int psb_irq_postinstall(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ unsigned long enables = 0;
++
++ PSB_DEBUG_PM("psb_irq_postinstall\n");
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
++ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
++ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
++
++ /* MSVDX IRQ Setup, Enable Mtx Interrupt to host */
++ PSB_DEBUG_GENERAL("Setting up MSVDX IRQs.....\n");
++ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS,
++ CR_MTX_IRQ, 1);
++ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
++
++ dev_priv->irq_enabled = 1;
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ up_read(&dev_priv->sgx_sem);
++ return 0;
++}
++
++void psb_irq_uninstall(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ PSB_DEBUG_PM("psb_irq_uninstall\n");
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ dev_priv->sgx_irq_mask = 0x00000000;
++ dev_priv->sgx2_irq_mask = 0x00000000;
++ dev_priv->vdc_irq_mask = 0x00000000;
++
++ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
++ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
++ wmb();
++ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
++ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS),
++ PSB_CR_EVENT_HOST_CLEAR);
++ PSB_WSGX32(PSB_RSGX32(PSB_CR_EVENT_STATUS2),
++ PSB_CR_EVENT_HOST_CLEAR2);
++
++ /* MSVDX IRQ Setup */
++ /* Clear interrupt enabled flag */
++ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
++
++ if (IS_MRST(dev))
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
++
++ dev_priv->irq_enabled = 0;
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ up_read(&dev_priv->sgx_sem);
++}
++
++void psb_2D_irq_off(struct drm_psb_private *dev_priv)
++{
++ unsigned long irqflags;
++ uint32_t old_mask;
++ uint32_t cleared_mask;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ --dev_priv->irqen_count_2d;
++ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
++
++ old_mask = dev_priv->sgx_irq_mask;
++ dev_priv->sgx_irq_mask &= ~_PSB_CE_TWOD_COMPLETE;
++ PSB_WSGX32(dev_priv->sgx_irq_mask,
++ PSB_CR_EVENT_HOST_ENABLE);
++ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
++
++ cleared_mask =
++ (old_mask ^ dev_priv->sgx_irq_mask) & old_mask;
++ PSB_WSGX32(cleared_mask, PSB_CR_EVENT_HOST_CLEAR);
++ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++void psb_2D_irq_on(struct drm_psb_private *dev_priv)
++{
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (dev_priv->irq_enabled && dev_priv->irqen_count_2d == 0) {
++ dev_priv->sgx_irq_mask |= _PSB_CE_TWOD_COMPLETE;
++ PSB_WSGX32(dev_priv->sgx_irq_mask,
++ PSB_CR_EVENT_HOST_ENABLE);
++ (void) PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
++ }
++ ++dev_priv->irqen_count_2d;
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++#ifdef PSB_FIXME
++static int psb_vblank_do_wait(struct drm_device *dev,
++ unsigned int *sequence, atomic_t *counter)
++{
++ unsigned int cur_vblank;
++ int ret = 0;
++ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
++ (((cur_vblank = atomic_read(counter))
++ - *sequence) <= (1 << 23)));
++ *sequence = cur_vblank;
++
++ return ret;
++}
++#endif
++
++void psb_msvdx_irq_off(struct drm_psb_private *dev_priv)
++{
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (dev_priv->irq_enabled) {
++ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
++ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ (void) PSB_RSGX32(PSB_INT_ENABLE_R);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++void psb_msvdx_irq_on(struct drm_psb_private *dev_priv)
++{
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (dev_priv->irq_enabled) {
++ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
++ PSB_WSGX32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ (void) PSB_RSGX32(PSB_INT_ENABLE_R);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_mmu.c b/drivers/gpu/drm/psb/psb_mmu.c
+--- a/drivers/gpu/drm/psb/psb_mmu.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_mmu.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1069 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++
++/*
++ * Code for the SGX MMU:
++ */
++
++/*
++ * clflush on one processor only:
++ * clflush should apparently flush the cache line on all processors in an
++ * SMP system.
++ */
++
++/*
++ * kmap atomic:
++ * The usage of the slots must be completely encapsulated within a spinlock, and
++ * no other functions that may be using the locks for other purposes may be
++ * called from within the locked region.
++ * Since the slots are per processor, this will guarantee that we are the only
++ * user.
++ */
++
++/*
++ * TODO: Inserting ptes from an interrupt handler:
++ * This may be desirable for some SGX functionality where the GPU can fault in
++ * needed pages. For that, we need to make an atomic insert_pages function, that
++ * may fail.
++ * If it fails, the caller needs to insert the page using a workqueue function,
++ * but on average it should be fast.
++ */
++
++struct psb_mmu_driver {
++ /* protects driver- and pd structures. Always take in read mode
++ * before taking the page table spinlock.
++ */
++ struct rw_semaphore sem;
++
++ /* protects page tables, directory tables and pt tables.
++ * and pt structures.
++ */
++ spinlock_t lock;
++
++ atomic_t needs_tlbflush;
++
++ uint8_t __iomem *register_map;
++ struct psb_mmu_pd *default_pd;
++ uint32_t bif_ctrl;
++ int has_clflush;
++ int clflush_add;
++ unsigned long clflush_mask;
++
++ struct drm_psb_private *dev_priv;
++};
++
++struct psb_mmu_pd;
++
++struct psb_mmu_pt {
++ struct psb_mmu_pd *pd;
++ uint32_t index;
++ uint32_t count;
++ struct page *p;
++ uint32_t *v;
++};
++
++struct psb_mmu_pd {
++ struct psb_mmu_driver *driver;
++ int hw_context;
++ struct psb_mmu_pt **tables;
++ struct page *p;
++ struct page *dummy_pt;
++ struct page *dummy_page;
++ uint32_t pd_mask;
++ uint32_t invalid_pde;
++ uint32_t invalid_pte;
++};
++
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
++
++static inline uint32_t psb_mmu_pt_index(uint32_t offset)
++{
++ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
++}
++
++static inline uint32_t psb_mmu_pd_index(uint32_t offset)
++{
++ return offset >> PSB_PDE_SHIFT;
++}
++
++#if defined(CONFIG_X86)
++static inline void psb_clflush(void *addr)
++{
++ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
++}
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++ void *addr)
++{
++ if (!driver->has_clflush)
++ return;
++
++ mb();
++ psb_clflush(addr);
++ mb();
++}
++#else
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++ void *addr)
++{;
++}
++
++#endif
++
++static inline void psb_iowrite32(const struct psb_mmu_driver *d,
++ uint32_t val, uint32_t offset)
++{
++ iowrite32(val, d->register_map + offset);
++}
++
++static inline uint32_t psb_ioread32(const struct psb_mmu_driver *d,
++ uint32_t offset)
++{
++ return ioread32(d->register_map + offset);
++}
++
++static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
++ int force)
++{
++ if (atomic_read(&driver->needs_tlbflush) || force) {
++ uint32_t val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
++ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
++ PSB_CR_BIF_CTRL);
++ wmb();
++ psb_iowrite32(driver, val & ~_PSB_CB_CTRL_INVALDC,
++ PSB_CR_BIF_CTRL);
++ (void)psb_ioread32(driver, PSB_CR_BIF_CTRL);
++ if (driver->dev_priv) {
++ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
++ if (IS_MRST(driver->dev_priv->dev))
++ topaz_mmu_flushcache(driver->dev_priv);
++ }
++ }
++ atomic_set(&driver->needs_tlbflush, 0);
++}
++
++static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
++{
++ down_write(&driver->sem);
++ psb_mmu_flush_pd_locked(driver, force);
++ up_write(&driver->sem);
++}
++
++void psb_mmu_flush(struct psb_mmu_driver *driver)
++{
++ uint32_t val;
++
++ down_write(&driver->sem);
++ if (driver->dev_priv->graphics_state == PSB_PWR_STATE_D0i0) {
++ val = psb_ioread32(driver, PSB_CR_BIF_CTRL);
++ if (atomic_read(&driver->needs_tlbflush))
++ psb_iowrite32(driver, val | _PSB_CB_CTRL_INVALDC,
++ PSB_CR_BIF_CTRL);
++ else
++ psb_iowrite32(driver, val | _PSB_CB_CTRL_FLUSH,
++ PSB_CR_BIF_CTRL);
++ wmb();
++ psb_iowrite32(driver,
++ val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++ atomic_set(&driver->needs_tlbflush, 0);
++ } else {
++ PSB_DEBUG_PM("mmu flush when down\n");
++ }
++
++ if (driver->dev_priv) {
++ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
++ if (IS_MRST(driver->dev_priv->dev))
++ topaz_mmu_flushcache(driver->dev_priv);
++ }
++
++ up_write(&driver->sem);
++}
++
++void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
++{
++ uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
++ PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
++
++ ttm_tt_cache_flush(&pd->p, 1);
++ down_write(&pd->driver->sem);
++ psb_iowrite32(pd->driver, (page_to_pfn(pd->p) << PAGE_SHIFT),
++ offset);
++ wmb();
++ psb_mmu_flush_pd_locked(pd->driver, 1);
++ pd->hw_context = hw_context;
++ up_write(&pd->driver->sem);
++
++}
++
++static inline unsigned long psb_pd_addr_end(unsigned long addr,
++ unsigned long end)
++{
++
++ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
++ return (addr < end) ? addr : end;
++}
++
++static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
++{
++ uint32_t mask = PSB_PTE_VALID;
++
++ if (type & PSB_MMU_CACHED_MEMORY)
++ mask |= PSB_PTE_CACHED;
++ if (type & PSB_MMU_RO_MEMORY)
++ mask |= PSB_PTE_RO;
++ if (type & PSB_MMU_WO_MEMORY)
++ mask |= PSB_PTE_WO;
++
++ return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++ int trap_pagefaults, int invalid_type)
++{
++ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
++ uint32_t *v;
++ int i;
++
++ if (!pd)
++ return NULL;
++
++ pd->p = alloc_page(GFP_DMA32);
++ if (!pd->p)
++ goto out_err1;
++ pd->dummy_pt = alloc_page(GFP_DMA32);
++ if (!pd->dummy_pt)
++ goto out_err2;
++ pd->dummy_page = alloc_page(GFP_DMA32);
++ if (!pd->dummy_page)
++ goto out_err3;
++
++ if (!trap_pagefaults) {
++ pd->invalid_pde =
++ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
++ invalid_type);
++ pd->invalid_pte =
++ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
++ invalid_type);
++ } else {
++ pd->invalid_pde = 0;
++ pd->invalid_pte = 0;
++ }
++
++ v = kmap(pd->dummy_pt);
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ v[i] = pd->invalid_pte;
++
++ kunmap(pd->dummy_pt);
++
++ v = kmap(pd->p);
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ v[i] = pd->invalid_pde;
++
++ kunmap(pd->p);
++
++ clear_page(kmap(pd->dummy_page));
++ kunmap(pd->dummy_page);
++
++ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
++ if (!pd->tables)
++ goto out_err4;
++
++ pd->hw_context = -1;
++ pd->pd_mask = PSB_PTE_VALID;
++ pd->driver = driver;
++
++ return pd;
++
++out_err4:
++ __free_page(pd->dummy_page);
++out_err3:
++ __free_page(pd->dummy_pt);
++out_err2:
++ __free_page(pd->p);
++out_err1:
++ kfree(pd);
++ return NULL;
++}
++
++void psb_mmu_free_pt(struct psb_mmu_pt *pt)
++{
++ __free_page(pt->p);
++ kfree(pt);
++}
++
++void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
++{
++ struct psb_mmu_driver *driver = pd->driver;
++ struct psb_mmu_pt *pt;
++ int i;
++
++ down_write(&driver->sem);
++ if (pd->hw_context != -1) {
++ psb_iowrite32(driver, 0,
++ PSB_CR_BIF_DIR_LIST_BASE0 +
++ pd->hw_context * 4);
++ psb_mmu_flush_pd_locked(driver, 1);
++ }
++
++ /* Should take the spinlock here, but we don't need to do that
++ since we have the semaphore in write mode. */
++
++ for (i = 0; i < 1024; ++i) {
++ pt = pd->tables[i];
++ if (pt)
++ psb_mmu_free_pt(pt);
++ }
++
++ vfree(pd->tables);
++ __free_page(pd->dummy_page);
++ __free_page(pd->dummy_pt);
++ __free_page(pd->p);
++ kfree(pd);
++ up_write(&driver->sem);
++}
++
++static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
++{
++ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
++ void *v;
++ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
++ uint32_t clflush_count = PAGE_SIZE / clflush_add;
++ spinlock_t *lock = &pd->driver->lock;
++ uint8_t *clf;
++ uint32_t *ptes;
++ int i;
++
++ if (!pt)
++ return NULL;
++
++ pt->p = alloc_page(GFP_DMA32);
++ if (!pt->p) {
++ kfree(pt);
++ return NULL;
++ }
++
++ spin_lock(lock);
++
++ v = kmap_atomic(pt->p, KM_USER0);
++ clf = (uint8_t *) v;
++ ptes = (uint32_t *) v;
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ *ptes++ = pd->invalid_pte;
++
++
++#if defined(CONFIG_X86)
++ if (pd->driver->has_clflush && pd->hw_context != -1) {
++ mb();
++ for (i = 0; i < clflush_count; ++i) {
++ psb_clflush(clf);
++ clf += clflush_add;
++ }
++ mb();
++ }
++#endif
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(lock);
++
++ pt->count = 0;
++ pt->pd = pd;
++ pt->index = 0;
++
++ return pt;
++}
++
++struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
++ unsigned long addr)
++{
++ uint32_t index = psb_mmu_pd_index(addr);
++ struct psb_mmu_pt *pt;
++ uint32_t *v;
++ spinlock_t *lock = &pd->driver->lock;
++
++ spin_lock(lock);
++ pt = pd->tables[index];
++ while (!pt) {
++ spin_unlock(lock);
++ pt = psb_mmu_alloc_pt(pd);
++ if (!pt)
++ return NULL;
++ spin_lock(lock);
++
++ if (pd->tables[index]) {
++ spin_unlock(lock);
++ psb_mmu_free_pt(pt);
++ spin_lock(lock);
++ pt = pd->tables[index];
++ continue;
++ }
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ pd->tables[index] = pt;
++ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
++ pt->index = index;
++ kunmap_atomic((void *) v, KM_USER0);
++
++ if (pd->hw_context != -1) {
++ psb_mmu_clflush(pd->driver, (void *) &v[index]);
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++ }
++ }
++ pt->v = kmap_atomic(pt->p, KM_USER0);
++ return pt;
++}
++
++static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
++ unsigned long addr)
++{
++ uint32_t index = psb_mmu_pd_index(addr);
++ struct psb_mmu_pt *pt;
++ spinlock_t *lock = &pd->driver->lock;
++
++ spin_lock(lock);
++ pt = pd->tables[index];
++ if (!pt) {
++ spin_unlock(lock);
++ return NULL;
++ }
++ pt->v = kmap_atomic(pt->p, KM_USER0);
++ return pt;
++}
++
++static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
++{
++ struct psb_mmu_pd *pd = pt->pd;
++ uint32_t *v;
++
++ kunmap_atomic(pt->v, KM_USER0);
++ if (pt->count == 0) {
++ v = kmap_atomic(pd->p, KM_USER0);
++ v[pt->index] = pd->invalid_pde;
++ pd->tables[pt->index] = NULL;
++
++ if (pd->hw_context != -1) {
++ psb_mmu_clflush(pd->driver,
++ (void *) &v[pt->index]);
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++ }
++ kunmap_atomic(pt->v, KM_USER0);
++ spin_unlock(&pd->driver->lock);
++ psb_mmu_free_pt(pt);
++ return;
++ }
++ spin_unlock(&pd->driver->lock);
++}
++
++static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
++ unsigned long addr, uint32_t pte)
++{
++ pt->v[psb_mmu_pt_index(addr)] = pte;
++}
++
++static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
++ unsigned long addr)
++{
++ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
++}
++
++#if 0
++static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset)
++{
++ uint32_t *v;
++ uint32_t pfn;
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ if (!v) {
++ printk(KERN_INFO "Could not kmap pde page.\n");
++ return 0;
++ }
++ pfn = v[psb_mmu_pd_index(mmu_offset)];
++ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */
++ kunmap_atomic(v, KM_USER0);
++ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
++ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
++ mmu_offset, pfn);
++ }
++ v = ioremap(pfn & 0xFFFFF000, 4096);
++ if (!v) {
++ printk(KERN_INFO "Could not kmap pte page.\n");
++ return 0;
++ }
++ pfn = v[psb_mmu_pt_index(mmu_offset)];
++ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
++ iounmap(v);
++ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
++ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
++ mmu_offset, pfn);
++ }
++ return pfn >> PAGE_SHIFT;
++}
++
++static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset,
++ uint32_t gtt_pages)
++{
++ uint32_t start;
++ uint32_t next;
++
++ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
++ mmu_offset, gtt_pages);
++ down_read(&pd->driver->sem);
++ start = psb_mmu_check_pte_locked(pd, mmu_offset);
++ mmu_offset += PAGE_SIZE;
++ gtt_pages -= 1;
++ while (gtt_pages--) {
++ next = psb_mmu_check_pte_locked(pd, mmu_offset);
++ if (next != start + 1) {
++ printk(KERN_INFO
++ "Ptes out of order: 0x%08x, 0x%08x.\n",
++ start, next);
++ }
++ start = next;
++ mmu_offset += PAGE_SIZE;
++ }
++ up_read(&pd->driver->sem);
++}
++
++#endif
++
++void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset, uint32_t gtt_start,
++ uint32_t gtt_pages)
++{
++ uint32_t *v;
++ uint32_t start = psb_mmu_pd_index(mmu_offset);
++ struct psb_mmu_driver *driver = pd->driver;
++ int num_pages = gtt_pages;
++
++ down_read(&driver->sem);
++ spin_lock(&driver->lock);
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ v += start;
++
++ while (gtt_pages--) {
++ *v++ = gtt_start | pd->pd_mask;
++ gtt_start += PAGE_SIZE;
++ }
++
++ ttm_tt_cache_flush(&pd->p, num_pages);
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(&driver->lock);
++
++ if (pd->hw_context != -1)
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++
++ up_read(&pd->driver->sem);
++ psb_mmu_flush_pd(pd->driver, 0);
++}
++
++struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
++{
++ struct psb_mmu_pd *pd;
++
++ down_read(&driver->sem);
++ pd = driver->default_pd;
++ up_read(&driver->sem);
++
++ return pd;
++}
++
++/* Returns the physical address of the PD shared by sgx/msvdx */
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
++{
++ struct psb_mmu_pd *pd;
++
++ pd = psb_mmu_get_default_pd(driver);
++ return page_to_pfn(pd->p) << PAGE_SHIFT;
++}
++
++void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
++{
++ psb_iowrite32(driver, driver->bif_ctrl, PSB_CR_BIF_CTRL);
++ psb_mmu_free_pagedir(driver->default_pd);
++ kfree(driver);
++}
++
++struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++ int trap_pagefaults,
++ int invalid_type,
++ struct drm_psb_private *dev_priv)
++{
++ struct psb_mmu_driver *driver;
++
++ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
++
++ if (!driver)
++ return NULL;
++ driver->dev_priv = dev_priv;
++
++ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
++ invalid_type);
++ if (!driver->default_pd)
++ goto out_err1;
++
++ spin_lock_init(&driver->lock);
++ init_rwsem(&driver->sem);
++ down_write(&driver->sem);
++ driver->register_map = registers;
++ atomic_set(&driver->needs_tlbflush, 1);
++
++ driver->bif_ctrl = psb_ioread32(driver, PSB_CR_BIF_CTRL);
++ psb_iowrite32(driver, driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
++ PSB_CR_BIF_CTRL);
++ psb_iowrite32(driver, driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
++ PSB_CR_BIF_CTRL);
++
++ driver->has_clflush = 0;
++
++#if defined(CONFIG_X86)
++ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
++ uint32_t tfms, misc, cap0, cap4, clflush_size;
++
++ /*
++ * clflush size is determined at kernel setup for x86_64
++ * but not for i386. We have to do it here.
++ */
++
++ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
++ clflush_size = ((misc >> 8) & 0xff) * 8;
++ driver->has_clflush = 1;
++ driver->clflush_add =
++ PAGE_SIZE * clflush_size / sizeof(uint32_t);
++ driver->clflush_mask = driver->clflush_add - 1;
++ driver->clflush_mask = ~driver->clflush_mask;
++ }
++#endif
++
++ up_write(&driver->sem);
++ return driver;
++
++out_err1:
++ kfree(driver);
++ return NULL;
++}
++
++#if defined(CONFIG_X86)
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long clflush_add = pd->driver->clflush_add;
++ unsigned long clflush_mask = pd->driver->clflush_mask;
++
++ if (!pd->driver->has_clflush) {
++ ttm_tt_cache_flush(&pd->p, num_pages);
++ return;
++ }
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++ mb();
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_map_lock(pd, addr);
++ if (!pt)
++ continue;
++ do {
++ psb_clflush(&pt->v
++ [psb_mmu_pt_index(addr)]);
++ } while (addr +=
++ clflush_add,
++ (addr & clflush_mask) < next);
++
++ psb_mmu_pt_unmap_unlock(pt);
++ } while (addr = next, next != end);
++ address += row_add;
++ }
++ mb();
++}
++#else
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ drm_ttm_cache_flush(&pd->p, num_pages);
++}
++#endif
++
++void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages)
++{
++ struct psb_mmu_pt *pt;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long f_address = address;
++
++ down_read(&pd->driver->sem);
++
++ addr = address;
++ end = addr + (num_pages << PAGE_SHIFT);
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt)
++ goto out;
++ do {
++ psb_mmu_invalidate_pte(pt, addr);
++ --pt->count;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return;
++}
++
++void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
++ uint32_t num_pages, uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long f_address = address;
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++
++ down_read(&pd->driver->sem);
++
++ /* Make sure we only need to flush this processor's cache */
++
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_map_lock(pd, addr);
++ if (!pt)
++ continue;
++ do {
++ psb_mmu_invalidate_pte(pt, addr);
++ --pt->count;
++
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++ address += row_add;
++ }
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages,
++ desired_tile_stride, hw_tile_stride);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++}
++
++int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
++ unsigned long address, uint32_t num_pages,
++ int type)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t pte;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long f_address = address;
++ int ret = 0;
++
++ down_read(&pd->driver->sem);
++
++ addr = address;
++ end = addr + (num_pages << PAGE_SHIFT);
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ do {
++ pte = psb_mmu_mask_pte(start_pfn++, type);
++ psb_mmu_set_pte(pt, addr, pte);
++ pt->count++;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return ret;
++}
++
++int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride, int type)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ uint32_t pte;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long f_address = address;
++ int ret = 0;
++
++ if (hw_tile_stride) {
++ if (num_pages % desired_tile_stride != 0)
++ return -EINVAL;
++ rows = num_pages / desired_tile_stride;
++ } else {
++ desired_tile_stride = num_pages;
++ }
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++
++ down_read(&pd->driver->sem);
++
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ do {
++ pte =
++ psb_mmu_mask_pte(page_to_pfn(*pages++),
++ type);
++ psb_mmu_set_pte(pt, addr, pte);
++ pt->count++;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++ address += row_add;
++ }
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages,
++ desired_tile_stride, hw_tile_stride);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return ret;
++}
++
++void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
++{
++ mask &= _PSB_MMU_ER_MASK;
++ psb_iowrite32(driver,
++ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++}
++
++void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
++ uint32_t mask)
++{
++ mask &= _PSB_MMU_ER_MASK;
++ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++}
++
++int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++ unsigned long *pfn)
++{
++ int ret;
++ struct psb_mmu_pt *pt;
++ uint32_t tmp;
++ spinlock_t *lock = &pd->driver->lock;
++
++ down_read(&pd->driver->sem);
++ pt = psb_mmu_pt_map_lock(pd, virtual);
++ if (!pt) {
++ uint32_t *v;
++
++ spin_lock(lock);
++ v = kmap_atomic(pd->p, KM_USER0);
++ tmp = v[psb_mmu_pd_index(virtual)];
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(lock);
++
++ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
++ !(pd->invalid_pte & PSB_PTE_VALID)) {
++ ret = -EINVAL;
++ goto out;
++ }
++ ret = 0;
++ *pfn = pd->invalid_pte >> PAGE_SHIFT;
++ goto out;
++ }
++ tmp = pt->v[psb_mmu_pt_index(virtual)];
++ if (!(tmp & PSB_PTE_VALID)) {
++ ret = -EINVAL;
++ } else {
++ ret = 0;
++ *pfn = tmp >> PAGE_SHIFT;
++ }
++ psb_mmu_pt_unmap_unlock(pt);
++out:
++ up_read(&pd->driver->sem);
++ return ret;
++}
++
++void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
++{
++ struct page *p;
++ unsigned long pfn;
++ int ret = 0;
++ struct psb_mmu_pd *pd;
++ uint32_t *v;
++ uint32_t *vmmu;
++
++ pd = driver->default_pd;
++ if (!pd)
++ printk(KERN_WARNING "Could not get default pd\n");
++
++
++ p = alloc_page(GFP_DMA32);
++
++ if (!p) {
++ printk(KERN_WARNING "Failed allocating page\n");
++ return;
++ }
++
++ v = kmap(p);
++ memset(v, 0x67, PAGE_SIZE);
++
++ pfn = (offset >> PAGE_SHIFT);
++
++ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
++ if (ret) {
++ printk(KERN_WARNING "Failed inserting mmu page\n");
++ goto out_err1;
++ }
++
++ /* Ioremap the page through the GART aperture */
++
++ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++ if (!vmmu) {
++ printk(KERN_WARNING "Failed ioremapping page\n");
++ goto out_err2;
++ }
++
++ /* Read from the page with mmu disabled. */
++ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
++
++ /* Enable the mmu for host accesses and read again. */
++ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
++
++ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
++ ioread32(vmmu));
++ *v = 0x15243705;
++ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
++ ioread32(vmmu));
++ iowrite32(0x16243355, vmmu);
++ (void) ioread32(vmmu);
++ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
++
++ printk(KERN_INFO "Int stat is 0x%08x\n",
++ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
++ printk(KERN_INFO "Fault is 0x%08x\n",
++ psb_ioread32(driver, PSB_CR_BIF_FAULT));
++
++ /* Disable MMU for host accesses and clear page fault register */
++ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
++ iounmap(vmmu);
++out_err2:
++ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
++out_err1:
++ kunmap(p);
++ __free_page(p);
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_msvdx.c b/drivers/gpu/drm/psb/psb_msvdx.c
+--- a/drivers/gpu/drm/psb/psb_msvdx.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_msvdx.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,681 @@
++/**
++ * file psb_msvdx.c
++ * MSVDX I/O operations and IRQ handling
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "psb_msvdx.h"
++
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#ifndef list_first_entry
++#define list_first_entry(ptr, type, member) \
++ list_entry((ptr)->next, type, member)
++#endif
++
++
++static int psb_msvdx_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size);
++
++int psb_msvdx_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
++ int ret = 0;
++
++ if (list_empty(&dev_priv->msvdx_queue)) {
++ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
++ dev_priv->msvdx_busy = 0;
++ return -EINVAL;
++ }
++ msvdx_cmd = list_first_entry(&dev_priv->msvdx_queue,
++ struct psb_msvdx_cmd_queue, head);
++ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
++ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
++ ret = -EINVAL;
++ }
++ list_del(&msvdx_cmd->head);
++ kfree(msvdx_cmd->cmd);
++ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue), DRM_MEM_DRIVER);
++
++ return ret;
++}
++
++int psb_msvdx_map_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ unsigned long cmd_size_remaining;
++ struct ttm_bo_kmap_obj cmd_kmap;
++ void *cmd, *tmp, *cmd_start;
++ bool is_iomem;
++
++ /* command buffers may not exceed page boundary */
++ if (cmd_size + cmd_page_offset > PAGE_SIZE)
++ return -EINVAL;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2, &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
++ return ret;
++ }
++
++ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
++ + cmd_page_offset;
++ cmd = cmd_start;
++ cmd_size_remaining = cmd_size;
++
++ while (cmd_size_remaining > 0) {
++ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
++ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
++ uint32_t mmu_ptd = 0, tmp = 0;
++
++ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
++ " cur_cmd_id = %02x fence = %08x\n",
++ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
++ if ((cur_cmd_size % sizeof(uint32_t))
++ || (cur_cmd_size > cmd_size_remaining)) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++
++ switch (cur_cmd_id) {
++ case VA_MSGID_RENDER:
++ /* Fence ID */
++ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
++ sequence);
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
++ 1, 0);
++ if (tmp == 1) {
++ mmu_ptd |= 1;
++ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
++ }
++
++ /* PTD */
++ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
++ break;
++
++ default:
++ /* Msg not supported */
++ ret = -EINVAL;
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++
++ cmd += cur_cmd_size;
++ cmd_size_remaining -= cur_cmd_size;
++ }
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
++
++ tmp = drm_calloc(1, cmd_size, DRM_MEM_DRIVER);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ DRM_ERROR("MSVDX: fail to callc,ret=:%d\n", ret);
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *msvdx_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
++ ret = psb_msvdx_send(dev, cmd_start, cmd_size);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++int psb_submit_video_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
++ unsigned long irq_flags;
++ int ret = 0;
++
++ mutex_lock(&dev_priv->msvdx_mutex);
++
++ psb_schedule_watchdog(dev_priv);
++
++ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
++ if (dev_priv->msvdx_needs_reset) {
++ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
++ if (psb_msvdx_reset(dev_priv)) {
++ mutex_unlock(&dev_priv->msvdx_mutex);
++ ret = -EBUSY;
++ DRM_ERROR("MSVDX: Reset failed\n");
++ return ret;
++ }
++ dev_priv->msvdx_needs_reset = 0;
++ dev_priv->msvdx_busy = 0;
++
++ psb_msvdx_init(dev);
++ psb_msvdx_irq_preinstall(dev_priv);
++ psb_msvdx_irq_postinstall(dev_priv);
++ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
++ }
++
++ if (!dev_priv->msvdx_fw_loaded) {
++ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX:load /lib/firmware/msvdx_fw.bin"
++ " by udevd\n");
++
++ ret = psb_setup_fw(dev);
++ if (ret) {
++ mutex_unlock(&dev_priv->msvdx_mutex);
++
++ DRM_ERROR("MSVDX:is there a /lib/firmware/msvdx_fw.bin,"
++ "and udevd is configured correctly?\n");
++
++ /* FIXME: find a proper return value */
++ return -EFAULT;
++ }
++ dev_priv->msvdx_fw_loaded = 1;
++
++ psb_msvdx_irq_preinstall(dev_priv);
++ psb_msvdx_irq_postinstall(dev_priv);
++ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
++ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
++ }
++
++
++ if (!dev_priv->msvdx_busy) {
++ dev_priv->msvdx_busy = 1;
++ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
++ sequence);
++ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++ if (ret) {
++ mutex_unlock(&dev_priv->msvdx_mutex);
++ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
++ return ret;
++ }
++ } else {
++ struct psb_msvdx_cmd_queue *msvdx_cmd;
++ void *cmd = NULL;
++
++ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
++ /* queue the command to be sent when the h/w is ready */
++ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
++ sequence);
++ msvdx_cmd = drm_calloc(1, sizeof(struct psb_msvdx_cmd_queue),
++ DRM_MEM_DRIVER);
++ if (msvdx_cmd == NULL) {
++ mutex_unlock(&dev_priv->msvdx_mutex);
++ DRM_ERROR("MSVDXQUE: Out of memory...\n");
++ return -ENOMEM;
++ }
++
++ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (ret) {
++ mutex_unlock(&dev_priv->msvdx_mutex);
++ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
++ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
++ DRM_MEM_DRIVER);
++ return ret;
++ }
++ msvdx_cmd->cmd = cmd;
++ msvdx_cmd->cmd_size = cmd_size;
++ msvdx_cmd->sequence = sequence;
++ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
++ list_add_tail(&msvdx_cmd->head, &dev_priv->msvdx_queue);
++ if (!dev_priv->msvdx_busy) {
++ dev_priv->msvdx_busy = 1;
++ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
++ psb_msvdx_dequeue_send(dev);
++ }
++ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
++ }
++ mutex_unlock(&dev_priv->msvdx_mutex);
++ return ret;
++}
++
++int psb_msvdx_send(struct drm_device *dev, void *cmd, unsigned long cmd_size)
++{
++ int ret = 0;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ while (cmd_size > 0) {
++ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
++ if (cur_cmd_size > cmd_size) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
++ cmd_size, (unsigned long)cur_cmd_size);
++ goto out;
++ }
++ /* Send the message to h/w */
++ ret = psb_mtx_send(dev_priv, cmd);
++ if (ret) {
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++ cmd += cur_cmd_size;
++ cmd_size -= cur_cmd_size;
++ }
++
++out:
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ return ret;
++}
++
++int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
++{
++ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
++ const uint32_t *p_msg = (uint32_t *) msg;
++ uint32_t msg_num, words_free, ridx, widx;
++ int ret = 0;
++
++ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
++
++ /* we need clocks enabled before we touch VEC local ram */
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
++
++ if (msg_num > NUM_WORDS_MTX_BUF) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: message exceed maximum,ret:%d\n", ret);
++ goto out;
++ }
++
++ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++ /* message would wrap, need to send a pad message */
++ if (widx + msg_num > NUM_WORDS_MTX_BUF) {
++ /* Shouldn't happen for a PAD message itself */
++ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
++ == FWRK_MSGID_PADDING);
++
++ /* if the read pointer is at zero then we must wait for it to
++ * change otherwise the write pointer will equal the read
++ * pointer, which should only happen when the buffer is empty
++ *
++ * This will only happen if we try to overfill the queue,
++ * queue management should make
++ * sure this never happens in the first place.
++ */
++ BUG_ON(0 == ridx);
++ if (0 == ridx) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
++ goto out;
++ }
++ /* Send a pad message */
++ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
++ (NUM_WORDS_MTX_BUF - widx) << 2);
++ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
++ FWRK_MSGID_PADDING);
++ psb_mtx_send(dev_priv, pad_msg);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++ }
++
++ if (widx >= ridx)
++ words_free = NUM_WORDS_MTX_BUF - (widx - ridx);
++ else
++ words_free = ridx - widx;
++
++ BUG_ON(msg_num > words_free);
++ if (msg_num > words_free) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
++ goto out;
++ }
++
++ while (msg_num > 0) {
++ PSB_WMSVDX32(*p_msg++, MSVDX_COMMS_TO_MTX_BUF + (widx << 2));
++ msg_num--;
++ widx++;
++ if (NUM_WORDS_MTX_BUF == widx)
++ widx = 0;
++ }
++ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++ /* Make sure clocks are enabled before we kick */
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* signal an interrupt to let the mtx know there is a new message */
++ PSB_WMSVDX32(1, MSVDX_MTX_KICKI);
++
++out:
++ return ret;
++}
++
++/*
++ * MSVDX MTX interrupt
++ */
++void psb_msvdx_mtx_interrupt(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ static uint32_t buf[128]; /* message buffer */
++ uint32_t ridx, widx;
++ uint32_t num, ofs; /* message num and offset */
++
++ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
++
++ /* Are clocks enabled - If not enable before
++ * attempting to read from VLR
++ */
++ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
++ PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interupt set\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++ }
++
++loop: /* just for coding style check */
++ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
++
++ /* Get out of here if nothing */
++ if (ridx == widx)
++ goto done;
++
++ ofs = 0;
++ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
++
++ /* round to nearest word */
++ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
++
++ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
++
++ if (++ridx >= NUM_WORDS_HOST_BUF)
++ ridx = 0;
++
++ for (ofs++; ofs < num; ofs++) {
++ buf[ofs] = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF + (ridx << 2));
++
++ if (++ridx >= NUM_WORDS_HOST_BUF)
++ ridx = 0;
++ }
++
++ /* Update the Read index */
++ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
++
++ if (dev_priv->msvdx_needs_reset)
++ goto loop;
++
++ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
++ case VA_MSGID_CMD_HW_PANIC:
++ case VA_MSGID_CMD_FAILED: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_FAILED_FENCE_VALUE);
++ uint32_t fault = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_FAILED_IRQSTATUS);
++ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
++ uint32_t diff = 0;
++
++ if (msg_id == VA_MSGID_CMD_HW_PANIC)
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
++ "Fault detected"
++ " - Fence: %08x, Status: %08x"
++ " - resetting and ignoring error\n",
++ fence, fault);
++ else
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
++ "Fault detected"
++ " - Fence: %08x, Status: %08x"
++ " - resetting and ignoring error\n",
++ fence, fault);
++
++ dev_priv->msvdx_needs_reset = 1;
++
++ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
++ diff = dev_priv->msvdx_current_sequence
++ - dev_priv->sequence[PSB_ENGINE_VIDEO];
++
++ if (diff > 0x0FFFFFFF)
++ dev_priv->msvdx_current_sequence++;
++
++ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
++ "assuming %08x\n",
++ dev_priv->msvdx_current_sequence);
++ } else {
++ dev_priv->msvdx_current_sequence = fence;
++ }
++
++ psb_fence_error(dev, PSB_ENGINE_VIDEO,
++ dev_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
++
++ /* Flush the command queue */
++ psb_msvdx_flush_cmd_queue(dev);
++
++ goto done;
++ }
++ case VA_MSGID_CMD_COMPLETED: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FENCE_VALUE);
++ uint32_t flags = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FLAGS);
++
++ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
++ "FenceID: %08x, flags: 0x%x\n",
++ fence, flags);
++
++ dev_priv->msvdx_current_sequence = fence;
++
++ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
++
++ if (flags & FW_VA_RENDER_HOST_INT) {
++ /* Now send the next command from the msvdx cmd queue */
++ psb_msvdx_dequeue_send(dev);
++ goto done;
++ }
++
++ break;
++ }
++ case VA_MSGID_CMD_COMPLETED_BATCH: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FENCE_VALUE);
++ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_NO_TICKS);
++
++ /* we have the fence value in the message */
++ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
++ " FenceID: %08x, TickCount: %08x\n",
++ fence, tickcnt);
++ dev_priv->msvdx_current_sequence = fence;
++
++ break;
++ }
++ case VA_MSGID_ACK:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
++ break;
++
++ case VA_MSGID_TEST1:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
++ break;
++
++ case VA_MSGID_TEST2:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
++ break;
++ /* Don't need to do anything with these messages */
++
++ case VA_MSGID_DEBLOCK_REQUIRED: {
++ uint32_t ctxid = MEMIO_READ_FIELD(buf,
++ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
++
++ /* The BE will now be locked. */
++ /* Unblock rendec by reading the mtx2mtx end of slice */
++ (void) PSB_RMSVDX32(MSVDX_RENDEC_READ_DATA);
++
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
++ " Context=%08x\n", ctxid);
++ goto done;
++ }
++ default:
++ DRM_ERROR("ERROR: msvdx Unknown message from MTX \n");
++ goto done;
++ }
++
++done:
++
++#if 1
++ if (!dev_priv->msvdx_busy) {
++ /* If the firmware says the hardware is idle
++ * and the CCB is empty then we can power down
++ */
++ uint32_t fs_status = PSB_RMSVDX32(MSVDX_COMMS_FW_STATUS);
++ uint32_t ccb_roff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
++ uint32_t ccb_woff = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++ /* check that clocks are enabled before reading VLR */
++ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all))
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ if ((fs_status & MSVDX_FW_STATUS_HW_IDLE) &&
++ (ccb_roff == ccb_woff)) {
++ PSB_DEBUG_GENERAL("MSVDX: Setting clock to minimal\n");
++ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
++ }
++ }
++#endif
++ DRM_MEMORYBARRIER(); /* TBD check this... */
++}
++
++void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
++ int *msvdx_lockup, int *msvdx_idle)
++{
++ int tmp;
++ *msvdx_lockup = 0;
++ *msvdx_idle = 1;
++
++ if (!dev_priv->has_msvdx)
++ return;
++#if 0
++ PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
++ "last_sequence:%d and last_submitted_sequence :%d\n",
++ dev_priv->msvdx_current_sequence,
++ dev_priv->msvdx_last_sequence,
++ dev_priv->sequence[PSB_ENGINE_VIDEO]);
++#endif
++
++ tmp = dev_priv->msvdx_current_sequence -
++ dev_priv->sequence[PSB_ENGINE_VIDEO];
++
++ if (tmp > 0x0FFFFFFF) {
++ if (dev_priv->msvdx_current_sequence ==
++ dev_priv->msvdx_last_sequence) {
++ DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
++ dev_priv->msvdx_current_sequence);
++ *msvdx_lockup = 1;
++ } else {
++ PSB_DEBUG_GENERAL("MSVDXTimer: "
++ "msvdx responded fine so far\n");
++ dev_priv->msvdx_last_sequence =
++ dev_priv->msvdx_current_sequence;
++ *msvdx_idle = 0;
++ }
++ }
++}
++
++/* power up msvdx, OSPM function */
++int psb_power_up_msvdx(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ int ret;
++
++ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) != PSB_PWR_STATE_D0i3)
++ return -EINVAL;
++
++ PSB_DEBUG_TMP("power up msvdx\n");
++ dump_stack();
++
++ psb_up_island_power(dev, PSB_VIDEO_DEC_ISLAND);
++
++ ret = psb_msvdx_init(dev);
++ if (ret) {
++ DRM_ERROR("failed to init msvdx when power up it\n");
++ goto err;
++ }
++ PSB_WMSVDX32(dev_priv->msvdx_clk_state, MSVDX_MAN_CLK_ENABLE);
++
++ PSB_DEBUG_GENERAL("FIXME restore registers or init msvdx\n");
++
++ PSB_DEBUG_GENERAL("FIXME MSVDX MMU setting up\n");
++
++ dev_priv->msvdx_state = PSB_PWR_STATE_D0i0;
++ return 0;
++
++err:
++ return -1;
++}
++
++int psb_power_down_msvdx(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) != PSB_PWR_STATE_D0i0)
++ return -EINVAL;
++ if (dev_priv->msvdx_busy) {
++ PSB_DEBUG_GENERAL("FIXME: MSVDX is busy, should wait it\n");
++ return -EBUSY;
++ }
++
++ dev_priv->msvdx_clk_state = PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE);
++ PSB_DEBUG_GENERAL("FIXME: save MSVDX register\n");
++
++ PSB_DEBUG_GENERAL("FIXME: save MSVDX context\n");
++ psb_down_island_power(dev, PSB_VIDEO_DEC_ISLAND);
++
++ dev_priv->msvdx_state = PSB_PWR_STATE_D0i3;
++
++ return 0;
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_msvdx.h b/drivers/gpu/drm/psb/psb_msvdx.h
+--- a/drivers/gpu/drm/psb/psb_msvdx.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_msvdx.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,442 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_MSVDX_H_
++#define _PSB_MSVDX_H_
++
++#include "psb_drv.h"
++
++void psb_msvdx_mtx_interrupt(struct drm_device *dev);
++int psb_msvdx_init(struct drm_device *dev);
++int psb_msvdx_uninit(struct drm_device *dev);
++int psb_msvdx_reset(struct drm_psb_private *dev_priv);
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
++void psb_msvdx_irq_preinstall(struct drm_psb_private *dev_priv);
++void psb_msvdx_irq_postinstall(struct drm_psb_private *dev_priv);
++void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
++extern void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
++ int *msvdx_lockup, int *msvdx_idle);
++int psb_setup_fw(struct drm_device *dev);
++int psb_power_up_msvdx(struct drm_device *dev);
++int psb_power_down_msvdx(struct drm_device *dev);
++
++/* Non-Optimal Invalidation is not default */
++#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++
++#define FW_VA_RENDER_HOST_INT 0x00004000
++#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
++
++/* There is no work currently underway on the hardware */
++#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
++#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
++#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
++ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \
++ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
++ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
++
++#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
++ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
++ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
++
++#define POULSBO_D0 0x5
++#define POULSBO_D1 0x6
++#define PSB_REVID_OFFSET 0x8
++
++#define MTX_CODE_BASE (0x80900000)
++#define MTX_DATA_BASE (0x82880000)
++#define PC_START_ADDRESS (0x80900000)
++
++#define MTX_CORE_CODE_MEM (0x10)
++#define MTX_CORE_DATA_MEM (0x18)
++
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \
++ (0x00010000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \
++ (0x00100000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \
++ (0x01000000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \
++ (0x10000000)
++
++#define clk_enable_all \
++(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
++
++#define clk_enable_minimal \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++#define clk_enable_auto \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++#define msvdx_sw_reset_all \
++(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
++
++#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \
++ (((R_SPECIFIER)<<4) | (U_SPECIFIER))
++#define MTX_PC MTX_INTERNAL_REG(0, 5)
++
++#define RENDEC_A_SIZE (1024 * 1024)
++#define RENDEC_B_SIZE (1024 * 1024)
++
++#define MEMIO_READ_FIELD(vpMem, field) \
++ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
++ & field##_MASK) >> field##_SHIFT))
++
++#define MEMIO_WRITE_FIELD(vpMem, field, value) \
++ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
++ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
++ & (field##_TYPE)~field##_MASK) | \
++ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK);
++
++#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \
++ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
++ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \
++ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));
++
++#define REGIO_READ_FIELD(reg_val, reg, field) \
++ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
++
++#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \
++ (reg_val) = \
++ ((reg_val) & ~(reg##_##field##_MASK)) | \
++ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
++
++#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \
++ (reg_val) = \
++ ((reg_val) | ((value) << (reg##_##field##_SHIFT)));
++
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \
++ (0x00000001)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \
++ (0x00000002)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \
++ (0x00000004)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \
++ (0x00000008)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \
++ (0x00000010)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \
++ (0x00000020)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \
++ (0x00000040)
++
++#define clk_enable_all \
++ (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
++
++#define clk_enable_minimal \
++ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++/* MTX registers */
++#define MSVDX_MTX_ENABLE (0x0000)
++#define MSVDX_MTX_KICKI (0x0088)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
++#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
++#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
++#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
++#define MSVDX_MTX_SOFT_RESET (0x0200)
++
++/* MSVDX registers */
++#define MSVDX_CONTROL (0x0600)
++#define MSVDX_INTERRUPT_CLEAR (0x060C)
++#define MSVDX_INTERRUPT_STATUS (0x0608)
++#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
++#define MSVDX_MMU_CONTROL0 (0x0680)
++#define MSVDX_MTX_RAM_BANK (0x06F0)
++#define MSVDX_MAN_CLK_ENABLE (0x0620)
++
++/* RENDEC registers */
++#define MSVDX_RENDEC_CONTROL0 (0x0868)
++#define MSVDX_RENDEC_CONTROL1 (0x086C)
++#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
++#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
++#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
++#define MSVDX_RENDEC_READ_DATA (0x0898)
++#define MSVDX_RENDEC_CONTEXT0 (0x0950)
++#define MSVDX_RENDEC_CONTEXT1 (0x0954)
++#define MSVDX_RENDEC_CONTEXT2 (0x0958)
++#define MSVDX_RENDEC_CONTEXT3 (0x095C)
++#define MSVDX_RENDEC_CONTEXT4 (0x0960)
++#define MSVDX_RENDEC_CONTEXT5 (0x0964)
++
++/*
++ * This defines the MSVDX communication buffer
++ */
++#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
++/*!< Host buffer size (in 32-bit words) */
++#define NUM_WORDS_HOST_BUF (100)
++/*!< MTX buffer size (in 32-bit words) */
++#define NUM_WORDS_MTX_BUF (100)
++
++/* There is no work currently underway on the hardware */
++#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
++
++#define MSVDX_COMMS_AREA_ADDR (0x02cc0)
++
++#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
++#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
++#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
++#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
++#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
++#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
++#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
++#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
++#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
++#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18)
++#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
++#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
++#define MSVDX_COMMS_TO_MTX_BUF \
++ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
++
++#define MSVDX_COMMS_AREA_END \
++ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
++
++#if (MSVDX_COMMS_AREA_END != 0x03000)
++#error
++#endif
++
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
++
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
++
++#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
++#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
++
++#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
++#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
++
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
++
++#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
++#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
++
++#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
++#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
++
++#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
++#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
++
++#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
++#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
++
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
++
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
++
++#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
++#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
++
++/* Start of parser specific Host->MTX messages. */
++#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80)
++
++/* Start of parser specific MTX->Host messages. */
++#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0)
++
++#define FWRK_MSGID_PADDING (0)
++
++#define FWRK_GENMSG_SIZE_TYPE uint8_t
++#define FWRK_GENMSG_SIZE_MASK (0xFF)
++#define FWRK_GENMSG_SIZE_SHIFT (0)
++#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
++#define FWRK_GENMSG_ID_TYPE uint8_t
++#define FWRK_GENMSG_ID_MASK (0xFF)
++#define FWRK_GENMSG_ID_SHIFT (0)
++#define FWRK_GENMSG_ID_OFFSET (0x0001)
++#define FWRK_PADMSG_SIZE (2)
++
++/* This type defines the framework specified message ids */
++enum {
++ /* ! Sent by the DXVA driver on the host to the mtx firmware.
++ */
++ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
++ VA_MSGID_RENDER,
++ VA_MSGID_DEBLOCK,
++ VA_MSGID_BUBBLE,
++
++ /* Test Messages */
++ VA_MSGID_TEST1,
++ VA_MSGID_TEST2,
++
++ /*! Sent by the mtx firmware to itself.
++ */
++ VA_MSGID_RENDER_MC_INTERRUPT,
++
++ /*! Sent by the DXVA firmware on the MTX to the host.
++ */
++ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
++ VA_MSGID_CMD_COMPLETED_BATCH,
++ VA_MSGID_DEBLOCK_REQUIRED,
++ VA_MSGID_TEST_RESPONCE,
++ VA_MSGID_ACK,
++
++ VA_MSGID_CMD_FAILED,
++ VA_MSGID_CMD_UNSUPPORTED,
++ VA_MSGID_CMD_HW_PANIC,
++};
++
++/* MSVDX Firmware interface */
++#define FW_VA_INIT_SIZE (8)
++#define FW_VA_DEBUG_TEST2_SIZE (4)
++
++/* FW_VA_DEBUG_TEST2 MSG_SIZE */
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF)
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000)
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0)
++
++/* FW_VA_DEBUG_TEST2 ID */
++#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t
++#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF)
++#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001)
++#define FW_VA_DEBUG_TEST2_ID_SHIFT (0)
++
++/* FW_VA_CMD_FAILED FENCE_VALUE */
++#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
++#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_CMD_FAILED IRQSTATUS */
++#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
++#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
++#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED FENCE_VALUE */
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED FLAGS */
++#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
++#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
++#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
++#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED NO_TICKS */
++#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t
++#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF)
++#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002)
++#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0)
++
++/* FW_VA_DEBLOCK_REQUIRED CONTEXT */
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
++
++/* FW_VA_INIT GLOBAL_PTD */
++#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t
++#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF)
++#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004)
++#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0)
++
++/* FW_VA_RENDER FENCE_VALUE */
++#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
++#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_RENDER MMUPTD */
++#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
++#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
++#define FW_VA_RENDER_MMUPTD_SHIFT (0)
++
++/* FW_VA_RENDER BUFFER_ADDRESS */
++#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t
++#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008)
++#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0)
++
++/* FW_VA_RENDER BUFFER_SIZE */
++#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
++#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
++#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
++#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_msvdxinit.c b/drivers/gpu/drm/psb/psb_msvdxinit.c
+--- a/drivers/gpu/drm/psb/psb_msvdxinit.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_msvdxinit.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,668 @@
++/**
++ * file psb_msvdxinit.c
++ * MSVDX initialization and mtx-firmware upload
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include <linux/firmware.h>
++
++#define MSVDX_REG (dev_priv->msvdx_reg)
++uint8_t psb_rev_id;
++/*MSVDX FW header*/
++struct msvdx_fw {
++ uint32_t ver;
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++};
++
++int psb_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t offset, uint32_t value, uint32_t enable)
++{
++ uint32_t tmp;
++ uint32_t poll_cnt = 10000;
++ while (poll_cnt) {
++ tmp = PSB_RMSVDX32(offset);
++ if (value == (tmp & enable)) /* All the bits are reset */
++ return 0; /* So exit */
++
++ /* Wait a bit */
++ DRM_UDELAY(1000);
++ poll_cnt--;
++ }
++ DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
++ " expecting %08x (mask %08x), got %08x\n",
++ offset, value, enable, tmp);
++
++ return 1;
++}
++
++int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
++{
++ int ret = 0;
++ uint32_t mtx_int = 0;
++
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++
++ ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
++ /* Required value */
++ mtx_int,
++ /* Enabled bits */
++ mtx_int);
++
++ if (ret) {
++ DRM_ERROR("MSVDX: Error Mtx did not return"
++ " int within a resonable time\n");
++ return ret;
++ }
++
++ PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
++
++ /* Got it so clear the bit */
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++
++ return ret;
++}
++
++void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
++ const uint32_t core_reg, const uint32_t val)
++{
++ uint32_t reg = 0;
++
++ /* Put data in MTX_RW_DATA */
++ PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
++
++ /* DREADY is set to 0 and request a write */
++ reg = core_reg;
++ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MTX_RNW, 0);
++ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MTX_DREADY, 0);
++ PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
++
++ psb_wait_for_register(dev_priv,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++}
++
++void psb_upload_fw(struct drm_psb_private *dev_priv,
++ const uint32_t data_mem, uint32_t ram_bank_size,
++ uint32_t address, const unsigned int words,
++ const uint32_t * const data)
++{
++ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
++ uint32_t access_ctrl;
++
++ /* Save the access control register... */
++ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ /* Wait for MCMSTAT to become idle (1) */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ for (loop = 0; loop < words; loop++) {
++ ram_id = data_mem + (address / ram_bank_size);
++ if (ram_id != cur_bank) {
++ addr = address >> 2;
++ ctrl = 0;
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMID, ram_id);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCM_ADDR, addr);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMAI, 1);
++ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++ cur_bank = ram_id;
++ }
++ address += 4;
++
++ PSB_WMSVDX32(data[loop],
++ MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
++
++ /* Wait for MCMSTAT to become idle (1) */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++ }
++ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
++
++ /* Restore the access control register... */
++ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++}
++
++static int psb_verify_fw(struct drm_psb_private *dev_priv,
++ const uint32_t ram_bank_size,
++ const uint32_t data_mem, uint32_t address,
++ const uint32_t words, const uint32_t * const data)
++{
++ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
++ uint32_t access_ctrl;
++ int ret = 0;
++
++ /* Save the access control register... */
++ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ /* Wait for MCMSTAT to become idle (1) */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ for (loop = 0; loop < words; loop++) {
++ uint32_t tmp;
++ ram_id = data_mem + (address / ram_bank_size);
++
++ if (ram_id != cur_bank) {
++ addr = address >> 2;
++ ctrl = 0;
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMID, ram_id);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCM_ADDR, addr);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMAI, 1);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMR, 1);
++
++ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ cur_bank = ram_id;
++ }
++ address += 4;
++
++ /* Wait for MCMSTAT to become idle (1) */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
++ if (data[loop] != tmp) {
++ DRM_ERROR("psb: Firmware validation fails"
++ " at index=%08x\n", loop);
++ ret = 1;
++ break;
++ }
++ }
++
++ /* Restore the access control register... */
++ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ return ret;
++}
++
++static uint32_t *msvdx_get_fw(struct drm_device *dev,
++ const struct firmware **raw, uint8_t *name)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int rc, fw_size;
++ int *ptr = NULL;
++
++ rc = request_firmware(raw, name, &dev->pdev->dev);
++ if (rc < 0) {
++ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
++ name, rc);
++ return NULL;
++ }
++
++ if ((*raw)->size < sizeof(struct msvdx_fw)) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return NULL;
++ }
++
++ ptr = (int *) ((*raw))->data;
++
++ if (!ptr) {
++ DRM_ERROR("MSVDX: Failed to load %s\n", name);
++ return NULL;
++ }
++
++ /* another sanity check... */
++ fw_size = sizeof(struct msvdx_fw) +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
++ if ((*raw)->size != fw_size) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return NULL;
++ }
++ dev_priv->msvdx_fw = drm_calloc(1, fw_size, DRM_MEM_DRIVER);
++ if (dev_priv->msvdx_fw == NULL)
++ DRM_ERROR("MSVDX: allocate FW buffer failed\n");
++ else {
++ memcpy(dev_priv->msvdx_fw, ptr, fw_size);
++ dev_priv->msvdx_fw_size = fw_size;
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: releasing firmware resouces\n");
++ release_firmware(*raw);
++
++ return dev_priv->msvdx_fw;
++}
++
++int psb_setup_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++
++ uint32_t ram_bank_size;
++ struct msvdx_fw *fw;
++ uint32_t *fw_ptr = NULL;
++ uint32_t *text_ptr = NULL;
++ uint32_t *data_ptr = NULL;
++ const struct firmware *raw = NULL;
++ /* todo : Assert the clock is on - if not turn it on to upload code */
++
++ PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* Reset MTX */
++ PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
++ MSVDX_MTX_SOFT_RESET);
++
++ /* Initialises communication control area to 0 */
++ if (psb_rev_id >= POULSBO_D1) {
++ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
++ " or later revision.\n");
++ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
++ MSVDX_COMMS_OFFSET_FLAGS);
++ } else {
++ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
++ " or earlier revision.\n");
++ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
++ MSVDX_COMMS_OFFSET_FLAGS);
++ }
++
++ PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
++ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
++
++ /* read register bank size */
++ {
++ uint32_t bank_size, reg;
++ reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
++ bank_size =
++ REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
++ CR_MTX_RAM_BANK_SIZE);
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
++ ram_bank_size);
++
++ /* if FW already loaded from storage */
++ if (dev_priv->msvdx_fw)
++ fw_ptr = dev_priv->msvdx_fw;
++ else
++ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
++
++ if (!fw_ptr) {
++ DRM_ERROR("psb: No valid msvdx_fw.bin firmware found.\n");
++ ret = 1;
++ goto out;
++ }
++
++ fw = (struct msvdx_fw *) fw_ptr;
++ if (fw->ver != 0x02) {
++ DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
++ "got version=%02x expected version=%02x\n",
++ fw->ver, 0x02);
++ ret = 1;
++ goto out;
++ }
++
++ text_ptr =
++ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
++ data_ptr = text_ptr + fw->text_size;
++
++ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
++ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
++ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
++ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
++ fw->data_location);
++ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
++ *text_ptr);
++ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
++ *data_ptr);
++
++ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
++ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
++ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
++ text_ptr);
++ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
++ fw->data_location - MTX_DATA_BASE, fw->data_size,
++ data_ptr);
++
++#if 0
++ /* todo : Verify code upload possibly only in debug */
++ ret = psb_verify_fw(dev_priv, ram_bank_size,
++ MTX_CORE_CODE_MEM,
++ PC_START_ADDRESS - MTX_CODE_BASE,
++ fw->text_size, text_ptr);
++ if (ret) {
++ /* Firmware code upload failed */
++ ret = 1;
++ goto out;
++ }
++
++ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
++ fw->data_location - MTX_DATA_BASE,
++ fw->data_size, data_ptr);
++ if (ret) {
++ /* Firmware data upload failed */
++ ret = 1;
++ goto out;
++ }
++#else
++ (void)psb_verify_fw;
++#endif
++ /* -- Set starting PC address */
++ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
++
++ /* -- Turn on the thread */
++ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
++
++ /* Wait for the signature value to be written back */
++ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
++ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
++ 0xffffffff /* Enabled bits */);
++ if (ret) {
++ DRM_ERROR("MSVDX: firmware fails to initialize.\n");
++ goto out;
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
++ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
++ MSVDX_COMMS_AREA_ADDR);
++#if 0
++
++ /* Send test message */
++ {
++ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
++
++ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
++ FW_VA_DEBUG_TEST2_SIZE);
++ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
++ VA_MSGID_TEST2);
++
++ ret = psb_mtx_send(dev_priv, msg_buf);
++ if (ret) {
++ DRM_ERROR("psb: MSVDX sending fails.\n");
++ goto out;
++ }
++
++ /* Wait for Mtx to ack this message */
++ psb_poll_mtx_irq(dev_priv);
++
++ }
++#endif
++out:
++
++ return ret;
++}
++
++
++static void psb_free_ccb(struct ttm_buffer_object **ccb)
++{
++ ttm_bo_unref(ccb);
++ *ccb = NULL;
++}
++
++/**
++ * Reset chip and disable interrupts.
++ * Return 0 success, 1 failure
++ */
++int psb_msvdx_reset(struct drm_psb_private *dev_priv)
++{
++ int ret = 0;
++
++ /* Issue software reset */
++ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
++
++ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
++ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
++
++ if (!ret) {
++ /* Clear interrupt enabled flag */
++ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
++
++ /* Clear any pending interrupt flags */
++ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
++ }
++
++ /* mutex_destroy(&dev_priv->msvdx_mutex); */
++
++ return ret;
++}
++
++static int psb_allocate_ccb(struct drm_device *dev,
++ struct ttm_buffer_object **ccb,
++ uint32_t *base_addr, int size)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++
++ PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
++
++ ret = ttm_buffer_object_create(bdev, size,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_KERNEL |
++ TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
++ NULL, ccb);
++ if (ret) {
++ DRM_ERROR("MSVDX:failed to allocate CCB.\n");
++ *ccb = NULL;
++ return 1;
++ }
++
++ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
++ ttm_bo_unref(ccb);
++ *ccb = NULL;
++ return 1;
++ }
++
++ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
++ RENDEC_A_SIZE);
++ ttm_bo_kunmap(&tmp_kmap);
++
++ *base_addr = (*ccb)->offset;
++ return 0;
++}
++
++int psb_msvdx_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t cmd;
++ /* uint32_t clk_gate_ctrl = clk_enable_all; */
++ int ret;
++
++ if (!dev_priv->ccb0) { /* one for the first time */
++ /* Initialize command msvdx queueing */
++ INIT_LIST_HEAD(&dev_priv->msvdx_queue);
++ mutex_init(&dev_priv->msvdx_mutex);
++ spin_lock_init(&dev_priv->msvdx_lock);
++ /*figure out the stepping */
++ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
++ }
++
++ dev_priv->msvdx_busy = 0;
++
++ /* Enable Clocks */
++ PSB_DEBUG_GENERAL("Enabling clocks\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* Enable MMU by removing all bypass bits */
++ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0);
++
++ /* move firmware loading to the place receiving first command buffer */
++
++ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n");
++ /* Allocate device virtual memory as required by rendec.... */
++ if (!dev_priv->ccb0) {
++ ret = psb_allocate_ccb(dev, &dev_priv->ccb0,
++ &dev_priv->base_addr0,
++ RENDEC_A_SIZE);
++ if (ret)
++ goto err_exit;
++ }
++
++ if (!dev_priv->ccb1) {
++ ret = psb_allocate_ccb(dev, &dev_priv->ccb1,
++ &dev_priv->base_addr1,
++ RENDEC_B_SIZE);
++ if (ret)
++ goto err_exit;
++ }
++
++
++ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
++ dev_priv->base_addr0, dev_priv->base_addr1);
++
++ PSB_WMSVDX32(dev_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
++ PSB_WMSVDX32(dev_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
++ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
++ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_DECODE_START_SIZE, 0);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_BURST_SIZE_W, 1);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_BURST_SIZE_R, 1);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_EXTERNAL_MEMORY, 1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1);
++
++ cmd = 0x00101010;
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
++ 1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0);
++
++ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
++ PSB_DEBUG_INIT("MSVDX:defer firmware loading to the"
++ " place when receiving user space commands\n");
++
++ dev_priv->msvdx_fw_loaded = 0; /* need to load firmware */
++
++ PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE);
++
++#if 0
++ ret = psb_setup_fw(dev);
++ if (ret)
++ goto err_exit;
++ /* Send Initialisation message to firmware */
++ if (0) {
++ uint32_t msg_init[FW_VA_INIT_SIZE >> 2];
++ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE,
++ FW_VA_INIT_SIZE);
++ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT);
++
++ /* Need to set this for all but A0 */
++ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD,
++ psb_get_default_pd_addr(dev_priv->mmu));
++
++ ret = psb_mtx_send(dev_priv, msg_init);
++ if (ret)
++ goto err_exit;
++
++ psb_poll_mtx_irq(dev_priv);
++ }
++#endif
++
++ return 0;
++
++err_exit:
++ DRM_ERROR("MSVDX: initialization failed\n");
++ if (dev_priv->ccb0)
++ psb_free_ccb(&dev_priv->ccb0);
++ if (dev_priv->ccb1)
++ psb_free_ccb(&dev_priv->ccb1);
++
++ return 1;
++}
++
++int psb_msvdx_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ /* Reset MSVDX chip */
++ psb_msvdx_reset(dev_priv);
++
++ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
++ PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n");
++ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE);
++
++ if (dev_priv->ccb0)
++ psb_free_ccb(&dev_priv->ccb0);
++ if (dev_priv->ccb1)
++ psb_free_ccb(&dev_priv->ccb1);
++ if (dev_priv->msvdx_fw)
++ drm_free(dev_priv->msvdx_fw, dev_priv->msvdx_fw_size,
++ DRM_MEM_DRIVER);
++
++ return 0;
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_reg.h b/drivers/gpu/drm/psb/psb_reg.h
+--- a/drivers/gpu/drm/psb/psb_reg.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_reg.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,569 @@
++/**************************************************************************
++ *
++ * Copyright (c) (2005-2007) Imagination Technologies Limited.
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++#ifndef _PSB_REG_H_
++#define _PSB_REG_H_
++
++#define PSB_CR_CLKGATECTL 0x0000
++#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
++#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
++#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
++#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
++#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
++#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
++#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
++#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
++#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
++#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
++
++#define PSB_CR_CORE_ID 0x0010
++#define _PSB_CC_ID_ID_SHIFT (16)
++#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
++#define _PSB_CC_ID_CONFIG_SHIFT (0)
++#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
++
++#define PSB_CR_CORE_REVISION 0x0014
++#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
++#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
++#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
++#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
++#define _PSB_CC_REVISION_MINOR_SHIFT (8)
++#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
++#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
++#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
++
++#define PSB_CR_SOFT_RESET 0x0080
++#define _PSB_CS_RESET_TSP_RESET (1 << 6)
++#define _PSB_CS_RESET_ISP_RESET (1 << 5)
++#define _PSB_CS_RESET_USE_RESET (1 << 4)
++#define _PSB_CS_RESET_TA_RESET (1 << 3)
++#define _PSB_CS_RESET_DPM_RESET (1 << 2)
++#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
++#define _PSB_CS_RESET_BIF_RESET (1 << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
++
++#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
++
++#define PSB_CR_EVENT_STATUS2 0x0118
++
++#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
++#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
++
++#define PSB_CR_EVENT_STATUS 0x012C
++
++#define PSB_CR_EVENT_HOST_ENABLE 0x0130
++
++#define PSB_CR_EVENT_HOST_CLEAR 0x0134
++#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
++#define _PSB_CE_TA_DPM_FAULT (1 << 28)
++#define _PSB_CE_TWOD_COMPLETE (1 << 27)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
++#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
++#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
++#define _PSB_CE_SW_EVENT (1 << 14)
++#define _PSB_CE_TA_FINISHED (1 << 13)
++#define _PSB_CE_TA_TERMINATE (1 << 12)
++#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
++#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
++
++
++#define PSB_USE_OFFSET_MASK 0x0007FFFF
++#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
++#define PSB_CR_USE_CODE_BASE0 0x0A0C
++#define PSB_CR_USE_CODE_BASE1 0x0A10
++#define PSB_CR_USE_CODE_BASE2 0x0A14
++#define PSB_CR_USE_CODE_BASE3 0x0A18
++#define PSB_CR_USE_CODE_BASE4 0x0A1C
++#define PSB_CR_USE_CODE_BASE5 0x0A20
++#define PSB_CR_USE_CODE_BASE6 0x0A24
++#define PSB_CR_USE_CODE_BASE7 0x0A28
++#define PSB_CR_USE_CODE_BASE8 0x0A2C
++#define PSB_CR_USE_CODE_BASE9 0x0A30
++#define PSB_CR_USE_CODE_BASE10 0x0A34
++#define PSB_CR_USE_CODE_BASE11 0x0A38
++#define PSB_CR_USE_CODE_BASE12 0x0A3C
++#define PSB_CR_USE_CODE_BASE13 0x0A40
++#define PSB_CR_USE_CODE_BASE14 0x0A44
++#define PSB_CR_USE_CODE_BASE15 0x0A48
++#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
++#define _PSB_CUC_BASE_DM_SHIFT (25)
++#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
++#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
++#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
++#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
++#define _PSB_CUC_DM_VERTEX (0)
++#define _PSB_CUC_DM_PIXEL (1)
++#define _PSB_CUC_DM_RESERVED (2)
++#define _PSB_CUC_DM_EDM (3)
++
++#define PSB_CR_PDS_EXEC_BASE 0x0AB8
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
++
++#define PSB_CR_EVENT_KICKER 0x0AC4
++#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
++
++#define PSB_CR_EVENT_KICK 0x0AC8
++#define _PSB_CE_KICK_NOW (1 << 0)
++
++
++#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
++
++#define PSB_CR_BIF_CTRL 0x0C00
++#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
++#define _PSB_CB_CTRL_INVALDC (1 << 3)
++#define _PSB_CB_CTRL_FLUSH (1 << 2)
++
++#define PSB_CR_BIF_INT_STAT 0x0C04
++
++#define PSB_CR_BIF_FAULT 0x0C08
++#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
++#define _PSB_CBI_STAT_FAULT_SHIFT (0)
++#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
++#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
++#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
++#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
++#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
++#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
++#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
++#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
++#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
++#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
++
++#define PSB_CR_BIF_BANK0 0x0C78
++
++#define PSB_CR_BIF_BANK1 0x0C7C
++
++#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
++
++#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
++
++#define PSB_CR_2D_SOCIF 0x0E18
++#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
++#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
++#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
++
++#define PSB_CR_2D_BLIT_STATUS 0x0E04
++#define _PSB_C2B_STATUS_BUSY (1 << 24)
++#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
++#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
++
++/*
++ * 2D defs.
++ */
++
++/*
++ * 2D Slave Port Data : Block Header's Object Type
++ */
++
++#define PSB_2D_CLIP_BH (0x00000000)
++#define PSB_2D_PAT_BH (0x10000000)
++#define PSB_2D_CTRL_BH (0x20000000)
++#define PSB_2D_SRC_OFF_BH (0x30000000)
++#define PSB_2D_MASK_OFF_BH (0x40000000)
++#define PSB_2D_RESERVED1_BH (0x50000000)
++#define PSB_2D_RESERVED2_BH (0x60000000)
++#define PSB_2D_FENCE_BH (0x70000000)
++#define PSB_2D_BLIT_BH (0x80000000)
++#define PSB_2D_SRC_SURF_BH (0x90000000)
++#define PSB_2D_DST_SURF_BH (0xA0000000)
++#define PSB_2D_PAT_SURF_BH (0xB0000000)
++#define PSB_2D_SRC_PAL_BH (0xC0000000)
++#define PSB_2D_PAT_PAL_BH (0xD0000000)
++#define PSB_2D_MASK_SURF_BH (0xE0000000)
++#define PSB_2D_FLUSH_BH (0xF0000000)
++
++/*
++ * Clip Definition block (PSB_2D_CLIP_BH)
++ */
++#define PSB_2D_CLIPCOUNT_MAX (1)
++#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
++#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
++#define PSB_2D_CLIPCOUNT_SHIFT (0)
++/* clip rectangle min & max */
++#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
++#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
++#define PSB_2D_CLIP_XMAX_SHIFT (12)
++#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
++#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
++#define PSB_2D_CLIP_XMIN_SHIFT (0)
++/* clip rectangle offset */
++#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
++#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
++#define PSB_2D_CLIP_YMAX_SHIFT (12)
++#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
++#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
++#define PSB_2D_CLIP_YMIN_SHIFT (0)
++
++/*
++ * Pattern Control (PSB_2D_PAT_BH)
++ */
++#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
++#define PSB_2D_PAT_HEIGHT_SHIFT (0)
++#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
++#define PSB_2D_PAT_WIDTH_SHIFT (5)
++#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
++#define PSB_2D_PAT_YSTART_SHIFT (10)
++#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
++#define PSB_2D_PAT_XSTART_SHIFT (15)
++
++/*
++ * 2D Control block (PSB_2D_CTRL_BH)
++ */
++/* Present Flags */
++#define PSB_2D_SRCCK_CTRL (0x00000001)
++#define PSB_2D_DSTCK_CTRL (0x00000002)
++#define PSB_2D_ALPHA_CTRL (0x00000004)
++/* Colour Key Colour (SRC/DST)*/
++#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
++#define PSB_2D_CK_COL_CLRMASK (0x00000000)
++#define PSB_2D_CK_COL_SHIFT (0)
++/* Colour Key Mask (SRC/DST)*/
++#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
++#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
++#define PSB_2D_CK_MASK_SHIFT (0)
++/* Alpha Control (Alpha/RGB)*/
++#define PSB_2D_GBLALPHA_MASK (0x000FF000)
++#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
++#define PSB_2D_GBLALPHA_SHIFT (12)
++#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
++#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
++#define PSB_2D_SRCALPHA_OP_SHIFT (20)
++#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
++#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
++#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
++#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
++#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
++#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
++#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
++#define PSB_2D_SRCALPHA_INVERT (0x00800000)
++#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
++#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
++#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
++#define PSB_2D_DSTALPHA_OP_SHIFT (24)
++#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
++#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
++#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
++#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
++#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
++#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
++#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
++#define PSB_2D_DSTALPHA_INVERT (0x08000000)
++#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
++
++#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
++#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
++#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
++#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
++
++/*
++ *Source Offset (PSB_2D_SRC_OFF_BH)
++ */
++#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
++#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
++#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
++#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
++
++/*
++ * Mask Offset (PSB_2D_MASK_OFF_BH)
++ */
++#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
++#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
++#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
++#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
++
++/*
++ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
++ */
++
++/*
++ *Blit Rectangle (PSB_2D_BLIT_BH)
++ */
++
++#define PSB_2D_ROT_MASK (3<<25)
++#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
++#define PSB_2D_ROT_NONE (0<<25)
++#define PSB_2D_ROT_90DEGS (1<<25)
++#define PSB_2D_ROT_180DEGS (2<<25)
++#define PSB_2D_ROT_270DEGS (3<<25)
++
++#define PSB_2D_COPYORDER_MASK (3<<23)
++#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
++#define PSB_2D_COPYORDER_TL2BR (0<<23)
++#define PSB_2D_COPYORDER_BR2TL (1<<23)
++#define PSB_2D_COPYORDER_TR2BL (2<<23)
++#define PSB_2D_COPYORDER_BL2TR (3<<23)
++
++#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
++#define PSB_2D_DSTCK_DISABLE (0x00000000)
++#define PSB_2D_DSTCK_PASS (0x00200000)
++#define PSB_2D_DSTCK_REJECT (0x00400000)
++
++#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
++#define PSB_2D_SRCCK_DISABLE (0x00000000)
++#define PSB_2D_SRCCK_PASS (0x00080000)
++#define PSB_2D_SRCCK_REJECT (0x00100000)
++
++#define PSB_2D_CLIP_ENABLE (0x00040000)
++
++#define PSB_2D_ALPHA_ENABLE (0x00020000)
++
++#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
++#define PSB_2D_PAT_MASK (0x00010000)
++#define PSB_2D_USE_PAT (0x00010000)
++#define PSB_2D_USE_FILL (0x00000000)
++/*
++ * Tungsten Graphics note on rop codes: If rop A and rop B are
++ * identical, the mask surface will not be read and need not be
++ * set up.
++ */
++
++#define PSB_2D_ROP3B_MASK (0x0000FF00)
++#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
++#define PSB_2D_ROP3B_SHIFT (8)
++/* rop code A */
++#define PSB_2D_ROP3A_MASK (0x000000FF)
++#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
++#define PSB_2D_ROP3A_SHIFT (0)
++
++#define PSB_2D_ROP4_MASK (0x0000FFFF)
++/*
++ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
++ * Fill Colour RGBA8888
++ */
++#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
++#define PSB_2D_FILLCOLOUR_SHIFT (0)
++/*
++ * DWORD1: (Always Present)
++ * X Start (Dest)
++ * Y Start (Dest)
++ */
++#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
++#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
++#define PSB_2D_DST_XSTART_SHIFT (12)
++#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
++#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
++#define PSB_2D_DST_YSTART_SHIFT (0)
++/*
++ * DWORD2: (Always Present)
++ * X Size (Dest)
++ * Y Size (Dest)
++ */
++#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
++#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
++#define PSB_2D_DST_XSIZE_SHIFT (12)
++#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
++#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
++#define PSB_2D_DST_YSIZE_SHIFT (0)
++
++/*
++ * Source Surface (PSB_2D_SRC_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
++#define PSB_2D_SRC_1_PAL (0x00000000)
++#define PSB_2D_SRC_2_PAL (0x00008000)
++#define PSB_2D_SRC_4_PAL (0x00010000)
++#define PSB_2D_SRC_8_PAL (0x00018000)
++#define PSB_2D_SRC_8_ALPHA (0x00020000)
++#define PSB_2D_SRC_4_ALPHA (0x00028000)
++#define PSB_2D_SRC_332RGB (0x00030000)
++#define PSB_2D_SRC_4444ARGB (0x00038000)
++#define PSB_2D_SRC_555RGB (0x00040000)
++#define PSB_2D_SRC_1555ARGB (0x00048000)
++#define PSB_2D_SRC_565RGB (0x00050000)
++#define PSB_2D_SRC_0888ARGB (0x00058000)
++#define PSB_2D_SRC_8888ARGB (0x00060000)
++#define PSB_2D_SRC_8888UYVY (0x00068000)
++#define PSB_2D_SRC_RESERVED (0x00070000)
++#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
++
++
++#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_SRC_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_SRC_ADDR_SHIFT (2)
++#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Pattern Surface (PSB_2D_PAT_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
++#define PSB_2D_PAT_1_PAL (0x00000000)
++#define PSB_2D_PAT_2_PAL (0x00008000)
++#define PSB_2D_PAT_4_PAL (0x00010000)
++#define PSB_2D_PAT_8_PAL (0x00018000)
++#define PSB_2D_PAT_8_ALPHA (0x00020000)
++#define PSB_2D_PAT_4_ALPHA (0x00028000)
++#define PSB_2D_PAT_332RGB (0x00030000)
++#define PSB_2D_PAT_4444ARGB (0x00038000)
++#define PSB_2D_PAT_555RGB (0x00040000)
++#define PSB_2D_PAT_1555ARGB (0x00048000)
++#define PSB_2D_PAT_565RGB (0x00050000)
++#define PSB_2D_PAT_0888ARGB (0x00058000)
++#define PSB_2D_PAT_8888ARGB (0x00060000)
++
++#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_PAT_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_PAT_ADDR_SHIFT (2)
++#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Destination Surface (PSB_2D_DST_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_DST_FORMAT_MASK (0x00078000)
++#define PSB_2D_DST_332RGB (0x00030000)
++#define PSB_2D_DST_4444ARGB (0x00038000)
++#define PSB_2D_DST_555RGB (0x00040000)
++#define PSB_2D_DST_1555ARGB (0x00048000)
++#define PSB_2D_DST_565RGB (0x00050000)
++#define PSB_2D_DST_0888ARGB (0x00058000)
++#define PSB_2D_DST_8888ARGB (0x00060000)
++#define PSB_2D_DST_8888AYUV (0x00070000)
++
++#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_DST_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_DST_ADDR_SHIFT (2)
++#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Mask Surface (PSB_2D_MASK_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_MASK_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_MASK_ADDR_SHIFT (2)
++#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Source Palette (PSB_2D_SRC_PAL_BH)
++ */
++
++#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
++#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
++#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
++#define PSB_2D_SRCPAL_BYTEALIGN (1024)
++
++/*
++ * Pattern Palette (PSB_2D_PAT_PAL_BH)
++ */
++
++#define PSB_2D_PATPAL_ADDR_SHIFT (0)
++#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
++#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
++#define PSB_2D_PATPAL_BYTEALIGN (1024)
++
++/*
++ * Rop3 Codes (2 LS bytes)
++ */
++
++#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
++#define PSB_2D_ROP3_PATCOPY (0xF0F0)
++#define PSB_2D_ROP3_WHITENESS (0xFFFF)
++#define PSB_2D_ROP3_BLACKNESS (0x0000)
++#define PSB_2D_ROP3_SRC (0xCC)
++#define PSB_2D_ROP3_PAT (0xF0)
++#define PSB_2D_ROP3_DST (0xAA)
++
++
++/*
++ * Sizes.
++ */
++
++#define PSB_SCENE_HW_COOKIE_SIZE 16
++#define PSB_TA_MEM_HW_COOKIE_SIZE 16
++
++/*
++ * Scene stuff.
++ */
++
++#define PSB_NUM_HW_SCENES 2
++
++/*
++ * Scheduler completion actions.
++ */
++
++#define PSB_RASTER_BLOCK 0
++#define PSB_RASTER 1
++#define PSB_RETURN 2
++#define PSB_TA 3
++
++
++/*Power management*/
++#define PSB_PUNIT_PORT 0x04
++#define PSB_PWRGT_CNT 0x60
++#define PSB_PWRGT_STS 0x61
++#define PSB_PWRGT_GFX_MASK 0x3
++#define PSB_PWRGT_VID_ENC_MASK 0x30
++#define PSB_PWRGT_VID_DEC_MASK 0xc
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_reset.c b/drivers/gpu/drm/psb/psb_reset.c
+--- a/drivers/gpu/drm/psb/psb_reset.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_reset.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,423 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ * Authors:
++ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_scene.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include <linux/spinlock.h>
++#define PSB_2D_TIMEOUT_MSEC 100
++
++void psb_reset(struct drm_psb_private *dev_priv, int reset_2d)
++{
++ uint32_t val;
++
++ val = _PSB_CS_RESET_BIF_RESET |
++ _PSB_CS_RESET_DPM_RESET |
++ _PSB_CS_RESET_TA_RESET |
++ _PSB_CS_RESET_USE_RESET |
++ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET;
++
++ if (reset_2d)
++ val |= _PSB_CS_RESET_TWOD_RESET;
++
++ PSB_WSGX32(val, PSB_CR_SOFT_RESET);
++ (void) PSB_RSGX32(PSB_CR_SOFT_RESET);
++
++ msleep(1);
++
++ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
++ wmb();
++ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
++ PSB_CR_BIF_CTRL);
++ wmb();
++ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
++
++ msleep(1);
++ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
++ PSB_CR_BIF_CTRL);
++ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
++}
++
++void psb_print_pagefault(struct drm_psb_private *dev_priv)
++{
++ uint32_t val;
++ uint32_t addr;
++
++ val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
++ addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
++
++ if (val) {
++ if (val & _PSB_CBI_STAT_PF_N_RW)
++ DRM_ERROR("Poulsbo MMU page fault:\n");
++ else
++ DRM_ERROR("Poulsbo MMU read / write "
++ "protection fault:\n");
++
++ if (val & _PSB_CBI_STAT_FAULT_CACHE)
++ DRM_ERROR("\tCache requestor.\n");
++ if (val & _PSB_CBI_STAT_FAULT_TA)
++ DRM_ERROR("\tTA requestor.\n");
++ if (val & _PSB_CBI_STAT_FAULT_VDM)
++ DRM_ERROR("\tVDM requestor.\n");
++ if (val & _PSB_CBI_STAT_FAULT_2D)
++ DRM_ERROR("\t2D requestor.\n");
++ if (val & _PSB_CBI_STAT_FAULT_PBE)
++ DRM_ERROR("\tPBE requestor.\n");
++ if (val & _PSB_CBI_STAT_FAULT_TSP)
++ DRM_ERROR("\tTSP requestor.\n");
++ if (val & _PSB_CBI_STAT_FAULT_ISP)
++ DRM_ERROR("\tISP requestor.\n");
++ if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
++ DRM_ERROR("\tUSSEPDS requestor.\n");
++ if (val & _PSB_CBI_STAT_FAULT_HOST)
++ DRM_ERROR("\tHost requestor.\n");
++
++ DRM_ERROR("\tMMU failing address is 0x%08x.\n",
++ (unsigned) addr);
++ }
++}
++
++void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *wt = &dev_priv->watchdog_timer;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ if (dev_priv->timer_available && !timer_pending(wt)) {
++ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
++ add_timer(wt);
++ }
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++}
++
++#if 0
++static void psb_seq_lockup_idle(struct drm_psb_private *dev_priv,
++ unsigned int engine, int *lockup,
++ int *idle)
++{
++ uint32_t received_seq;
++
++ received_seq = dev_priv->comm[engine << 4];
++ spin_lock(&dev_priv->sequence_lock);
++ *idle = (received_seq == dev_priv->sequence[engine]);
++ spin_unlock(&dev_priv->sequence_lock);
++
++ if (*idle) {
++ dev_priv->idle[engine] = 1;
++ *lockup = 0;
++ return;
++ }
++
++ if (dev_priv->idle[engine]) {
++ dev_priv->idle[engine] = 0;
++ dev_priv->last_sequence[engine] = received_seq;
++ *lockup = 0;
++ return;
++ }
++
++ *lockup = (dev_priv->last_sequence[engine] == received_seq);
++}
++
++#endif
++static void psb_watchdog_func(unsigned long data)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
++ struct drm_device *dev = dev_priv->dev;
++ int lockup;
++ int msvdx_lockup;
++ int msvdx_idle;
++ int lockup_2d;
++#if 0
++ int topaz_lockup = 0;
++ int topaz_idle = 0;
++#endif
++ int idle_2d;
++ int idle;
++ unsigned long irq_flags;
++
++ psb_scheduler_lockup(dev_priv, &lockup, &idle);
++ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
++
++#if 0
++ if (IS_MRST(dev))
++ lnc_topaz_lockup(dev_priv, &topaz_lockup, &topaz_idle);
++#endif
++
++#if 0
++ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
++#else
++ lockup_2d = false;
++ idle_2d = true;
++#endif
++ if (lockup || msvdx_lockup || lockup_2d) {
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 0;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
++ irq_flags);
++ if (lockup) {
++ psb_print_pagefault(dev_priv);
++ schedule_work(&dev_priv->watchdog_wq);
++ }
++ if (msvdx_lockup)
++ schedule_work(&dev_priv->msvdx_watchdog_wq);
++#if 0
++ if (IS_MRST(dev) && (topaz_lockup))
++ schedule_work(&dev_priv->topaz_watchdog_wq);
++#else
++ (void) dev;
++#endif
++ }
++ if (!idle || !msvdx_idle || !idle_2d /* || !topaz_idle */)
++ psb_schedule_watchdog(dev_priv);
++}
++
++void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_msvdx_cmd_queue *msvdx_cmd;
++ struct list_head *list, *next;
++ /*Flush the msvdx cmd queue and signal all fences in the queue */
++ list_for_each_safe(list, next, &dev_priv->msvdx_queue) {
++ msvdx_cmd =
++ list_entry(list, struct psb_msvdx_cmd_queue, head);
++ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
++ msvdx_cmd->sequence);
++ dev_priv->msvdx_current_sequence = msvdx_cmd->sequence;
++ psb_fence_error(dev, PSB_ENGINE_VIDEO,
++ dev_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
++ list_del(list);
++ kfree(msvdx_cmd->cmd);
++ drm_free(msvdx_cmd, sizeof(struct psb_msvdx_cmd_queue),
++ DRM_MEM_DRIVER);
++ }
++}
++
++static void psb_msvdx_reset_wq(struct work_struct *work)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
++
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ mutex_lock(&dev_priv->msvdx_mutex);
++ dev_priv->msvdx_needs_reset = 1;
++ dev_priv->msvdx_current_sequence++;
++ PSB_DEBUG_GENERAL
++ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
++ dev_priv->msvdx_current_sequence);
++
++ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
++ dev_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++
++ spin_lock_irqsave(&dev_priv->msvdx_lock, irq_flags);
++ psb_msvdx_flush_cmd_queue(scheduler->dev);
++ spin_unlock_irqrestore(&dev_priv->msvdx_lock, irq_flags);
++
++ psb_schedule_watchdog(dev_priv);
++ mutex_unlock(&dev_priv->msvdx_mutex);
++}
++
++static int psb_xhw_mmu_reset(struct drm_psb_private *dev_priv)
++{
++ struct psb_xhw_buf buf;
++ uint32_t bif_ctrl;
++
++ INIT_LIST_HEAD(&buf.head);
++ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
++ bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
++ PSB_WSGX32(bif_ctrl |
++ _PSB_CB_CTRL_CLEAR_FAULT |
++ _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
++ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
++ msleep(1);
++ PSB_WSGX32(bif_ctrl, PSB_CR_BIF_CTRL);
++ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
++ return psb_xhw_reset_dpm(dev_priv, &buf);
++}
++
++/*
++ * Block command submission and reset hardware and schedulers.
++ */
++
++static void psb_reset_wq(struct work_struct *work)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(work, struct drm_psb_private, watchdog_wq);
++ int lockup_2d;
++ int idle_2d;
++ unsigned long irq_flags;
++ int ret;
++ int reset_count = 0;
++ struct psb_xhw_buf buf;
++ uint32_t xhw_lockup;
++
++ /*
++ * Block command submission.
++ */
++ PSB_DEBUG_PM("ioctl: psb_pl_reference\n");
++
++ mutex_lock(&dev_priv->reset_mutex);
++
++ INIT_LIST_HEAD(&buf.head);
++ ret = psb_xhw_check_lockup(dev_priv, &buf, &xhw_lockup);
++ if (likely(ret == 0)) {
++ if (psb_extend_timeout(dev_priv, xhw_lockup) == 0) {
++ /*
++ * no lockup, just re-schedule
++ */
++ spin_lock_irqsave(&dev_priv->watchdog_lock,
++ irq_flags);
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
++ irq_flags);
++ psb_schedule_watchdog(dev_priv);
++ mutex_unlock(&dev_priv->reset_mutex);
++ return;
++ }
++ } else {
++ DRM_ERROR("Check lockup returned %d\n", ret);
++ }
++#if 0
++ msleep(PSB_2D_TIMEOUT_MSEC);
++
++ psb_seq_lockup_idle(dev_priv, PSB_ENGINE_2D, &lockup_2d, &idle_2d);
++
++ if (lockup_2d) {
++ uint32_t seq_2d;
++ spin_lock(&dev_priv->sequence_lock);
++ seq_2d = dev_priv->sequence[PSB_ENGINE_2D];
++ spin_unlock(&dev_priv->sequence_lock);
++ psb_fence_error(dev_priv->scheduler.dev,
++ PSB_ENGINE_2D,
++ seq_2d, DRM_FENCE_TYPE_EXE, -EBUSY);
++ DRM_INFO("Resetting 2D engine.\n");
++ }
++
++ psb_reset(dev_priv, lockup_2d);
++#else
++ (void) lockup_2d;
++ (void) idle_2d;
++ psb_reset(dev_priv, 0);
++#endif
++ (void) psb_xhw_mmu_reset(dev_priv);
++ DRM_INFO("Resetting scheduler.\n");
++ psb_scheduler_pause(dev_priv);
++ psb_scheduler_reset(dev_priv, -EBUSY);
++ psb_scheduler_ta_mem_check(dev_priv);
++
++ while (dev_priv->ta_mem &&
++ !dev_priv->force_ta_mem_load && ++reset_count < 10) {
++ struct ttm_fence_object *fence;
++
++ /*
++ * TA memory is currently fenced so offsets
++ * are valid. Reload offsets into the dpm now.
++ */
++
++ struct psb_xhw_buf buf;
++ INIT_LIST_HEAD(&buf.head);
++
++ msleep(100);
++
++ fence = dev_priv->ta_mem->ta_memory->sync_obj;
++
++ DRM_INFO("Reloading TA memory at offset "
++ "0x%08lx to 0x%08lx seq %d\n",
++ dev_priv->ta_mem->ta_memory->offset,
++ dev_priv->ta_mem->ta_memory->offset +
++ (dev_priv->ta_mem->ta_memory->num_pages << PAGE_SHIFT),
++ fence->sequence);
++
++ fence = dev_priv->ta_mem->hw_data->sync_obj;
++
++ DRM_INFO("Reloading TA HW memory at offset "
++ "0x%08lx to 0x%08lx seq %u\n",
++ dev_priv->ta_mem->hw_data->offset,
++ dev_priv->ta_mem->hw_data->offset +
++ (dev_priv->ta_mem->hw_data->num_pages << PAGE_SHIFT),
++ fence->sequence);
++
++ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
++ PSB_TA_MEM_FLAG_TA |
++ PSB_TA_MEM_FLAG_RASTER |
++ PSB_TA_MEM_FLAG_HOSTA |
++ PSB_TA_MEM_FLAG_HOSTD |
++ PSB_TA_MEM_FLAG_INIT,
++ dev_priv->ta_mem->ta_memory->
++ offset,
++ dev_priv->ta_mem->hw_data->
++ offset,
++ dev_priv->ta_mem->hw_cookie);
++ if (!ret)
++ break;
++
++ DRM_INFO("Reloading TA memory failed. Retrying.\n");
++ psb_reset(dev_priv, 0);
++ (void) psb_xhw_mmu_reset(dev_priv);
++ }
++
++ psb_scheduler_restart(dev_priv);
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++ mutex_unlock(&dev_priv->reset_mutex);
++}
++
++void psb_watchdog_init(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *wt = &dev_priv->watchdog_timer;
++ unsigned long irq_flags;
++
++ spin_lock_init(&dev_priv->watchdog_lock);
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ init_timer(wt);
++ INIT_WORK(&dev_priv->watchdog_wq, &psb_reset_wq);
++ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
++ wt->data = (unsigned long) dev_priv;
++ wt->function = &psb_watchdog_func;
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++}
++
++void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
++{
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 0;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++ (void) del_timer_sync(&dev_priv->watchdog_timer);
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_scene.c b/drivers/gpu/drm/psb/psb_scene.c
+--- a/drivers/gpu/drm/psb/psb_scene.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_scene.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,523 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_scene.h"
++
++void psb_clear_scene_atomic(struct psb_scene *scene)
++{
++ int i;
++ struct page *page;
++ void *v;
++
++ for (i = 0; i < scene->clear_num_pages; ++i) {
++ page = ttm_tt_get_page(scene->hw_data->ttm,
++ scene->clear_p_start + i);
++ if (in_irq())
++ v = kmap_atomic(page, KM_IRQ0);
++ else
++ v = kmap_atomic(page, KM_USER0);
++
++ memset(v, 0, PAGE_SIZE);
++
++ if (in_irq())
++ kunmap_atomic(v, KM_IRQ0);
++ else
++ kunmap_atomic(v, KM_USER0);
++ }
++}
++
++int psb_clear_scene(struct psb_scene *scene)
++{
++ struct ttm_bo_kmap_obj bmo;
++ bool is_iomem;
++ void *addr;
++
++ int ret = ttm_bo_kmap(scene->hw_data, scene->clear_p_start,
++ scene->clear_num_pages, &bmo);
++
++ PSB_DEBUG_RENDER("Scene clear.\n");
++ if (ret)
++ return ret;
++
++ addr = ttm_kmap_obj_virtual(&bmo, &is_iomem);
++ BUG_ON(is_iomem);
++ memset(addr, 0, scene->clear_num_pages << PAGE_SHIFT);
++ ttm_bo_kunmap(&bmo);
++
++ return 0;
++}
++
++static void psb_destroy_scene(struct kref *kref)
++{
++ struct psb_scene *scene =
++ container_of(kref, struct psb_scene, kref);
++
++ PSB_DEBUG_RENDER("Scene destroy.\n");
++ psb_scheduler_remove_scene_refs(scene);
++ ttm_bo_unref(&scene->hw_data);
++ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
++}
++
++void psb_scene_unref(struct psb_scene **p_scene)
++{
++ struct psb_scene *scene = *p_scene;
++
++ PSB_DEBUG_RENDER("Scene unref.\n");
++ *p_scene = NULL;
++ kref_put(&scene->kref, &psb_destroy_scene);
++}
++
++struct psb_scene *psb_scene_ref(struct psb_scene *src)
++{
++ PSB_DEBUG_RENDER("Scene ref.\n");
++ kref_get(&src->kref);
++ return src;
++}
++
++static struct psb_scene *psb_alloc_scene(struct drm_device *dev,
++ uint32_t w, uint32_t h)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret = -EINVAL;
++ struct psb_scene *scene;
++ uint32_t bo_size;
++ struct psb_xhw_buf buf;
++
++ PSB_DEBUG_RENDER("Alloc scene w %u h %u msaa %u\n", w & 0xffff, h,
++ w >> 16);
++
++ scene = drm_calloc(1, sizeof(*scene), DRM_MEM_DRIVER);
++
++ if (!scene) {
++ DRM_ERROR("Out of memory allocating scene object.\n");
++ return NULL;
++ }
++
++ scene->dev = dev;
++ scene->w = w;
++ scene->h = h;
++ scene->hw_scene = NULL;
++ kref_init(&scene->kref);
++
++ INIT_LIST_HEAD(&buf.head);
++ ret = psb_xhw_scene_info(dev_priv, &buf, scene->w, scene->h,
++ scene->hw_cookie, &bo_size,
++ &scene->clear_p_start,
++ &scene->clear_num_pages);
++ if (ret)
++ goto out_err;
++
++ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_CACHED,
++ 0, 0, 1, NULL, &scene->hw_data);
++ if (ret)
++ goto out_err;
++
++ return scene;
++out_err:
++ drm_free(scene, sizeof(*scene), DRM_MEM_DRIVER);
++ return NULL;
++}
++
++int psb_validate_scene_pool(struct psb_context *context,
++ struct psb_scene_pool *pool,
++ uint32_t w,
++ uint32_t h,
++ int final_pass, struct psb_scene **scene_p)
++{
++ struct drm_device *dev = pool->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct psb_scene *scene = pool->scenes[pool->cur_scene];
++ int ret;
++ unsigned long irq_flags;
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ uint32_t bin_pt_offset;
++ uint32_t bin_param_offset;
++
++ PSB_DEBUG_RENDER("Validate scene pool. Scene %u\n",
++ pool->cur_scene);
++
++ if (unlikely(!dev_priv->ta_mem)) {
++ dev_priv->ta_mem =
++ psb_alloc_ta_mem(dev, dev_priv->ta_mem_pages);
++ if (!dev_priv->ta_mem)
++ return -ENOMEM;
++
++ bin_pt_offset = ~0;
++ bin_param_offset = ~0;
++ } else {
++ bin_pt_offset = dev_priv->ta_mem->hw_data->offset;
++ bin_param_offset = dev_priv->ta_mem->ta_memory->offset;
++ }
++
++ pool->w = w;
++ pool->h = h;
++ if (scene && (scene->w != pool->w || scene->h != pool->h)) {
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
++ spin_unlock_irqrestore(&scheduler->lock,
++ irq_flags);
++ DRM_ERROR("Trying to resize a dirty scene.\n");
++ return -EINVAL;
++ }
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ psb_scene_unref(&pool->scenes[pool->cur_scene]);
++ scene = NULL;
++ }
++
++ if (!scene) {
++ pool->scenes[pool->cur_scene] = scene =
++ psb_alloc_scene(pool->dev, pool->w, pool->h);
++
++ if (!scene)
++ return -ENOMEM;
++
++ scene->flags = PSB_SCENE_FLAG_CLEARED;
++ }
++
++ ret = psb_validate_kernel_buffer(context, scene->hw_data,
++ PSB_ENGINE_TA,
++ PSB_BO_FLAG_SCENE |
++ PSB_GPU_ACCESS_READ |
++ PSB_GPU_ACCESS_WRITE, 0);
++ if (unlikely(ret != 0))
++ return ret;
++
++ /*
++ * FIXME: We need atomic bit manipulation here for the
++ * scheduler. For now use the spinlock.
++ */
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ if (!(scene->flags & PSB_SCENE_FLAG_CLEARED)) {
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ PSB_DEBUG_RENDER("Waiting to clear scene memory.\n");
++ mutex_lock(&scene->hw_data->mutex);
++
++ ret = ttm_bo_wait(scene->hw_data, 0, 1, 0);
++ mutex_unlock(&scene->hw_data->mutex);
++ if (ret)
++ return ret;
++
++ ret = psb_clear_scene(scene);
++
++ if (ret)
++ return ret;
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ scene->flags |= PSB_SCENE_FLAG_CLEARED;
++ }
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++
++ ret = psb_validate_kernel_buffer(context, dev_priv->ta_mem->hw_data,
++ PSB_ENGINE_TA,
++ PSB_BO_FLAG_SCENE |
++ PSB_GPU_ACCESS_READ |
++ PSB_GPU_ACCESS_WRITE, 0);
++ if (unlikely(ret != 0))
++ return ret;
++
++ ret =
++ psb_validate_kernel_buffer(context,
++ dev_priv->ta_mem->ta_memory,
++ PSB_ENGINE_TA,
++ PSB_BO_FLAG_SCENE |
++ PSB_GPU_ACCESS_READ |
++ PSB_GPU_ACCESS_WRITE, 0);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (unlikely(bin_param_offset !=
++ dev_priv->ta_mem->ta_memory->offset ||
++ bin_pt_offset !=
++ dev_priv->ta_mem->hw_data->offset ||
++ dev_priv->force_ta_mem_load)) {
++
++ struct psb_xhw_buf buf;
++
++ INIT_LIST_HEAD(&buf.head);
++ ret = psb_xhw_ta_mem_load(dev_priv, &buf,
++ PSB_TA_MEM_FLAG_TA |
++ PSB_TA_MEM_FLAG_RASTER |
++ PSB_TA_MEM_FLAG_HOSTA |
++ PSB_TA_MEM_FLAG_HOSTD |
++ PSB_TA_MEM_FLAG_INIT,
++ dev_priv->ta_mem->ta_memory->
++ offset,
++ dev_priv->ta_mem->hw_data->
++ offset,
++ dev_priv->ta_mem->hw_cookie);
++ if (ret)
++ return ret;
++
++ dev_priv->force_ta_mem_load = 0;
++ }
++
++ if (final_pass) {
++
++ /*
++ * Clear the scene on next use. Advance the scene counter.
++ */
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ scene->flags &= ~PSB_SCENE_FLAG_CLEARED;
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ pool->cur_scene = (pool->cur_scene + 1) % pool->num_scenes;
++ }
++
++ *scene_p = psb_scene_ref(scene);
++ return 0;
++}
++
++static void psb_scene_pool_destroy(struct kref *kref)
++{
++ struct psb_scene_pool *pool =
++ container_of(kref, struct psb_scene_pool, kref);
++ int i;
++ PSB_DEBUG_RENDER("Scene pool destroy.\n");
++
++ for (i = 0; i < pool->num_scenes; ++i) {
++ PSB_DEBUG_RENDER("scenes %d is 0x%08lx\n", i,
++ (unsigned long) pool->scenes[i]);
++ if (pool->scenes[i])
++ psb_scene_unref(&pool->scenes[i]);
++ }
++
++ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
++}
++
++void psb_scene_pool_unref(struct psb_scene_pool **p_pool)
++{
++ struct psb_scene_pool *pool = *p_pool;
++
++ PSB_DEBUG_RENDER("Scene pool unref\n");
++ *p_pool = NULL;
++ kref_put(&pool->kref, &psb_scene_pool_destroy);
++}
++
++struct psb_scene_pool *psb_scene_pool_ref(struct psb_scene_pool *src)
++{
++ kref_get(&src->kref);
++ return src;
++}
++
++/*
++ * Callback for base object manager.
++ */
++
++static void psb_scene_pool_release(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct psb_scene_pool *pool =
++ container_of(base, struct psb_scene_pool, base);
++ *p_base = NULL;
++
++ psb_scene_pool_unref(&pool);
++}
++
++struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file *file_priv,
++ uint32_t handle,
++ int check_owner)
++{
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct ttm_base_object *base;
++ struct psb_scene_pool *pool;
++
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (!base || (base->object_type != PSB_USER_OBJECT_SCENE_POOL)) {
++ DRM_ERROR("Could not find scene pool object 0x%08x\n",
++ handle);
++ return NULL;
++ }
++
++ if (check_owner && tfile != base->tfile && !base->shareable) {
++ ttm_base_object_unref(&base);
++ return NULL;
++ }
++
++ pool = container_of(base, struct psb_scene_pool, base);
++ kref_get(&pool->kref);
++ ttm_base_object_unref(&base);
++ return pool;
++}
++
++struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *file_priv,
++ int shareable,
++ uint32_t num_scenes,
++ uint32_t w, uint32_t h)
++{
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct drm_device *dev = file_priv->minor->dev;
++ struct psb_scene_pool *pool;
++ int ret;
++
++ PSB_DEBUG_RENDER("Scene pool alloc\n");
++ pool = drm_calloc(1, sizeof(*pool), DRM_MEM_DRIVER);
++ if (!pool) {
++ DRM_ERROR("Out of memory allocating scene pool object.\n");
++ return NULL;
++ }
++ pool->w = w;
++ pool->h = h;
++ pool->dev = dev;
++ pool->num_scenes = num_scenes;
++ kref_init(&pool->kref);
++
++ /*
++ * The base object holds a reference.
++ */
++
++ kref_get(&pool->kref);
++ ret = ttm_base_object_init(tfile, &pool->base, shareable,
++ PSB_USER_OBJECT_SCENE_POOL,
++ &psb_scene_pool_release, NULL);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ return pool;
++out_err:
++ drm_free(pool, sizeof(*pool), DRM_MEM_DRIVER);
++ return NULL;
++}
++
++/*
++ * Code to support multiple ta memory buffers.
++ */
++
++static void psb_ta_mem_destroy(struct kref *kref)
++{
++ struct psb_ta_mem *ta_mem =
++ container_of(kref, struct psb_ta_mem, kref);
++
++ ttm_bo_unref(&ta_mem->hw_data);
++ ttm_bo_unref(&ta_mem->ta_memory);
++ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
++}
++
++void psb_ta_mem_unref(struct psb_ta_mem **p_ta_mem)
++{
++ struct psb_ta_mem *ta_mem = *p_ta_mem;
++ *p_ta_mem = NULL;
++ kref_put(&ta_mem->kref, psb_ta_mem_destroy);
++}
++
++struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src)
++{
++ kref_get(&src->kref);
++ return src;
++}
++
++struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev, uint32_t pages)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret = -EINVAL;
++ struct psb_ta_mem *ta_mem;
++ uint32_t bo_size;
++ uint32_t ta_min_size;
++ struct psb_xhw_buf buf;
++
++ INIT_LIST_HEAD(&buf.head);
++
++ ta_mem = drm_calloc(1, sizeof(*ta_mem), DRM_MEM_DRIVER);
++
++ if (!ta_mem) {
++ DRM_ERROR("Out of memory allocating parameter memory.\n");
++ return NULL;
++ }
++
++ kref_init(&ta_mem->kref);
++ ret = psb_xhw_ta_mem_info(dev_priv, &buf, pages,
++ ta_mem->hw_cookie,
++ &bo_size,
++ &ta_min_size);
++ if (ret == -ENOMEM) {
++ DRM_ERROR("Parameter memory size is too small.\n");
++ DRM_INFO("Attempted to use %u kiB of parameter memory.\n",
++ (unsigned int) (pages * (PAGE_SIZE / 1024)));
++ DRM_INFO("The Xpsb driver thinks this is too small and\n");
++ DRM_INFO("suggests %u kiB. Check the psb DRM\n",
++ (unsigned int)(ta_min_size / 1024));
++ DRM_INFO("\"ta_mem_size\" parameter!\n");
++ }
++ if (ret)
++ goto out_err0;
++
++ ret = ttm_buffer_object_create(bdev, bo_size, ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU,
++ 0, 0, 0, NULL,
++ &ta_mem->hw_data);
++ if (ret)
++ goto out_err0;
++
++ bo_size = pages * PAGE_SIZE;
++ ret =
++ ttm_buffer_object_create(bdev, bo_size,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_RASTGEOM,
++ 0,
++ 1024 * 1024 >> PAGE_SHIFT, 0,
++ NULL,
++ &ta_mem->ta_memory);
++ if (ret)
++ goto out_err1;
++
++ return ta_mem;
++out_err1:
++ ttm_bo_unref(&ta_mem->hw_data);
++out_err0:
++ drm_free(ta_mem, sizeof(*ta_mem), DRM_MEM_DRIVER);
++ return NULL;
++}
++
++int drm_psb_scene_unref_ioctl(struct drm_device *dev,
++ void *data, struct drm_file *file_priv)
++{
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct drm_psb_scene *scene = (struct drm_psb_scene *) data;
++ int ret = 0;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ if (!scene->handle_valid)
++ return 0;
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++
++ ret =
++ ttm_ref_object_base_unref(tfile, scene->handle, TTM_REF_USAGE);
++ if (unlikely(ret != 0))
++ DRM_ERROR("Could not unreference a scene object.\n");
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_scene.h b/drivers/gpu/drm/psb/psb_scene.h
+--- a/drivers/gpu/drm/psb/psb_scene.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_scene.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,119 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++
++#ifndef _PSB_SCENE_H_
++#define _PSB_SCENE_H_
++
++#include "ttm/ttm_object.h"
++
++#define PSB_USER_OBJECT_SCENE_POOL ttm_driver_type0
++#define PSB_USER_OBJECT_TA_MEM ttm_driver_type1
++#define PSB_MAX_NUM_SCENES 8
++
++struct psb_hw_scene;
++struct psb_hw_ta_mem;
++
++struct psb_scene_pool {
++ struct ttm_base_object base;
++ struct drm_device *dev;
++ struct kref kref;
++ uint32_t w;
++ uint32_t h;
++ uint32_t cur_scene;
++ struct psb_scene *scenes[PSB_MAX_NUM_SCENES];
++ uint32_t num_scenes;
++};
++
++struct psb_scene {
++ struct drm_device *dev;
++ struct kref kref;
++ uint32_t hw_cookie[PSB_SCENE_HW_COOKIE_SIZE];
++ uint32_t bo_size;
++ uint32_t w;
++ uint32_t h;
++ struct psb_ta_mem *ta_mem;
++ struct psb_hw_scene *hw_scene;
++ struct ttm_buffer_object *hw_data;
++ uint32_t flags;
++ uint32_t clear_p_start;
++ uint32_t clear_num_pages;
++};
++
++#if 0
++struct psb_scene_entry {
++ struct list_head head;
++ struct psb_scene *scene;
++};
++
++struct psb_user_scene {
++ struct ttm_base_object base;
++ struct drm_device *dev;
++};
++
++#endif
++
++struct psb_ta_mem {
++ struct ttm_base_object base;
++ struct drm_device *dev;
++ struct kref kref;
++ uint32_t hw_cookie[PSB_TA_MEM_HW_COOKIE_SIZE];
++ uint32_t bo_size;
++ struct ttm_buffer_object *ta_memory;
++ struct ttm_buffer_object *hw_data;
++ int is_deallocating;
++ int deallocating_scheduled;
++};
++
++extern struct psb_scene_pool *psb_scene_pool_alloc(struct drm_file *priv,
++ int shareable,
++ uint32_t num_scenes,
++ uint32_t w, uint32_t h);
++extern void psb_scene_pool_unref(struct psb_scene_pool **pool);
++extern struct psb_scene_pool *psb_scene_pool_lookup(struct drm_file
++ *priv,
++ uint32_t handle,
++ int check_owner);
++extern int psb_validate_scene_pool(struct psb_context *context,
++ struct psb_scene_pool *pool,
++ uint32_t w,
++ uint32_t h, int final_pass,
++ struct psb_scene **scene_p);
++extern void psb_scene_unref(struct psb_scene **scene);
++extern struct psb_scene *psb_scene_ref(struct psb_scene *src);
++extern int drm_psb_scene_unref_ioctl(struct drm_device *dev,
++ void *data,
++ struct drm_file *file_priv);
++
++static inline uint32_t psb_scene_pool_handle(struct psb_scene_pool *pool)
++{
++ return pool->base.hash.key;
++}
++
++extern struct psb_ta_mem *psb_alloc_ta_mem(struct drm_device *dev,
++ uint32_t pages);
++extern struct psb_ta_mem *psb_ta_mem_ref(struct psb_ta_mem *src);
++extern void psb_ta_mem_unref(struct psb_ta_mem **ta_mem);
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_schedule.c b/drivers/gpu/drm/psb/psb_schedule.c
+--- a/drivers/gpu/drm/psb/psb_schedule.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_schedule.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1539 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_scene.h"
++#include "ttm/ttm_execbuf_util.h"
++
++#define PSB_ALLOWED_RASTER_RUNTIME (DRM_HZ * 30)
++#define PSB_ALLOWED_TA_RUNTIME (DRM_HZ * 30)
++#define PSB_RASTER_TIMEOUT (DRM_HZ / 10)
++#define PSB_TA_TIMEOUT (DRM_HZ / 10)
++
++#undef PSB_SOFTWARE_WORKAHEAD
++
++#ifdef PSB_STABLE_SETTING
++
++/*
++ * Software blocks completely while the engines are working so there can be no
++ * overlap.
++ */
++
++#define PSB_WAIT_FOR_RASTER_COMPLETION
++#define PSB_WAIT_FOR_TA_COMPLETION
++
++#elif defined(PSB_PARANOID_SETTING)
++/*
++ * Software blocks "almost" while the engines are working so there can be no
++ * overlap.
++ */
++
++#define PSB_WAIT_FOR_RASTER_COMPLETION
++#define PSB_WAIT_FOR_TA_COMPLETION
++#define PSB_BE_PARANOID
++
++#elif defined(PSB_SOME_OVERLAP_BUT_LOCKUP)
++/*
++ * Software leaps ahead while the rasterizer is running and prepares
++ * a new ta job that can be scheduled before the rasterizer has
++ * finished.
++ */
++
++#define PSB_WAIT_FOR_TA_COMPLETION
++
++#elif defined(PSB_SOFTWARE_WORKAHEAD)
++/*
++ * Don't sync, but allow software to work ahead. and queue a number of jobs.
++ * But block overlapping in the scheduler.
++ */
++
++#define PSB_BLOCK_OVERLAP
++#define ONLY_ONE_JOB_IN_RASTER_QUEUE
++
++#endif
++
++/*
++ * Avoid pixelbe pagefaults on C0.
++ */
++#if 0
++#define PSB_BLOCK_OVERLAP
++#endif
++
++static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler,
++ uint32_t reply_flag);
++static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler,
++ uint32_t reply_flag);
++
++#ifdef FIX_TG_16
++
++void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv);
++static int psb_check_2d_idle(struct drm_psb_private *dev_priv);
++
++#endif
++
++void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
++ int *lockup, int *idle)
++{
++ unsigned long irq_flags;
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++
++ *lockup = 0;
++ *idle = 1;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++
++ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
++ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
++ *lockup = 1;
++ }
++ if (!*lockup
++ && (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
++ && time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
++ *lockup = 1;
++ }
++ if (!*lockup)
++ *idle = scheduler->idle;
++
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++}
++
++static inline void psb_set_idle(struct psb_scheduler *scheduler)
++{
++ scheduler->idle =
++ (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] == NULL) &&
++ (scheduler->current_task[PSB_SCENE_ENGINE_TA] == NULL);
++ if (scheduler->idle)
++ wake_up(&scheduler->idle_queue);
++}
++
++/*
++ * Call with the scheduler spinlock held.
++ * Assigns a scene context to either the ta or the rasterizer,
++ * flushing out other scenes to memory if necessary.
++ */
++
++static int psb_set_scene_fire(struct psb_scheduler *scheduler,
++ struct psb_scene *scene,
++ int engine, struct psb_task *task)
++{
++ uint32_t flags = 0;
++ struct psb_hw_scene *hw_scene;
++ struct drm_device *dev = scene->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ hw_scene = scene->hw_scene;
++ if (hw_scene && hw_scene->last_scene == scene) {
++
++ /*
++ * Reuse the last hw scene context and delete it from the
++ * free list.
++ */
++
++ PSB_DEBUG_RENDER("Reusing hw scene %d.\n",
++ hw_scene->context_number);
++ if (scene->flags & PSB_SCENE_FLAG_DIRTY) {
++
++ /*
++ * No hw context initialization to be done.
++ */
++
++ flags |= PSB_SCENE_FLAG_SETUP_ONLY;
++ }
++
++ list_del_init(&hw_scene->head);
++
++ } else {
++ struct list_head *list;
++ hw_scene = NULL;
++
++ /*
++ * Grab a new hw scene context.
++ */
++
++ list_for_each(list, &scheduler->hw_scenes) {
++ hw_scene =
++ list_entry(list, struct psb_hw_scene, head);
++ break;
++ }
++ BUG_ON(!hw_scene);
++ PSB_DEBUG_RENDER("New hw scene %d.\n",
++ hw_scene->context_number);
++
++ list_del_init(list);
++ }
++ scene->hw_scene = hw_scene;
++ hw_scene->last_scene = scene;
++
++ flags |= PSB_SCENE_FLAG_SETUP;
++
++ /*
++ * Switch context and setup the engine.
++ */
++
++ return psb_xhw_scene_bind_fire(dev_priv,
++ &task->buf,
++ task->flags,
++ hw_scene->context_number,
++ scene->hw_cookie,
++ task->oom_cmds,
++ task->oom_cmd_size,
++ scene->hw_data->offset,
++ engine, flags | scene->flags);
++}
++
++static inline void psb_report_fence(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler,
++ uint32_t class,
++ uint32_t sequence,
++ uint32_t type, int call_handler)
++{
++ struct psb_scheduler_seq *seq = &scheduler->seq[type];
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[PSB_ENGINE_TA];
++ unsigned long irq_flags;
++
++ /**
++ * Block racing poll_ta calls, that take the lock in write mode.
++ */
++
++ read_lock_irqsave(&fc->lock, irq_flags);
++ seq->sequence = sequence;
++ seq->reported = 0;
++ read_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_handler)
++ psb_fence_handler(scheduler->dev, class);
++}
++
++static void psb_schedule_raster(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler);
++
++static void psb_schedule_ta(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ struct psb_task *task = NULL;
++ struct list_head *list, *next;
++ int pushed_raster_task = 0;
++
++ PSB_DEBUG_RENDER("schedule ta\n");
++
++ if (scheduler->idle_count != 0)
++ return;
++
++ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL)
++ return;
++
++ if (scheduler->ta_state)
++ return;
++
++ /*
++ * Skip the ta stage for rasterization-only
++ * tasks. They arrive here to make sure we're rasterizing
++ * tasks in the correct order.
++ */
++
++ list_for_each_safe(list, next, &scheduler->ta_queue) {
++ task = list_entry(list, struct psb_task, head);
++ if (task->task_type != psb_raster_task)
++ break;
++
++ list_del_init(list);
++ list_add_tail(list, &scheduler->raster_queue);
++ psb_report_fence(dev_priv, scheduler, task->engine,
++ task->sequence,
++ _PSB_FENCE_TA_DONE_SHIFT, 1);
++ task = NULL;
++ pushed_raster_task = 1;
++ }
++
++ if (pushed_raster_task)
++ psb_schedule_raster(dev_priv, scheduler);
++
++ if (!task)
++ return;
++
++ /*
++ * Still waiting for a vistest?
++ */
++
++ if (scheduler->feedback_task == task)
++ return;
++
++#ifdef ONLY_ONE_JOB_IN_RASTER_QUEUE
++
++ /*
++ * Block ta from trying to use both hardware contexts
++ * without the rasterizer starting to render from one of them.
++ */
++
++ if (!list_empty(&scheduler->raster_queue))
++ return;
++
++#endif
++
++#ifdef PSB_BLOCK_OVERLAP
++ /*
++ * Make sure rasterizer isn't doing anything.
++ */
++ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL)
++ return;
++#endif
++ if (list_empty(&scheduler->hw_scenes))
++ return;
++
++#ifdef FIX_TG_16
++ if (psb_check_2d_idle(dev_priv))
++ return;
++#endif
++
++ list_del_init(&task->head);
++ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
++ scheduler->ta_state = 1;
++
++ scheduler->current_task[PSB_SCENE_ENGINE_TA] = task;
++ scheduler->idle = 0;
++ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
++ scheduler->total_ta_jiffies = 0;
++
++ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
++ 0x00000000 : PSB_RF_FIRE_TA;
++
++ (void) psb_reg_submit(dev_priv, task->ta_cmds, task->ta_cmd_size);
++ psb_set_scene_fire(scheduler, task->scene, PSB_SCENE_ENGINE_TA,
++ task);
++ psb_schedule_watchdog(dev_priv);
++}
++
++static int psb_fire_raster(struct psb_scheduler *scheduler,
++ struct psb_task *task)
++{
++ struct drm_device *dev = scheduler->dev;
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *)
++ dev->dev_private;
++
++ PSB_DEBUG_RENDER("Fire raster %d\n", task->sequence);
++
++ return psb_xhw_fire_raster(dev_priv, &task->buf, task->flags);
++}
++
++/*
++ * Take the first rasterization task from the hp raster queue or from the
++ * raster queue and fire the rasterizer.
++ */
++
++static void psb_schedule_raster(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ struct psb_task *task;
++ struct list_head *list;
++
++ if (scheduler->idle_count != 0)
++ return;
++
++ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL) {
++ PSB_DEBUG_RENDER("Raster busy.\n");
++ return;
++ }
++#ifdef PSB_BLOCK_OVERLAP
++ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL) {
++ PSB_DEBUG_RENDER("TA busy.\n");
++ return;
++ }
++#endif
++
++ if (!list_empty(&scheduler->hp_raster_queue))
++ list = scheduler->hp_raster_queue.next;
++ else if (!list_empty(&scheduler->raster_queue))
++ list = scheduler->raster_queue.next;
++ else {
++ PSB_DEBUG_RENDER("Nothing in list\n");
++ return;
++ }
++
++ task = list_entry(list, struct psb_task, head);
++
++ /*
++ * Sometimes changing ZLS format requires an ISP reset.
++ * Doesn't seem to consume too much time.
++ */
++
++ if (task->scene)
++ PSB_WSGX32(_PSB_CS_RESET_ISP_RESET, PSB_CR_SOFT_RESET);
++
++ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = task;
++
++ list_del_init(list);
++ scheduler->idle = 0;
++ scheduler->raster_end_jiffies = jiffies + PSB_RASTER_TIMEOUT;
++ scheduler->total_raster_jiffies = 0;
++
++ if (task->scene)
++ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
++
++ (void) psb_reg_submit(dev_priv, task->raster_cmds,
++ task->raster_cmd_size);
++
++ if (task->scene) {
++ task->reply_flags = (task->flags & PSB_FIRE_FLAG_XHW_OOM) ?
++ 0x00000000 : PSB_RF_FIRE_RASTER;
++ psb_set_scene_fire(scheduler,
++ task->scene, PSB_SCENE_ENGINE_RASTER,
++ task);
++ } else {
++ task->reply_flags = PSB_RF_DEALLOC | PSB_RF_FIRE_RASTER;
++ psb_fire_raster(scheduler, task);
++ }
++ psb_schedule_watchdog(dev_priv);
++}
++
++int psb_extend_timeout(struct drm_psb_private *dev_priv,
++ uint32_t xhw_lockup)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++ int ret = -EBUSY;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++
++ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] != NULL &&
++ time_after_eq(jiffies, scheduler->ta_end_jiffies)) {
++ if (xhw_lockup & PSB_LOCKUP_TA) {
++ goto out_unlock;
++ } else {
++ scheduler->total_ta_jiffies +=
++ jiffies - scheduler->ta_end_jiffies +
++ PSB_TA_TIMEOUT;
++ if (scheduler->total_ta_jiffies >
++ PSB_ALLOWED_TA_RUNTIME)
++ goto out_unlock;
++ scheduler->ta_end_jiffies = jiffies + PSB_TA_TIMEOUT;
++ }
++ }
++ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] != NULL &&
++ time_after_eq(jiffies, scheduler->raster_end_jiffies)) {
++ if (xhw_lockup & PSB_LOCKUP_RASTER) {
++ goto out_unlock;
++ } else {
++ scheduler->total_raster_jiffies +=
++ jiffies - scheduler->raster_end_jiffies +
++ PSB_RASTER_TIMEOUT;
++ if (scheduler->total_raster_jiffies >
++ PSB_ALLOWED_RASTER_RUNTIME)
++ goto out_unlock;
++ scheduler->raster_end_jiffies =
++ jiffies + PSB_RASTER_TIMEOUT;
++ }
++ }
++
++ ret = 0;
++
++out_unlock:
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ return ret;
++}
++
++/*
++ * TA done handler.
++ */
++
++static void psb_ta_done(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ struct psb_task *task =
++ scheduler->current_task[PSB_SCENE_ENGINE_TA];
++ struct psb_scene *scene = task->scene;
++
++ PSB_DEBUG_RENDER("TA done %u\n", task->sequence);
++
++ switch (task->ta_complete_action) {
++ case PSB_RASTER_BLOCK:
++ scheduler->ta_state = 1;
++ scene->flags |=
++ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
++ list_add_tail(&task->head, &scheduler->raster_queue);
++ break;
++ case PSB_RASTER:
++ scene->flags |=
++ (PSB_SCENE_FLAG_DIRTY | PSB_SCENE_FLAG_COMPLETE);
++ list_add_tail(&task->head, &scheduler->raster_queue);
++ break;
++ case PSB_RETURN:
++ scheduler->ta_state = 0;
++ scene->flags |= PSB_SCENE_FLAG_DIRTY;
++ list_add_tail(&scene->hw_scene->head,
++ &scheduler->hw_scenes);
++
++ break;
++ }
++
++ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
++
++#ifdef FIX_TG_16
++ psb_2d_atomic_unlock(dev_priv);
++#endif
++
++ if (task->ta_complete_action != PSB_RASTER_BLOCK)
++ psb_report_fence(dev_priv, scheduler, task->engine,
++ task->sequence,
++ _PSB_FENCE_TA_DONE_SHIFT, 1);
++
++ psb_schedule_raster(dev_priv, scheduler);
++ psb_schedule_ta(dev_priv, scheduler);
++ psb_set_idle(scheduler);
++
++ if (task->ta_complete_action != PSB_RETURN)
++ return;
++
++ list_add_tail(&task->head, &scheduler->task_done_queue);
++ schedule_delayed_work(&scheduler->wq, 1);
++}
++
++/*
++ * Rasterizer done handler.
++ */
++
++static void psb_raster_done(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ struct psb_task *task =
++ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
++ struct psb_scene *scene = task->scene;
++ uint32_t complete_action = task->raster_complete_action;
++
++ PSB_DEBUG_RENDER("Raster done %u\n", task->sequence);
++
++ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
++
++ if (complete_action != PSB_RASTER)
++ psb_schedule_raster(dev_priv, scheduler);
++
++ if (scene) {
++ if (task->feedback.page) {
++ if (unlikely(scheduler->feedback_task)) {
++ /*
++ * This should never happen, since the previous
++ * feedback query will return before the next
++ * raster task is fired.
++ */
++ DRM_ERROR("Feedback task busy.\n");
++ }
++ scheduler->feedback_task = task;
++ psb_xhw_vistest(dev_priv, &task->buf);
++ }
++ switch (complete_action) {
++ case PSB_RETURN:
++ scene->flags &=
++ ~(PSB_SCENE_FLAG_DIRTY |
++ PSB_SCENE_FLAG_COMPLETE);
++ list_add_tail(&scene->hw_scene->head,
++ &scheduler->hw_scenes);
++ psb_report_fence(dev_priv, scheduler, task->engine,
++ task->sequence,
++ _PSB_FENCE_SCENE_DONE_SHIFT, 1);
++ if (task->flags & PSB_FIRE_FLAG_XHW_OOM)
++ scheduler->ta_state = 0;
++
++ break;
++ case PSB_RASTER:
++ list_add(&task->head, &scheduler->raster_queue);
++ task->raster_complete_action = PSB_RETURN;
++ psb_schedule_raster(dev_priv, scheduler);
++ break;
++ case PSB_TA:
++ list_add(&task->head, &scheduler->ta_queue);
++ scheduler->ta_state = 0;
++ task->raster_complete_action = PSB_RETURN;
++ task->ta_complete_action = PSB_RASTER;
++ break;
++
++ }
++ }
++ psb_schedule_ta(dev_priv, scheduler);
++ psb_set_idle(scheduler);
++
++ if (complete_action == PSB_RETURN) {
++ if (task->scene == NULL) {
++ psb_report_fence(dev_priv, scheduler, task->engine,
++ task->sequence,
++ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
++ }
++ if (!task->feedback.page) {
++ list_add_tail(&task->head,
++ &scheduler->task_done_queue);
++ schedule_delayed_work(&scheduler->wq, 1);
++ }
++ }
++}
++
++void psb_scheduler_pause(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ scheduler->idle_count++;
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++}
++
++void psb_scheduler_restart(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ if (--scheduler->idle_count == 0) {
++ psb_schedule_ta(dev_priv, scheduler);
++ psb_schedule_raster(dev_priv, scheduler);
++ }
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++}
++
++int psb_scheduler_idle(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++ int ret;
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ ret = scheduler->idle_count != 0 && scheduler->idle;
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ return ret;
++}
++
++int psb_scheduler_finished(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++ int ret;
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ ret = (scheduler->idle &&
++ list_empty(&scheduler->raster_queue) &&
++ list_empty(&scheduler->ta_queue) &&
++ list_empty(&scheduler->hp_raster_queue));
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ return ret;
++}
++
++static void psb_ta_oom(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++
++ struct psb_task *task =
++ scheduler->current_task[PSB_SCENE_ENGINE_TA];
++ if (!task)
++ return;
++
++ if (task->aborting)
++ return;
++ task->aborting = 1;
++
++ DRM_INFO("Info: TA out of parameter memory.\n");
++
++ (void) psb_xhw_ta_oom(dev_priv, &task->buf,
++ task->scene->hw_cookie);
++}
++
++static void psb_ta_oom_reply(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++
++ struct psb_task *task =
++ scheduler->current_task[PSB_SCENE_ENGINE_TA];
++ uint32_t flags;
++ if (!task)
++ return;
++
++ psb_xhw_ta_oom_reply(dev_priv, &task->buf,
++ task->scene->hw_cookie,
++ &task->ta_complete_action,
++ &task->raster_complete_action, &flags);
++ task->flags |= flags;
++ task->aborting = 0;
++ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM_REPLY);
++}
++
++static void psb_ta_hw_scene_freed(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ DRM_ERROR("TA hw scene freed.\n");
++}
++
++static void psb_vistest_reply(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ struct psb_task *task = scheduler->feedback_task;
++ uint8_t *feedback_map;
++ uint32_t add;
++ uint32_t cur;
++ struct drm_psb_vistest *vistest;
++ int i;
++
++ scheduler->feedback_task = NULL;
++ if (!task) {
++ DRM_ERROR("No Poulsbo feedback task.\n");
++ return;
++ }
++ if (!task->feedback.page) {
++ DRM_ERROR("No Poulsbo feedback page.\n");
++ goto out;
++ }
++
++ if (in_irq())
++ feedback_map = kmap_atomic(task->feedback.page, KM_IRQ0);
++ else
++ feedback_map = kmap_atomic(task->feedback.page, KM_USER0);
++
++ /*
++ * Loop over all requested vistest components here.
++ * Only one (vistest) currently.
++ */
++
++ vistest = (struct drm_psb_vistest *)
++ (feedback_map + task->feedback.offset);
++
++ for (i = 0; i < PSB_HW_FEEDBACK_SIZE; ++i) {
++ add = task->buf.arg.arg.feedback[i];
++ cur = vistest->vt[i];
++
++ /*
++ * Vistest saturates.
++ */
++
++ vistest->vt[i] = (cur + add < cur) ? ~0 : cur + add;
++ }
++ if (in_irq())
++ kunmap_atomic(feedback_map, KM_IRQ0);
++ else
++ kunmap_atomic(feedback_map, KM_USER0);
++out:
++ psb_report_fence(dev_priv, scheduler, task->engine, task->sequence,
++ _PSB_FENCE_FEEDBACK_SHIFT, 1);
++
++ if (list_empty(&task->head)) {
++ list_add_tail(&task->head, &scheduler->task_done_queue);
++ schedule_delayed_work(&scheduler->wq, 1);
++ } else
++ psb_schedule_ta(dev_priv, scheduler);
++}
++
++static void psb_ta_fire_reply(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ struct psb_task *task =
++ scheduler->current_task[PSB_SCENE_ENGINE_TA];
++
++ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
++
++ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_FIRE_TA);
++}
++
++static void psb_raster_fire_reply(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ struct psb_task *task =
++ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
++ uint32_t reply_flags;
++
++ if (!task) {
++ DRM_ERROR("Null task.\n");
++ return;
++ }
++
++ task->raster_complete_action = task->buf.arg.arg.sb.rca;
++ psb_xhw_fire_reply(dev_priv, &task->buf, task->scene->hw_cookie);
++
++ reply_flags = PSB_RF_FIRE_RASTER;
++ if (task->raster_complete_action == PSB_RASTER)
++ reply_flags |= PSB_RF_DEALLOC;
++
++ psb_dispatch_raster(dev_priv, scheduler, reply_flags);
++}
++
++static int psb_user_interrupt(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler)
++{
++ uint32_t type;
++ int ret;
++ unsigned long irq_flags;
++
++ /*
++ * Xhw cannot write directly to the comm page, so
++ * do it here. Firmware would have written directly.
++ */
++
++ ret = psb_xhw_handler(dev_priv);
++ if (unlikely(ret))
++ return ret;
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ type = dev_priv->comm[PSB_COMM_USER_IRQ];
++ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
++ if (dev_priv->comm[PSB_COMM_USER_IRQ_LOST]) {
++ dev_priv->comm[PSB_COMM_USER_IRQ_LOST] = 0;
++ DRM_ERROR("Lost Poulsbo hardware event.\n");
++ }
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++
++ if (type == 0)
++ return 0;
++
++ switch (type) {
++ case PSB_UIRQ_VISTEST:
++ psb_vistest_reply(dev_priv, scheduler);
++ break;
++ case PSB_UIRQ_OOM_REPLY:
++ psb_ta_oom_reply(dev_priv, scheduler);
++ break;
++ case PSB_UIRQ_FIRE_TA_REPLY:
++ psb_ta_fire_reply(dev_priv, scheduler);
++ break;
++ case PSB_UIRQ_FIRE_RASTER_REPLY:
++ psb_raster_fire_reply(dev_priv, scheduler);
++ break;
++ default:
++ DRM_ERROR("Unknown Poulsbo hardware event. %d\n", type);
++ }
++ return 0;
++}
++
++int psb_forced_user_interrupt(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++ int ret;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ ret = psb_user_interrupt(dev_priv, scheduler);
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ return ret;
++}
++
++static void psb_dispatch_ta(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler,
++ uint32_t reply_flag)
++{
++ struct psb_task *task =
++ scheduler->current_task[PSB_SCENE_ENGINE_TA];
++ uint32_t flags;
++ uint32_t mask;
++
++ task->reply_flags |= reply_flag;
++ flags = task->reply_flags;
++ mask = PSB_RF_FIRE_TA;
++
++ if (!(flags & mask))
++ return;
++
++ mask = PSB_RF_TA_DONE;
++ if ((flags & mask) == mask) {
++ task->reply_flags &= ~mask;
++ psb_ta_done(dev_priv, scheduler);
++ }
++
++ mask = PSB_RF_OOM;
++ if ((flags & mask) == mask) {
++ task->reply_flags &= ~mask;
++ psb_ta_oom(dev_priv, scheduler);
++ }
++
++ mask = (PSB_RF_OOM_REPLY | PSB_RF_TERMINATE);
++ if ((flags & mask) == mask) {
++ task->reply_flags &= ~mask;
++ psb_ta_done(dev_priv, scheduler);
++ }
++}
++
++static void psb_dispatch_raster(struct drm_psb_private *dev_priv,
++ struct psb_scheduler *scheduler,
++ uint32_t reply_flag)
++{
++ struct psb_task *task =
++ scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
++ uint32_t flags;
++ uint32_t mask;
++
++ task->reply_flags |= reply_flag;
++ flags = task->reply_flags;
++ mask = PSB_RF_FIRE_RASTER;
++
++ if (!(flags & mask))
++ return;
++
++ /*
++ * For rasterizer-only tasks, don't report fence done here,
++ * as this is time consuming and the rasterizer wants a new
++ * task immediately. For other tasks, the hardware is probably
++ * still busy deallocating TA memory, so we can report
++ * fence done in parallel.
++ */
++
++ if (task->raster_complete_action == PSB_RETURN &&
++ (reply_flag & PSB_RF_RASTER_DONE) && task->scene != NULL) {
++ psb_report_fence(dev_priv, scheduler, task->engine,
++ task->sequence,
++ _PSB_FENCE_RASTER_DONE_SHIFT, 1);
++ }
++
++ mask = PSB_RF_RASTER_DONE | PSB_RF_DEALLOC;
++ if ((flags & mask) == mask) {
++ task->reply_flags &= ~mask;
++ psb_raster_done(dev_priv, scheduler);
++ }
++}
++
++void psb_scheduler_handler(struct drm_psb_private *dev_priv,
++ uint32_t status)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++
++ spin_lock(&scheduler->lock);
++
++ if (status & _PSB_CE_PIXELBE_END_RENDER) {
++ psb_dispatch_raster(dev_priv, scheduler,
++ PSB_RF_RASTER_DONE);
++ }
++ if (status & _PSB_CE_DPM_3D_MEM_FREE)
++ psb_dispatch_raster(dev_priv, scheduler, PSB_RF_DEALLOC);
++
++ if (status & _PSB_CE_TA_FINISHED)
++ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TA_DONE);
++
++ if (status & _PSB_CE_TA_TERMINATE)
++ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_TERMINATE);
++
++ if (status & (_PSB_CE_DPM_REACHED_MEM_THRESH |
++ _PSB_CE_DPM_OUT_OF_MEMORY_GBL |
++ _PSB_CE_DPM_OUT_OF_MEMORY_MT)) {
++ psb_dispatch_ta(dev_priv, scheduler, PSB_RF_OOM);
++ }
++ if (status & _PSB_CE_DPM_TA_MEM_FREE)
++ psb_ta_hw_scene_freed(dev_priv, scheduler);
++
++ if (status & _PSB_CE_SW_EVENT)
++ psb_user_interrupt(dev_priv, scheduler);
++
++ spin_unlock(&scheduler->lock);
++}
++
++static void psb_free_task_wq(struct work_struct *work)
++{
++ struct psb_scheduler *scheduler =
++ container_of(work, struct psb_scheduler, wq.work);
++
++ struct list_head *list, *next;
++ unsigned long irq_flags;
++ struct psb_task *task;
++
++ if (!mutex_trylock(&scheduler->task_wq_mutex))
++ return;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ list_for_each_safe(list, next, &scheduler->task_done_queue) {
++ task = list_entry(list, struct psb_task, head);
++ list_del_init(list);
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++
++ PSB_DEBUG_RENDER("Checking Task %d: Scene 0x%08lx, "
++ "Feedback bo 0x%08lx, done %d\n",
++ task->sequence,
++ (unsigned long) task->scene,
++ (unsigned long) task->feedback.bo,
++ atomic_read(&task->buf.done));
++
++ if (task->scene) {
++ PSB_DEBUG_RENDER("Unref scene %d\n",
++ task->sequence);
++ psb_scene_unref(&task->scene);
++ if (task->feedback.bo) {
++ PSB_DEBUG_RENDER("Unref feedback bo %d\n",
++ task->sequence);
++ ttm_bo_unref(&task->feedback.bo);
++ }
++ }
++
++ if (atomic_read(&task->buf.done)) {
++ PSB_DEBUG_RENDER("Deleting task %d\n",
++ task->sequence);
++ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
++ task = NULL;
++ }
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ if (task != NULL)
++ list_add(list, &scheduler->task_done_queue);
++ }
++ if (!list_empty(&scheduler->task_done_queue)) {
++ PSB_DEBUG_RENDER("Rescheduling wq\n");
++ schedule_delayed_work(&scheduler->wq, 1);
++ }
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++
++ if (list_empty(&scheduler->task_done_queue) &&
++ drm_psb_ospm && IS_MRST(scheduler->dev)) {
++ psb_try_power_down_sgx(scheduler->dev);
++ }
++ mutex_unlock(&scheduler->task_wq_mutex);
++}
++
++/*
++ * Check if any of the tasks in the queues is using a scene.
++ * In that case we know the TA memory buffer objects are
++ * fenced and will not be evicted until that fence is signaled.
++ */
++
++void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++ struct psb_task *task;
++ struct psb_task *next_task;
++
++ dev_priv->force_ta_mem_load = 1;
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ list_for_each_entry_safe(task, next_task, &scheduler->ta_queue,
++ head) {
++ if (task->scene) {
++ dev_priv->force_ta_mem_load = 0;
++ break;
++ }
++ }
++ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
++ head) {
++ if (task->scene) {
++ dev_priv->force_ta_mem_load = 0;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++}
++
++void psb_scheduler_reset(struct drm_psb_private *dev_priv,
++ int error_condition)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long wait_jiffies;
++ unsigned long cur_jiffies;
++ struct psb_task *task;
++ struct psb_task *next_task;
++ unsigned long irq_flags;
++
++ psb_scheduler_pause(dev_priv);
++ if (!psb_scheduler_idle(dev_priv)) {
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++
++ cur_jiffies = jiffies;
++ wait_jiffies = cur_jiffies;
++ if (scheduler->current_task[PSB_SCENE_ENGINE_TA] &&
++ time_after_eq(scheduler->ta_end_jiffies, wait_jiffies))
++ wait_jiffies = scheduler->ta_end_jiffies;
++ if (scheduler->current_task[PSB_SCENE_ENGINE_RASTER] &&
++ time_after_eq(scheduler->raster_end_jiffies,
++ wait_jiffies))
++ wait_jiffies = scheduler->raster_end_jiffies;
++
++ wait_jiffies -= cur_jiffies;
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++
++ (void) wait_event_timeout(scheduler->idle_queue,
++ psb_scheduler_idle(dev_priv),
++ wait_jiffies);
++ }
++
++ if (!psb_scheduler_idle(dev_priv)) {
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ task = scheduler->current_task[PSB_SCENE_ENGINE_RASTER];
++ if (task) {
++ DRM_ERROR("Detected Poulsbo rasterizer lockup.\n");
++ if (task->engine == PSB_ENGINE_HPRAST) {
++ psb_fence_error(scheduler->dev,
++ PSB_ENGINE_HPRAST,
++ task->sequence,
++ _PSB_FENCE_TYPE_RASTER_DONE,
++ error_condition);
++
++ list_del(&task->head);
++ psb_xhw_clean_buf(dev_priv, &task->buf);
++ list_add_tail(&task->head,
++ &scheduler->task_done_queue);
++ } else {
++ list_add(&task->head,
++ &scheduler->raster_queue);
++ }
++ }
++ scheduler->current_task[PSB_SCENE_ENGINE_RASTER] = NULL;
++ task = scheduler->current_task[PSB_SCENE_ENGINE_TA];
++ if (task) {
++ DRM_ERROR("Detected Poulsbo ta lockup.\n");
++ list_add_tail(&task->head,
++ &scheduler->raster_queue);
++#ifdef FIX_TG_16
++ psb_2d_atomic_unlock(dev_priv);
++#endif
++ }
++ scheduler->current_task[PSB_SCENE_ENGINE_TA] = NULL;
++ scheduler->ta_state = 0;
++
++#ifdef FIX_TG_16
++ atomic_set(&dev_priv->ta_wait_2d, 0);
++ atomic_set(&dev_priv->ta_wait_2d_irq, 0);
++ wake_up(&dev_priv->queue_2d);
++#endif
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ }
++
++ /*
++ * Empty raster queue.
++ */
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ list_for_each_entry_safe(task, next_task, &scheduler->raster_queue,
++ head) {
++ struct psb_scene *scene = task->scene;
++
++ DRM_INFO("Signaling fence sequence %u\n",
++ task->sequence);
++
++ psb_fence_error(scheduler->dev,
++ task->engine,
++ task->sequence,
++ _PSB_FENCE_TYPE_TA_DONE |
++ _PSB_FENCE_TYPE_RASTER_DONE |
++ _PSB_FENCE_TYPE_SCENE_DONE |
++ _PSB_FENCE_TYPE_FEEDBACK, error_condition);
++ if (scene) {
++ scene->flags = 0;
++ if (scene->hw_scene) {
++ list_add_tail(&scene->hw_scene->head,
++ &scheduler->hw_scenes);
++ scene->hw_scene = NULL;
++ }
++ }
++
++ psb_xhw_clean_buf(dev_priv, &task->buf);
++ list_del(&task->head);
++ list_add_tail(&task->head, &scheduler->task_done_queue);
++ }
++
++ schedule_delayed_work(&scheduler->wq, 1);
++ scheduler->idle = 1;
++ wake_up(&scheduler->idle_queue);
++
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++ psb_scheduler_restart(dev_priv);
++
++}
++
++int psb_scheduler_init(struct drm_device *dev,
++ struct psb_scheduler *scheduler)
++{
++ struct psb_hw_scene *hw_scene;
++ int i;
++
++ memset(scheduler, 0, sizeof(*scheduler));
++ scheduler->dev = dev;
++ mutex_init(&scheduler->task_wq_mutex);
++ spin_lock_init(&scheduler->lock);
++ scheduler->idle = 1;
++
++ INIT_LIST_HEAD(&scheduler->ta_queue);
++ INIT_LIST_HEAD(&scheduler->raster_queue);
++ INIT_LIST_HEAD(&scheduler->hp_raster_queue);
++ INIT_LIST_HEAD(&scheduler->hw_scenes);
++ INIT_LIST_HEAD(&scheduler->task_done_queue);
++ INIT_DELAYED_WORK(&scheduler->wq, &psb_free_task_wq);
++ init_waitqueue_head(&scheduler->idle_queue);
++
++ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
++ hw_scene = &scheduler->hs[i];
++ hw_scene->context_number = i;
++ list_add_tail(&hw_scene->head, &scheduler->hw_scenes);
++ }
++
++ for (i = 0; i < _PSB_ENGINE_TA_FENCE_TYPES; ++i)
++ scheduler->seq[i].reported = 0;
++ return 0;
++}
++
++/*
++ * Scene references maintained by the scheduler are not refcounted.
++ * Remove all references to a particular scene here.
++ */
++
++void psb_scheduler_remove_scene_refs(struct psb_scene *scene)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) scene->dev->dev_private;
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ struct psb_hw_scene *hw_scene;
++ unsigned long irq_flags;
++ unsigned int i;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ for (i = 0; i < PSB_NUM_HW_SCENES; ++i) {
++ hw_scene = &scheduler->hs[i];
++ if (hw_scene->last_scene == scene) {
++ BUG_ON(list_empty(&hw_scene->head));
++ hw_scene->last_scene = NULL;
++ }
++ }
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++}
++
++void psb_scheduler_takedown(struct psb_scheduler *scheduler)
++{
++ flush_scheduled_work();
++}
++
++static int psb_setup_task(struct drm_device *dev,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *raster_cmd_buffer,
++ struct ttm_buffer_object *ta_cmd_buffer,
++ struct ttm_buffer_object *oom_cmd_buffer,
++ struct psb_scene *scene,
++ enum psb_task_type task_type,
++ uint32_t engine,
++ uint32_t flags, struct psb_task **task_p)
++{
++ struct psb_task *task;
++ int ret;
++
++ if (ta_cmd_buffer && arg->ta_size > PSB_MAX_TA_CMDS) {
++ DRM_ERROR("Too many ta cmds %d.\n", arg->ta_size);
++ return -EINVAL;
++ }
++ if (raster_cmd_buffer && arg->cmdbuf_size > PSB_MAX_RASTER_CMDS) {
++ DRM_ERROR("Too many raster cmds %d.\n", arg->cmdbuf_size);
++ return -EINVAL;
++ }
++ if (oom_cmd_buffer && arg->oom_size > PSB_MAX_OOM_CMDS) {
++ DRM_ERROR("Too many oom cmds %d.\n", arg->oom_size);
++ return -EINVAL;
++ }
++
++ task = drm_calloc(1, sizeof(*task), DRM_MEM_DRIVER);
++ if (!task)
++ return -ENOMEM;
++
++ atomic_set(&task->buf.done, 1);
++ task->engine = engine;
++ INIT_LIST_HEAD(&task->head);
++ INIT_LIST_HEAD(&task->buf.head);
++ if (ta_cmd_buffer && arg->ta_size != 0) {
++ task->ta_cmd_size = arg->ta_size;
++ ret = psb_submit_copy_cmdbuf(dev, ta_cmd_buffer,
++ arg->ta_offset,
++ arg->ta_size,
++ PSB_ENGINE_TA, task->ta_cmds);
++ if (ret)
++ goto out_err;
++ }
++ if (raster_cmd_buffer) {
++ task->raster_cmd_size = arg->cmdbuf_size;
++ ret = psb_submit_copy_cmdbuf(dev, raster_cmd_buffer,
++ arg->cmdbuf_offset,
++ arg->cmdbuf_size,
++ PSB_ENGINE_TA,
++ task->raster_cmds);
++ if (ret)
++ goto out_err;
++ }
++ if (oom_cmd_buffer && arg->oom_size != 0) {
++ task->oom_cmd_size = arg->oom_size;
++ ret = psb_submit_copy_cmdbuf(dev, oom_cmd_buffer,
++ arg->oom_offset,
++ arg->oom_size,
++ PSB_ENGINE_TA,
++ task->oom_cmds);
++ if (ret)
++ goto out_err;
++ }
++ task->task_type = task_type;
++ task->flags = flags;
++ if (scene)
++ task->scene = psb_scene_ref(scene);
++
++ *task_p = task;
++ return 0;
++out_err:
++ drm_free(task, sizeof(*task), DRM_MEM_DRIVER);
++ *task_p = NULL;
++ return ret;
++}
++
++int psb_cmdbuf_ta(struct drm_file *priv,
++ struct psb_context *context,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct ttm_buffer_object *ta_buffer,
++ struct ttm_buffer_object *oom_buffer,
++ struct psb_scene *scene,
++ struct psb_feedback_info *feedback,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_fence_object *fence = NULL;
++ struct psb_task *task = NULL;
++ int ret;
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ uint32_t sequence;
++
++ PSB_DEBUG_RENDER("Cmdbuf ta\n");
++
++ ret = psb_setup_task(dev, arg, cmd_buffer, ta_buffer,
++ oom_buffer, scene,
++ psb_ta_task, PSB_ENGINE_TA,
++ PSB_FIRE_FLAG_RASTER_DEALLOC, &task);
++
++ if (ret)
++ goto out_err;
++
++ task->feedback = *feedback;
++ mutex_lock(&dev_priv->reset_mutex);
++
++ /*
++ * Hand the task over to the scheduler.
++ */
++
++ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
++
++ task->ta_complete_action = PSB_RASTER;
++ task->raster_complete_action = PSB_RETURN;
++ sequence = task->sequence;
++
++ spin_lock_irq(&scheduler->lock);
++
++ list_add_tail(&task->head, &scheduler->ta_queue);
++ PSB_DEBUG_RENDER("queued ta %u\n", task->sequence);
++
++ psb_schedule_ta(dev_priv, scheduler);
++
++ /**
++ * From this point we may no longer dereference task,
++ * as the object it points to may be freed by another thread.
++ */
++
++ task = NULL;
++ spin_unlock_irq(&scheduler->lock);
++ mutex_unlock(&dev_priv->reset_mutex);
++
++ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
++ arg->fence_flags,
++ &context->validate_list, fence_arg, &fence);
++ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
++
++ if (fence) {
++ spin_lock_irq(&scheduler->lock);
++ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA,
++ sequence, _PSB_FENCE_EXE_SHIFT, 1);
++ spin_unlock_irq(&scheduler->lock);
++ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
++ }
++
++out_err:
++ if (ret && ret != -ERESTART)
++ DRM_ERROR("TA task queue job failed.\n");
++
++ if (fence) {
++#ifdef PSB_WAIT_FOR_TA_COMPLETION
++ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
++ _PSB_FENCE_TYPE_TA_DONE);
++#ifdef PSB_BE_PARANOID
++ ttm_fence_object_wait(fence, 1, 1, DRM_FENCE_TYPE_EXE |
++ _PSB_FENCE_TYPE_SCENE_DONE);
++#endif
++#endif
++ ttm_fence_object_unref(&fence);
++ }
++ return ret;
++}
++
++int psb_cmdbuf_raster(struct drm_file *priv,
++ struct psb_context *context,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_fence_object *fence = NULL;
++ struct psb_task *task = NULL;
++ int ret;
++ uint32_t sequence;
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++
++ PSB_DEBUG_RENDER("Cmdbuf Raster\n");
++
++ ret = psb_setup_task(dev, arg, cmd_buffer, NULL, NULL,
++ NULL, psb_raster_task,
++ PSB_ENGINE_TA, 0, &task);
++
++ if (ret)
++ goto out_err;
++
++ /*
++ * Hand the task over to the scheduler.
++ */
++
++ mutex_lock(&dev_priv->reset_mutex);
++ task->sequence = psb_fence_advance_sequence(dev, PSB_ENGINE_TA);
++ task->ta_complete_action = PSB_RASTER;
++ task->raster_complete_action = PSB_RETURN;
++ sequence = task->sequence;
++
++ spin_lock_irq(&scheduler->lock);
++ list_add_tail(&task->head, &scheduler->ta_queue);
++ PSB_DEBUG_RENDER("queued raster %u\n", task->sequence);
++ psb_schedule_ta(dev_priv, scheduler);
++
++ /**
++ * From this point we may no longer dereference task,
++ * as the object it points to may be freed by another thread.
++ */
++
++ task = NULL;
++ spin_unlock_irq(&scheduler->lock);
++ mutex_unlock(&dev_priv->reset_mutex);
++
++ psb_fence_or_sync(priv, PSB_ENGINE_TA, context->fence_types,
++ arg->fence_flags,
++ &context->validate_list, fence_arg, &fence);
++
++ ttm_eu_fence_buffer_objects(&context->kern_validate_list, fence);
++ if (fence) {
++ spin_lock_irq(&scheduler->lock);
++ psb_report_fence(dev_priv, scheduler, PSB_ENGINE_TA, sequence,
++ _PSB_FENCE_EXE_SHIFT, 1);
++ spin_unlock_irq(&scheduler->lock);
++ fence_arg->signaled_types |= _PSB_FENCE_TYPE_EXE;
++ }
++out_err:
++ if (ret && ret != -ERESTART)
++ DRM_ERROR("Raster task queue job failed.\n");
++
++ if (fence) {
++#ifdef PSB_WAIT_FOR_RASTER_COMPLETION
++ ttm_fence_object_wait(fence, 1, 1, fence->type);
++#endif
++ ttm_fence_object_unref(&fence);
++ }
++
++ return ret;
++}
++
++#ifdef FIX_TG_16
++
++static int psb_check_2d_idle(struct drm_psb_private *dev_priv)
++{
++ if (psb_2d_trylock(dev_priv)) {
++ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
++ !((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
++ _PSB_C2B_STATUS_BUSY))) {
++ return 0;
++ }
++ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 0, 1) == 0)
++ psb_2D_irq_on(dev_priv);
++
++ PSB_WSGX32(PSB_2D_FENCE_BH, PSB_SGX_2D_SLAVE_PORT);
++ PSB_WSGX32(PSB_2D_FLUSH_BH, PSB_SGX_2D_SLAVE_PORT);
++ (void) PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT);
++
++ psb_2d_atomic_unlock(dev_priv);
++ }
++
++ atomic_set(&dev_priv->ta_wait_2d, 1);
++ return -EBUSY;
++}
++
++static void psb_atomic_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++
++ if (atomic_cmpxchg(&dev_priv->ta_wait_2d, 1, 0) == 1) {
++ psb_schedule_ta(dev_priv, scheduler);
++ if (atomic_read(&dev_priv->waiters_2d) != 0)
++ wake_up(&dev_priv->queue_2d);
++ }
++}
++
++void psb_resume_ta_2d_idle(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ if (atomic_cmpxchg(&dev_priv->ta_wait_2d_irq, 1, 0) == 1) {
++ atomic_set(&dev_priv->ta_wait_2d, 0);
++ psb_2D_irq_off(dev_priv);
++ psb_schedule_ta(dev_priv, scheduler);
++ if (atomic_read(&dev_priv->waiters_2d) != 0)
++ wake_up(&dev_priv->queue_2d);
++ }
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++}
++
++/*
++ * 2D locking functions. Can't use a mutex since the trylock() and
++ * unlock() methods need to be accessible from interrupt context.
++ */
++
++int psb_2d_trylock(struct drm_psb_private *dev_priv)
++{
++ return atomic_cmpxchg(&dev_priv->lock_2d, 0, 1) == 0;
++}
++
++void psb_2d_atomic_unlock(struct drm_psb_private *dev_priv)
++{
++ atomic_set(&dev_priv->lock_2d, 0);
++ if (atomic_read(&dev_priv->waiters_2d) != 0)
++ wake_up(&dev_priv->queue_2d);
++}
++
++void psb_2d_unlock(struct drm_psb_private *dev_priv)
++{
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&scheduler->lock, irq_flags);
++ psb_2d_atomic_unlock(dev_priv);
++ if (atomic_read(&dev_priv->ta_wait_2d) != 0)
++ psb_atomic_resume_ta_2d_idle(dev_priv);
++ spin_unlock_irqrestore(&scheduler->lock, irq_flags);
++}
++
++void psb_2d_lock(struct drm_psb_private *dev_priv)
++{
++ atomic_inc(&dev_priv->waiters_2d);
++ wait_event(dev_priv->queue_2d,
++ atomic_read(&dev_priv->ta_wait_2d) == 0);
++ wait_event(dev_priv->queue_2d, psb_2d_trylock(dev_priv));
++ atomic_dec(&dev_priv->waiters_2d);
++}
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_schedule.h b/drivers/gpu/drm/psb/psb_schedule.h
+--- a/drivers/gpu/drm/psb/psb_schedule.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_schedule.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,176 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++
++#ifndef _PSB_SCHEDULE_H_
++#define _PSB_SCHEDULE_H_
++
++#include <drm/drmP.h>
++
++struct psb_context;
++
++enum psb_task_type {
++ psb_ta_midscene_task,
++ psb_ta_task,
++ psb_raster_task,
++ psb_freescene_task
++};
++
++#define PSB_MAX_TA_CMDS 60
++#define PSB_MAX_RASTER_CMDS 60
++#define PSB_MAX_OOM_CMDS (DRM_PSB_NUM_RASTER_USE_REG * 2 + 6)
++
++struct psb_xhw_buf {
++ struct list_head head;
++ int copy_back;
++ atomic_t done;
++ struct drm_psb_xhw_arg arg;
++
++};
++
++struct psb_feedback_info {
++ struct ttm_buffer_object *bo;
++ struct page *page;
++ uint32_t offset;
++};
++
++struct psb_task {
++ struct list_head head;
++ struct psb_scene *scene;
++ struct psb_feedback_info feedback;
++ enum psb_task_type task_type;
++ uint32_t engine;
++ uint32_t sequence;
++ uint32_t ta_cmds[PSB_MAX_TA_CMDS];
++ uint32_t raster_cmds[PSB_MAX_RASTER_CMDS];
++ uint32_t oom_cmds[PSB_MAX_OOM_CMDS];
++ uint32_t ta_cmd_size;
++ uint32_t raster_cmd_size;
++ uint32_t oom_cmd_size;
++ uint32_t feedback_offset;
++ uint32_t ta_complete_action;
++ uint32_t raster_complete_action;
++ uint32_t hw_cookie;
++ uint32_t flags;
++ uint32_t reply_flags;
++ uint32_t aborting;
++ struct psb_xhw_buf buf;
++};
++
++struct psb_hw_scene {
++ struct list_head head;
++ uint32_t context_number;
++
++ /*
++ * This pointer does not refcount the last_scene_buffer,
++ * so we must make sure it is set to NULL before destroying
++ * the corresponding task.
++ */
++
++ struct psb_scene *last_scene;
++};
++
++struct psb_scene;
++struct drm_psb_private;
++
++struct psb_scheduler_seq {
++ uint32_t sequence;
++ int reported;
++};
++
++struct psb_scheduler {
++ struct drm_device *dev;
++ struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
++ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
++ struct mutex task_wq_mutex;
++ spinlock_t lock;
++ struct list_head hw_scenes;
++ struct list_head ta_queue;
++ struct list_head raster_queue;
++ struct list_head hp_raster_queue;
++ struct list_head task_done_queue;
++ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
++ struct psb_task *feedback_task;
++ int ta_state;
++ struct psb_hw_scene *pending_hw_scene;
++ uint32_t pending_hw_scene_seq;
++ struct delayed_work wq;
++ struct psb_scene_pool *pool;
++ uint32_t idle_count;
++ int idle;
++ wait_queue_head_t idle_queue;
++ unsigned long ta_end_jiffies;
++ unsigned long total_ta_jiffies;
++ unsigned long raster_end_jiffies;
++ unsigned long total_raster_jiffies;
++};
++
++#define PSB_RF_FIRE_TA (1 << 0)
++#define PSB_RF_OOM (1 << 1)
++#define PSB_RF_OOM_REPLY (1 << 2)
++#define PSB_RF_TERMINATE (1 << 3)
++#define PSB_RF_TA_DONE (1 << 4)
++#define PSB_RF_FIRE_RASTER (1 << 5)
++#define PSB_RF_RASTER_DONE (1 << 6)
++#define PSB_RF_DEALLOC (1 << 7)
++
++extern struct psb_scene_pool *psb_alloc_scene_pool(struct drm_file *priv,
++ int shareable,
++ uint32_t w, uint32_t h);
++extern uint32_t psb_scene_handle(struct psb_scene *scene);
++extern int psb_scheduler_init(struct drm_device *dev,
++ struct psb_scheduler *scheduler);
++extern void psb_scheduler_takedown(struct psb_scheduler *scheduler);
++extern int psb_cmdbuf_ta(struct drm_file *priv,
++ struct psb_context *context,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct ttm_buffer_object *ta_buffer,
++ struct ttm_buffer_object *oom_buffer,
++ struct psb_scene *scene,
++ struct psb_feedback_info *feedback,
++ struct psb_ttm_fence_rep *fence_arg);
++extern int psb_cmdbuf_raster(struct drm_file *priv,
++ struct psb_context *context,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++extern void psb_scheduler_handler(struct drm_psb_private *dev_priv,
++ uint32_t status);
++extern void psb_scheduler_pause(struct drm_psb_private *dev_priv);
++extern void psb_scheduler_restart(struct drm_psb_private *dev_priv);
++extern int psb_scheduler_idle(struct drm_psb_private *dev_priv);
++extern int psb_scheduler_finished(struct drm_psb_private *dev_priv);
++
++extern void psb_scheduler_lockup(struct drm_psb_private *dev_priv,
++ int *lockup, int *idle);
++extern void psb_scheduler_reset(struct drm_psb_private *dev_priv,
++ int error_condition);
++extern int psb_forced_user_interrupt(struct drm_psb_private *dev_priv);
++extern void psb_scheduler_remove_scene_refs(struct psb_scene *scene);
++extern void psb_scheduler_ta_mem_check(struct drm_psb_private *dev_priv);
++extern int psb_extend_timeout(struct drm_psb_private *dev_priv,
++ uint32_t xhw_lockup);
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_setup.c b/drivers/gpu/drm/psb/psb_setup.c
+--- a/drivers/gpu/drm/psb/psb_setup.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_setup.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,18 @@
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include "psb_intel_drv.h"
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++/* Fixed name */
++#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
++#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
++
++#include "psb_intel_i2c.c"
++#include "psb_intel_sdvo.c"
++#include "psb_intel_modes.c"
++#include "psb_intel_lvds.c"
++#include "psb_intel_dsi.c"
++#include "psb_intel_display.c"
+diff -uNr a/drivers/gpu/drm/psb/psb_sgx.c b/drivers/gpu/drm/psb/psb_sgx.c
+--- a/drivers/gpu/drm/psb/psb_sgx.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_sgx.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1869 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "psb_reg.h"
++#include "psb_scene.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "ttm/ttm_bo_api.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_userobj_api.h"
++#include "ttm/ttm_placement_common.h"
++#include "psb_sgx.h"
++
++static inline int psb_same_page(unsigned long offset,
++ unsigned long offset2)
++{
++ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
++}
++
++static inline unsigned long psb_offset_end(unsigned long offset,
++ unsigned long end)
++{
++ offset = (offset + PAGE_SIZE) & PAGE_MASK;
++ return (end < offset) ? end : offset;
++}
++
++static void psb_idle_engine(struct drm_device *dev, int engine);
++
++struct psb_dstbuf_cache {
++ unsigned int dst;
++ struct ttm_buffer_object *dst_buf;
++ unsigned long dst_offset;
++ uint32_t *dst_page;
++ unsigned int dst_page_offset;
++ struct ttm_bo_kmap_obj dst_kmap;
++ bool dst_is_iomem;
++};
++
++struct psb_validate_buffer {
++ struct ttm_validate_buffer base;
++ struct psb_validate_req req;
++ int ret;
++ struct psb_validate_arg __user *user_val_arg;
++ uint32_t flags;
++ uint32_t offset;
++ int po_correct;
++};
++
++
++
++#define PSB_REG_GRAN_SHIFT 2
++#define PSB_REG_GRANULARITY (1 << PSB_REG_GRAN_SHIFT)
++#define PSB_MAX_REG 0x1000
++
++static const uint32_t disallowed_ranges[][2] = {
++ {0x0000, 0x0200},
++ {0x0208, 0x0214},
++ {0x021C, 0x0224},
++ {0x0230, 0x0234},
++ {0x0248, 0x024C},
++ {0x0254, 0x0358},
++ {0x0428, 0x0428},
++ {0x0430, 0x043C},
++ {0x0498, 0x04B4},
++ {0x04CC, 0x04D8},
++ {0x04E0, 0x07FC},
++ {0x0804, 0x0A14},
++ {0x0A4C, 0x0A58},
++ {0x0A68, 0x0A80},
++ {0x0AA0, 0x0B1C},
++ {0x0B2C, 0x0CAC},
++ {0x0CB4, PSB_MAX_REG - PSB_REG_GRANULARITY}
++};
++
++static uint32_t psb_disallowed_regs[PSB_MAX_REG /
++ (PSB_REG_GRANULARITY *
++ (sizeof(uint32_t) << 3))];
++
++static inline int psb_disallowed(uint32_t reg)
++{
++ reg >>= PSB_REG_GRAN_SHIFT;
++ return (psb_disallowed_regs[reg >> 5] & (1 << (reg & 31))) != 0;
++}
++
++void psb_init_disallowed(void)
++{
++ int i;
++ uint32_t reg, tmp;
++ static int initialized;
++
++ if (initialized)
++ return;
++
++ initialized = 1;
++ memset(psb_disallowed_regs, 0, sizeof(psb_disallowed_regs));
++
++ for (i = 0;
++ i < (sizeof(disallowed_ranges) / (2 * sizeof(uint32_t)));
++ ++i) {
++ for (reg = disallowed_ranges[i][0];
++ reg <= disallowed_ranges[i][1]; reg += 4) {
++ tmp = reg >> 2;
++ psb_disallowed_regs[tmp >> 5] |= (1 << (tmp & 31));
++ }
++ }
++}
++
++static int psb_memcpy_check(uint32_t *dst, const uint32_t *src,
++ uint32_t size)
++{
++ size >>= 3;
++ while (size--) {
++ if (unlikely((*src >= 0x1000) || psb_disallowed(*src))) {
++ DRM_ERROR("Forbidden SGX register access: "
++ "0x%04x.\n", *src);
++ return -EPERM;
++ }
++ *dst++ = *src++;
++ *dst++ = *src++;
++ }
++ return 0;
++}
++
++int psb_2d_wait_available(struct drm_psb_private *dev_priv,
++ unsigned size)
++{
++ uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
++ int ret = 0;
++
++retry:
++ if (avail < size) {
++#if 0
++ /* We'd ideally
++ * like to have an IRQ-driven event here.
++ */
++
++ psb_2D_irq_on(dev_priv);
++ DRM_WAIT_ON(ret, dev_priv->event_2d_queue, DRM_HZ,
++ ((avail =
++ PSB_RSGX32(PSB_CR_2D_SOCIF)) >= size));
++ psb_2D_irq_off(dev_priv);
++ if (ret == 0)
++ return 0;
++ if (ret == -EINTR) {
++ ret = 0;
++ goto retry;
++ }
++#else
++ avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
++ goto retry;
++#endif
++ }
++ return ret;
++}
++
++int psb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
++ unsigned size)
++{
++ int ret = 0;
++ int i;
++ unsigned submit_size;
++
++ while (size > 0) {
++ submit_size = (size < 0x60) ? size : 0x60;
++ size -= submit_size;
++ ret = psb_2d_wait_available(dev_priv, submit_size);
++ if (ret)
++ return ret;
++
++ submit_size <<= 2;
++ mutex_lock(&dev_priv->reset_mutex);
++ for (i = 0; i < submit_size; i += 4) {
++ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
++ }
++ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
++ mutex_unlock(&dev_priv->reset_mutex);
++ }
++ return 0;
++}
++
++int psb_blit_sequence(struct drm_psb_private *dev_priv, uint32_t sequence)
++{
++ uint32_t buffer[8];
++ uint32_t *bufp = buffer;
++ int ret;
++
++ *bufp++ = PSB_2D_FENCE_BH;
++
++ *bufp++ = PSB_2D_DST_SURF_BH |
++ PSB_2D_DST_8888ARGB | (4 << PSB_2D_DST_STRIDE_SHIFT);
++ *bufp++ = dev_priv->comm_mmu_offset - dev_priv->mmu_2d_offset;
++
++ *bufp++ = PSB_2D_BLIT_BH |
++ PSB_2D_ROT_NONE |
++ PSB_2D_COPYORDER_TL2BR |
++ PSB_2D_DSTCK_DISABLE |
++ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
++
++ *bufp++ = sequence << PSB_2D_FILLCOLOUR_SHIFT;
++ *bufp++ = (0 << PSB_2D_DST_XSTART_SHIFT) |
++ (0 << PSB_2D_DST_YSTART_SHIFT);
++ *bufp++ =
++ (1 << PSB_2D_DST_XSIZE_SHIFT) | (1 << PSB_2D_DST_YSIZE_SHIFT);
++
++ *bufp++ = PSB_2D_FLUSH_BH;
++
++ psb_2d_lock(dev_priv);
++ ret = psb_2d_submit(dev_priv, buffer, bufp - buffer);
++ psb_2d_unlock(dev_priv);
++
++ if (!ret)
++ psb_schedule_watchdog(dev_priv);
++ return ret;
++}
++
++int psb_emit_2d_copy_blit(struct drm_device *dev,
++ uint32_t src_offset,
++ uint32_t dst_offset, uint32_t pages,
++ int direction)
++{
++ uint32_t cur_pages;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t buf[10];
++ uint32_t *bufp;
++ uint32_t xstart;
++ uint32_t ystart;
++ uint32_t blit_cmd;
++ uint32_t pg_add;
++ int ret = 0;
++
++ if (!dev_priv)
++ return 0;
++
++ if (direction) {
++ pg_add = (pages - 1) << PAGE_SHIFT;
++ src_offset += pg_add;
++ dst_offset += pg_add;
++ }
++
++ blit_cmd = PSB_2D_BLIT_BH |
++ PSB_2D_ROT_NONE |
++ PSB_2D_DSTCK_DISABLE |
++ PSB_2D_SRCCK_DISABLE |
++ PSB_2D_USE_PAT |
++ PSB_2D_ROP3_SRCCOPY |
++ (direction ? PSB_2D_COPYORDER_BR2TL : PSB_2D_COPYORDER_TL2BR);
++ xstart = (direction) ? ((PAGE_SIZE - 1) >> 2) : 0;
++
++ psb_2d_lock(dev_priv);
++ while (pages > 0) {
++ cur_pages = pages;
++ if (cur_pages > 2048)
++ cur_pages = 2048;
++ pages -= cur_pages;
++ ystart = (direction) ? cur_pages - 1 : 0;
++
++ bufp = buf;
++ *bufp++ = PSB_2D_FENCE_BH;
++
++ *bufp++ = PSB_2D_DST_SURF_BH | PSB_2D_DST_8888ARGB |
++ (PAGE_SIZE << PSB_2D_DST_STRIDE_SHIFT);
++ *bufp++ = dst_offset;
++ *bufp++ = PSB_2D_SRC_SURF_BH | PSB_2D_SRC_8888ARGB |
++ (PAGE_SIZE << PSB_2D_SRC_STRIDE_SHIFT);
++ *bufp++ = src_offset;
++ *bufp++ =
++ PSB_2D_SRC_OFF_BH | (xstart <<
++ PSB_2D_SRCOFF_XSTART_SHIFT) |
++ (ystart << PSB_2D_SRCOFF_YSTART_SHIFT);
++ *bufp++ = blit_cmd;
++ *bufp++ = (xstart << PSB_2D_DST_XSTART_SHIFT) |
++ (ystart << PSB_2D_DST_YSTART_SHIFT);
++ *bufp++ = ((PAGE_SIZE >> 2) << PSB_2D_DST_XSIZE_SHIFT) |
++ (cur_pages << PSB_2D_DST_YSIZE_SHIFT);
++
++ ret = psb_2d_submit(dev_priv, buf, bufp - buf);
++ if (ret)
++ goto out;
++ pg_add =
++ (cur_pages << PAGE_SHIFT) * ((direction) ? -1 : 1);
++ src_offset += pg_add;
++ dst_offset += pg_add;
++ }
++out:
++ psb_2d_unlock(dev_priv);
++ return ret;
++}
++
++void psb_init_2d(struct drm_psb_private *dev_priv)
++{
++ spin_lock_init(&dev_priv->sequence_lock);
++ psb_reset(dev_priv, 1);
++ dev_priv->mmu_2d_offset = dev_priv->pg->gatt_start;
++ PSB_WSGX32(dev_priv->mmu_2d_offset, PSB_CR_BIF_TWOD_REQ_BASE);
++ (void) PSB_RSGX32(PSB_CR_BIF_TWOD_REQ_BASE);
++}
++
++int psb_idle_2d(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long _end = jiffies + DRM_HZ;
++ int busy = 0;
++
++ /*
++ * First idle the 2D engine.
++ */
++
++ if (dev_priv->engine_lockup_2d)
++ return -EBUSY;
++
++ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
++ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) ==
++ 0))
++ goto out;
++
++ do {
++ busy =
++ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
++ } while (busy && !time_after_eq(jiffies, _end));
++
++ if (busy)
++ busy =
++ (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
++ if (busy)
++ goto out;
++
++ do {
++ busy =
++ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
++ _PSB_C2B_STATUS_BUSY)
++ != 0);
++ } while (busy && !time_after_eq(jiffies, _end));
++ if (busy)
++ busy =
++ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
++ _PSB_C2B_STATUS_BUSY)
++ != 0);
++
++out:
++ if (busy)
++ dev_priv->engine_lockup_2d = 1;
++
++ return (busy) ? -EBUSY : 0;
++}
++
++int psb_idle_3d(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ int ret;
++
++ ret = wait_event_timeout(scheduler->idle_queue,
++ psb_scheduler_finished(dev_priv),
++ DRM_HZ * 10);
++
++ return (ret < 1) ? -EBUSY : 0;
++}
++
++static int psb_check_presumed(struct psb_validate_req *req,
++ struct ttm_buffer_object *bo,
++ struct psb_validate_arg __user *data,
++ int *presumed_ok)
++{
++ struct psb_validate_req __user *user_req = &(data->d.req);
++
++ *presumed_ok = 0;
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
++ *presumed_ok = 1;
++ return 0;
++ }
++
++ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
++ return 0;
++
++ if (bo->offset == req->presumed_gpu_offset) {
++ *presumed_ok = 1;
++ return 0;
++ }
++
++ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
++ &user_req->presumed_flags);
++}
++
++
++static void psb_unreference_buffers(struct psb_context *context)
++{
++ struct ttm_validate_buffer *entry, *next;
++ struct psb_validate_buffer *vbuf;
++ struct list_head *list = &context->validate_list;
++
++ list_for_each_entry_safe(entry, next, list, head) {
++ vbuf =
++ container_of(entry, struct psb_validate_buffer, base);
++ list_del(&entry->head);
++ ttm_bo_unref(&entry->bo);
++ }
++
++ list = &context->kern_validate_list;
++
++ list_for_each_entry_safe(entry, next, list, head) {
++ vbuf =
++ container_of(entry, struct psb_validate_buffer, base);
++ list_del(&entry->head);
++ ttm_bo_unref(&entry->bo);
++ }
++}
++
++
++static int psb_lookup_validate_buffer(struct drm_file *file_priv,
++ uint64_t data,
++ struct psb_validate_buffer *item)
++{
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++
++ item->user_val_arg =
++ (struct psb_validate_arg __user *) (unsigned long) data;
++
++ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
++ sizeof(item->req)) != 0)) {
++ DRM_ERROR("Lookup copy fault.\n");
++ return -EFAULT;
++ }
++
++ item->base.bo =
++ ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
++
++ if (unlikely(item->base.bo == NULL)) {
++ DRM_ERROR("Bo lookup fault.\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int psb_reference_buffers(struct drm_file *file_priv,
++ uint64_t data,
++ struct psb_context *context)
++{
++ struct psb_validate_buffer *item;
++ int ret;
++
++ while (likely(data != 0)) {
++ if (unlikely(context->used_buffers >=
++ PSB_NUM_VALIDATE_BUFFERS)) {
++ DRM_ERROR("Too many buffers "
++ "on validate list.\n");
++ ret = -EINVAL;
++ goto out_err0;
++ }
++
++ item = &context->buffers[context->used_buffers];
++
++ ret = psb_lookup_validate_buffer(file_priv, data, item);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ item->base.reserved = 0;
++ list_add_tail(&item->base.head, &context->validate_list);
++ context->used_buffers++;
++ data = item->req.next;
++ }
++ return 0;
++
++out_err0:
++ psb_unreference_buffers(context);
++ return ret;
++}
++
++static int
++psb_placement_fence_type(struct ttm_buffer_object *bo,
++ uint64_t set_val_flags,
++ uint64_t clr_val_flags,
++ uint32_t new_fence_class,
++ uint32_t *new_fence_type)
++{
++ int ret;
++ uint32_t n_fence_type;
++ uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
++ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
++ struct ttm_fence_object *old_fence;
++ uint32_t old_fence_type;
++
++ if (unlikely
++ (!(set_val_flags &
++ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
++ DRM_ERROR
++ ("GPU access type (read / write) is not indicated.\n");
++ return -EINVAL;
++ }
++
++ ret = ttm_bo_check_placement(bo, set_flags, clr_flags);
++ if (unlikely(ret != 0))
++ return ret;
++
++ switch (new_fence_class) {
++ case PSB_ENGINE_TA:
++ n_fence_type = _PSB_FENCE_TYPE_EXE |
++ _PSB_FENCE_TYPE_TA_DONE | _PSB_FENCE_TYPE_RASTER_DONE;
++ if (set_val_flags & PSB_BO_FLAG_TA)
++ n_fence_type &= ~_PSB_FENCE_TYPE_RASTER_DONE;
++ if (set_val_flags & PSB_BO_FLAG_COMMAND)
++ n_fence_type &=
++ ~(_PSB_FENCE_TYPE_RASTER_DONE |
++ _PSB_FENCE_TYPE_TA_DONE);
++ if (set_val_flags & PSB_BO_FLAG_SCENE)
++ n_fence_type |= _PSB_FENCE_TYPE_SCENE_DONE;
++ if (set_val_flags & PSB_BO_FLAG_FEEDBACK)
++ n_fence_type |= _PSB_FENCE_TYPE_FEEDBACK;
++ break;
++ default:
++ n_fence_type = _PSB_FENCE_TYPE_EXE;
++ }
++
++ *new_fence_type = n_fence_type;
++ old_fence = (struct ttm_fence_object *) bo->sync_obj;
++ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;
++
++ if (old_fence && ((new_fence_class != old_fence->fence_class) ||
++ ((n_fence_type ^ old_fence_type) &
++ old_fence_type))) {
++ ret = ttm_bo_wait(bo, 0, 1, 0);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ bo->proposed_flags = (bo->proposed_flags | set_flags)
++ & ~clr_flags & TTM_PL_MASK_MEMTYPE;
++
++ return 0;
++}
++
++int psb_validate_kernel_buffer(struct psb_context *context,
++ struct ttm_buffer_object *bo,
++ uint32_t fence_class,
++ uint64_t set_flags, uint64_t clr_flags)
++{
++ struct psb_validate_buffer *item;
++ uint32_t cur_fence_type;
++ int ret;
++
++ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
++ DRM_ERROR("Out of free validation buffer entries for "
++ "kernel buffer validation.\n");
++ return -ENOMEM;
++ }
++
++ item = &context->buffers[context->used_buffers];
++ item->user_val_arg = NULL;
++ item->base.reserved = 0;
++
++ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ mutex_lock(&bo->mutex);
++ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
++ &cur_fence_type);
++ if (unlikely(ret != 0)) {
++ ttm_bo_unreserve(bo);
++ goto out_unlock;
++ }
++
++ item->base.bo = ttm_bo_reference(bo);
++ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
++ item->base.reserved = 1;
++
++ list_add_tail(&item->base.head, &context->kern_validate_list);
++ context->used_buffers++;
++
++ ret = ttm_buffer_object_validate(bo, 1, 0);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ item->offset = bo->offset;
++ item->flags = bo->mem.flags;
++ context->fence_types |= cur_fence_type;
++
++out_unlock:
++ mutex_unlock(&bo->mutex);
++ return ret;
++}
++
++
++static int psb_validate_buffer_list(struct drm_file *file_priv,
++ uint32_t fence_class,
++ struct psb_context *context,
++ int *po_correct)
++{
++ struct psb_validate_buffer *item;
++ struct ttm_buffer_object *bo;
++ int ret;
++ struct psb_validate_req *req;
++ uint32_t fence_types = 0;
++ uint32_t cur_fence_type;
++ struct ttm_validate_buffer *entry;
++ struct list_head *list = &context->validate_list;
++
++ *po_correct = 1;
++
++ list_for_each_entry(entry, list, head) {
++ item =
++ container_of(entry, struct psb_validate_buffer, base);
++ bo = entry->bo;
++ item->ret = 0;
++ req = &item->req;
++
++ mutex_lock(&bo->mutex);
++ ret = psb_placement_fence_type(bo,
++ req->set_flags,
++ req->clear_flags,
++ fence_class,
++ &cur_fence_type);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ ret = ttm_buffer_object_validate(bo, 1, 0);
++
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ fence_types |= cur_fence_type;
++ entry->new_sync_obj_arg = (void *)
++ (unsigned long) cur_fence_type;
++
++ item->offset = bo->offset;
++ item->flags = bo->mem.flags;
++ mutex_unlock(&bo->mutex);
++
++ ret =
++ psb_check_presumed(&item->req, bo, item->user_val_arg,
++ &item->po_correct);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ if (unlikely(!item->po_correct))
++ *po_correct = 0;
++
++ item++;
++ }
++
++ context->fence_types |= fence_types;
++
++ return 0;
++out_err:
++ mutex_unlock(&bo->mutex);
++ item->ret = ret;
++ return ret;
++}
++
++
++int
++psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t *regs,
++ unsigned int cmds)
++{
++ int i;
++
++ /*
++ * cmds is 32-bit words.
++ */
++
++ cmds >>= 1;
++ for (i = 0; i < cmds; ++i) {
++ PSB_WSGX32(regs[1], regs[0]);
++ regs += 2;
++ }
++ wmb();
++ return 0;
++}
++
++/*
++ * Security: Block user-space writing to MMU mapping registers.
++ * This is important for security and brings Poulsbo DRM
++ * up to par with the other DRM drivers. Using this,
++ * user-space should not be able to map arbitrary memory
++ * pages to graphics memory, but all user-space processes
++ * basically have access to all buffer objects mapped to
++ * graphics memory.
++ */
++
++int
++psb_submit_copy_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ int engine, uint32_t *copy_buffer)
++{
++ unsigned long cmd_end = cmd_offset + (cmd_size << 2);
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long cmd_page_offset =
++ cmd_offset - (cmd_offset & PAGE_MASK);
++ unsigned long cmd_next;
++ struct ttm_bo_kmap_obj cmd_kmap;
++ uint32_t *cmd_page;
++ unsigned cmds;
++ bool is_iomem;
++ int ret = 0;
++
++ if (cmd_size == 0)
++ return 0;
++
++ if (engine == PSB_ENGINE_2D)
++ psb_2d_lock(dev_priv);
++
++ do {
++ cmd_next = psb_offset_end(cmd_offset, cmd_end);
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT,
++ 1, &cmd_kmap);
++
++ if (ret) {
++ if (engine == PSB_ENGINE_2D)
++ psb_2d_unlock(dev_priv);
++ return ret;
++ }
++ cmd_page = ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem);
++ cmd_page_offset = (cmd_offset & ~PAGE_MASK) >> 2;
++ cmds = (cmd_next - cmd_offset) >> 2;
++
++ switch (engine) {
++ case PSB_ENGINE_2D:
++ ret =
++ psb_2d_submit(dev_priv,
++ cmd_page + cmd_page_offset,
++ cmds);
++ break;
++ case PSB_ENGINE_RASTERIZER:
++ case PSB_ENGINE_TA:
++ case PSB_ENGINE_HPRAST:
++ PSB_DEBUG_GENERAL("Reg copy.\n");
++ ret = psb_memcpy_check(copy_buffer,
++ cmd_page + cmd_page_offset,
++ cmds * sizeof(uint32_t));
++ copy_buffer += cmds;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ ttm_bo_kunmap(&cmd_kmap);
++ if (ret)
++ break;
++ } while (cmd_offset = cmd_next, cmd_offset != cmd_end);
++
++ if (engine == PSB_ENGINE_2D)
++ psb_2d_unlock(dev_priv);
++
++ return ret;
++}
++
++static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
++{
++ if (dst_cache->dst_page) {
++ ttm_bo_kunmap(&dst_cache->dst_kmap);
++ dst_cache->dst_page = NULL;
++ }
++ dst_cache->dst_buf = NULL;
++ dst_cache->dst = ~0;
++}
++
++static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
++ struct psb_validate_buffer *buffers,
++ unsigned int dst,
++ unsigned long dst_offset)
++{
++ int ret;
++
++ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
++
++ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
++ psb_clear_dstbuf_cache(dst_cache);
++ dst_cache->dst = dst;
++ dst_cache->dst_buf = buffers[dst].base.bo;
++ }
++
++ if (unlikely
++ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
++ DRM_ERROR("Relocation destination out of bounds.\n");
++ return -EINVAL;
++ }
++
++ if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
++ NULL == dst_cache->dst_page) {
++ if (NULL != dst_cache->dst_page) {
++ ttm_bo_kunmap(&dst_cache->dst_kmap);
++ dst_cache->dst_page = NULL;
++ }
++
++ ret =
++ ttm_bo_kmap(dst_cache->dst_buf,
++ dst_offset >> PAGE_SHIFT, 1,
++ &dst_cache->dst_kmap);
++ if (ret) {
++ DRM_ERROR("Could not map destination buffer for "
++ "relocation.\n");
++ return ret;
++ }
++
++ dst_cache->dst_page =
++ ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
++ &dst_cache->dst_is_iomem);
++ dst_cache->dst_offset = dst_offset & PAGE_MASK;
++ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
++ }
++ return 0;
++}
++
++static int psb_apply_reloc(struct drm_psb_private *dev_priv,
++ uint32_t fence_class,
++ const struct drm_psb_reloc *reloc,
++ struct psb_validate_buffer *buffers,
++ int num_buffers,
++ struct psb_dstbuf_cache *dst_cache,
++ int no_wait, int interruptible)
++{
++ uint32_t val;
++ uint32_t background;
++ unsigned int index;
++ int ret;
++ unsigned int shift;
++ unsigned int align_shift;
++ struct ttm_buffer_object *reloc_bo;
++
++
++ PSB_DEBUG_GENERAL("Reloc type %d\n"
++ "\t where 0x%04x\n"
++ "\t buffer 0x%04x\n"
++ "\t mask 0x%08x\n"
++ "\t shift 0x%08x\n"
++ "\t pre_add 0x%08x\n"
++ "\t background 0x%08x\n"
++ "\t dst_buffer 0x%08x\n"
++ "\t arg0 0x%08x\n"
++ "\t arg1 0x%08x\n",
++ reloc->reloc_op,
++ reloc->where,
++ reloc->buffer,
++ reloc->mask,
++ reloc->shift,
++ reloc->pre_add,
++ reloc->background,
++ reloc->dst_buffer, reloc->arg0, reloc->arg1);
++
++ if (unlikely(reloc->buffer >= num_buffers)) {
++ DRM_ERROR("Illegal relocation buffer %d.\n",
++ reloc->buffer);
++ return -EINVAL;
++ }
++
++ if (buffers[reloc->buffer].po_correct)
++ return 0;
++
++ if (unlikely(reloc->dst_buffer >= num_buffers)) {
++ DRM_ERROR
++ ("Illegal destination buffer for relocation %d.\n",
++ reloc->dst_buffer);
++ return -EINVAL;
++ }
++
++ ret =
++ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
++ reloc->where << 2);
++ if (ret)
++ return ret;
++
++ reloc_bo = buffers[reloc->buffer].base.bo;
++
++ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
++ DRM_ERROR("Illegal relocation offset add.\n");
++ return -EINVAL;
++ }
++
++ switch (reloc->reloc_op) {
++ case PSB_RELOC_OP_OFFSET:
++ val = reloc_bo->offset + reloc->pre_add;
++ break;
++ case PSB_RELOC_OP_2D_OFFSET:
++ val = reloc_bo->offset + reloc->pre_add -
++ dev_priv->mmu_2d_offset;
++ if (unlikely(val >= PSB_2D_SIZE)) {
++ DRM_ERROR("2D relocation out of bounds\n");
++ return -EINVAL;
++ }
++ break;
++ case PSB_RELOC_OP_PDS_OFFSET:
++ val =
++ reloc_bo->offset + reloc->pre_add - PSB_MEM_PDS_START;
++ if (unlikely
++ (val >= (PSB_MEM_MMU_START - PSB_MEM_PDS_START))) {
++ DRM_ERROR("PDS relocation out of bounds\n");
++ return -EINVAL;
++ }
++ break;
++ default:
++ DRM_ERROR("Unimplemented relocation.\n");
++ return -EINVAL;
++ }
++
++ shift =
++ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
++ align_shift =
++ (reloc->
++ shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
++
++ val = ((val >> align_shift) << shift);
++ index = reloc->where - dst_cache->dst_page_offset;
++
++ background = reloc->background;
++ val = (background & ~reloc->mask) | (val & reloc->mask);
++ dst_cache->dst_page[index] = val;
++
++ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
++ reloc->dst_buffer, index,
++ dst_cache->dst_page[index]);
++
++ return 0;
++}
++
++static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
++ unsigned int num_pages)
++{
++ int ret = 0;
++
++ spin_lock(&dev_priv->reloc_lock);
++ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
++ dev_priv->rel_mapped_pages += num_pages;
++ ret = 1;
++ }
++ spin_unlock(&dev_priv->reloc_lock);
++ return ret;
++}
++
++static int psb_fixup_relocs(struct drm_file *file_priv,
++ uint32_t fence_class,
++ unsigned int num_relocs,
++ unsigned int reloc_offset,
++ uint32_t reloc_handle,
++ struct psb_context *context,
++ int no_wait, int interruptible)
++{
++ struct drm_device *dev = file_priv->minor->dev;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_buffer_object *reloc_buffer = NULL;
++ unsigned int reloc_num_pages;
++ unsigned int reloc_first_page;
++ unsigned int reloc_last_page;
++ struct psb_dstbuf_cache dst_cache;
++ struct drm_psb_reloc *reloc;
++ struct ttm_bo_kmap_obj reloc_kmap;
++ bool reloc_is_iomem;
++ int count;
++ int ret = 0;
++ int registered = 0;
++ uint32_t num_buffers = context->used_buffers;
++
++ if (num_relocs == 0)
++ return 0;
++
++ memset(&dst_cache, 0, sizeof(dst_cache));
++ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
++
++ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
++ if (!reloc_buffer)
++ goto out;
++
++ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
++ DRM_ERROR("Relocation buffer was not on validate list.\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ reloc_first_page = reloc_offset >> PAGE_SHIFT;
++ reloc_last_page =
++ (reloc_offset +
++ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
++ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
++ reloc_offset &= ~PAGE_MASK;
++
++ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
++ DRM_ERROR("Relocation buffer is too large\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
++ (registered =
++ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
++
++ if (ret == -EINTR) {
++ ret = -ERESTART;
++ goto out;
++ }
++ if (ret) {
++ DRM_ERROR("Error waiting for space to map "
++ "relocation buffer.\n");
++ goto out;
++ }
++
++ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
++ reloc_num_pages, &reloc_kmap);
++
++ if (ret) {
++ DRM_ERROR("Could not map relocation buffer.\n"
++ "\tReloc buffer id 0x%08x.\n"
++ "\tReloc first page %d.\n"
++ "\tReloc num pages %d.\n",
++ reloc_handle, reloc_first_page, reloc_num_pages);
++ goto out;
++ }
++
++ reloc = (struct drm_psb_reloc *)
++ ((unsigned long)
++ ttm_kmap_obj_virtual(&reloc_kmap,
++ &reloc_is_iomem) + reloc_offset);
++
++ for (count = 0; count < num_relocs; ++count) {
++ ret = psb_apply_reloc(dev_priv, fence_class,
++ reloc, context->buffers,
++ num_buffers, &dst_cache,
++ no_wait, interruptible);
++ if (ret)
++ goto out1;
++ reloc++;
++ }
++
++out1:
++ ttm_bo_kunmap(&reloc_kmap);
++out:
++ if (registered) {
++ spin_lock(&dev_priv->reloc_lock);
++ dev_priv->rel_mapped_pages -= reloc_num_pages;
++ spin_unlock(&dev_priv->reloc_lock);
++ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
++ }
++
++ psb_clear_dstbuf_cache(&dst_cache);
++ if (reloc_buffer)
++ ttm_bo_unref(&reloc_buffer);
++ return ret;
++}
++
++void psb_fence_or_sync(struct drm_file *file_priv,
++ uint32_t engine,
++ uint32_t fence_types,
++ uint32_t fence_flags,
++ struct list_head *list,
++ struct psb_ttm_fence_rep *fence_arg,
++ struct ttm_fence_object **fence_p)
++{
++ struct drm_device *dev = file_priv->minor->dev;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ int ret;
++ struct ttm_fence_object *fence;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ uint32_t handle;
++
++ ret = ttm_fence_user_create(fdev, tfile,
++ engine, fence_types,
++ TTM_FENCE_FLAG_EMIT, &fence, &handle);
++ if (ret) {
++
++ /*
++ * Fence creation failed.
++ * Fall back to synchronous operation and idle the engine.
++ */
++
++ psb_idle_engine(dev, engine);
++ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
++
++ /*
++ * Communicate to user-space that
++ * fence creation has failed and that
++ * the engine is idle.
++ */
++
++ fence_arg->handle = ~0;
++ fence_arg->error = ret;
++ }
++
++ ttm_eu_backoff_reservation(list);
++ if (fence_p)
++ *fence_p = NULL;
++ return;
++ }
++
++ ttm_eu_fence_buffer_objects(list, fence);
++ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
++ struct ttm_fence_info info = ttm_fence_get_info(fence);
++ fence_arg->handle = handle;
++ fence_arg->fence_class = ttm_fence_class(fence);
++ fence_arg->fence_type = ttm_fence_types(fence);
++ fence_arg->signaled_types = info.signaled_types;
++ fence_arg->error = 0;
++ } else {
++ ret =
++ ttm_ref_object_base_unref(tfile, handle,
++ ttm_fence_type);
++ BUG_ON(ret);
++ }
++
++ if (fence_p)
++ *fence_p = fence;
++ else if (fence)
++ ttm_fence_object_unref(&fence);
++}
++
++
++
++static int psb_cmdbuf_2d(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ int ret;
++
++ ret = psb_submit_copy_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, PSB_ENGINE_2D,
++ NULL);
++ if (ret)
++ goto out_unlock;
++
++ psb_fence_or_sync(priv, PSB_ENGINE_2D, fence_type,
++ arg->fence_flags, validate_list, fence_arg,
++ NULL);
++
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++out_unlock:
++ return ret;
++}
++
++#if 0
++static int psb_dump_page(struct ttm_buffer_object *bo,
++ unsigned int page_offset, unsigned int num)
++{
++ struct ttm_bo_kmap_obj kmobj;
++ int is_iomem;
++ uint32_t *p;
++ int ret;
++ unsigned int i;
++
++ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
++ if (ret)
++ return ret;
++
++ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
++ for (i = 0; i < num; ++i)
++ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
++
++ ttm_bo_kunmap(&kmobj);
++ return 0;
++}
++#endif
++
++static void psb_idle_engine(struct drm_device *dev, int engine)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ uint32_t dummy;
++ unsigned long dummy2;
++
++ switch (engine) {
++ case PSB_ENGINE_2D:
++
++ /*
++ * Make sure we flush 2D properly using a dummy
++ * fence sequence emit.
++ */
++
++ (void) psb_fence_emit_sequence(&dev_priv->fdev,
++ PSB_ENGINE_2D, 0,
++ &dummy, &dummy2);
++ psb_2d_lock(dev_priv);
++ (void) psb_idle_2d(dev);
++ psb_2d_unlock(dev_priv);
++ break;
++ case PSB_ENGINE_TA:
++ case PSB_ENGINE_RASTERIZER:
++ case PSB_ENGINE_HPRAST:
++ (void) psb_idle_3d(dev);
++ break;
++ default:
++
++ /*
++ * FIXME: Insert video engine idle command here.
++ */
++
++ break;
++ }
++}
++
++static int psb_handle_copyback(struct drm_device *dev,
++ struct psb_context *context,
++ int ret)
++{
++ int err = ret;
++ struct ttm_validate_buffer *entry;
++ struct psb_validate_arg arg;
++ struct list_head *list = &context->validate_list;
++
++ if (ret) {
++ ttm_eu_backoff_reservation(list);
++ ttm_eu_backoff_reservation(&context->kern_validate_list);
++ }
++
++
++ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
++ list_for_each_entry(entry, list, head) {
++ struct psb_validate_buffer *vbuf =
++ container_of(entry, struct psb_validate_buffer,
++ base);
++ arg.handled = 1;
++ arg.ret = vbuf->ret;
++ if (!arg.ret) {
++ struct ttm_buffer_object *bo = entry->bo;
++ mutex_lock(&bo->mutex);
++ arg.d.rep.gpu_offset = bo->offset;
++ arg.d.rep.placement = bo->mem.flags;
++ arg.d.rep.fence_type_mask =
++ (uint32_t) (unsigned long)
++ entry->new_sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ }
++
++ if (__copy_to_user(vbuf->user_val_arg,
++ &arg, sizeof(arg)))
++ err = -EFAULT;
++
++ if (arg.ret)
++ break;
++ }
++ }
++
++ return err;
++}
++
++
++static int psb_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence;
++ int ret;
++
++ /*
++ * Check this. Doesn't seem right. Have fencing done AFTER command
++ * submission and make sure drm_psb_idle idles the MSVDX completely.
++ */
++ ret =
++ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, NULL);
++ if (ret)
++ return ret;
++
++
++ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */
++ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
++ arg->fence_flags, validate_list, fence_arg,
++ &fence);
++
++
++ ttm_fence_object_unref(&fence);
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++ return 0;
++}
++
++static int psb_feedback_buf(struct ttm_object_file *tfile,
++ struct psb_context *context,
++ uint32_t feedback_ops,
++ uint32_t handle,
++ uint32_t offset,
++ uint32_t feedback_breakpoints,
++ uint32_t feedback_size,
++ struct psb_feedback_info *feedback)
++{
++ struct ttm_buffer_object *bo;
++ struct page *page;
++ uint32_t page_no;
++ uint32_t page_offset;
++ int ret;
++
++ if (feedback_ops & ~PSB_FEEDBACK_OP_VISTEST) {
++ DRM_ERROR("Illegal feedback op.\n");
++ return -EINVAL;
++ }
++
++ if (feedback_breakpoints != 0) {
++ DRM_ERROR("Feedback breakpoints not implemented yet.\n");
++ return -EINVAL;
++ }
++
++ if (feedback_size < PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t)) {
++ DRM_ERROR("Feedback buffer size too small.\n");
++ return -EINVAL;
++ }
++
++ page_offset = offset & ~PAGE_MASK;
++ if ((PAGE_SIZE - PSB_HW_FEEDBACK_SIZE * sizeof(uint32_t))
++ < page_offset) {
++ DRM_ERROR("Illegal feedback buffer alignment.\n");
++ return -EINVAL;
++ }
++
++ bo = ttm_buffer_object_lookup(tfile, handle);
++ if (unlikely(bo == NULL)) {
++ DRM_ERROR("Failed looking up feedback buffer.\n");
++ return -EINVAL;
++ }
++
++
++ ret = psb_validate_kernel_buffer(context, bo,
++ PSB_ENGINE_TA,
++ TTM_PL_FLAG_SYSTEM |
++ TTM_PL_FLAG_CACHED |
++ PSB_GPU_ACCESS_WRITE |
++ PSB_BO_FLAG_FEEDBACK,
++ TTM_PL_MASK_MEM &
++ ~(TTM_PL_FLAG_SYSTEM |
++ TTM_PL_FLAG_CACHED));
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ page_no = offset >> PAGE_SHIFT;
++ if (unlikely(page_no >= bo->num_pages)) {
++ ret = -EINVAL;
++ DRM_ERROR("Illegal feedback buffer offset.\n");
++ goto out_unref;
++ }
++
++ if (unlikely(bo->ttm == NULL)) {
++ ret = -EINVAL;
++ DRM_ERROR("Vistest buffer without TTM.\n");
++ goto out_unref;
++ }
++
++ page = ttm_tt_get_page(bo->ttm, page_no);
++ if (unlikely(page == NULL)) {
++ ret = -ENOMEM;
++ goto out_unref;
++ }
++
++ feedback->page = page;
++ feedback->offset = page_offset;
++
++ /*
++ * Note: bo referece transferred.
++ */
++
++ feedback->bo = bo;
++ return 0;
++
++out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++void psb_down_island_power(struct drm_device *dev, int islands)
++{
++ u32 pwr_cnt = 0;
++ pwr_cnt = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_CNT);
++ if (islands & PSB_GRAPHICS_ISLAND)
++ pwr_cnt |= 0x3;
++ if (islands & PSB_VIDEO_ENC_ISLAND)
++ pwr_cnt |= 0x30;
++ if (islands & PSB_VIDEO_DEC_ISLAND)
++ pwr_cnt |= 0xc;
++ MSG_WRITE32(PSB_PUNIT_PORT, PSB_PWRGT_CNT, pwr_cnt);
++}
++void psb_up_island_power(struct drm_device *dev, int islands)
++{
++ u32 pwr_cnt = 0;
++ u32 count = 5;
++ u32 pwr_sts = 0;
++ u32 pwr_mask = 0;
++ pwr_cnt = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_CNT);
++ if (islands & PSB_GRAPHICS_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ }
++ if (islands & PSB_VIDEO_ENC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (islands & PSB_VIDEO_DEC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++ MSG_WRITE32(PSB_PUNIT_PORT, PSB_PWRGT_CNT, pwr_cnt);
++ while (count--) {
++ pwr_sts = MSG_READ32(PSB_PUNIT_PORT, PSB_PWRGT_STS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++}
++
++static int psb_power_down_sgx(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ PSB_DEBUG_PM("power down sgx \n");
++
++#ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i0)
++ dev_priv->gfx_d0i0_time += jiffies - dev_priv->gfx_last_mode_change;
++ else
++ PSB_DEBUG_PM("power down:illegal previous power state\n");
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_d0i3_cnt++;
++#endif
++
++ dev_priv->saveCLOCKGATING = PSB_RSGX32(PSB_CR_CLKGATECTL);
++ dev_priv->graphics_state = PSB_PWR_STATE_D0i3;
++ psb_down_island_power(dev, PSB_GRAPHICS_ISLAND);
++ return 0;
++}
++static int psb_power_up_sgx(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) !=
++ PSB_PWR_STATE_D0i3)
++ return -EINVAL;
++
++ PSB_DEBUG_PM("power up sgx \n");
++ if (unlikely(PSB_D_PM & drm_psb_debug))
++ dump_stack();
++ INIT_LIST_HEAD(&dev_priv->resume_buf.head);
++
++ psb_up_island_power(dev, PSB_GRAPHICS_ISLAND);
++
++ /*
++ * The SGX loses it's register contents.
++ * Restore BIF registers. The MMU page tables are
++ * "normal" pages, so their contents should be kept.
++ */
++
++ PSB_WSGX32(dev_priv->saveCLOCKGATING, PSB_CR_CLKGATECTL);
++ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
++ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
++ PSB_RSGX32(PSB_CR_BIF_BANK1);
++
++ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
++ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
++ psb_mmu_enable_requestor(dev_priv->mmu, _PSB_MMU_ER_MASK);
++
++ /*
++ * 2D Base registers..
++ */
++ psb_init_2d(dev_priv);
++ /*
++ * Persistant 3D base registers and USSE base registers..
++ */
++
++ PSB_WSGX32(PSB_MEM_PDS_START, PSB_CR_PDS_EXEC_BASE);
++ PSB_WSGX32(PSB_MEM_RASTGEOM_START, PSB_CR_BIF_3D_REQ_BASE);
++ PSB_WSGX32(dev_priv->sgx2_irq_mask, PSB_CR_EVENT_HOST_ENABLE2);
++ PSB_WSGX32(dev_priv->sgx_irq_mask, PSB_CR_EVENT_HOST_ENABLE);
++ (void)PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
++ /*
++ * Now, re-initialize the 3D engine.
++ */
++ if (dev_priv->xhw_on)
++ psb_xhw_resume(dev_priv, &dev_priv->resume_buf);
++
++ psb_scheduler_ta_mem_check(dev_priv);
++ if (dev_priv->ta_mem && !dev_priv->force_ta_mem_load) {
++ psb_xhw_ta_mem_load(dev_priv, &dev_priv->resume_buf,
++ PSB_TA_MEM_FLAG_TA |
++ PSB_TA_MEM_FLAG_RASTER |
++ PSB_TA_MEM_FLAG_HOSTA |
++ PSB_TA_MEM_FLAG_HOSTD |
++ PSB_TA_MEM_FLAG_INIT,
++ dev_priv->ta_mem->ta_memory->offset,
++ dev_priv->ta_mem->hw_data->offset,
++ dev_priv->ta_mem->hw_cookie);
++ }
++
++#ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_D0i3)
++ dev_priv->gfx_d0i3_time += jiffies - dev_priv->gfx_last_mode_change;
++ else
++ PSB_DEBUG_PM("power up:illegal previous power state\n");
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_d0i0_cnt++;
++#endif
++
++ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
++
++ return 0;
++}
++
++int psb_try_power_down_sgx(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ int ret;
++ if (!down_write_trylock(&dev_priv->sgx_sem))
++ return -EBUSY;
++ /*Try lock 2d, because FB driver ususally use 2D engine.*/
++ if (!psb_2d_trylock(dev_priv)) {
++ ret = -EBUSY;
++ goto out_err0;
++ }
++ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) !=
++ PSB_PWR_STATE_D0i0) {
++ ret = -EINVAL;
++ goto out_err1;
++ }
++ if ((PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY) ||
++ ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) != 0)) {
++ ret = -EBUSY;
++ goto out_err1;
++ }
++ if (!scheduler->idle ||
++ !list_empty(&scheduler->raster_queue) ||
++ !list_empty(&scheduler->ta_queue) ||
++ !list_empty(&scheduler->hp_raster_queue)) {
++ ret = -EBUSY;
++ goto out_err1;
++ }
++ /*flush_scheduled_work();*/
++ ret = psb_power_down_sgx(dev);
++out_err1:
++ psb_2d_atomic_unlock(dev_priv);
++out_err0:
++ up_write(&dev_priv->sgx_sem);
++ return ret;
++}
++/*check power state, if in sleep, wake up*/
++void psb_check_power_state(struct drm_device *dev, int devices)
++{
++ struct pci_dev *pdev = dev->pdev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ down(&dev_priv->pm_sem);
++ switch (pdev->current_state) {
++ case PCI_D3hot:
++ dev->driver->pci_driver.resume(pdev);
++ break;
++ default:
++
++ if (devices & PSB_DEVICE_SGX) {
++ if ((dev_priv->graphics_state & PSB_PWR_STATE_MASK) ==
++ PSB_PWR_STATE_D0i3) {
++ /*power up sgx*/
++ psb_power_up_sgx(dev);
++ }
++ } else if (devices & PSB_DEVICE_MSVDX) {
++ if ((dev_priv->msvdx_state & PSB_PWR_STATE_MASK) ==
++ PSB_PWR_STATE_D0i3) {
++ psb_power_up_msvdx(dev);
++ } else {
++ dev_priv->msvdx_last_action = jiffies;
++ }
++ }
++ break;
++ }
++ up(&dev_priv->pm_sem);
++}
++
++void psb_init_ospm(struct drm_psb_private *dev_priv)
++{
++ static int init;
++ if (!init) {
++ dev_priv->graphics_state = PSB_PWR_STATE_D0i0;
++ init_rwsem(&dev_priv->sgx_sem);
++ sema_init(&dev_priv->pm_sem, 1);
++#ifdef OSPM_STAT
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_d0i0_time = 0;
++ dev_priv->gfx_d0i3_time = 0;
++ dev_priv->gfx_d3_time = 0;
++#endif
++ init = 1;
++ }
++}
++
++int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_cmdbuf_arg *arg = data;
++ int ret = 0;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct ttm_buffer_object *cmd_buffer = NULL;
++ struct ttm_buffer_object *ta_buffer = NULL;
++ struct ttm_buffer_object *oom_buffer = NULL;
++ struct psb_ttm_fence_rep fence_arg;
++ struct drm_psb_scene user_scene;
++ struct psb_scene_pool *pool = NULL;
++ struct psb_scene *scene = NULL;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
++ int engine;
++ struct psb_feedback_info feedback;
++ int po_correct;
++ struct psb_context *context;
++ unsigned num_buffers;
++
++ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
++
++ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA)
++ || (arg->engine == PSB_ENGINE_RASTERIZER)) {
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ }
++
++ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++
++ context = &dev_priv->context;
++ context->used_buffers = 0;
++ context->fence_types = 0;
++ BUG_ON(!list_empty(&context->validate_list));
++ BUG_ON(!list_empty(&context->kern_validate_list));
++
++ if (unlikely(context->buffers == NULL)) {
++ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
++ sizeof(*context->buffers));
++ if (unlikely(context->buffers == NULL)) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++ }
++
++ ret = psb_reference_buffers(file_priv,
++ arg->buffer_list,
++ context);
++
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
++
++ ret = ttm_eu_reserve_buffers(&context->validate_list,
++ context->val_seq);
++ if (unlikely(ret != 0)) {
++ goto out_err2;
++ }
++
++ engine = (arg->engine == PSB_ENGINE_RASTERIZER) ?
++ PSB_ENGINE_TA : arg->engine;
++
++ ret = psb_validate_buffer_list(file_priv, engine,
++ context, &po_correct);
++ if (unlikely(ret != 0))
++ goto out_err3;
++
++ if (!po_correct) {
++ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
++ arg->reloc_offset,
++ arg->reloc_handle, context, 0, 1);
++ if (unlikely(ret != 0))
++ goto out_err3;
++
++ }
++
++ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
++ if (unlikely(cmd_buffer == NULL)) {
++ ret = -EINVAL;
++ goto out_err4;
++ }
++
++ switch (arg->engine) {
++ case PSB_ENGINE_2D:
++ ret = psb_cmdbuf_2d(file_priv, &context->validate_list,
++ context->fence_types, arg, cmd_buffer,
++ &fence_arg);
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++ case PSB_ENGINE_VIDEO:
++ psb_check_power_state(dev, PSB_DEVICE_MSVDX);
++ ret = psb_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++ case LNC_ENGINE_ENCODE:
++ psb_check_power_state(dev, PSB_DEVICE_TOPAZ);
++ ret = lnc_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++ case PSB_ENGINE_RASTERIZER:
++ ret = psb_cmdbuf_raster(file_priv, context,
++ arg, cmd_buffer, &fence_arg);
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++ case PSB_ENGINE_TA:
++ if (arg->ta_handle == arg->cmdbuf_handle) {
++ ta_buffer = ttm_bo_reference(cmd_buffer);
++ } else {
++ ta_buffer =
++ ttm_buffer_object_lookup(tfile,
++ arg->ta_handle);
++ if (!ta_buffer) {
++ ret = -EINVAL;
++ goto out_err4;
++ }
++ }
++ if (arg->oom_size != 0) {
++ if (arg->oom_handle == arg->cmdbuf_handle) {
++ oom_buffer = ttm_bo_reference(cmd_buffer);
++ } else {
++ oom_buffer =
++ ttm_buffer_object_lookup(tfile,
++ arg->
++ oom_handle);
++ if (!oom_buffer) {
++ ret = -EINVAL;
++ goto out_err4;
++ }
++ }
++ }
++
++ ret = copy_from_user(&user_scene, (void __user *)
++ ((unsigned long) arg->scene_arg),
++ sizeof(user_scene));
++ if (ret)
++ goto out_err4;
++
++ if (!user_scene.handle_valid) {
++ pool = psb_scene_pool_alloc(file_priv, 0,
++ user_scene.num_buffers,
++ user_scene.w,
++ user_scene.h);
++ if (!pool) {
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++
++ user_scene.handle = psb_scene_pool_handle(pool);
++ user_scene.handle_valid = 1;
++ ret = copy_to_user((void __user *)
++ ((unsigned long) arg->
++ scene_arg), &user_scene,
++ sizeof(user_scene));
++
++ if (ret)
++ goto out_err4;
++ } else {
++ pool =
++ psb_scene_pool_lookup(file_priv,
++ user_scene.handle, 1);
++ if (!pool) {
++ ret = -EINVAL;
++ goto out_err4;
++ }
++ }
++
++ ret = psb_validate_scene_pool(context, pool,
++ user_scene.w,
++ user_scene.h,
++ arg->ta_flags &
++ PSB_TA_FLAG_LASTPASS, &scene);
++ if (ret)
++ goto out_err4;
++
++ memset(&feedback, 0, sizeof(feedback));
++ if (arg->feedback_ops) {
++ ret = psb_feedback_buf(tfile,
++ context,
++ arg->feedback_ops,
++ arg->feedback_handle,
++ arg->feedback_offset,
++ arg->feedback_breakpoints,
++ arg->feedback_size,
++ &feedback);
++ if (ret)
++ goto out_err4;
++ }
++ ret = psb_cmdbuf_ta(file_priv, context,
++ arg, cmd_buffer, ta_buffer,
++ oom_buffer, scene, &feedback,
++ &fence_arg);
++ if (ret)
++ goto out_err4;
++ break;
++ default:
++ DRM_ERROR
++ ("Unimplemented command submission mechanism (%x).\n",
++ arg->engine);
++ ret = -EINVAL;
++ goto out_err4;
++ }
++
++ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
++ ret = copy_to_user((void __user *)
++ ((unsigned long) arg->fence_arg),
++ &fence_arg, sizeof(fence_arg));
++ }
++
++out_err4:
++ if (scene)
++ psb_scene_unref(&scene);
++ if (pool)
++ psb_scene_pool_unref(&pool);
++ if (cmd_buffer)
++ ttm_bo_unref(&cmd_buffer);
++ if (ta_buffer)
++ ttm_bo_unref(&ta_buffer);
++ if (oom_buffer)
++ ttm_bo_unref(&oom_buffer);
++out_err3:
++ ret = psb_handle_copyback(dev, context, ret);
++out_err2:
++ psb_unreference_buffers(context);
++out_err1:
++ mutex_unlock(&dev_priv->cmdbuf_mutex);
++out_err0:
++ ttm_read_unlock(&dev_priv->ttm_lock);
++ if ((arg->engine == PSB_ENGINE_2D) || (arg->engine == PSB_ENGINE_TA)
++ || (arg->engine == PSB_ENGINE_RASTERIZER))
++ up_read(&dev_priv->sgx_sem);
++ return ret;
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_sgx.h b/drivers/gpu/drm/psb/psb_sgx.h
+--- a/drivers/gpu/drm/psb/psb_sgx.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_sgx.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,41 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ **/
++#ifndef _PSB_SGX_H_
++#define _PSB_SGX_H_
++
++extern int psb_submit_video_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ struct ttm_fence_object *fence);
++
++extern int psb_2d_wait_available(struct drm_psb_private *dev_priv,
++ unsigned size);
++extern int drm_idle_check_interval;
++extern int drm_psb_ospm;
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/psb_ttm_glue.c b/drivers/gpu/drm/psb/psb_ttm_glue.c
+--- a/drivers/gpu/drm/psb/psb_ttm_glue.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_ttm_glue.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,345 @@
++/**************************************************************************
++ * Copyright (c) 2008, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "ttm/ttm_userobj_api.h"
++
++static struct vm_operations_struct psb_ttm_vm_ops;
++
++int psb_open(struct inode *inode, struct file *filp)
++{
++ struct drm_file *file_priv;
++ struct drm_psb_private *dev_priv;
++ struct psb_fpriv *psb_fp;
++ int ret;
++
++ ret = drm_open(inode, filp);
++ if (unlikely(ret))
++ return ret;
++
++ psb_fp = drm_calloc(1, sizeof(*psb_fp), DRM_MEM_FILES);
++
++ if (unlikely(psb_fp == NULL))
++ goto out_err0;
++
++ file_priv = (struct drm_file *) filp->private_data;
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++
++ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
++ PSB_FILE_OBJECT_HASH_ORDER);
++ if (unlikely(psb_fp->tfile == NULL))
++ goto out_err1;
++
++ file_priv->driver_priv = psb_fp;
++
++ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
++ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
++
++ return 0;
++
++out_err1:
++ drm_free(psb_fp, sizeof(*psb_fp), DRM_MEM_FILES);
++out_err0:
++ (void) drm_release(inode, filp);
++ return ret;
++}
++
++int psb_release(struct inode *inode, struct file *filp)
++{
++ struct drm_file *file_priv;
++ struct psb_fpriv *psb_fp;
++ struct drm_psb_private *dev_priv;
++ int ret;
++
++ file_priv = (struct drm_file *) filp->private_data;
++ psb_fp = psb_fpriv(file_priv);
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(file_priv->minor->dev, PSB_DEVICE_SGX);
++
++ ttm_object_file_release(&psb_fp->tfile);
++ drm_free(psb_fp, sizeof(*psb_fp), DRM_MEM_FILES);
++
++ if (dev_priv && dev_priv->xhw_file)
++ psb_xhw_init_takedown(dev_priv, file_priv, 1);
++
++ ret = drm_release(inode, filp);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev_priv->dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 0);
++ return ret;
++}
++
++int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ ret = ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
++
++int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ ret = ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
++
++int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ ret = ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
++
++int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ PSB_DEBUG_PM("ioctl: psb_pl_waitidle\n");
++ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ ret = ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
++ &psb_priv(dev)->ttm_lock, data);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
++
++int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ ret = ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
++
++int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ int ret;
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ ret = ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
++
++int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ int ret;
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ ret = ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
++
++int psb_pl_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ int ret;
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ ret = ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
++ &dev_priv->bdev, &dev_priv->ttm_lock, data);
++ up_read(&dev_priv->sgx_sem);
++ if (drm_psb_ospm && IS_MRST(dev))
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ return ret;
++}
++
++/**
++ * psb_ttm_fault - Wrapper around the ttm fault method.
++ *
++ * @vma: The struct vm_area_struct as in the vm fault() method.
++ * @vmf: The struct vm_fault as in the vm fault() method.
++ *
++ * Since ttm_fault() will reserve buffers while faulting,
++ * we need to take the ttm read lock around it, as this driver
++ * relies on the ttm_lock in write mode to exclude all threads from
++ * reserving and thus validating buffers in aperture- and memory shortage
++ * situations.
++ */
++
++static int psb_ttm_fault(struct vm_area_struct *vma,
++ struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct drm_psb_private *dev_priv =
++ container_of(bo->bdev, struct drm_psb_private, bdev);
++ int ret;
++
++ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
++ if (unlikely(ret != 0))
++ return VM_FAULT_NOPAGE;
++
++ ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
++
++ ttm_read_unlock(&dev_priv->ttm_lock);
++ return ret;
++}
++
++
++int psb_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ struct drm_file *file_priv;
++ struct drm_psb_private *dev_priv;
++ int ret;
++
++ if (unlikely(vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET))
++ return drm_mmap(filp, vma);
++
++ file_priv = (struct drm_file *) filp->private_data;
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
++ dev_priv->ttm_vm_ops = vma->vm_ops;
++ psb_ttm_vm_ops = *vma->vm_ops;
++ psb_ttm_vm_ops.fault = &psb_ttm_fault;
++ }
++
++ vma->vm_ops = &psb_ttm_vm_ops;
++
++ return 0;
++}
++
++ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
++
++ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
++}
++
++ssize_t psb_ttm_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
++
++ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
++}
++
++int psb_verify_access(struct ttm_buffer_object *bo,
++ struct file *filp)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++
++ if (capable(CAP_SYS_ADMIN))
++ return 0;
++
++ if (unlikely(!file_priv->authenticated))
++ return -EPERM;
++
++ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
++}
++
++static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
++{
++ return ttm_mem_global_init(ref->object);
++}
++
++static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
++{
++ ttm_mem_global_release(ref->object);
++}
++
++int psb_ttm_global_init(struct drm_psb_private *dev_priv)
++{
++ struct drm_global_reference *global_ref;
++ int ret;
++
++ global_ref = &dev_priv->mem_global_ref;
++ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
++ global_ref->size = sizeof(struct ttm_mem_global);
++ global_ref->init = &psb_ttm_mem_global_init;
++ global_ref->release = &psb_ttm_mem_global_release;
++
++ ret = drm_global_item_ref(global_ref);
++ if (unlikely(ret != 0)) {
++ DRM_ERROR("Failed referencing a global TTM memory object.\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++void psb_ttm_global_release(struct drm_psb_private *dev_priv)
++{
++ drm_global_item_unref(&dev_priv->mem_global_ref);
++}
+diff -uNr a/drivers/gpu/drm/psb/psb_xhw.c b/drivers/gpu/drm/psb/psb_xhw.c
+--- a/drivers/gpu/drm/psb/psb_xhw.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/psb_xhw.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,629 @@
++/**************************************************************************
++ *Copyright (c) 2007-2008, Intel Corporation.
++ *All Rights Reserved.
++ *
++ *This program is free software; you can redistribute it and/or modify it
++ *under the terms and conditions of the GNU General Public License,
++ *version 2, as published by the Free Software Foundation.
++ *
++ *This program is distributed in the hope it will be useful, but WITHOUT
++ *ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ *FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ *more details.
++ *
++ *You should have received a copy of the GNU General Public License along with
++ *this program; if not, write to the Free Software Foundation, Inc.,
++ *51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ *Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ *develop this driver.
++ *
++ **************************************************************************/
++/*
++ *Make calls into closed source X server code.
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "ttm/ttm_userobj_api.h"
++
++void
++psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf)
++{
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ list_del_init(&buf->head);
++ if (dev_priv->xhw_cur_buf == buf)
++ dev_priv->xhw_cur_buf = NULL;
++ atomic_set(&buf->done, 1);
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++}
++
++static inline int psb_xhw_add(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf)
++{
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ atomic_set(&buf->done, 0);
++ if (unlikely(!dev_priv->xhw_submit_ok)) {
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ DRM_ERROR("No Xpsb 3D extension available.\n");
++ return -EINVAL;
++ }
++ if (!list_empty(&buf->head)) {
++ DRM_ERROR("Recursive list adding.\n");
++ goto out;
++ }
++ list_add_tail(&buf->head, &dev_priv->xhw_in);
++ wake_up_interruptible(&dev_priv->xhw_queue);
++out:
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ return 0;
++}
++
++int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t w,
++ uint32_t h,
++ uint32_t *hw_cookie,
++ uint32_t *bo_size,
++ uint32_t *clear_p_start,
++ uint32_t *clear_num_pages)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++ int ret;
++
++ buf->copy_back = 1;
++ xa->op = PSB_XHW_SCENE_INFO;
++ xa->irq_op = 0;
++ xa->issue_irq = 0;
++ xa->arg.si.w = w;
++ xa->arg.si.h = h;
++
++ ret = psb_xhw_add(dev_priv, buf);
++ if (ret)
++ return ret;
++
++ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
++ atomic_read(&buf->done), DRM_HZ);
++
++ if (!atomic_read(&buf->done)) {
++ psb_xhw_clean_buf(dev_priv, buf);
++ return -EBUSY;
++ }
++
++ if (!xa->ret) {
++ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
++ *bo_size = xa->arg.si.size;
++ *clear_p_start = xa->arg.si.clear_p_start;
++ *clear_num_pages = xa->arg.si.clear_num_pages;
++ }
++ return xa->ret;
++}
++
++int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf, uint32_t fire_flags)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++
++ buf->copy_back = 0;
++ xa->op = PSB_XHW_FIRE_RASTER;
++ xa->issue_irq = 0;
++ xa->arg.sb.fire_flags = 0;
++
++ return psb_xhw_add(dev_priv, buf);
++}
++
++int psb_xhw_vistest(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++
++ buf->copy_back = 1;
++ xa->op = PSB_XHW_VISTEST;
++ /*
++ *Could perhaps decrease latency somewhat by
++ *issuing an irq in this case.
++ */
++ xa->issue_irq = 0;
++ xa->irq_op = PSB_UIRQ_VISTEST;
++ return psb_xhw_add(dev_priv, buf);
++}
++
++int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t fire_flags,
++ uint32_t hw_context,
++ uint32_t *cookie,
++ uint32_t *oom_cmds,
++ uint32_t num_oom_cmds,
++ uint32_t offset, uint32_t engine,
++ uint32_t flags)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++
++ buf->copy_back = (fire_flags & PSB_FIRE_FLAG_XHW_OOM);
++ xa->op = PSB_XHW_SCENE_BIND_FIRE;
++ xa->issue_irq = (buf->copy_back) ? 1 : 0;
++ if (unlikely(buf->copy_back))
++ xa->irq_op = (engine == PSB_SCENE_ENGINE_TA) ?
++ PSB_UIRQ_FIRE_TA_REPLY : PSB_UIRQ_FIRE_RASTER_REPLY;
++ else
++ xa->irq_op = 0;
++ xa->arg.sb.fire_flags = fire_flags;
++ xa->arg.sb.hw_context = hw_context;
++ xa->arg.sb.offset = offset;
++ xa->arg.sb.engine = engine;
++ xa->arg.sb.flags = flags;
++ xa->arg.sb.num_oom_cmds = num_oom_cmds;
++ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
++ if (num_oom_cmds)
++ memcpy(xa->arg.sb.oom_cmds, oom_cmds,
++ sizeof(uint32_t) * num_oom_cmds);
++ return psb_xhw_add(dev_priv, buf);
++}
++
++int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++ int ret;
++
++ buf->copy_back = 1;
++ xa->op = PSB_XHW_RESET_DPM;
++ xa->issue_irq = 0;
++ xa->irq_op = 0;
++
++ ret = psb_xhw_add(dev_priv, buf);
++ if (ret)
++ return ret;
++
++ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
++ atomic_read(&buf->done), 3 * DRM_HZ);
++
++ if (!atomic_read(&buf->done)) {
++ psb_xhw_clean_buf(dev_priv, buf);
++ return -EBUSY;
++ }
++
++ return xa->ret;
++}
++
++int psb_xhw_check_lockup(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf, uint32_t *value)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++ int ret;
++
++ *value = 0;
++
++ buf->copy_back = 1;
++ xa->op = PSB_XHW_CHECK_LOCKUP;
++ xa->issue_irq = 0;
++ xa->irq_op = 0;
++
++ ret = psb_xhw_add(dev_priv, buf);
++ if (ret)
++ return ret;
++
++ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
++ atomic_read(&buf->done), DRM_HZ * 3);
++
++ if (!atomic_read(&buf->done)) {
++ psb_xhw_clean_buf(dev_priv, buf);
++ return -EBUSY;
++ }
++
++ if (!xa->ret)
++ *value = xa->arg.cl.value;
++
++ return xa->ret;
++}
++
++static int psb_xhw_terminate(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++ unsigned long irq_flags;
++
++ buf->copy_back = 0;
++ xa->op = PSB_XHW_TERMINATE;
++ xa->issue_irq = 0;
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ dev_priv->xhw_submit_ok = 0;
++ atomic_set(&buf->done, 0);
++ if (!list_empty(&buf->head)) {
++ DRM_ERROR("Recursive list adding.\n");
++ goto out;
++ }
++ list_add_tail(&buf->head, &dev_priv->xhw_in);
++out:
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ wake_up_interruptible(&dev_priv->xhw_queue);
++
++ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
++ atomic_read(&buf->done), DRM_HZ / 10);
++
++ if (!atomic_read(&buf->done)) {
++ DRM_ERROR("Xpsb terminate timeout.\n");
++ psb_xhw_clean_buf(dev_priv, buf);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t pages, uint32_t * hw_cookie,
++ uint32_t * size,
++ uint32_t * ta_min_size)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++ int ret;
++
++ buf->copy_back = 1;
++ xa->op = PSB_XHW_TA_MEM_INFO;
++ xa->issue_irq = 0;
++ xa->irq_op = 0;
++ xa->arg.bi.pages = pages;
++
++ ret = psb_xhw_add(dev_priv, buf);
++ if (ret)
++ return ret;
++
++ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
++ atomic_read(&buf->done), DRM_HZ);
++
++ if (!atomic_read(&buf->done)) {
++ psb_xhw_clean_buf(dev_priv, buf);
++ return -EBUSY;
++ }
++
++ if (!xa->ret)
++ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
++
++ *size = xa->arg.bi.size;
++ *ta_min_size = xa->arg.bi.ta_min_size;
++ return xa->ret;
++}
++
++int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t flags,
++ uint32_t param_offset,
++ uint32_t pt_offset, uint32_t *hw_cookie)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++ int ret;
++
++ buf->copy_back = 1;
++ xa->op = PSB_XHW_TA_MEM_LOAD;
++ xa->issue_irq = 0;
++ xa->irq_op = 0;
++ xa->arg.bl.flags = flags;
++ xa->arg.bl.param_offset = param_offset;
++ xa->arg.bl.pt_offset = pt_offset;
++ memcpy(xa->cookie, hw_cookie, sizeof(xa->cookie));
++
++ ret = psb_xhw_add(dev_priv, buf);
++ if (ret)
++ return ret;
++
++ (void) wait_event_timeout(dev_priv->xhw_caller_queue,
++ atomic_read(&buf->done), 3 * DRM_HZ);
++
++ if (!atomic_read(&buf->done)) {
++ psb_xhw_clean_buf(dev_priv, buf);
++ return -EBUSY;
++ }
++
++ if (!xa->ret)
++ memcpy(hw_cookie, xa->cookie, sizeof(xa->cookie));
++
++ return xa->ret;
++}
++
++int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf, uint32_t *cookie)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++
++ /*
++ *This calls the extensive closed source
++ *OOM handler, which resolves the condition and
++ *sends a reply telling the scheduler what to do
++ *with the task.
++ */
++
++ buf->copy_back = 1;
++ xa->op = PSB_XHW_OOM;
++ xa->issue_irq = 1;
++ xa->irq_op = PSB_UIRQ_OOM_REPLY;
++ memcpy(xa->cookie, cookie, sizeof(xa->cookie));
++
++ return psb_xhw_add(dev_priv, buf);
++}
++
++void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf,
++ uint32_t *cookie,
++ uint32_t *bca, uint32_t *rca, uint32_t *flags)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++
++ /*
++ *Get info about how to schedule an OOM task.
++ */
++
++ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
++ *bca = xa->arg.oom.bca;
++ *rca = xa->arg.oom.rca;
++ *flags = xa->arg.oom.flags;
++}
++
++void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf, uint32_t *cookie)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++
++ memcpy(cookie, xa->cookie, sizeof(xa->cookie));
++}
++
++int psb_xhw_resume(struct drm_psb_private *dev_priv,
++ struct psb_xhw_buf *buf)
++{
++ struct drm_psb_xhw_arg *xa = &buf->arg;
++
++ buf->copy_back = 0;
++ xa->op = PSB_XHW_RESUME;
++ xa->issue_irq = 0;
++ xa->irq_op = 0;
++ return psb_xhw_add(dev_priv, buf);
++}
++
++void psb_xhw_takedown(struct drm_psb_private *dev_priv)
++{
++}
++
++int psb_xhw_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irq_flags;
++
++ INIT_LIST_HEAD(&dev_priv->xhw_in);
++ spin_lock_init(&dev_priv->xhw_lock);
++ atomic_set(&dev_priv->xhw_client, 0);
++ init_waitqueue_head(&dev_priv->xhw_queue);
++ init_waitqueue_head(&dev_priv->xhw_caller_queue);
++ mutex_init(&dev_priv->xhw_mutex);
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ dev_priv->xhw_on = 0;
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++
++ return 0;
++}
++
++static int psb_xhw_init_init(struct drm_device *dev,
++ struct drm_file *file_priv,
++ struct drm_psb_xhw_init_arg *arg)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ int ret;
++ bool is_iomem;
++
++ if (atomic_add_unless(&dev_priv->xhw_client, 1, 1)) {
++ unsigned long irq_flags;
++
++ dev_priv->xhw_bo =
++ ttm_buffer_object_lookup(tfile, arg->buffer_handle);
++ if (!dev_priv->xhw_bo) {
++ ret = -EINVAL;
++ goto out_err;
++ }
++ ret = ttm_bo_kmap(dev_priv->xhw_bo, 0,
++ dev_priv->xhw_bo->num_pages,
++ &dev_priv->xhw_kmap);
++ if (ret) {
++ DRM_ERROR("Failed mapping X server "
++ "communications buffer.\n");
++ goto out_err0;
++ }
++ dev_priv->xhw =
++ ttm_kmap_obj_virtual(&dev_priv->xhw_kmap, &is_iomem);
++ if (is_iomem) {
++ DRM_ERROR("X server communications buffer "
++ "is in device memory.\n");
++ ret = -EINVAL;
++ goto out_err1;
++ }
++ dev_priv->xhw_file = file_priv;
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ dev_priv->xhw_on = 1;
++ dev_priv->xhw_submit_ok = 1;
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ return 0;
++ } else {
++ DRM_ERROR("Xhw is already initialized.\n");
++ return -EBUSY;
++ }
++out_err1:
++ dev_priv->xhw = NULL;
++ ttm_bo_kunmap(&dev_priv->xhw_kmap);
++out_err0:
++ ttm_bo_unref(&dev_priv->xhw_bo);
++out_err:
++ atomic_dec(&dev_priv->xhw_client);
++ return ret;
++}
++
++static void psb_xhw_queue_empty(struct drm_psb_private *dev_priv)
++{
++ struct psb_xhw_buf *cur_buf, *next;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ dev_priv->xhw_submit_ok = 0;
++
++ list_for_each_entry_safe(cur_buf, next, &dev_priv->xhw_in, head) {
++ list_del_init(&cur_buf->head);
++ if (cur_buf->copy_back)
++ cur_buf->arg.ret = -EINVAL;
++ atomic_set(&cur_buf->done, 1);
++ }
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ wake_up(&dev_priv->xhw_caller_queue);
++}
++
++void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
++ struct drm_file *file_priv, int closing)
++{
++
++ if (dev_priv->xhw_file == file_priv &&
++ atomic_add_unless(&dev_priv->xhw_client, -1, 0)) {
++
++ if (closing)
++ psb_xhw_queue_empty(dev_priv);
++ else {
++ struct psb_xhw_buf buf;
++ INIT_LIST_HEAD(&buf.head);
++
++ psb_xhw_terminate(dev_priv, &buf);
++ psb_xhw_queue_empty(dev_priv);
++ }
++
++ dev_priv->xhw = NULL;
++ ttm_bo_kunmap(&dev_priv->xhw_kmap);
++ ttm_bo_unref(&dev_priv->xhw_bo);
++ dev_priv->xhw_file = NULL;
++ }
++}
++
++int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_xhw_init_arg *arg =
++ (struct drm_psb_xhw_init_arg *) data;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ int ret = 0;
++ down_read(&dev_priv->sgx_sem);
++ psb_check_power_state(dev, PSB_DEVICE_SGX);
++ switch (arg->operation) {
++ case PSB_XHW_INIT:
++ ret = psb_xhw_init_init(dev, file_priv, arg);
++ break;
++ case PSB_XHW_TAKEDOWN:
++ psb_xhw_init_takedown(dev_priv, file_priv, 0);
++ break;
++ }
++ up_read(&dev_priv->sgx_sem);
++ return ret;
++}
++
++static int psb_xhw_in_empty(struct drm_psb_private *dev_priv)
++{
++ int empty;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ empty = list_empty(&dev_priv->xhw_in);
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ return empty;
++}
++
++int psb_xhw_handler(struct drm_psb_private *dev_priv)
++{
++ unsigned long irq_flags;
++ struct drm_psb_xhw_arg *xa;
++ struct psb_xhw_buf *buf;
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++
++ if (!dev_priv->xhw_on) {
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ return -EINVAL;
++ }
++
++ buf = dev_priv->xhw_cur_buf;
++ if (buf && buf->copy_back) {
++ xa = &buf->arg;
++ memcpy(xa, dev_priv->xhw, sizeof(*xa));
++ dev_priv->comm[PSB_COMM_USER_IRQ] = xa->irq_op;
++ atomic_set(&buf->done, 1);
++ wake_up(&dev_priv->xhw_caller_queue);
++ } else
++ dev_priv->comm[PSB_COMM_USER_IRQ] = 0;
++
++ dev_priv->xhw_cur_buf = 0;
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ return 0;
++}
++
++int psb_xhw_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irq_flags;
++ struct drm_psb_xhw_arg *xa;
++ int ret;
++ struct list_head *list;
++ struct psb_xhw_buf *buf;
++
++ if (!dev_priv)
++ return -EINVAL;
++
++ if (mutex_lock_interruptible(&dev_priv->xhw_mutex))
++ return -ERESTART;
++
++ if (psb_forced_user_interrupt(dev_priv)) {
++ mutex_unlock(&dev_priv->xhw_mutex);
++ return -EINVAL;
++ }
++
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ while (list_empty(&dev_priv->xhw_in)) {
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++ ret = wait_event_interruptible_timeout(dev_priv->xhw_queue,
++ !psb_xhw_in_empty
++ (dev_priv), DRM_HZ);
++ if (ret == -ERESTARTSYS || ret == 0) {
++ mutex_unlock(&dev_priv->xhw_mutex);
++ return -ERESTART;
++ }
++ spin_lock_irqsave(&dev_priv->xhw_lock, irq_flags);
++ }
++
++ list = dev_priv->xhw_in.next;
++ list_del_init(list);
++
++ buf = list_entry(list, struct psb_xhw_buf, head);
++ xa = &buf->arg;
++ memcpy(dev_priv->xhw, xa, sizeof(*xa));
++
++ if (unlikely(buf->copy_back))
++ dev_priv->xhw_cur_buf = buf;
++ else {
++ atomic_set(&buf->done, 1);
++ dev_priv->xhw_cur_buf = NULL;
++ }
++
++ if (xa->op == PSB_XHW_TERMINATE) {
++ dev_priv->xhw_on = 0;
++ wake_up(&dev_priv->xhw_caller_queue);
++ }
++ spin_unlock_irqrestore(&dev_priv->xhw_lock, irq_flags);
++
++ mutex_unlock(&dev_priv->xhw_mutex);
++
++ return 0;
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_agp_backend.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,149 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ * Keith Packard.
++ */
++
++#include "ttm/ttm_bo_driver.h"
++#ifdef TTM_HAS_AGP
++#include "ttm/ttm_placement_common.h"
++#include <linux/agp_backend.h>
++#include <asm/agp.h>
++#include <asm/io.h>
++
++struct ttm_agp_backend {
++ struct ttm_backend backend;
++ struct agp_memory *mem;
++ struct agp_bridge_data *bridge;
++};
++
++static int ttm_agp_populate(struct ttm_backend *backend,
++ unsigned long num_pages, struct page **pages,
++ struct page *dummy_read_page)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct page **cur_page, **last_page = pages + num_pages;
++ struct agp_memory *mem;
++
++ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
++ if (unlikely(mem == NULL))
++ return -ENOMEM;
++
++ mem->page_count = 0;
++ for (cur_page = pages; cur_page < last_page; ++cur_page) {
++ struct page *page = *cur_page;
++ if (!page) {
++ page = dummy_read_page;
++ }
++ mem->memory[mem->page_count++] =
++ phys_to_gart(page_to_phys(page));
++ }
++ agp_be->mem = mem;
++ return 0;
++}
++
++static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct agp_memory *mem = agp_be->mem;
++ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED);
++ int ret;
++
++ mem->is_flushed = 1;
++ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
++
++ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
++ if (ret)
++ printk(KERN_ERR "AGP Bind memory failed.\n");
++
++ return ret;
++}
++
++static int ttm_agp_unbind(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++
++ if (agp_be->mem->is_bound)
++ return agp_unbind_memory(agp_be->mem);
++ else
++ return 0;
++}
++
++static void ttm_agp_clear(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct agp_memory *mem = agp_be->mem;
++
++ if (mem) {
++ ttm_agp_unbind(backend);
++ agp_free_memory(mem);
++ }
++ agp_be->mem = NULL;
++}
++
++static void ttm_agp_destroy(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++
++ if (agp_be->mem)
++ ttm_agp_clear(backend);
++ kfree(agp_be);
++}
++
++static struct ttm_backend_func ttm_agp_func = {
++ .populate = ttm_agp_populate,
++ .clear = ttm_agp_clear,
++ .bind = ttm_agp_bind,
++ .unbind = ttm_agp_unbind,
++ .destroy = ttm_agp_destroy,
++};
++
++struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
++ struct agp_bridge_data *bridge)
++{
++ struct ttm_agp_backend *agp_be;
++
++ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
++ if (!agp_be)
++ return NULL;
++
++ agp_be->mem = NULL;
++ agp_be->bridge = bridge;
++ agp_be->backend.func = &ttm_agp_func;
++ agp_be->backend.bdev = bdev;
++ return &agp_be->backend;
++}
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_bo_api.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_bo_api.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,578 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_BO_API_H_
++#define _TTM_BO_API_H_
++
++#include <drm/drm_hashtab.h>
++#include <linux/kref.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++#include <linux/mm.h>
++#include <linux/rbtree.h>
++
++struct ttm_bo_device;
++
++struct drm_mm_node;
++
++/**
++ * struct ttm_mem_reg
++ *
++ * @mm_node: Memory manager node.
++ * @size: Requested size of memory region.
++ * @num_pages: Actual size of memory region in pages.
++ * @page_alignment: Page alignment.
++ * @flags: Placement flags.
++ * @proposed_flags: Proposed placement flags.
++ *
++ * Structure indicating the placement and space resources used by a
++ * buffer object.
++ */
++
++struct ttm_mem_reg {
++ struct drm_mm_node *mm_node;
++ unsigned long size;
++ unsigned long num_pages;
++ uint32_t page_alignment;
++ uint32_t mem_type;
++ uint32_t flags;
++ uint32_t proposed_flags;
++};
++
++/**
++ * enum ttm_bo_type
++ *
++ * @ttm_bo_type_device: These are 'normal' buffers that can
++ * be mmapped by user space. Each of these bos occupy a slot in the
++ * device address space, that can be used for normal vm operations.
++ *
++ * @ttm_bo_type_user: These are user-space memory areas that are made
++ * available to the GPU by mapping the buffer pages into the GPU aperture
++ * space. These buffers cannot be mmaped from the device address space.
++ *
++ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
++ * but they cannot be accessed from user-space. For kernel-only use.
++ */
++
++enum ttm_bo_type {
++ ttm_bo_type_device,
++ ttm_bo_type_user,
++ ttm_bo_type_kernel
++};
++
++struct ttm_tt;
++
++/**
++ * struct ttm_buffer_object
++ *
++ * @bdev: Pointer to the buffer object device structure.
++ * @kref: Reference count of this buffer object. When this refcount reaches
++ * zero, the object is put on the delayed delete list.
++ * @list_kref: List reference count of this buffer object. This member is
++ * used to avoid destruction while the buffer object is still on a list.
++ * Lru lists may keep one refcount, the delayed delete list, and kref != 0
++ * keeps one refcount. When this refcount reaches zero,
++ * the object is destroyed.
++ * @proposed_flags: Proposed placement for the buffer. Changed only by the
++ * creator prior to validation as opposed to bo->mem.proposed_flags which is
++ * changed by the implementation prior to a buffer move if it wants to outsmart
++ * the buffer creator / user. This latter happens, for example, at eviction.
++ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
++ * buffers.
++ * @type: The bo type.
++ * @offset: The current GPU offset, which can have different meanings
++ * depending on the memory type. For SYSTEM type memory, it should be 0.
++ * @mem: structure describing current placement.
++ * @val_seq: Sequence of the validation holding the @reserved lock.
++ * Used to avoid starvation when many processes compete to validate the
++ * buffer. This member is protected by the bo_device::lru_lock.
++ * @seq_valid: The value of @val_seq is valid. This value is protected by
++ * the bo_device::lru_lock.
++ * @lru: List head for the lru list.
++ * @ddestroy: List head for the delayed destroy list.
++ * @swap: List head for swap LRU list.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistant shmem object.
++ * @destroy: Destruction function. If NULL, kfree is used.
++ * @sync_obj_arg: Opaque argument to synchronization object function.
++ * @sync_obj: Pointer to a synchronization object.
++ * @priv_flags: Flags describing buffer object internal state.
++ * @event_queue: Queue for processes waiting on buffer object status change.
++ * @mutex: Lock protecting all members with the exception of constant members
++ * and list heads. We should really use a spinlock here.
++ * @num_pages: Actual number of pages.
++ * @ttm: TTM structure holding system pages.
++ * @vm_hash: Hash item for fast address space lookup. Need to change to a
++ * rb-tree node.
++ * @vm_node: Address space manager node.
++ * @addr_space_offset: Address space offset.
++ * @cpu_writes: For synchronization. Number of cpu writers.
++ * @reserved: Deadlock-free lock used for synchronization state transitions.
++ * @acc_size: Accounted size for this object.
++ *
++ * Base class for TTM buffer object, that deals with data placement and CPU
++ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
++ * the driver can usually use the placement offset @offset directly as the
++ * GPU virtual address. For drivers implementing multiple
++ * GPU memory manager contexts, the driver should manage the address space
++ * in these contexts separately and use these objects to get the correct
++ * placement and caching for these GPU maps. This makes it possible to use
++ * these objects for even quite elaborate memory management schemes.
++ * The destroy member, the API visibility of this object makes it possible
++ * to derive driver specific types.
++ */
++
++struct ttm_buffer_object {
++ struct ttm_bo_device *bdev;
++ struct kref kref;
++ struct kref list_kref;
++
++ /*
++ * If there is a possibility that the usage variable is zero,
++ * then dev->struct_mutex should be locked before incrementing it.
++ */
++
++ uint32_t proposed_flags;
++ unsigned long buffer_start;
++ enum ttm_bo_type type;
++ unsigned long offset;
++ struct ttm_mem_reg mem;
++ uint32_t val_seq;
++ bool seq_valid;
++
++ struct list_head lru;
++ struct list_head ddestroy;
++ struct list_head swap;
++
++ struct file *persistant_swap_storage;
++
++ void (*destroy) (struct ttm_buffer_object *);
++
++ void *sync_obj_arg;
++ void *sync_obj;
++
++ uint32_t priv_flags;
++ wait_queue_head_t event_queue;
++ struct mutex mutex;
++ unsigned long num_pages;
++
++ struct ttm_tt *ttm;
++ struct rb_node vm_rb;
++ struct drm_mm_node *vm_node;
++ uint64_t addr_space_offset;
++
++ atomic_t cpu_writers;
++ atomic_t reserved;
++
++ size_t acc_size;
++};
++
++/**
++ * struct ttm_bo_kmap_obj
++ *
++ * @virtual: The current kernel virtual address.
++ * @page: The page when kmap'ing a single page.
++ * @bo_kmap_type: Type of bo_kmap.
++ *
++ * Object describing a kernel mapping. Since a TTM bo may be located
++ * in various memory types with various caching policies, the
++ * mapping can either be an ioremap, a vmap, a kmap or part of a
++ * premapped region.
++ */
++
++struct ttm_bo_kmap_obj {
++ void *virtual;
++ struct page *page;
++ enum {
++ ttm_bo_map_iomap,
++ ttm_bo_map_vmap,
++ ttm_bo_map_kmap,
++ ttm_bo_map_premapped,
++ } bo_kmap_type;
++};
++
++/**
++ * ttm_bo_reference - reference a struct ttm_buffer_object
++ *
++ * @bo: The buffer object.
++ *
++ * Returns a refcounted pointer to a buffer object.
++ */
++
++static inline struct ttm_buffer_object *ttm_bo_reference(struct
++ ttm_buffer_object *bo)
++{
++ kref_get(&bo->kref);
++ return bo;
++}
++
++/**
++ * ttm_bo_wait - wait for buffer idle.
++ *
++ * @bo: The buffer object.
++ * @interruptible: Use interruptible wait.
++ * @no_wait: Return immediately if buffer is busy.
++ *
++ * This function must be called with the bo::mutex held, and makes
++ * sure any previous rendering to the buffer is completed.
++ * Note: It might be necessary to block validations before the
++ * wait by reserving the buffer.
++ * Returns -EBUSY if no_wait is true and the buffer is busy.
++ * Returns -ERESTART if interrupted by a signal.
++ */
++extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_buffer_object_validate
++ *
++ * @bo: The buffer object.
++ * @interruptible: Sleep interruptible if sleeping.
++ * @no_wait: Return immediately if the buffer is busy.
++ *
++ * Changes placement and caching policy of the buffer object
++ * according to bo::proposed_flags.
++ * Returns
++ * -EINVAL on invalid proposed_flags.
++ * -ENOMEM on out-of-memory condition.
++ * -EBUSY if no_wait is true and buffer busy.
++ * -ERESTART if interrupted by a signal.
++ */
++extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_bo_unref
++ *
++ * @bo: The buffer object.
++ *
++ * Unreference and clear a pointer to a buffer object.
++ */
++extern void ttm_bo_unref(struct ttm_buffer_object **bo);
++
++/**
++ * ttm_bo_synccpu_write_grab
++ *
++ * @bo: The buffer object:
++ * @no_wait: Return immediately if buffer is busy.
++ *
++ * Synchronizes a buffer object for CPU RW access. This means
++ * blocking command submission that affects the buffer and
++ * waiting for buffer idle. This lock is recursive.
++ * Returns
++ * -EBUSY if the buffer is busy and no_wait is true.
++ * -ERESTART if interrupted by a signal.
++ */
++
++extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
++/**
++ * ttm_bo_synccpu_write_release:
++ *
++ * @bo : The buffer object.
++ *
++ * Releases a synccpu lock.
++ */
++extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_buffer_object_init
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo: Pointer to a ttm_buffer_object to be initialized.
++ * @size: Requested size of buffer object.
++ * @type: Requested type of buffer object.
++ * @flags: Initial placement flags.
++ * @page_alignment: Data alignment in pages.
++ * @buffer_start: Virtual address of user space data backing a
++ * user buffer object.
++ * @interruptible: If needing to sleep to wait for GPU resources,
++ * sleep interruptible.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistant shmem object. Typically, this would
++ * point to the shmem object backing a GEM object if TTM is used to back a
++ * GEM user interface.
++ * @acc_size: Accounted size for this object.
++ * @destroy: Destroy function. Use NULL for kfree().
++ *
++ * This function initializes a pre-allocated struct ttm_buffer_object.
++ * As this object may be part of a larger structure, this function,
++ * together with the @destroy function,
++ * enables driver-specific objects derived from a ttm_buffer_object.
++ * On successful return, the object kref and list_kref are set to 1.
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid placement flags.
++ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
++ */
++
++extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
++ struct ttm_buffer_object *bo,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interrubtible,
++ struct file *persistant_swap_storage,
++ size_t acc_size,
++ void (*destroy) (struct ttm_buffer_object *));
++/**
++ * ttm_bo_synccpu_object_init
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo: Pointer to a ttm_buffer_object to be initialized.
++ * @size: Requested size of buffer object.
++ * @type: Requested type of buffer object.
++ * @flags: Initial placement flags.
++ * @page_alignment: Data alignment in pages.
++ * @buffer_start: Virtual address of user space data backing a
++ * user buffer object.
++ * @interruptible: If needing to sleep while waiting for GPU resources,
++ * sleep interruptible.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistant shmem object. Typically, this would
++ * point to the shmem object backing a GEM object if TTM is used to back a
++ * GEM user interface.
++ * @p_bo: On successful completion *p_bo points to the created object.
++ *
++ * This function allocates a ttm_buffer_object, and then calls
++ * ttm_buffer_object_init on that object.
++ * The destroy function is set to kfree().
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid placement flags.
++ * -ERESTART: Interrupted by signal while waiting for resources.
++ */
++
++extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ struct ttm_buffer_object **p_bo);
++
++/**
++ * ttm_bo_check_placement
++ *
++ * @bo: the buffer object.
++ * @set_flags: placement flags to set.
++ * @clr_flags: placement flags to clear.
++ *
++ * Performs minimal validity checking on an intended change of
++ * placement flags.
++ * Returns
++ * -EINVAL: Intended change is invalid or not allowed.
++ */
++
++extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
++ uint32_t set_flags, uint32_t clr_flags);
++
++/**
++ * ttm_bo_init_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ * @p_offset: offset for managed area in pages.
++ * @p_size: size managed area in pages.
++ *
++ * Initialize a manager for a given memory type.
++ * Note: if part of driver firstopen, it must be protected from a
++ * potentially racing lastclose.
++ * Returns:
++ * -EINVAL: invalid size or memory type.
++ * -ENOMEM: Not enough memory.
++ * May also return driver-specified errors.
++ */
++
++extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
++ unsigned long p_offset, unsigned long p_size);
++/**
++ * ttm_bo_clean_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ *
++ * Take down a manager for a given memory type after first walking
++ * the LRU list to evict any buffers left alive.
++ *
++ * Normally, this function is part of lastclose() or unload(), and at that
++ * point there shouldn't be any buffers left created by user-space, since
++ * there should've been removed by the file descriptor release() method.
++ * However, before this function is run, make sure to signal all sync objects,
++ * and verify that the delayed delete queue is empty. The driver must also
++ * make sure that there are no NO_EVICT buffers present in this memory type
++ * when the call is made.
++ *
++ * If this function is part of a VT switch, the caller must make sure that
++ * there are no appications currently validating buffers before this
++ * function is called. The caller can do that by first taking the
++ * struct ttm_bo_device::ttm_lock in write mode.
++ *
++ * Returns:
++ * -EINVAL: invalid or uninitialized memory type.
++ * -EBUSY: There are still buffers left in this memory type.
++ */
++
++extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
++
++/**
++ * ttm_bo_evict_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ *
++ * Evicts all buffers on the lru list of the memory type.
++ * This is normally part of a VT switch or an
++ * out-of-memory-space-due-to-fragmentation handler.
++ * The caller must make sure that there are no other processes
++ * currently validating buffers, and can do that by taking the
++ * struct ttm_bo_device::ttm_lock in write mode.
++ *
++ * Returns:
++ * -EINVAL: Invalid or uninitialized memory type.
++ * -ERESTART: The call was interrupted by a signal while waiting to
++ * evict a buffer.
++ */
++
++extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
++
++/**
++ * ttm_kmap_obj_virtual
++ *
++ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
++ * @is_iomem: Pointer to an integer that on return indicates 1 if the
++ * virtual map is io memory, 0 if normal memory.
++ *
++ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
++ * If *is_iomem is 1 on return, the virtual address points to an io memory area,
++ * that should strictly be accessed by the iowriteXX() and similar functions.
++ */
++
++static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
++ bool *is_iomem)
++{
++ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
++ map->bo_kmap_type == ttm_bo_map_premapped);
++ return map->virtual;
++}
++
++/**
++ * ttm_bo_kmap
++ *
++ * @bo: The buffer object.
++ * @start_page: The first page to map.
++ * @num_pages: Number of pages to map.
++ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
++ *
++ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
++ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
++ * used to obtain a virtual address to the data.
++ *
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid range.
++ */
++
++extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
++ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
++
++/**
++ * ttm_bo_kunmap
++ *
++ * @map: Object describing the map to unmap.
++ *
++ * Unmaps a kernel map set up by ttm_bo_kmap.
++ */
++
++extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
++
++#if 0
++#endif
++
++/**
++ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
++ *
++ * @vma: vma as input from the fbdev mmap method.
++ * @bo: The bo backing the address space. The address space will
++ * have the same size as the bo, and start at offset 0.
++ *
++ * This function is intended to be called by the fbdev mmap method
++ * if the fbdev address space is to be backed by a bo.
++ */
++
++extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
++ struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_mmap - mmap out of the ttm device address space.
++ *
++ * @filp: filp as input from the mmap method.
++ * @vma: vma as input from the mmap method.
++ * @bdev: Pointer to the ttm_bo_device with the address space manager.
++ *
++ * This function is intended to be called by the device mmap method.
++ * if the device address space is to be backed by the bo manager.
++ */
++
++extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct ttm_bo_device *bdev);
++
++/**
++ * ttm_bo_io
++ *
++ * @bdev: Pointer to the struct ttm_bo_device.
++ * @filp: Pointer to the struct file attempting to read / write.
++ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
++ * @rbuf: User-space pointer to address of buffer to read into. Null on write.
++ * @count: Number of bytes to read / write.
++ * @f_pos: Pointer to current file position.
++ * @write: 1 for read, 0 for write.
++ *
++ * This function implements read / write into ttm buffer objects, and is intended to
++ * be called from the fops::read and fops::write method.
++ * Returns:
++ * See man (2) write, man(2) read. In particular, the function may return -EINTR if
++ * interrupted by a signal.
++ */
++
++extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
++ const char __user * wbuf, char __user * rbuf,
++ size_t count, loff_t * f_pos, bool write);
++
++extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo.c b/drivers/gpu/drm/psb/ttm/ttm_bo.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_bo.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_bo.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,1716 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_placement_common.h"
++#include <linux/jiffies.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++
++#define TTM_ASSERT_LOCKED(param)
++#define TTM_DEBUG(fmt, arg...)
++#define TTM_BO_HASH_ORDER 13
++
++static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
++static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
++static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
++
++static inline uint32_t ttm_bo_type_flags(unsigned type)
++{
++ return (1 << (type));
++}
++
++static void ttm_bo_release_list(struct kref *list_kref)
++{
++ struct ttm_buffer_object *bo =
++ container_of(list_kref, struct ttm_buffer_object, list_kref);
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ BUG_ON(atomic_read(&bo->list_kref.refcount));
++ BUG_ON(atomic_read(&bo->kref.refcount));
++ BUG_ON(atomic_read(&bo->cpu_writers));
++ BUG_ON(bo->sync_obj != NULL);
++ BUG_ON(bo->mem.mm_node != NULL);
++ BUG_ON(!list_empty(&bo->lru));
++ BUG_ON(!list_empty(&bo->ddestroy));
++
++ if (bo->ttm)
++ ttm_tt_destroy(bo->ttm);
++ if (bo->destroy)
++ bo->destroy(bo);
++ else {
++ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
++ kfree(bo);
++ }
++}
++
++int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
++{
++
++ if (interruptible) {
++ int ret = 0;
++
++ ret = wait_event_interruptible(bo->event_queue,
++ atomic_read(&bo->reserved) == 0);
++ if (unlikely(ret != 0))
++ return -ERESTART;
++ } else {
++ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
++ }
++ return 0;
++}
++
++static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
++
++ BUG_ON(!list_empty(&bo->lru));
++
++ man = &bdev->man[bo->mem.mem_type];
++ list_add_tail(&bo->lru, &man->lru);
++ kref_get(&bo->list_kref);
++
++ if (bo->ttm != NULL) {
++ list_add_tail(&bo->swap, &bdev->swap_lru);
++ kref_get(&bo->list_kref);
++ }
++ }
++}
++
++/*
++ * Call with bdev->lru_lock and bdev->global->swap_lock held..
++ */
++
++static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
++{
++ int put_count = 0;
++
++ if (!list_empty(&bo->swap)) {
++ list_del_init(&bo->swap);
++ ++put_count;
++ }
++ if (!list_empty(&bo->lru)) {
++ list_del_init(&bo->lru);
++ ++put_count;
++ }
++
++ /*
++ * TODO: Add a driver hook to delete from
++ * driver-specific LRU's here.
++ */
++
++ return put_count;
++}
++
++int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret;
++
++ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
++ if (use_sequence && bo->seq_valid &&
++ (sequence - bo->val_seq < (1 << 31))) {
++ return -EAGAIN;
++ }
++
++ if (no_wait)
++ return -EBUSY;
++
++ spin_unlock(&bdev->lru_lock);
++ ret = ttm_bo_wait_unreserved(bo, interruptible);
++ spin_lock(&bdev->lru_lock);
++
++ if (unlikely(ret))
++ return ret;
++ }
++
++ if (use_sequence) {
++ bo->val_seq = sequence;
++ bo->seq_valid = true;
++ } else {
++ bo->seq_valid = false;
++ }
++
++ return 0;
++}
++
++static void ttm_bo_ref_bug(struct kref *list_kref)
++{
++ BUG();
++}
++
++int ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int put_count = 0;
++ int ret;
++
++ spin_lock(&bdev->lru_lock);
++ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
++ sequence);
++ if (likely(ret == 0))
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++
++ return ret;
++}
++
++void ttm_bo_unreserve(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ spin_lock(&bdev->lru_lock);
++ ttm_bo_add_to_lru(bo);
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++ spin_unlock(&bdev->lru_lock);
++}
++
++/*
++ * Call bo->mutex locked.
++ */
++
++static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret = 0;
++ uint32_t page_flags = 0;
++
++ TTM_ASSERT_LOCKED(&bo->mutex);
++ bo->ttm = NULL;
++
++ switch (bo->type) {
++ case ttm_bo_type_device:
++ case ttm_bo_type_kernel:
++ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++ page_flags, bdev->dummy_read_page);
++ if (unlikely(bo->ttm == NULL))
++ ret = -ENOMEM;
++ break;
++ case ttm_bo_type_user:
++ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++ page_flags | TTM_PAGE_FLAG_USER,
++ bdev->dummy_read_page);
++ if (unlikely(bo->ttm == NULL))
++ ret = -ENOMEM;
++ break;
++
++ ret = ttm_tt_set_user(bo->ttm, current,
++ bo->buffer_start, bo->num_pages);
++ if (unlikely(ret != 0))
++ ttm_tt_destroy(bo->ttm);
++ break;
++ default:
++ printk(KERN_ERR "Illegal buffer object type\n");
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem,
++ bool evict, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
++ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
++ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
++ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
++ int ret = 0;
++
++ if (old_is_pci || new_is_pci ||
++ ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
++ ttm_bo_unmap_virtual(bo);
++
++ /*
++ * Create and bind a ttm if required.
++ */
++
++ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
++ ret = ttm_bo_add_ttm(bo);
++ if (ret)
++ goto out_err;
++
++ ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags);
++ if (ret)
++ return ret;
++
++ if (mem->mem_type != TTM_PL_SYSTEM) {
++ ret = ttm_tt_bind(bo->ttm, mem);
++ if (ret)
++ goto out_err;
++ }
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
++
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++
++ *old_mem = *mem;
++ mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, mem->flags,
++ TTM_PL_MASK_MEMTYPE);
++ goto moved;
++ }
++
++ }
++
++ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
++ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
++ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
++ else if (bdev->driver->move)
++ ret = bdev->driver->move(bo, evict, interruptible,
++ no_wait, mem);
++ else
++ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
++
++ if (ret)
++ goto out_err;
++
++ moved:
++ if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) {
++ ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags);
++ if (ret)
++ printk(KERN_ERR "Can not flush read caches\n");
++ }
++
++ ttm_flag_masked(&bo->priv_flags,
++ (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0,
++ TTM_BO_PRIV_FLAG_EVICTED);
++
++ if (bo->mem.mm_node)
++ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
++ bdev->man[bo->mem.mem_type].gpu_offset;
++
++ return 0;
++
++ out_err:
++ new_man = &bdev->man[bo->mem.mem_type];
++ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
++ ttm_tt_unbind(bo->ttm);
++ ttm_tt_destroy(bo->ttm);
++ bo->ttm = NULL;
++ }
++
++ return ret;
++}
++
++static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
++ bool allow_errors)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ if (bo->sync_obj) {
++ if (bdev->nice_mode) {
++ unsigned long _end = jiffies + 3 * HZ;
++ int ret;
++ do {
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (ret && allow_errors)
++ return ret;
++
++ } while (ret && !time_after_eq(jiffies, _end));
++
++ if (bo->sync_obj) {
++ bdev->nice_mode = false;
++ printk(KERN_ERR "Detected probable GPU lockup. "
++ "Evicting buffer.\n");
++ }
++ }
++ if (bo->sync_obj) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++ }
++ return 0;
++}
++
++/**
++ * If bo idle, remove from delayed- and lru lists, and unref.
++ * If not idle, and already on delayed list, do nothing.
++ * If not idle, and not on delayed list, put on delayed list,
++ * up the list_kref and schedule a delayed list check.
++ */
++
++static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ mutex_lock(&bo->mutex);
++
++ if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
++ bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++
++ if (bo->sync_obj && remove_all)
++ (void)ttm_bo_expire_sync_obj(bo, false);
++
++ if (!bo->sync_obj) {
++ int put_count;
++
++ if (bo->ttm)
++ ttm_tt_unbind(bo->ttm);
++ spin_lock(&bdev->lru_lock);
++ if (!list_empty(&bo->ddestroy)) {
++ list_del_init(&bo->ddestroy);
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++ }
++ if (bo->mem.mm_node) {
++ drm_mm_put_block(bo->mem.mm_node);
++ bo->mem.mm_node = NULL;
++ }
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++ mutex_unlock(&bo->mutex);
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++
++ return;
++ }
++
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bo->ddestroy)) {
++ spin_unlock(&bdev->lru_lock);
++ driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bo->ddestroy)) {
++ kref_get(&bo->list_kref);
++ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
++ }
++ spin_unlock(&bdev->lru_lock);
++ schedule_delayed_work(&bdev->wq,
++ ((HZ / 100) < 1) ? 1 : HZ / 100);
++ } else
++ spin_unlock(&bdev->lru_lock);
++
++ mutex_unlock(&bo->mutex);
++ return;
++}
++
++/**
++ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
++ * encountered buffers.
++ */
++
++static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
++{
++ struct ttm_buffer_object *entry, *nentry;
++ struct list_head *list, *next;
++ int ret;
++
++ spin_lock(&bdev->lru_lock);
++ list_for_each_safe(list, next, &bdev->ddestroy) {
++ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
++ nentry = NULL;
++
++ /*
++ * Protect the next list entry from destruction while we
++ * unlock the lru_lock.
++ */
++
++ if (next != &bdev->ddestroy) {
++ nentry = list_entry(next, struct ttm_buffer_object,
++ ddestroy);
++ kref_get(&nentry->list_kref);
++ }
++ kref_get(&entry->list_kref);
++
++ spin_unlock(&bdev->lru_lock);
++ ttm_bo_cleanup_refs(entry, remove_all);
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++
++ if (nentry) {
++ bool next_onlist = !list_empty(next);
++ kref_put(&nentry->list_kref, ttm_bo_release_list);
++
++ /*
++ * Someone might have raced us and removed the
++ * next entry from the list. We don't bother restarting
++ * list traversal.
++ */
++
++ if (!next_onlist)
++ break;
++ }
++ }
++ ret = !list_empty(&bdev->ddestroy);
++ spin_unlock(&bdev->lru_lock);
++
++ return ret;
++}
++
++static void ttm_bo_delayed_workqueue(struct work_struct *work)
++{
++ struct ttm_bo_device *bdev =
++ container_of(work, struct ttm_bo_device, wq.work);
++
++ if (ttm_bo_delayed_delete(bdev, false)) {
++ schedule_delayed_work(&bdev->wq,
++ ((HZ / 100) < 1) ? 1 : HZ / 100);
++ }
++}
++
++static void ttm_bo_release(struct kref *kref)
++{
++ struct ttm_buffer_object *bo =
++ container_of(kref, struct ttm_buffer_object, kref);
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ if (likely(bo->vm_node != NULL)) {
++ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
++ drm_mm_put_block(bo->vm_node);
++ }
++ write_unlock(&bdev->vm_lock);
++ ttm_bo_cleanup_refs(bo, false);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ write_lock(&bdev->vm_lock);
++}
++
++void ttm_bo_unref(struct ttm_buffer_object **p_bo)
++{
++ struct ttm_buffer_object *bo = *p_bo;
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ *p_bo = NULL;
++ write_lock(&bdev->vm_lock);
++ kref_put(&bo->kref, ttm_bo_release);
++ write_unlock(&bdev->vm_lock);
++}
++
++static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
++ bool interruptible, bool no_wait)
++{
++ int ret = 0;
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg evict_mem;
++
++ if (bo->mem.mem_type != mem_type)
++ goto out;
++
++ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
++ if (ret && ret != -ERESTART) {
++ printk(KERN_ERR "Failed to expire sync object before "
++ "buffer eviction.\n");
++ goto out;
++ }
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ evict_mem = bo->mem;
++ evict_mem.mm_node = NULL;
++
++ evict_mem.proposed_flags = bdev->driver->evict_flags(bo);
++ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
++
++ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
++ if (unlikely(ret != 0 && ret != -ERESTART)) {
++ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM;
++ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
++ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
++ }
++
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Failed to find memory space for "
++ "buffer 0x%p eviction.\n", bo);
++ goto out;
++ }
++
++ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait);
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Buffer eviction failed\n");
++ goto out;
++ }
++
++ spin_lock(&bdev->lru_lock);
++ if (evict_mem.mm_node) {
++ drm_mm_put_block(evict_mem.mm_node);
++ evict_mem.mm_node = NULL;
++ }
++ spin_unlock(&bdev->lru_lock);
++
++ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED,
++ TTM_BO_PRIV_FLAG_EVICTED);
++
++ out:
++ return ret;
++}
++
++/**
++ * Repeatedly evict memory from the LRU for @mem_type until we create enough
++ * space, or we've evicted everything and there isn't enough space.
++ */
++static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ uint32_t mem_type,
++ bool interruptible, bool no_wait)
++{
++ struct drm_mm_node *node;
++ struct ttm_buffer_object *entry;
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++ struct list_head *lru;
++ unsigned long num_pages = mem->num_pages;
++ int put_count = 0;
++ int ret;
++
++ retry_pre_get:
++ ret = drm_mm_pre_get(&man->manager);
++ if (unlikely(ret != 0))
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ do {
++ node = drm_mm_search_free(&man->manager, num_pages,
++ mem->page_alignment, 1);
++ if (node)
++ break;
++
++ lru = &man->lru;
++ if (list_empty(lru))
++ break;
++
++ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
++ kref_get(&entry->list_kref);
++
++ ret =
++ ttm_bo_reserve_locked(entry, interruptible, no_wait, false, 0);
++
++ if (likely(ret == 0))
++ put_count = ttm_bo_del_from_lru(entry);
++
++ spin_unlock(&bdev->lru_lock);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ while (put_count--)
++ kref_put(&entry->list_kref, ttm_bo_ref_bug);
++
++ mutex_lock(&entry->mutex);
++ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
++ mutex_unlock(&entry->mutex);
++
++ ttm_bo_unreserve(entry);
++
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ if (ret)
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ } while (1);
++
++ if (!node) {
++ spin_unlock(&bdev->lru_lock);
++ return -ENOMEM;
++ }
++
++ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
++ if (unlikely(!node)) {
++ spin_unlock(&bdev->lru_lock);
++ goto retry_pre_get;
++ }
++
++ spin_unlock(&bdev->lru_lock);
++ mem->mm_node = node;
++ mem->mem_type = mem_type;
++ return 0;
++}
++
++static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
++ bool disallow_fixed,
++ uint32_t mem_type,
++ uint32_t mask, uint32_t * res_mask)
++{
++ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
++
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
++ return false;
++
++ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
++ return false;
++
++ if ((mask & man->available_caching) == 0)
++ return false;
++ if (mask & man->default_caching)
++ cur_flags |= man->default_caching;
++ else if (mask & TTM_PL_FLAG_CACHED)
++ cur_flags |= TTM_PL_FLAG_CACHED;
++ else if (mask & TTM_PL_FLAG_WC)
++ cur_flags |= TTM_PL_FLAG_WC;
++ else
++ cur_flags |= TTM_PL_FLAG_UNCACHED;
++
++ *res_mask = cur_flags;
++ return true;
++}
++
++/**
++ * Creates space for memory region @mem according to its type.
++ *
++ * This function first searches for free space in compatible memory types in
++ * the priority order defined by the driver. If free space isn't found, then
++ * ttm_bo_mem_force_space is attempted in priority order to evict and find
++ * space.
++ */
++int ttm_bo_mem_space(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man;
++
++ uint32_t num_prios = bdev->driver->num_mem_type_prio;
++ const uint32_t *prios = bdev->driver->mem_type_prio;
++ uint32_t i;
++ uint32_t mem_type = TTM_PL_SYSTEM;
++ uint32_t cur_flags = 0;
++ bool type_found = false;
++ bool type_ok = false;
++ bool has_eagain = false;
++ struct drm_mm_node *node = NULL;
++ int ret;
++
++ mem->mm_node = NULL;
++ for (i = 0; i < num_prios; ++i) {
++ mem_type = prios[i];
++ man = &bdev->man[mem_type];
++
++ type_ok = ttm_bo_mt_compatible(man,
++ bo->type == ttm_bo_type_user,
++ mem_type, mem->proposed_flags,
++ &cur_flags);
++
++ if (!type_ok)
++ continue;
++
++ if (mem_type == TTM_PL_SYSTEM)
++ break;
++
++ if (man->has_type && man->use_type) {
++ type_found = true;
++ do {
++ ret = drm_mm_pre_get(&man->manager);
++ if (unlikely(ret))
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ node = drm_mm_search_free(&man->manager,
++ mem->num_pages,
++ mem->page_alignment,
++ 1);
++ if (unlikely(!node)) {
++ spin_unlock(&bdev->lru_lock);
++ break;
++ }
++ node = drm_mm_get_block_atomic(node,
++ mem->num_pages,
++ mem->
++ page_alignment);
++ spin_unlock(&bdev->lru_lock);
++ } while (!node);
++ }
++ if (node)
++ break;
++ }
++
++ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
++ mem->mm_node = node;
++ mem->mem_type = mem_type;
++ mem->flags = cur_flags;
++ return 0;
++ }
++
++ if (!type_found)
++ return -EINVAL;
++
++ num_prios = bdev->driver->num_mem_busy_prio;
++ prios = bdev->driver->mem_busy_prio;
++
++ for (i = 0; i < num_prios; ++i) {
++ mem_type = prios[i];
++ man = &bdev->man[mem_type];
++
++ if (!man->has_type)
++ continue;
++
++ if (!ttm_bo_mt_compatible(man,
++ bo->type == ttm_bo_type_user,
++ mem_type,
++ mem->proposed_flags, &cur_flags))
++ continue;
++
++ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
++ interruptible, no_wait);
++
++ if (ret == 0 && mem->mm_node) {
++ mem->flags = cur_flags;
++ return 0;
++ }
++
++ if (ret == -ERESTART)
++ has_eagain = true;
++ }
++
++ ret = (has_eagain) ? -ERESTART : -ENOMEM;
++ return ret;
++}
++
++/*
++ * Call bo->mutex locked.
++ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
++ */
++
++static int ttm_bo_busy(struct ttm_buffer_object *bo)
++{
++ void *sync_obj = bo->sync_obj;
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++
++ if (sync_obj) {
++ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ return 0;
++ }
++ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
++ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ return 0;
++ }
++ return 1;
++ }
++ return 0;
++}
++
++int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
++{
++ int ret = 0;
++
++ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
++ return -EBUSY;
++
++ ret = wait_event_interruptible(bo->event_queue,
++ atomic_read(&bo->cpu_writers) == 0);
++
++ if (ret == -ERESTARTSYS)
++ ret = -ERESTART;
++
++ return ret;
++}
++
++/*
++ * bo->mutex locked.
++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
++ */
++
++int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
++ bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret = 0;
++ struct ttm_mem_reg mem;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ /*
++ * FIXME: It's possible to pipeline buffer moves.
++ * Have the driver move function wait for idle when necessary,
++ * instead of doing it here.
++ */
++
++ ttm_bo_busy(bo);
++ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
++ if (ret)
++ return ret;
++
++ mem.num_pages = bo->num_pages;
++ mem.size = mem.num_pages << PAGE_SHIFT;
++ mem.proposed_flags = new_mem_flags;
++ mem.page_alignment = bo->mem.page_alignment;
++
++ /*
++ * Determine where to move the buffer.
++ */
++
++ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
++ if (ret)
++ goto out_unlock;
++
++ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
++
++ out_unlock:
++ if (ret && mem.mm_node) {
++ spin_lock(&bdev->lru_lock);
++ drm_mm_put_block(mem.mm_node);
++ spin_unlock(&bdev->lru_lock);
++ }
++ return ret;
++}
++
++static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
++{
++ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0)
++ return 0;
++ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0)
++ return 0;
++
++ return 1;
++}
++
++int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait)
++{
++ int ret;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++ bo->mem.proposed_flags = bo->proposed_flags;
++
++ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n",
++ (unsigned long)bo->mem.proposed_flags,
++ (unsigned long)bo->mem.flags);
++
++ /*
++ * Check whether we need to move buffer.
++ */
++
++ if (!ttm_bo_mem_compat(&bo->mem)) {
++ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags,
++ interruptible, no_wait);
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Failed moving buffer. "
++ "Proposed placement 0x%08x\n",
++ bo->mem.proposed_flags);
++ if (ret == -ENOMEM)
++ printk(KERN_ERR "Out of aperture space or "
++ "DRM memory quota.\n");
++ return ret;
++ }
++ }
++
++ /*
++ * We might need to add a TTM.
++ */
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
++ ret = ttm_bo_add_ttm(bo);
++ if (ret)
++ return ret;
++ }
++ /*
++ * Validation has succeeded, move the access and other
++ * non-mapping-related flag bits from the proposed flags to
++ * the active flags
++ */
++
++ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags,
++ ~TTM_PL_MASK_MEMTYPE);
++
++ return 0;
++}
++
++int
++ttm_bo_check_placement(struct ttm_buffer_object *bo,
++ uint32_t set_flags, uint32_t clr_flags)
++{
++ uint32_t new_mask = set_flags | clr_flags;
++
++ if ((bo->type == ttm_bo_type_user) && (clr_flags & TTM_PL_FLAG_CACHED)) {
++ printk(KERN_ERR
++ "User buffers require cache-coherent memory.\n");
++ return -EINVAL;
++ }
++
++ if (!capable(CAP_SYS_ADMIN)) {
++ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
++ printk(KERN_ERR "Need to be root to modify"
++ " NO_EVICT status.\n");
++ return -EINVAL;
++ }
++
++ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) &&
++ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
++ printk(KERN_ERR "Incompatible memory specification"
++ " for NO_EVICT buffer.\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++int ttm_buffer_object_init(struct ttm_bo_device *bdev,
++ struct ttm_buffer_object *bo,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ size_t acc_size,
++ void (*destroy) (struct ttm_buffer_object *))
++{
++ int ret = 0;
++ unsigned long num_pages;
++
++ size += buffer_start & ~PAGE_MASK;
++ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (num_pages == 0) {
++ printk(KERN_ERR "Illegal buffer object size.\n");
++ return -EINVAL;
++ }
++ bo->destroy = destroy;
++
++ mutex_init(&bo->mutex);
++ mutex_lock(&bo->mutex);
++ kref_init(&bo->kref);
++ kref_init(&bo->list_kref);
++ atomic_set(&bo->cpu_writers, 0);
++ atomic_set(&bo->reserved, 1);
++ init_waitqueue_head(&bo->event_queue);
++ INIT_LIST_HEAD(&bo->lru);
++ INIT_LIST_HEAD(&bo->ddestroy);
++ INIT_LIST_HEAD(&bo->swap);
++ bo->bdev = bdev;
++ bo->type = type;
++ bo->num_pages = num_pages;
++ bo->mem.mem_type = TTM_PL_SYSTEM;
++ bo->mem.num_pages = bo->num_pages;
++ bo->mem.mm_node = NULL;
++ bo->mem.page_alignment = page_alignment;
++ bo->buffer_start = buffer_start & PAGE_MASK;
++ bo->priv_flags = 0;
++ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
++ bo->seq_valid = false;
++ bo->persistant_swap_storage = persistant_swap_storage;
++ bo->acc_size = acc_size;
++
++ ret = ttm_bo_check_placement(bo, flags, 0ULL);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ /*
++ * If no caching attributes are set, accept any form of caching.
++ */
++
++ if ((flags & TTM_PL_MASK_CACHING) == 0)
++ flags |= TTM_PL_MASK_CACHING;
++
++ bo->proposed_flags = flags;
++ bo->mem.proposed_flags = flags;
++
++ /*
++ * For ttm_bo_type_device buffers, allocate
++ * address space from the device.
++ */
++
++ if (bo->type == ttm_bo_type_device) {
++ ret = ttm_bo_setup_vm(bo);
++ if (ret)
++ goto out_err;
++ }
++
++ ret = ttm_buffer_object_validate(bo, interruptible, false);
++ if (ret)
++ goto out_err;
++
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return 0;
++
++ out_err:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ return ret;
++}
++
++static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
++ unsigned long num_pages)
++{
++ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
++ PAGE_MASK;
++
++ return bdev->ttm_bo_size + 2 * page_array_size;
++}
++
++int ttm_buffer_object_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ struct ttm_buffer_object **p_bo)
++{
++ struct ttm_buffer_object *bo;
++ int ret;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++
++ size_t acc_size =
++ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
++
++ if (unlikely(bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
++ page_alignment, buffer_start,
++ interruptible,
++ persistant_swap_storage, acc_size, NULL);
++ if (likely(ret == 0))
++ *p_bo = bo;
++
++ return ret;
++}
++
++static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
++ uint32_t mem_type, bool allow_errors)
++{
++ int ret;
++
++ mutex_lock(&bo->mutex);
++
++ ret = ttm_bo_expire_sync_obj(bo, allow_errors);
++ if (ret)
++ goto out;
++
++ if (bo->mem.mem_type == mem_type)
++ ret = ttm_bo_evict(bo, mem_type, false, false);
++
++ if (ret) {
++ if (allow_errors) {
++ goto out;
++ } else {
++ ret = 0;
++ printk(KERN_ERR "Cleanup eviction failed\n");
++ }
++ }
++
++ out:
++ mutex_unlock(&bo->mutex);
++ return ret;
++}
++
++static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
++ struct list_head *head,
++ unsigned mem_type, bool allow_errors)
++{
++ struct ttm_buffer_object *entry;
++ int ret;
++ int put_count;
++
++ /*
++ * Can't use standard list traversal since we're unlocking.
++ */
++
++ spin_lock(&bdev->lru_lock);
++
++ while (!list_empty(head)) {
++ entry = list_first_entry(head, struct ttm_buffer_object, lru);
++ kref_get(&entry->list_kref);
++ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
++ put_count = ttm_bo_del_from_lru(entry);
++ spin_unlock(&bdev->lru_lock);
++ while (put_count--)
++ kref_put(&entry->list_kref, ttm_bo_ref_bug);
++ BUG_ON(ret);
++ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
++ ttm_bo_unreserve(entry);
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++ }
++
++ spin_unlock(&bdev->lru_lock);
++
++ return 0;
++}
++
++int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++ int ret = -EINVAL;
++
++ if (mem_type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory type %d\n", mem_type);
++ return ret;
++ }
++
++ if (!man->has_type) {
++ printk(KERN_ERR "Trying to take down uninitialized "
++ "memory manager type %u\n", mem_type);
++ return ret;
++ }
++
++ man->use_type = false;
++ man->has_type = false;
++
++ ret = 0;
++ if (mem_type > 0) {
++ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
++
++ spin_lock(&bdev->lru_lock);
++ if (drm_mm_clean(&man->manager)) {
++ drm_mm_takedown(&man->manager);
++ } else {
++ ret = -EBUSY;
++ }
++ spin_unlock(&bdev->lru_lock);
++ }
++
++ return ret;
++}
++
++int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++
++ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory manager memory type %u.\n",
++ mem_type);
++ return -EINVAL;
++ }
++
++ if (!man->has_type) {
++ printk(KERN_ERR "Memory type %u has not been initialized.\n",
++ mem_type);
++ return 0;
++ }
++
++ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
++}
++
++int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
++ unsigned long p_offset, unsigned long p_size)
++{
++ int ret = -EINVAL;
++ struct ttm_mem_type_manager *man;
++
++ if (type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory type %d\n", type);
++ return ret;
++ }
++
++ man = &bdev->man[type];
++ if (man->has_type) {
++ printk(KERN_ERR
++ "Memory manager already initialized for type %d\n",
++ type);
++ return ret;
++ }
++
++ ret = bdev->driver->init_mem_type(bdev, type, man);
++ if (ret)
++ return ret;
++
++ ret = 0;
++ if (type != TTM_PL_SYSTEM) {
++ if (!p_size) {
++ printk(KERN_ERR "Zero size memory manager type %d\n",
++ type);
++ return ret;
++ }
++ ret = drm_mm_init(&man->manager, p_offset, p_size);
++ if (ret)
++ return ret;
++ }
++ man->has_type = true;
++ man->use_type = true;
++ man->size = p_size;
++
++ INIT_LIST_HEAD(&man->lru);
++
++ return 0;
++}
++
++int ttm_bo_device_release(struct ttm_bo_device *bdev)
++{
++ int ret = 0;
++ unsigned i = TTM_NUM_MEM_TYPES;
++ struct ttm_mem_type_manager *man;
++
++ while (i--) {
++ man = &bdev->man[i];
++ if (man->has_type) {
++ man->use_type = false;
++ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
++ ret = -EBUSY;
++ printk(KERN_ERR "DRM memory manager type %d "
++ "is not clean.\n", i);
++ }
++ man->has_type = false;
++ }
++ }
++
++ if (!cancel_delayed_work(&bdev->wq))
++ flush_scheduled_work();
++
++ while (ttm_bo_delayed_delete(bdev, true)) ;
++
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bdev->ddestroy))
++ TTM_DEBUG("Delayed destroy list was clean\n");
++
++ if (list_empty(&bdev->man[0].lru))
++ TTM_DEBUG("Swap list was clean\n");
++ spin_unlock(&bdev->lru_lock);
++
++ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
++ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
++ write_lock(&bdev->vm_lock);
++ drm_mm_takedown(&bdev->addr_space_mm);
++ write_unlock(&bdev->vm_lock);
++
++ __free_page(bdev->dummy_read_page);
++ return ret;
++}
++
++/*
++ * This function is intended to be called on drm driver load.
++ * If you decide to call it from firstopen, you must protect the call
++ * from a potentially racing ttm_bo_driver_finish in lastclose.
++ * (This may happen on X server restart).
++ */
++
++int ttm_bo_device_init(struct ttm_bo_device *bdev,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_bo_driver *driver, uint64_t file_page_offset)
++{
++ int ret = -EINVAL;
++
++ bdev->dummy_read_page = NULL;
++ rwlock_init(&bdev->vm_lock);
++ spin_lock_init(&bdev->lru_lock);
++
++ bdev->driver = driver;
++ bdev->mem_glob = mem_glob;
++
++ memset(bdev->man, 0, sizeof(bdev->man));
++
++ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
++ if (unlikely(bdev->dummy_read_page == NULL)) {
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++
++ /*
++ * Initialize the system memory buffer type.
++ * Other types need to be driver / IOCTL initialized.
++ */
++ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ bdev->addr_space_rb = RB_ROOT;
++ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
++ bdev->nice_mode = true;
++ INIT_LIST_HEAD(&bdev->ddestroy);
++ INIT_LIST_HEAD(&bdev->swap_lru);
++ bdev->dev_mapping = NULL;
++ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
++ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Could not register buffer object swapout.\n");
++ goto out_err2;
++ }
++ return 0;
++ out_err2:
++ ttm_bo_clean_mm(bdev, 0);
++ out_err1:
++ __free_page(bdev->dummy_read_page);
++ out_err0:
++ return ret;
++}
++
++/*
++ * buffer object vm functions.
++ */
++
++bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
++ if (mem->mem_type == TTM_PL_SYSTEM)
++ return false;
++
++ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
++ return false;
++
++ if (mem->flags & TTM_PL_FLAG_CACHED)
++ return false;
++ }
++ return true;
++}
++
++int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ unsigned long *bus_base,
++ unsigned long *bus_offset, unsigned long *bus_size)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ *bus_size = 0;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
++ return -EINVAL;
++
++ if (ttm_mem_reg_is_pci(bdev, mem)) {
++ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
++ *bus_size = mem->num_pages << PAGE_SHIFT;
++ *bus_base = man->io_offset;
++ }
++
++ return 0;
++}
++
++/**
++ * \c Kill all user-space virtual mappings of this buffer object.
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
++ */
++
++void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ loff_t offset = (loff_t) bo->addr_space_offset;
++ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
++
++ if (!bdev->dev_mapping)
++ return;
++
++ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
++}
++
++static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
++ struct rb_node *parent = NULL;
++ struct ttm_buffer_object *cur_bo;
++ unsigned long offset = bo->vm_node->start;
++ unsigned long cur_offset;
++
++ while (*cur) {
++ parent = *cur;
++ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
++ cur_offset = cur_bo->vm_node->start;
++ if (offset < cur_offset)
++ cur = &parent->rb_left;
++ else if (offset > cur_offset)
++ cur = &parent->rb_right;
++ else
++ BUG();
++ }
++
++ rb_link_node(&bo->vm_rb, parent, cur);
++ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
++}
++
++/**
++ * ttm_bo_setup_vm:
++ *
++ * @bo: the buffer to allocate address space for
++ *
++ * Allocate address space in the drm device so that applications
++ * can mmap the buffer and access the contents. This only
++ * applies to ttm_bo_type_device objects as others are not
++ * placed in the drm device address space.
++ */
++
++static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret;
++
++ retry_pre_get:
++ ret = drm_mm_pre_get(&bdev->addr_space_mm);
++ if (unlikely(ret != 0))
++ return ret;
++
++ write_lock(&bdev->vm_lock);
++ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
++ bo->mem.num_pages, 0, 0);
++
++ if (unlikely(bo->vm_node == NULL)) {
++ ret = -ENOMEM;
++ goto out_unlock;
++ }
++
++ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
++ bo->mem.num_pages, 0);
++
++ if (unlikely(bo->vm_node == NULL)) {
++ write_unlock(&bdev->vm_lock);
++ goto retry_pre_get;
++ }
++
++ ttm_bo_vm_insert_rb(bo);
++ write_unlock(&bdev->vm_lock);
++ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
++
++ return 0;
++ out_unlock:
++ write_unlock(&bdev->vm_lock);
++ return ret;
++}
++
++int ttm_bo_wait(struct ttm_buffer_object *bo,
++ bool lazy, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++ void *sync_obj;
++ void *sync_obj_arg;
++ int ret = 0;
++
++ while (bo->sync_obj) {
++ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ goto out;
++ }
++ if (no_wait) {
++ ret = -EBUSY;
++ goto out;
++ }
++ sync_obj = driver->sync_obj_ref(bo->sync_obj);
++ sync_obj_arg = bo->sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
++ lazy, interruptible);
++
++ mutex_lock(&bo->mutex);
++ if (unlikely(ret != 0)) {
++ driver->sync_obj_unref(&sync_obj);
++ return ret;
++ }
++
++ if (bo->sync_obj == sync_obj) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++ driver->sync_obj_unref(&sync_obj);
++ }
++ out:
++ return ret;
++}
++
++void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
++{
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++}
++
++int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
++ bool no_wait)
++{
++ int ret;
++
++ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
++ if (no_wait)
++ return -EBUSY;
++ else if (interruptible) {
++ ret = wait_event_interruptible
++ (bo->event_queue, atomic_read(&bo->reserved) == 0);
++ if (unlikely(ret != 0))
++ return -ERESTART;
++ } else {
++ wait_event(bo->event_queue,
++ atomic_read(&bo->reserved) == 0);
++ }
++ }
++ return 0;
++}
++
++int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
++{
++ int ret = 0;
++
++ /*
++ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
++ * makes sure the lru lists are updated.
++ */
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++ if (unlikely(ret != 0))
++ return ret;
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo, false, true, no_wait);
++ if (unlikely(ret != 0))
++ goto out_err0;
++ atomic_inc(&bo->cpu_writers);
++ out_err0:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return ret;
++}
++
++void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
++{
++ if (atomic_dec_and_test(&bo->cpu_writers))
++ wake_up_all(&bo->event_queue);
++}
++
++/**
++ * A buffer object shrink method that tries to swap out the first
++ * buffer object on the bo_global::swap_lru list.
++ */
++
++static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
++{
++ struct ttm_bo_device *bdev =
++ container_of(shrink, struct ttm_bo_device, shrink);
++ struct ttm_buffer_object *bo;
++ int ret = -EBUSY;
++ int put_count;
++ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
++
++ spin_lock(&bdev->lru_lock);
++ while (ret == -EBUSY) {
++ if (unlikely(list_empty(&bdev->swap_lru))) {
++ spin_unlock(&bdev->lru_lock);
++ return -EBUSY;
++ }
++
++ bo = list_first_entry(&bdev->swap_lru,
++ struct ttm_buffer_object, swap);
++ kref_get(&bo->list_kref);
++
++ /**
++ * Reserve buffer. Since we unlock while sleeping, we need
++ * to re-check that nobody removed us from the swap-list while
++ * we slept.
++ */
++
++ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
++ if (unlikely(ret == -EBUSY)) {
++ spin_unlock(&bdev->lru_lock);
++ ttm_bo_wait_unreserved(bo, false);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++ }
++ }
++
++ BUG_ON(ret != 0);
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++
++ /**
++ * Wait for GPU, then move to system cached.
++ */
++
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (unlikely(ret != 0))
++ goto out;
++
++ if ((bo->mem.flags & swap_placement) != swap_placement) {
++ struct ttm_mem_reg evict_mem;
++
++ evict_mem = bo->mem;
++ evict_mem.mm_node = NULL;
++ evict_mem.proposed_flags =
++ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
++ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
++ evict_mem.mem_type = TTM_PL_SYSTEM;
++
++ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++
++ ttm_bo_unmap_virtual(bo);
++
++ /**
++ * Swap out. Buffer will be swapped in again as soon as
++ * anyone tries to access a ttm page.
++ */
++
++ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
++ out:
++ mutex_unlock(&bo->mutex);
++
++ /**
++ *
++ * Unreserve without putting on LRU to avoid swapping out an
++ * already swapped buffer.
++ */
++
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ return ret;
++}
++
++void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
++{
++ while (ttm_bo_swapout(&bdev->shrink) == 0) ;
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_bo_driver.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,859 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_BO_DRIVER_H_
++#define _TTM_BO_DRIVER_H_
++
++#include "ttm/ttm_bo_api.h"
++#include "ttm/ttm_memory.h"
++#include <drm/drm_mm.h>
++#include "linux/workqueue.h"
++#include "linux/fs.h"
++#include "linux/spinlock.h"
++
++struct ttm_backend;
++
++struct ttm_backend_func {
++ /**
++ * struct ttm_backend_func member populate
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ * @num_pages: Number of pages to populate.
++ * @pages: Array of pointers to ttm pages.
++ * @dummy_read_page: Page to be used instead of NULL pages in the
++ * array @pages.
++ *
++ * Populate the backend with ttm pages. Depending on the backend,
++ * it may or may not copy the @pages array.
++ */
++ int (*populate) (struct ttm_backend * backend,
++ unsigned long num_pages, struct page ** pages,
++ struct page * dummy_read_page);
++ /**
++ * struct ttm_backend_func member clear
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * This is an "unpopulate" function. Release all resources
++ * allocated with populate.
++ */
++ void (*clear) (struct ttm_backend * backend);
++
++ /**
++ * struct ttm_backend_func member bind
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
++ * memory type and location for binding.
++ *
++ * Bind the backend pages into the aperture in the location
++ * indicated by @bo_mem. This function should be able to handle
++ * differences between aperture- and system page sizes.
++ */
++ int (*bind) (struct ttm_backend * backend, struct ttm_mem_reg * bo_mem);
++
++ /**
++ * struct ttm_backend_func member unbind
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * Unbind previously bound backend pages. This function should be
++ * able to handle differences between aperture- and system page sizes.
++ */
++ int (*unbind) (struct ttm_backend * backend);
++
++ /**
++ * struct ttm_backend_func member destroy
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * Destroy the backend.
++ */
++ void (*destroy) (struct ttm_backend * backend);
++};
++
++/**
++ * struct ttm_backend
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @flags: For driver use.
++ * @func: Pointer to a struct ttm_backend_func that describes
++ * the backend methods.
++ *
++ */
++
++struct ttm_backend {
++ struct ttm_bo_device *bdev;
++ uint32_t flags;
++ struct ttm_backend_func *func;
++};
++
++#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
++#define TTM_PAGE_FLAG_USER (1 << 1)
++#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
++#define TTM_PAGE_FLAG_WRITE (1 << 3)
++#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
++#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
++
++enum ttm_caching_state {
++ tt_uncached,
++ tt_wc,
++ tt_cached
++};
++
++/**
++ * struct ttm_tt
++ *
++ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
++ * pointer.
++ * @pages: Array of pages backing the data.
++ * @first_himem_page: Himem pages are put last in the page array, which
++ * enables us to run caching attribute changes on only the first part
++ * of the page array containing lomem pages. This is the index of the
++ * first himem page.
++ * @last_lomem_page: Index of the last lomem page in the page array.
++ * @num_pages: Number of pages in the page array.
++ * @bdev: Pointer to the current struct ttm_bo_device.
++ * @be: Pointer to the ttm backend.
++ * @tsk: The task for user ttm.
++ * @start: virtual address for user ttm.
++ * @swap_storage: Pointer to shmem struct file for swap storage.
++ * @caching_state: The current caching state of the pages.
++ * @state: The current binding state of the pages.
++ *
++ * This is a structure holding the pages, caching- and aperture binding
++ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
++ * memory.
++ */
++
++struct ttm_tt {
++ struct page *dummy_read_page;
++ struct page **pages;
++ long first_himem_page;
++ long last_lomem_page;
++ uint32_t page_flags;
++ unsigned long num_pages;
++ struct ttm_bo_device *bdev;
++ struct ttm_backend *be;
++ struct task_struct *tsk;
++ unsigned long start;
++ struct file *swap_storage;
++ enum ttm_caching_state caching_state;
++ enum {
++ tt_bound,
++ tt_unbound,
++ tt_unpopulated,
++ } state;
++};
++
++#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
++#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
++#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
++ before kernel access. */
++#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
++
++/**
++ * struct ttm_mem_type_manager
++ *
++ * @has_type: The memory type has been initialized.
++ * @use_type: The memory type is enabled.
++ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
++ * managed by this memory type.
++ * @gpu_offset: If used, the GPU offset of the first managed page of
++ * fixed memory or the first managed location in an aperture.
++ * @io_offset: The io_offset of the first managed page of IO memory or
++ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
++ * memory, this should be set to NULL.
++ * @io_size: The size of a managed IO region (fixed memory or aperture).
++ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
++ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
++ * @io_addr should be set to NULL.
++ * @size: Size of the managed region.
++ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
++ * as defined in ttm_placement_common.h
++ * @default_caching: The default caching policy used for a buffer object
++ * placed in this memory type if the user doesn't provide one.
++ * @manager: The range manager used for this memory type. FIXME: If the aperture
++ * has a page size different from the underlying system, the granularity
++ * of this manager should take care of this. But the range allocating code
++ * in ttm_bo.c needs to be modified for this.
++ * @lru: The lru list for this memory type.
++ *
++ * This structure is used to identify and manage memory types for a device.
++ * It's set up by the ttm_bo_driver::init_mem_type method.
++ */
++
++struct ttm_mem_type_manager {
++
++ /*
++ * No protection. Constant from start.
++ */
++
++ bool has_type;
++ bool use_type;
++ uint32_t flags;
++ unsigned long gpu_offset;
++ unsigned long io_offset;
++ unsigned long io_size;
++ void *io_addr;
++ uint64_t size;
++ uint32_t available_caching;
++ uint32_t default_caching;
++
++ /*
++ * Protected by the bdev->lru_lock.
++ * TODO: Consider one lru_lock per ttm_mem_type_manager.
++ * Plays ill with list removal, though.
++ */
++
++ struct drm_mm manager;
++ struct list_head lru;
++};
++
++/**
++ * struct ttm_bo_driver
++ *
++ * @mem_type_prio: Priority array of memory types to place a buffer object in
++ * if it fits without evicting buffers from any of these memory types.
++ * @mem_busy_prio: Priority array of memory types to place a buffer object in
++ * if it needs to evict buffers to make room.
++ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
++ * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
++ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
++ * @invalidate_caches: Callback to invalidate read caches when a buffer object
++ * has been evicted.
++ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager structure.
++ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
++ * @move: Callback for a driver to hook in accelerated functions to move a buffer.
++ * If set to NULL, a potentially slow memcpy() move is used.
++ * @sync_obj_signaled: See ttm_fence_api.h
++ * @sync_obj_wait: See ttm_fence_api.h
++ * @sync_obj_flush: See ttm_fence_api.h
++ * @sync_obj_unref: See ttm_fence_api.h
++ * @sync_obj_ref: See ttm_fence_api.h
++ */
++
++struct ttm_bo_driver {
++ const uint32_t *mem_type_prio;
++ const uint32_t *mem_busy_prio;
++ uint32_t num_mem_type_prio;
++ uint32_t num_mem_busy_prio;
++
++ /**
++ * struct ttm_bo_driver member create_ttm_backend_entry
++ *
++ * @bdev: The buffer object device.
++ *
++ * Create a driver specific struct ttm_backend.
++ */
++
++ struct ttm_backend *(*create_ttm_backend_entry)
++ (struct ttm_bo_device * bdev);
++
++ /**
++ * struct ttm_bo_driver member invalidate_caches
++ *
++ * @bdev: the buffer object device.
++ * @flags: new placement of the rebound buffer object.
++ *
++ * A previously evicted buffer has been rebound in a
++ * potentially new location. Tell the driver that it might
++ * consider invalidating read (texture) caches on the next command
++ * submission as a consequence.
++ */
++
++ int (*invalidate_caches) (struct ttm_bo_device * bdev, uint32_t flags);
++ int (*init_mem_type) (struct ttm_bo_device * bdev, uint32_t type,
++ struct ttm_mem_type_manager * man);
++ /**
++ * struct ttm_bo_driver member evict_flags:
++ *
++ * @bo: the buffer object to be evicted
++ *
++ * Return the bo flags for a buffer which is not mapped to the hardware.
++ * These will be placed in proposed_flags so that when the move is
++ * finished, they'll end up in bo->mem.flags
++ */
++
++ uint32_t(*evict_flags) (struct ttm_buffer_object * bo);
++ /**
++ * struct ttm_bo_driver member move:
++ *
++ * @bo: the buffer to move
++ * @evict: whether this motion is evicting the buffer from
++ * the graphics address space
++ * @interruptible: Use interruptible sleeps if possible when sleeping.
++ * @no_wait: whether this should give up and return -EBUSY
++ * if this move would require sleeping
++ * @new_mem: the new memory region receiving the buffer
++ *
++ * Move a buffer between two memory regions.
++ */
++ int (*move) (struct ttm_buffer_object * bo,
++ bool evict, bool interruptible,
++ bool no_wait, struct ttm_mem_reg * new_mem);
++
++ /**
++ * struct ttm_bo_driver_member verify_access
++ *
++ * @bo: Pointer to a buffer object.
++ * @filp: Pointer to a struct file trying to access the object.
++ *
++ * Called from the map / write / read methods to verify that the
++ * caller is permitted to access the buffer object.
++ * This member may be set to NULL, which will refuse this kind of
++ * access for all buffer objects.
++ * This function should return 0 if access is granted, -EPERM otherwise.
++ */
++ int (*verify_access) (struct ttm_buffer_object * bo,
++ struct file * filp);
++
++ /**
++ * In case a driver writer dislikes the TTM fence objects,
++ * the driver writer can replace those with sync objects of
++ * his / her own. If it turns out that no driver writer is
++ * using these. I suggest we remove these hooks and plug in
++ * fences directly. The bo driver needs the following functionality:
++ * See the corresponding functions in the fence object API
++ * documentation.
++ */
++
++ bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
++ int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible);
++ int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
++ void (*sync_obj_unref) (void **sync_obj);
++ void *(*sync_obj_ref) (void *sync_obj);
++};
++
++#define TTM_NUM_MEM_TYPES 10
++
++#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0) /* Buffer object is evicted. */
++#define TTM_BO_PRIV_FLAG_MOVING (1 << 1) /* Buffer object is moving and needs
++ idling before CPU mapping */
++/**
++ * struct ttm_bo_device - Buffer object driver device-specific data.
++ *
++ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
++ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
++ * @count: Current number of buffer object.
++ * @pages: Current number of pinned pages.
++ * @dummy_read_page: Pointer to a dummy page used for mapping requests
++ * of unpopulated pages.
++ * @shrink: A shrink callback object used for buffer object swap.
++ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
++ * used by a buffer object. This is excluding page arrays and backing pages.
++ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
++ * @man: An array of mem_type_managers.
++ * @addr_space_mm: Range manager for the device address space.
++ * @lru_lock: Spinlock that protects the buffer and device lru lists and
++ * ddestroy lists.
++ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
++ * If a GPU lockup has been detected, this is forced to 0.
++ * @dev_mapping: A pointer to the struct address_space representing the
++ * device address space.
++ * @wq: Work queue structure for the delayed delete workqueue.
++ *
++ */
++
++struct ttm_bo_device {
++
++ /*
++ * Constant after bo device init / atomic.
++ */
++
++ struct ttm_mem_global *mem_glob;
++ struct ttm_bo_driver *driver;
++ struct page *dummy_read_page;
++ struct ttm_mem_shrink shrink;
++
++ size_t ttm_bo_extra_size;
++ size_t ttm_bo_size;
++
++ rwlock_t vm_lock;
++ /*
++ * Protected by the vm lock.
++ */
++ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
++ struct rb_root addr_space_rb;
++ struct drm_mm addr_space_mm;
++
++ /*
++ * Might want to change this to one lock per manager.
++ */
++ spinlock_t lru_lock;
++ /*
++ * Protected by the lru lock.
++ */
++ struct list_head ddestroy;
++ struct list_head swap_lru;
++
++ /*
++ * Protected by load / firstopen / lastclose /unload sync.
++ */
++
++ bool nice_mode;
++ struct address_space *dev_mapping;
++
++ /*
++ * Internal protection.
++ */
++
++ struct delayed_work wq;
++};
++
++/**
++ * ttm_flag_masked
++ *
++ * @old: Pointer to the result and original value.
++ * @new: New value of bits.
++ * @mask: Mask of bits to change.
++ *
++ * Convenience function to change a number of bits identified by a mask.
++ */
++
++static inline uint32_t
++ttm_flag_masked(uint32_t * old, uint32_t new, uint32_t mask)
++{
++ *old ^= (*old ^ new) & mask;
++ return *old;
++}
++
++/**
++ * ttm_tt_create
++ *
++ * @bdev: pointer to a struct ttm_bo_device:
++ * @size: Size of the data needed backing.
++ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
++ * @dummy_read_page: See struct ttm_bo_device.
++ *
++ * Create a struct ttm_tt to back data with system memory pages.
++ * No pages are actually allocated.
++ * Returns:
++ * NULL: Out of memory.
++ */
++extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ uint32_t page_flags,
++ struct page *dummy_read_page);
++
++/**
++ * ttm_tt_set_user:
++ *
++ * @ttm: The struct ttm_tt to populate.
++ * @tsk: A struct task_struct for which @start is a valid user-space address.
++ * @start: A valid user-space address.
++ * @num_pages: Size in pages of the user memory area.
++ *
++ * Populate a struct ttm_tt with a user-space memory area after first pinning
++ * the pages backing it.
++ * Returns:
++ * !0: Error.
++ */
++
++extern int ttm_tt_set_user(struct ttm_tt *ttm,
++ struct task_struct *tsk,
++ unsigned long start, unsigned long num_pages);
++
++/**
++ * ttm_ttm_bind:
++ *
++ * @ttm: The struct ttm_tt containing backing pages.
++ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
++ *
++ * Bind the pages of @ttm to an aperture location identified by @bo_mem
++ */
++extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
++
++/**
++ * ttm_ttm_destroy:
++ *
++ * @ttm: The struct ttm_tt.
++ *
++ * Unbind, unpopulate and destroy a struct ttm_tt.
++ */
++extern void ttm_tt_destroy(struct ttm_tt *ttm);
++
++/**
++ * ttm_ttm_unbind:
++ *
++ * @ttm: The struct ttm_tt.
++ *
++ * Unbind a struct ttm_tt.
++ */
++extern void ttm_tt_unbind(struct ttm_tt *ttm);
++
++/**
++ * ttm_ttm_destroy:
++ *
++ * @ttm: The struct ttm_tt.
++ * @index: Index of the desired page.
++ *
++ * Return a pointer to the struct page backing @ttm at page
++ * index @index. If the page is unpopulated, one will be allocated to
++ * populate that index.
++ *
++ * Returns:
++ * NULL on OOM.
++ */
++extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
++
++/**
++ * ttm_tt_cache_flush:
++ *
++ * @pages: An array of pointers to struct page:s to flush.
++ * @num_pages: Number of pages to flush.
++ *
++ * Flush the data of the indicated pages from the cpu caches.
++ * This is used when changing caching attributes of the pages from
++ * cache-coherent.
++ */
++extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
++
++/**
++ * ttm_tt_set_placement_caching:
++ *
++ * @ttm A struct ttm_tt the backing pages of which will change caching policy.
++ * @placement: Flag indicating the desired caching policy.
++ *
++ * This function will change caching policy of any default kernel mappings of
++ * the pages backing @ttm. If changing from cached to uncached or write-combined,
++ * all CPU caches will first be flushed to make sure the data of the pages
++ * hit RAM. This function may be very costly as it involves global TLB
++ * and cache flushes and potential page splitting / combining.
++ */
++extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
++extern int ttm_tt_swapout(struct ttm_tt *ttm,
++ struct file *persistant_swap_storage);
++
++/*
++ * ttm_bo.c
++ */
++
++/**
++ * ttm_mem_reg_is_pci
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @mem: A valid struct ttm_mem_reg.
++ *
++ * Returns true if the memory described by @mem is PCI memory,
++ * false otherwise.
++ */
++extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem);
++
++/**
++ * ttm_bo_mem_space
++ *
++ * @bo: Pointer to a struct ttm_buffer_object. the data of which
++ * we want to allocate space for.
++ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
++ * up.
++ * @interruptible: Sleep interruptibly while waiting.
++ * @no_wait: Don't sleep waiting for space to become available.
++ *
++ * Allocate memory space for the buffer object pointed to by @bo, using
++ * the placement flags in @mem, potentially evicting other idle buffer objects.
++ * This function may sleep while waiting for space to become available.
++ * Returns:
++ * -EBUSY: No space available (only if no_wait == 1).
++ * -ENOMEM: Could not allocate memory for the buffer object, either due to
++ * fragmentation or concurrent allocators.
++ * -ERESTART: An interruptible sleep was interrupted by a signal.
++ */
++extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_bo_wait_for_cpu
++ *
++ * @bo: Pointer to a struct ttm_buffer_object.
++ * @no_wait: Don't sleep while waiting.
++ *
++ * Wait until a buffer object is no longer sync'ed for CPU access.
++ * Returns:
++ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
++ * -ERESTART: An interruptible sleep was interrupted by a signal.
++ */
++
++extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
++
++/**
++ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
++ *
++ * @bo Pointer to a struct ttm_buffer_object.
++ * @bus_base On return the base of the PCI region
++ * @bus_offset On return the byte offset into the PCI region
++ * @bus_size On return the byte size of the buffer object or zero if
++ * the buffer object memory is not accessible through a PCI region.
++ *
++ * Returns:
++ * -EINVAL if the buffer object is currently not mappable.
++ * 0 otherwise.
++ */
++
++extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ unsigned long *bus_base,
++ unsigned long *bus_offset,
++ unsigned long *bus_size);
++
++extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
++
++/**
++ * ttm_bo_device_init
++ *
++ * @bdev: A pointer to a struct ttm_bo_device to initialize.
++ * @mem_global: A pointer to an initialized struct ttm_mem_global.
++ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
++ * @file_page_offset: Offset into the device address space that is available
++ * for buffer data. This ensures compatibility with other users of the
++ * address space.
++ *
++ * Initializes a struct ttm_bo_device:
++ * Returns:
++ * !0: Failure.
++ */
++extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_bo_driver *driver,
++ uint64_t file_page_offset);
++
++/**
++ * ttm_bo_reserve:
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @interruptible: Sleep interruptible if waiting.
++ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
++ * @use_sequence: If @bo is already reserved, Only sleep waiting for
++ * it to become unreserved if @sequence < (@bo)->sequence.
++ *
++ * Locks a buffer object for validation. (Or prevents other processes from
++ * locking it for validation) and removes it from lru lists, while taking
++ * a number of measures to prevent deadlocks.
++ *
++ * Deadlocks may occur when two processes try to reserve multiple buffers in
++ * different order, either by will or as a result of a buffer being evicted
++ * to make room for a buffer already reserved. (Buffers are reserved before
++ * they are evicted). The following algorithm prevents such deadlocks from
++ * occurring:
++ * 1) Buffers are reserved with the lru spinlock held. Upon successful
++ * reservation they are removed from the lru list. This stops a reserved buffer
++ * from being evicted. However the lru spinlock is released between the time
++ * a buffer is selected for eviction and the time it is reserved.
++ * Therefore a check is made when a buffer is reserved for eviction, that it
++ * is still the first buffer in the lru list, before it is removed from the
++ * list. @check_lru == 1 forces this check. If it fails, the function returns
++ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
++ * the procedure.
++ * 2) Processes attempting to reserve multiple buffers other than for eviction,
++ * (typically execbuf), should first obtain a unique 32-bit
++ * validation sequence number,
++ * and call this function with @use_sequence == 1 and @sequence == the unique
++ * sequence number. If upon call of this function, the buffer object is already
++ * reserved, the validation sequence is checked against the validation
++ * sequence of the process currently reserving the buffer,
++ * and if the current validation sequence is greater than that of the process
++ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
++ * waiting for the buffer to become unreserved, after which it retries reserving.
++ * The caller should, when receiving an -EAGAIN error
++ * release all its buffer reservations, wait for @bo to become unreserved, and
++ * then rerun the validation with the same validation sequence. This procedure
++ * will always guarantee that the process with the lowest validation sequence
++ * will eventually succeed, preventing both deadlocks and starvation.
++ *
++ * Returns:
++ * -EAGAIN: The reservation may cause a deadlock. Release all buffer reservations,
++ * wait for @bo to become unreserved and try again. (only if use_sequence == 1).
++ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
++ * a signal. Release all buffer reservations and return to user-space.
++ */
++extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence);
++
++/**
++ * ttm_bo_unreserve
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Unreserve a previous reservation of @bo.
++ */
++extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_wait_unreserved
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Wait for a struct ttm_buffer_object to become unreserved.
++ * This is typically used in the execbuf code to relax cpu-usage when
++ * backing off from a potential deadlock condition.
++ */
++extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
++ bool interruptible);
++
++/**
++ * ttm_bo_block_reservation
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @interruptible: Use interruptible sleep when waiting.
++ * @no_wait: Don't sleep, but rather return -EBUSY.
++ *
++ * Block reservation for validation by simply reserving the buffer. This is intended
++ * for single buffer use only without eviction, and thus needs no deadlock protection.
++ *
++ * Returns:
++ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
++ * -ERESTART: If interruptible == 1 and the process received a signal while sleeping.
++ */
++extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait);
++
++/**
++ * ttm_bo_unblock_reservation
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Unblocks reservation leaving lru lists untouched.
++ */
++extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
++
++/*
++ * ttm_bo_util.c
++ */
++
++/**
++ * ttm_bo_move_ttm
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @evict: 1: This is an eviction. Don't try to pipeline.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Optimized move function for a buffer object with both old and
++ * new placement backed by a TTM. The function will, if successful,
++ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
++ * and update the (@bo)->mem placement flags. If unsuccessful, the old
++ * data remains untouched, and it's up to the caller to free the
++ * memory space indicated by @new_mem.
++ * Returns:
++ * !0: Failure.
++ */
++
++extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem);
++
++/**
++ * ttm_bo_move_memcpy
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @evict: 1: This is an eviction. Don't try to pipeline.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Fallback move function for a mappable buffer object in mappable memory.
++ * The function will, if successful,
++ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
++ * and update the (@bo)->mem placement flags. If unsuccessful, the old
++ * data remains untouched, and it's up to the caller to free the
++ * memory space indicated by @new_mem.
++ * Returns:
++ * !0: Failure.
++ */
++
++extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
++ bool evict,
++ bool no_wait, struct ttm_mem_reg *new_mem);
++
++/**
++ * ttm_bo_free_old_node
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Utility function to free an old placement after a successful move.
++ */
++extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_move_accel_cleanup.
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @sync_obj: A sync object that signals when moving is complete.
++ * @sync_obj_arg: An argument to pass to the sync object idle / wait
++ * functions.
++ * @evict: This is an evict move. Don't return until the buffer is idle.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Accelerated move function to be called when an accelerated move
++ * has been scheduled. The function will create a new temporary buffer object
++ * representing the old placement, and put the sync object on both buffer
++ * objects. After that the newly created buffer object is unref'd to be
++ * destroyed when the move is complete. This will help pipeline
++ * buffer moves.
++ */
++
++extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
++ void *sync_obj,
++ void *sync_obj_arg,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem);
++/**
++ * ttm_io_prot
++ *
++ * @caching_flags: Caching flags (TTM_PL_FLAG_*) of the mapping.
++ * @tmp: Page protection flag for a normal, cached mapping.
++ *
++ * Utility function that returns the pgprot_t that should be used for
++ * setting up a PTE with the caching model indicated by @caching_flags.
++ */
++extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
++
++#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
++#define TTM_HAS_AGP
++#include <linux/agp_backend.h>
++
++/**
++ * ttm_agp_backend_init
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @bridge: The agp bridge this device is sitting on.
++ *
++ * Create a TTM backend that uses the indicated AGP bridge as an aperture
++ * for TT memory. This function uses the linux agpgart interface to
++ * bind and unbind memory backing a ttm_tt.
++ */
++extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
++ struct agp_bridge_data *bridge);
++#endif
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_bo_util.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_bo_util.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,529 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_placement_common.h"
++#include "ttm/ttm_pat_compat.h"
++#include <linux/io.h>
++#include <linux/highmem.h>
++#include <linux/wait.h>
++#include <linux/version.h>
++
++void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ if (old_mem->mm_node) {
++ spin_lock(&bo->bdev->lru_lock);
++ drm_mm_put_block(old_mem->mm_node);
++ spin_unlock(&bo->bdev->lru_lock);
++ }
++ old_mem->mm_node = NULL;
++}
++
++int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ int ret;
++
++ if (old_mem->mem_type != TTM_PL_SYSTEM) {
++ ttm_tt_unbind(ttm);
++ ttm_bo_free_old_node(bo);
++ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
++ TTM_PL_MASK_MEM);
++ old_mem->mem_type = TTM_PL_SYSTEM;
++ save_flags = old_mem->flags;
++ }
++
++ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (new_mem->mem_type != TTM_PL_SYSTEM) {
++ ret = ttm_tt_bind(ttm, new_mem);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++ return 0;
++}
++
++int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
++ void **virtual)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ void *addr;
++
++ *virtual = NULL;
++ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
++ if (ret || bus_size == 0)
++ return ret;
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
++ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
++ else {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ if (mem->flags & TTM_PL_FLAG_WC)
++ addr = ioremap_wc(bus_base + bus_offset, bus_size);
++ else
++ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
++#else
++ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
++#endif
++ if (!addr)
++ return -ENOMEM;
++ }
++ *virtual = addr;
++ return 0;
++}
++
++void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
++ void *virtual)
++{
++ struct ttm_mem_type_manager *man;
++
++ man = &bdev->man[mem->mem_type];
++
++ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
++ iounmap(virtual);
++}
++
++static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
++{
++ uint32_t *dstP =
++ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
++ uint32_t *srcP =
++ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
++
++ int i;
++ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
++ iowrite32(ioread32(srcP++), dstP++);
++ return 0;
++}
++
++static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
++ unsigned long page)
++{
++ struct page *d = ttm_tt_get_page(ttm, page);
++ void *dst;
++
++ if (!d)
++ return -ENOMEM;
++
++ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
++ dst = kmap(d);
++ if (!dst)
++ return -ENOMEM;
++
++ memcpy_fromio(dst, src, PAGE_SIZE);
++ kunmap(d);
++ return 0;
++}
++
++static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
++ unsigned long page)
++{
++ struct page *s = ttm_tt_get_page(ttm, page);
++ void *src;
++
++ if (!s)
++ return -ENOMEM;
++
++ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
++ src = kmap(s);
++ if (!src)
++ return -ENOMEM;
++
++ memcpy_toio(dst, src, PAGE_SIZE);
++ kunmap(s);
++ return 0;
++}
++
++int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
++ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_mem_reg old_copy = *old_mem;
++ void *old_iomap;
++ void *new_iomap;
++ int ret;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ unsigned long i;
++ unsigned long page;
++ unsigned long add = 0;
++ int dir;
++
++ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
++ if (ret)
++ return ret;
++ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
++ if (ret)
++ goto out;
++
++ if (old_iomap == NULL && new_iomap == NULL)
++ goto out2;
++ if (old_iomap == NULL && ttm == NULL)
++ goto out2;
++
++ add = 0;
++ dir = 1;
++
++ if ((old_mem->mem_type == new_mem->mem_type) &&
++ (new_mem->mm_node->start <
++ old_mem->mm_node->start + old_mem->mm_node->size)) {
++ dir = -1;
++ add = new_mem->num_pages - 1;
++ }
++
++ for (i = 0; i < new_mem->num_pages; ++i) {
++ page = i * dir + add;
++ if (old_iomap == NULL)
++ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
++ else if (new_iomap == NULL)
++ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
++ else
++ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
++ if (ret)
++ goto out1;
++ }
++ mb();
++ out2:
++ ttm_bo_free_old_node(bo);
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
++ ttm_tt_unbind(ttm);
++ ttm_tt_destroy(ttm);
++ bo->ttm = NULL;
++ }
++
++ out1:
++ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
++ out:
++ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
++ return ret;
++}
++
++/**
++ * ttm_buffer_object_transfer
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
++ * holding the data of @bo with the old placement.
++ *
++ * This is a utility function that may be called after an accelerated move
++ * has been scheduled. A new buffer object is created as a placeholder for
++ * the old data while it's being copied. When that buffer object is idle,
++ * it can be destroyed, releasing the space of the old placement.
++ * Returns:
++ * !0: Failure.
++ */
++
++static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
++ struct ttm_buffer_object **new_obj)
++{
++ struct ttm_buffer_object *fbo;
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
++ if (!fbo)
++ return -ENOMEM;
++
++ *fbo = *bo;
++ mutex_init(&fbo->mutex);
++ mutex_lock(&fbo->mutex);
++
++ init_waitqueue_head(&fbo->event_queue);
++ INIT_LIST_HEAD(&fbo->ddestroy);
++ INIT_LIST_HEAD(&fbo->lru);
++
++ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
++ if (fbo->mem.mm_node)
++ fbo->mem.mm_node->private = (void *)fbo;
++ kref_init(&fbo->list_kref);
++ kref_init(&fbo->kref);
++
++ mutex_unlock(&fbo->mutex);
++
++ *new_obj = fbo;
++ return 0;
++}
++
++pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
++{
++#if defined(__i386__) || defined(__x86_64__)
++ if (caching_flags & TTM_PL_FLAG_WC) {
++ tmp = pgprot_ttm_x86_wc(tmp);
++ } else if (boot_cpu_data.x86 > 3) {
++ tmp = pgprot_noncached(tmp);
++ }
++#elif defined(__powerpc__)
++ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
++ pgprot_val(tmp) |= _PAGE_NO_CACHE;
++ if (caching_flags & TTM_PL_FLAG_UNCACHED)
++ pgprot_val(tmp) |= _PAGE_GUARDED;
++ }
++#endif
++#if defined(__ia64__)
++ if (caching_flags & TTM_PL_FLAG_WC)
++ tmp = pgprot_writecombine(tmp);
++ else
++ tmp = pgprot_noncached(tmp);
++#endif
++#if defined(__sparc__)
++ if (!(caching_flags & TTM_PL_FLAG_CACHED))
++ tmp = pgprot_noncached(tmp);
++#endif
++ return tmp;
++}
++
++static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
++ unsigned long bus_base,
++ unsigned long bus_offset,
++ unsigned long bus_size,
++ struct ttm_bo_kmap_obj *map)
++{
++ struct ttm_bo_device * bdev = bo->bdev;
++ struct ttm_mem_reg * mem = &bo->mem;
++ struct ttm_mem_type_manager * man = &bdev->man[mem->mem_type];
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
++ map->bo_kmap_type = ttm_bo_map_premapped;
++ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);} else {
++ map->bo_kmap_type = ttm_bo_map_iomap;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ if (mem->flags & TTM_PL_FLAG_WC)
++ map->virtual = ioremap_wc(bus_base + bus_offset, bus_size);
++ else
++ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
++#else
++ map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
++#endif
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
++ unsigned long start_page,
++ unsigned long num_pages,
++ struct ttm_bo_kmap_obj *map)
++{
++ struct ttm_mem_reg * mem = &bo->mem; pgprot_t prot;
++ struct ttm_tt * ttm = bo->ttm;
++ struct page * d;
++ int i;
++ BUG_ON(!ttm);
++ if (num_pages == 1 && (mem->flags & TTM_PL_FLAG_CACHED)) {
++ /*
++ * We're mapping a single page, and the desired
++ * page protection is consistent with the bo.
++ */
++ map->bo_kmap_type = ttm_bo_map_kmap;
++ map->page = ttm_tt_get_page(ttm, start_page);
++ map->virtual = kmap(map->page);
++ } else {
++ /*
++ * Populate the part we're mapping;
++ */
++ for (i = start_page; i < start_page + num_pages; ++i) {
++ d = ttm_tt_get_page(ttm, i); if (!d)
++ return -ENOMEM;
++ }
++
++ /*
++ * We need to use vmap to get the desired page protection
++ * or to make the buffer object look contiguous.
++ */
++ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
++ PAGE_KERNEL :
++ ttm_io_prot(mem->flags, PAGE_KERNEL);
++ map->bo_kmap_type = ttm_bo_map_vmap;
++ map->virtual = vmap(ttm->pages + start_page, num_pages, 0, prot);
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++int ttm_bo_kmap(struct ttm_buffer_object *bo,
++ unsigned long start_page, unsigned long num_pages,
++ struct ttm_bo_kmap_obj *map)
++{
++ int ret;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ BUG_ON(!list_empty(&bo->swap));
++ map->virtual = NULL;
++ if (num_pages > bo->num_pages)
++ return -EINVAL;
++ if (start_page > bo->num_pages)
++ return -EINVAL;
++#if 0
++ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
++ return -EPERM;
++#endif
++ ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
++ &bus_offset, &bus_size);
++ if (ret)
++ return ret;
++ if (bus_size == 0) {
++ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
++ } else {
++ bus_offset += start_page << PAGE_SHIFT;
++ bus_size = num_pages << PAGE_SHIFT;
++ return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
++ }
++}
++
++void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
++{
++ if (!map->virtual)
++ return;
++ switch (map->bo_kmap_type) {
++ case ttm_bo_map_iomap:
++ iounmap(map->virtual);
++ break;
++ case ttm_bo_map_vmap:
++ vunmap(map->virtual);
++ break;
++ case ttm_bo_map_kmap:
++ kunmap(map->page);
++ break;
++ case ttm_bo_map_premapped:
++ break;
++ default:
++ BUG();
++ }
++ map->virtual = NULL;
++ map->page = NULL;
++}
++
++int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
++ unsigned long dst_offset,
++ unsigned long *pfn, pgprot_t * prot)
++{
++ struct ttm_mem_reg * mem = &bo->mem;
++ struct ttm_bo_device * bdev = bo->bdev;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
++ &bus_size);
++ if (ret)
++ return -EINVAL;
++ if (bus_size != 0)
++ * pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
++ else
++ if (!bo->ttm)
++ return -EINVAL;
++ else
++ *pfn =
++ page_to_pfn(ttm_tt_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
++ *prot =
++ (mem->flags & TTM_PL_FLAG_CACHED) ? PAGE_KERNEL : ttm_io_prot(mem->
++ flags,
++ PAGE_KERNEL);
++ return 0;
++}
++
++int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
++ void *sync_obj,
++ void *sync_obj_arg,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device * bdev = bo->bdev;
++ struct ttm_bo_driver * driver = bdev->driver;
++ struct ttm_mem_type_manager * man = &bdev->man[new_mem->mem_type];
++ struct ttm_mem_reg * old_mem = &bo->mem;
++ int ret;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ struct ttm_buffer_object * old_obj;
++ if (bo->sync_obj)
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ bo->sync_obj_arg = sync_obj_arg;
++ if (evict) {
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (ret)
++ return ret;
++ ttm_bo_free_old_node(bo);
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) {
++ ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL;
++ }
++ } else {
++
++ /* This should help pipeline ordinary buffer moves.
++ *
++ * Hang old buffer memory on a new buffer object,
++ * and leave it to be released when the GPU
++ * operation has completed.
++ */
++ ret = ttm_buffer_object_transfer(bo, &old_obj);
++ if (ret)
++ return ret;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
++ old_obj->ttm = NULL;
++ else
++ bo->ttm = NULL;
++ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
++ ttm_bo_unreserve(old_obj);
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++ return 0;
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_bo_vm.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,596 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_placement_common.h"
++#include <linux/mm.h>
++#include <linux/version.h>
++#include <linux/rbtree.h>
++#include <asm/uaccess.h>
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
++#error "TTM doesn't build on kernel versions below 2.6.25."
++#endif
++
++#define TTM_BO_VM_NUM_PREFAULT 16
++
++static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
++ unsigned long page_start,
++ unsigned long num_pages)
++{
++ struct rb_node *cur = bdev->addr_space_rb.rb_node;
++ unsigned long cur_offset;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *best_bo = NULL;
++
++ while (likely(cur != NULL)) {
++ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
++ cur_offset = bo->vm_node->start;
++ if (page_start >= cur_offset) {
++ cur = cur->rb_right;
++ best_bo = bo;
++ if (page_start == cur_offset)
++ break;
++ } else
++ cur = cur->rb_left;
++ }
++
++ if (unlikely(best_bo == NULL))
++ return NULL;
++
++ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
++ (page_start + num_pages)))
++ return NULL;
++
++ return best_bo;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
++static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct ttm_bo_device *bdev = bo->bdev;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long page_offset;
++ unsigned long page_last;
++ unsigned long pfn;
++ struct ttm_tt *ttm = NULL;
++ struct page *page;
++ int ret;
++ int i;
++ bool is_iomem;
++ unsigned long address = (unsigned long)vmf->virtual_address;
++ int retval = VM_FAULT_NOPAGE;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ return VM_FAULT_NOPAGE;
++
++ mutex_lock(&bo->mutex);
++
++ /*
++ * Wait for buffer data in transit, due to a pipelined
++ * move.
++ */
++
++ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
++ ret = ttm_bo_wait(bo, false, true, false);
++ if (unlikely(ret != 0)) {
++ retval = (ret != -ERESTART) ?
++ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
++ goto out_unlock;
++ }
++ }
++
++ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
++ &bus_size);
++ if (unlikely(ret != 0)) {
++ retval = VM_FAULT_SIGBUS;
++ goto out_unlock;
++ }
++
++ is_iomem = (bus_size != 0);
++
++ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++
++ if (unlikely(page_offset >= bo->num_pages)) {
++ retval = VM_FAULT_SIGBUS;
++ goto out_unlock;
++ }
++
++ /*
++ * Strictly, we're not allowed to modify vma->vm_page_prot here,
++ * since the mmap_sem is only held in read mode. However, we
++ * modify only the caching bits of vma->vm_page_prot and
++ * consider those bits protected by
++ * the bo->mutex, as we should be the only writers.
++ * There shouldn't really be any readers of these bits except
++ * within vm_insert_mixed()? fork?
++ *
++ * TODO: Add a list of vmas to the bo, and change the
++ * vma->vm_page_prot when the object changes caching policy, with
++ * the correct locks held.
++ */
++
++ if (is_iomem) {
++ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
++ vma->vm_page_prot);
++ } else {
++ ttm = bo->ttm;
++ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
++ vm_get_page_prot(vma->vm_flags) :
++ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
++ }
++
++ /*
++ * Speculatively prefault a number of pages. Only error on
++ * first page.
++ */
++
++ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
++
++ if (is_iomem)
++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
++ page_offset;
++ else {
++ page = ttm_tt_get_page(ttm, page_offset);
++ if (unlikely(!page && i == 0)) {
++ retval = VM_FAULT_OOM;
++ goto out_unlock;
++ } else if (unlikely(!page)) {
++ break;
++ }
++ pfn = page_to_pfn(page);
++ }
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
++ ret = vm_insert_mixed(vma, address, pfn);
++#else
++ ret = vm_insert_pfn(vma, address, pfn);
++#endif
++ /*
++ * Somebody beat us to this PTE or prefaulting to
++ * an already populated PTE, or prefaulting error.
++ */
++
++ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++ break;
++ else if (unlikely(ret != 0)) {
++ retval =
++ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
++ goto out_unlock;
++
++ }
++
++ address += PAGE_SIZE;
++ if (unlikely(++page_offset >= page_last))
++ break;
++ }
++
++ out_unlock:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return retval;
++}
++
++#else
++
++static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
++ unsigned long address)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct ttm_bo_device *bdev = bo->bdev;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long page_offset;
++ unsigned long page_last;
++ unsigned long pfn;
++ struct ttm_tt *ttm = NULL;
++ struct page *page;
++ int ret;
++ int i;
++ bool is_iomem;
++ unsigned long retval = NOPFN_REFAULT;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ return NOPFN_REFAULT;
++
++ mutex_lock(&bo->mutex);
++
++ /*
++ * Wait for buffer data in transit, due to a pipelined
++ * move.
++ */
++
++ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
++ ret = ttm_bo_wait(bo, false, true, false);
++ if (unlikely(ret != 0)) {
++ retval = (ret != -ERESTART) ?
++ NOPFN_SIGBUS : NOPFN_REFAULT;
++ goto out_unlock;
++ }
++ }
++
++ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
++ &bus_size);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Attempted buffer object access "
++ "of unmappable object.\n");
++ retval = NOPFN_SIGBUS;
++ goto out_unlock;
++ }
++
++ is_iomem = (bus_size != 0);
++
++ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++
++ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++
++ if (unlikely(page_offset >= bo->num_pages)) {
++ printk(KERN_ERR "Attempted buffer object access "
++ "outside object.\n");
++ retval = NOPFN_SIGBUS;
++ goto out_unlock;
++ }
++
++ /*
++ * Strictly, we're not allowed to modify vma->vm_page_prot here,
++ * since the mmap_sem is only held in read mode. However, we
++ * modify only the caching bits of vma->vm_page_prot and
++ * consider those bits protected by
++ * the bo->mutex, as we should be the only writers.
++ * There shouldn't really be any readers of these bits except
++ * within vm_insert_mixed()? fork?
++ *
++ * TODO: Add a list of vmas to the bo, and change the
++ * vma->vm_page_prot when the object changes caching policy, with
++ * the correct locks held.
++ */
++
++ if (is_iomem) {
++ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
++ vma->vm_page_prot);
++ } else {
++ ttm = bo->ttm;
++ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
++ vm_get_page_prot(vma->vm_flags) :
++ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
++ }
++
++ /*
++ * Speculatively prefault a number of pages. Only error on
++ * first page.
++ */
++
++ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
++
++ if (is_iomem)
++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
++ page_offset;
++ else {
++ page = ttm_tt_get_page(ttm, page_offset);
++ if (unlikely(!page && i == 0)) {
++ retval = NOPFN_OOM;
++ goto out_unlock;
++ } else if (unlikely(!page)) {
++ break;
++ }
++ pfn = page_to_pfn(page);
++ }
++
++ ret = vm_insert_pfn(vma, address, pfn);
++ if (unlikely(ret == -EBUSY || (ret != 0 && i != 0)))
++ break;
++
++ /*
++ * Somebody beat us to this PTE or prefaulting to
++ * an already populated PTE, or prefaulting error.
++ */
++
++ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++ break;
++ else if (unlikely(ret != 0)) {
++ retval =
++ (ret == -ENOMEM) ? NOPFN_OOM : NOPFN_SIGBUS;
++ goto out_unlock;
++ }
++
++ address += PAGE_SIZE;
++ if (unlikely(++page_offset >= page_last))
++ break;
++ }
++
++ out_unlock:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return retval;
++}
++#endif
++
++static void ttm_bo_vm_open(struct vm_area_struct *vma)
++{
++ struct ttm_buffer_object *bo =
++ (struct ttm_buffer_object *)vma->vm_private_data;
++
++ (void)ttm_bo_reference(bo);
++}
++
++static void ttm_bo_vm_close(struct vm_area_struct *vma)
++{
++ struct ttm_buffer_object *bo =
++ (struct ttm_buffer_object *)vma->vm_private_data;
++
++ ttm_bo_unref(&bo);
++ vma->vm_private_data = NULL;
++}
++
++static struct vm_operations_struct ttm_bo_vm_ops = {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
++ .fault = ttm_bo_vm_fault,
++#else
++ .nopfn = ttm_bo_vm_nopfn,
++#endif
++ .open = ttm_bo_vm_open,
++ .close = ttm_bo_vm_close
++};
++
++int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct ttm_bo_device *bdev)
++{
++ struct ttm_bo_driver *driver;
++ struct ttm_buffer_object *bo;
++ int ret;
++
++ read_lock(&bdev->vm_lock);
++ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
++ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
++ if (likely(bo != NULL))
++ ttm_bo_reference(bo);
++ read_unlock(&bdev->vm_lock);
++
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR "Could not find buffer object to map.\n");
++ ret = -EINVAL;
++ goto out_unref;
++ }
++
++ driver = bo->bdev->driver;
++ if (unlikely(!driver->verify_access)) {
++ ret = -EPERM;
++ goto out_unref;
++ }
++ ret = driver->verify_access(bo, filp);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ vma->vm_ops = &ttm_bo_vm_ops;
++
++ /*
++ * Note: We're transferring the bo reference to
++ * vma->vm_private_data here.
++ */
++
++ vma->vm_private_data = bo;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++#else
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
++#endif
++ return 0;
++ out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
++{
++ if (vma->vm_pgoff != 0)
++ return -EACCES;
++
++ vma->vm_ops = &ttm_bo_vm_ops;
++ vma->vm_private_data = ttm_bo_reference(bo);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++#else
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
++#endif
++ return 0;
++}
++
++ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,
++ const char __user * wbuf, char __user * rbuf, size_t count,
++ loff_t * f_pos, bool write)
++{
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_driver *driver;
++ struct ttm_bo_kmap_obj map;
++ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
++ unsigned long kmap_offset;
++ unsigned long kmap_end;
++ unsigned long kmap_num;
++ size_t io_size;
++ unsigned int page_offset;
++ char *virtual;
++ int ret;
++ bool no_wait = false;
++ bool dummy;
++
++ read_lock(&bdev->vm_lock);
++ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
++ if (likely(bo != NULL))
++ ttm_bo_reference(bo);
++ read_unlock(&bdev->vm_lock);
++
++ if (unlikely(bo == NULL))
++ return -EFAULT;
++
++ /* Only dereference the driver once the lookup has succeeded. */
++ driver = bo->bdev->driver;
++ if (unlikely(!driver->verify_access)) {
++ ret = -EPERM;
++ goto out_unref;
++ }
++ ret = driver->verify_access(bo, filp);
++ if (unlikely(ret != 0))
++ goto out_unref;
++ kmap_offset = dev_offset - bo->vm_node->start;
++ if (unlikely(kmap_offset >= bo->num_pages)) {
++ ret = -EFBIG;
++ goto out_unref;
++ }
++
++ page_offset = *f_pos & ~PAGE_MASK;
++ io_size = bo->num_pages - kmap_offset;
++ io_size = (io_size << PAGE_SHIFT) - page_offset;
++ if (count < io_size)
++ io_size = count;
++
++ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
++ kmap_num = kmap_end - kmap_offset + 1;
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++
++ switch (ret) {
++ case 0:
++ break;
++ case -ERESTART:
++ ret = -EINTR;
++ goto out_unref;
++ case -EBUSY:
++ ret = -EAGAIN;
++ goto out_unref;
++ default:
++ goto out_unref;
++ }
++
++ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ virtual = ttm_kmap_obj_virtual(&map, &dummy);
++ virtual += page_offset;
++
++ if (write)
++ ret = copy_from_user(virtual, wbuf, io_size);
++ else
++ ret = copy_to_user(rbuf, virtual, io_size);
++
++ ttm_bo_kunmap(&map);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ if (unlikely(ret != 0))
++ return -EFBIG;
++
++ *f_pos += io_size;
++
++ return io_size;
++ out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object * bo, const char __user * wbuf,
++ char __user * rbuf, size_t count, loff_t * f_pos,
++ bool write)
++{
++ struct ttm_bo_kmap_obj map;
++ unsigned long kmap_offset;
++ unsigned long kmap_end;
++ unsigned long kmap_num;
++ size_t io_size;
++ unsigned int page_offset;
++ char *virtual;
++ int ret;
++ bool no_wait = false;
++ bool dummy;
++
++ kmap_offset = (*f_pos >> PAGE_SHIFT);
++ if (unlikely(kmap_offset >= bo->num_pages))
++ return -EFBIG;
++
++ page_offset = *f_pos & ~PAGE_MASK;
++ io_size = bo->num_pages - kmap_offset;
++ io_size = (io_size << PAGE_SHIFT) - page_offset;
++ if (count < io_size)
++ io_size = count;
++
++ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
++ kmap_num = kmap_end - kmap_offset + 1;
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++
++ switch (ret) {
++ case 0:
++ break;
++ case -ERESTART:
++ return -EINTR;
++ case -EBUSY:
++ return -EAGAIN;
++ default:
++ return ret;
++ }
++
++ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++ if (unlikely(ret != 0))
++ return ret;
++
++ virtual = ttm_kmap_obj_virtual(&map, &dummy);
++ virtual += page_offset;
++
++ if (write)
++ ret = copy_from_user(virtual, wbuf, io_size);
++ else
++ ret = copy_to_user(rbuf, virtual, io_size);
++
++ ttm_bo_kunmap(&map);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ *f_pos += io_size;
++
++ return io_size;
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,115 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_placement_common.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++void ttm_eu_backoff_reservation(struct list_head *list)
++{
++ struct ttm_validate_buffer *entry;
++
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++ if (!entry->reserved)
++ continue;
++
++ entry->reserved = false;
++ ttm_bo_unreserve(bo);
++ }
++}
++
++/*
++ * Reserve buffers for validation.
++ *
++ * If a buffer in the list is marked for CPU access, we back off and
++ * wait for that buffer to become free for GPU access.
++ *
++ * If a buffer is reserved for another validation, the validator with
++ * the highest validation sequence backs off and waits for that buffer
++ * to become unreserved. This prevents deadlocks when validating multiple
++ * buffers in different orders.
++ */
++
++int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
++{
++ struct ttm_validate_buffer *entry;
++ int ret;
++
++ retry:
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++
++ entry->reserved = false;
++ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
++ if (ret != 0) {
++ ttm_eu_backoff_reservation(list);
++ if (ret == -EAGAIN) {
++ ret = ttm_bo_wait_unreserved(bo, true);
++ if (unlikely(ret != 0))
++ return ret;
++ goto retry;
++ } else
++ return ret;
++ }
++
++ entry->reserved = true;
++ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
++ ttm_eu_backoff_reservation(list);
++ ret = ttm_bo_wait_cpu(bo, false);
++ if (ret)
++ return ret;
++ goto retry;
++ }
++ }
++ return 0;
++}
++
++void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
++{
++ struct ttm_validate_buffer *entry;
++
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++ void *old_sync_obj;
++
++ mutex_lock(&bo->mutex);
++ old_sync_obj = bo->sync_obj;
++ bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ bo->sync_obj_arg = entry->new_sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ entry->reserved = false;
++ if (old_sync_obj)
++ driver->sync_obj_unref(&old_sync_obj);
++ }
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_execbuf_util.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,110 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_EXECBUF_UTIL_H_
++#define _TTM_EXECBUF_UTIL_H_
++
++#include "ttm/ttm_bo_api.h"
++#include "ttm/ttm_fence_api.h"
++#include <linux/list.h>
++
++/**
++ * struct ttm_validate_buffer
++ *
++ * @head: list head for thread-private list.
++ * @bo: refcounted buffer object pointer.
++ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
++ * adding a new sync object.
++ * @reserved: Indicates whether @bo has been reserved for validation.
++ */
++
++struct ttm_validate_buffer {
++ struct list_head head;
++ struct ttm_buffer_object *bo;
++ void *new_sync_obj_arg;
++ bool reserved;
++};
++
++/**
++ * function ttm_eu_backoff_reservation
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ *
++ * Undoes all buffer validation reservations for bos pointed to by
++ * the list entries.
++ */
++
++extern void ttm_eu_backoff_reservation(struct list_head *list);
++
++/**
++ * function ttm_eu_reserve_buffers
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ * @val_seq: A unique sequence number.
++ *
++ * Tries to reserve bos pointed to by the list entries for validation.
++ * If the function returns 0, all buffers are marked as "unfenced",
++ * taken off the lru lists and are not synced for write CPU usage.
++ *
++ * If the function detects a deadlock due to multiple threads trying to
++ * reserve the same buffers in reverse order, all threads except one will
++ * back off and retry. This function may sleep while waiting for
++ * CPU write reservations to be cleared, and for other threads to
++ * unreserve their buffers.
++ *
++ * This function may return -ERESTART or -EAGAIN if the calling process
++ * receives a signal while waiting. In that case, no buffers on the list
++ * will be reserved upon return.
++ *
++ * Buffers reserved by this function should be unreserved by
++ * a call to either ttm_eu_backoff_reservation() or
++ * ttm_eu_fence_buffer_objects() when command submission is complete or
++ * has failed.
++ */
++
++extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
++
++/**
++ * function ttm_eu_fence_buffer_objects.
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ * @sync_obj: The new sync object for the buffers.
++ *
++ * This function should be called when command submission is complete, and
++ * it will add a new sync object to bos pointed to by entries on @list.
++ * It also unreserves all buffers, putting them on lru lists.
++ *
++ */
++
++extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_fence_api.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_fence_api.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,277 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_FENCE_API_H_
++#define _TTM_FENCE_API_H_
++
++#include <linux/list.h>
++#include <linux/kref.h>
++
++#define TTM_FENCE_FLAG_EMIT (1 << 0)
++#define TTM_FENCE_TYPE_EXE (1 << 0)
++
++struct ttm_fence_device;
++
++/**
++ * struct ttm_fence_info
++ *
++ * @fence_class: The fence class.
++ * @fence_type: Bitfield indicating types for this fence.
++ * @signaled_types: Bitfield indicating which types are signaled.
++ * @error: Last error reported from the device.
++ *
++ * Used as output from the ttm_fence_get_info
++ */
++
++struct ttm_fence_info {
++ uint32_t signaled_types;
++ uint32_t error;
++};
++
++/**
++ * struct ttm_fence_object
++ *
++ * @fdev: Pointer to the fence device struct.
++ * @kref: Holds the reference count of this fence object.
++ * @ring: List head used for the circular list of not-completely
++ * signaled fences.
++ * @info: Data for fast retrieval using the ttm_fence_get_info()
++ * function.
++ * @timeout_jiffies: Absolute jiffies value indicating when this fence
++ * object times out and, if waited on, calls ttm_fence_lockup
++ * to check for and resolve a GPU lockup.
++ * @sequence: Fence sequence number.
++ * @waiting_types: Types currently waited on.
++ * @destroy: Called to free the fence object, when its refcount has
++ * reached zero. If NULL, kfree is used.
++ *
++ * This struct is provided in the driver interface so that drivers can
++ * derive from it and create their own fence implementation. All members
++ * are private to the fence implementation and the fence driver callbacks.
++ * Otherwise a driver may access the derived object using container_of().
++ */
++
++struct ttm_fence_object {
++ struct ttm_fence_device *fdev;
++ struct kref kref;
++ uint32_t fence_class;
++ uint32_t fence_type;
++
++ /*
++ * The below fields are protected by the fence class
++ * manager spinlock.
++ */
++
++ struct list_head ring;
++ struct ttm_fence_info info;
++ unsigned long timeout_jiffies;
++ uint32_t sequence;
++ uint32_t waiting_types;
++ void (*destroy) (struct ttm_fence_object *);
++};
++
++/**
++ * ttm_fence_object_init
++ *
++ * @fdev: Pointer to a struct ttm_fence_device.
++ * @fence_class: Fence class for this fence.
++ * @type: Fence type for this fence.
++ * @create_flags: Flags indicating various actions at init time. At this point
++ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
++ * the command stream.
++ * @destroy: Destroy function. If NULL, kfree() is used.
++ * @fence: The struct ttm_fence_object to initialize.
++ *
++ * Initialize a pre-allocated fence object. This function, together with the
++ * destroy function makes it possible to derive driver-specific fence objects.
++ */
++
++extern int
++ttm_fence_object_init(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ void (*destroy) (struct ttm_fence_object * fence),
++ struct ttm_fence_object *fence);
++
++/**
++ * ttm_fence_object_create
++ *
++ * @fdev: Pointer to a struct ttm_fence_device.
++ * @fence_class: Fence class for this fence.
++ * @type: Fence type for this fence.
++ * @create_flags: Flags indicating various actions at init time. At this point
++ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
++ * the command stream.
++ * @c_fence: On successful termination, *(@c_fence) will point to the created
++ * fence object.
++ *
++ * Create and initialize a struct ttm_fence_object. The destroy function will
++ * be set to kfree().
++ */
++
++extern int
++ttm_fence_object_create(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ struct ttm_fence_object **c_fence);
++
++/**
++ * ttm_fence_object_wait
++ *
++ * @fence: The fence object to wait on.
++ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
++ * @interruptible: Sleep interruptible when waiting.
++ * @type_mask: Wait for the given type_mask to signal.
++ *
++ * Wait for a fence to signal the given type_mask. The function will
++ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
++ *
++ * Returns
++ * -ERESTART if interrupted by a signal.
++ * May return driver-specific error codes if timed-out.
++ */
++
++extern int
++ttm_fence_object_wait(struct ttm_fence_object *fence,
++ bool lazy, bool interruptible, uint32_t type_mask);
++
++/**
++ * ttm_fence_object_flush
++ *
++ * @fence: The fence object to flush.
++ * @flush_mask: Fence types to flush.
++ *
++ * Make sure that the given fence eventually signals the
++ * types indicated by @flush_mask. Note that this may or may not
++ * map to a CPU or GPU flush.
++ */
++
++extern int
++ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
++
++/**
++ * ttm_fence_get_info
++ *
++ * @fence: The fence object.
++ *
++ * Copy the info block from the fence while holding relevant locks.
++ */
++
++struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
++
++/**
++ * ttm_fence_object_ref
++ *
++ * @fence: The fence object.
++ *
++ * Return a ref-counted pointer to the fence object indicated by @fence.
++ */
++
++static inline struct ttm_fence_object *ttm_fence_object_ref(struct
++ ttm_fence_object
++ *fence)
++{
++ kref_get(&fence->kref);
++ return fence;
++}
++
++/**
++ * ttm_fence_object_unref
++ *
++ * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object.
++ *
++ * Unreference the fence object pointed to by *(@p_fence), clearing
++ * *(p_fence).
++ */
++
++extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
++
++/**
++ * ttm_fence_object_signaled
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ * @mask: Type mask to check whether signaled.
++ *
++ * This function checks (without waiting) whether the fence object
++ * pointed to by @fence has signaled the types indicated by @mask,
++ * and returns 1 if true, 0 if false. This function does NOT perform
++ * an implicit fence flush.
++ */
++
++extern bool
++ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
++
++/**
++ * ttm_fence_class
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ *
++ * Convenience function that returns the fence class of a struct ttm_fence_object.
++ */
++
++static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
++{
++ return fence->fence_class;
++}
++
++/**
++ * ttm_fence_types
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ *
++ * Convenience function that returns the fence types of a struct ttm_fence_object.
++ */
++
++static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
++{
++ return fence->fence_type;
++}
++
++/*
++ * The functions below are wrappers to the above functions, with
++ * similar names but with sync_obj omitted. These wrappers are intended
++ * to be plugged directly into the buffer object driver's sync object
++ * API, if the driver chooses to use ttm_fence_objects as buffer object
++ * sync objects. In the prototypes below, a sync_obj is cast to a
++ * struct ttm_fence_object, whereas a sync_arg is cast to an uint32_t representing
++ * a fence_type argument.
++ */
++
++extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
++extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible);
++extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
++extern void ttm_fence_sync_obj_unref(void **sync_obj);
++extern void *ttm_fence_sync_obj_ref(void *sync_obj);
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence.c b/drivers/gpu/drm/psb/ttm/ttm_fence.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_fence.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_fence.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,607 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm/ttm_fence_api.h"
++#include "ttm/ttm_fence_driver.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++#include <drm/drmP.h>
++
++/*
++ * Simple implementation for now.
++ */
++
++static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ printk(KERN_ERR "GPU lockup dectected on engine %u "
++ "fence type 0x%08x\n",
++ (unsigned int)fence->fence_class, (unsigned int)mask);
++ /*
++ * Give engines some time to idle?
++ */
++
++ write_lock(&fc->lock);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, mask, -EBUSY);
++ write_unlock(&fc->lock);
++}
++
++/*
++ * Convenience function to be called by fence::wait methods that
++ * need polling.
++ */
++
++int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
++ bool interruptible, uint32_t mask)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ uint32_t count = 0;
++ int ret;
++ unsigned long end_jiffies = fence->timeout_jiffies;
++
++ DECLARE_WAITQUEUE(entry, current);
++ add_wait_queue(&fc->fence_queue, &entry);
++
++ ret = 0;
++
++ for (;;) {
++ __set_current_state((interruptible) ?
++ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
++ if (ttm_fence_object_signaled(fence, mask))
++ break;
++ if (time_after_eq(jiffies, end_jiffies)) {
++ if (driver->lockup)
++ driver->lockup(fence, mask);
++ else
++ ttm_fence_lockup(fence, mask);
++ continue;
++ }
++ if (lazy)
++ schedule_timeout(1);
++ else if ((++count & 0x0F) == 0) {
++ __set_current_state(TASK_RUNNING);
++ schedule();
++ __set_current_state((interruptible) ?
++ TASK_INTERRUPTIBLE :
++ TASK_UNINTERRUPTIBLE);
++ }
++ if (interruptible && signal_pending(current)) {
++ ret = -ERESTART;
++ break;
++ }
++ }
++ __set_current_state(TASK_RUNNING);
++ remove_wait_queue(&fc->fence_queue, &entry);
++ return ret;
++}
++
++/*
++ * Typically called by the IRQ handler.
++ */
++
++void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
++ uint32_t sequence, uint32_t type, uint32_t error)
++{
++ int wake = 0;
++ uint32_t diff;
++ uint32_t relevant_type;
++ uint32_t new_type;
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
++ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
++ struct list_head *head;
++ struct ttm_fence_object *fence, *next;
++ bool found = false;
++
++ if (list_empty(&fc->ring))
++ return;
++
++ list_for_each_entry(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & fc->sequence_mask;
++ if (diff > fc->wrap_diff) {
++ found = true;
++ break;
++ }
++ }
++
++ fc->waiting_types &= ~type;
++ head = (found) ? &fence->ring : &fc->ring;
++
++ list_for_each_entry_safe_reverse(fence, next, head, ring) {
++ if (&fence->ring == &fc->ring)
++ break;
++
++ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
++ (unsigned long)fence, fence->sequence,
++ fence->fence_type);
++
++ if (error) {
++ fence->info.error = error;
++ fence->info.signaled_types = fence->fence_type;
++ list_del_init(&fence->ring);
++ wake = 1;
++ break;
++ }
++
++ relevant_type = type & fence->fence_type;
++ new_type = (fence->info.signaled_types | relevant_type) ^
++ fence->info.signaled_types;
++
++ if (new_type) {
++ fence->info.signaled_types |= new_type;
++ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
++ (unsigned long)fence,
++ fence->info.signaled_types);
++
++ if (unlikely(driver->signaled))
++ driver->signaled(fence);
++
++ if (driver->needed_flush)
++ fc->pending_flush |=
++ driver->needed_flush(fence);
++
++ if (new_type & fence->waiting_types)
++ wake = 1;
++ }
++
++ fc->waiting_types |=
++ fence->waiting_types & ~fence->info.signaled_types;
++
++ if (!(fence->fence_type & ~fence->info.signaled_types)) {
++ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
++ (unsigned long)fence);
++ list_del_init(&fence->ring);
++ }
++ }
++
++ /*
++ * Reinstate lost waiting types.
++ */
++
++ if ((fc->waiting_types & type) != type) {
++ head = head->prev;
++ list_for_each_entry(fence, head, ring) {
++ if (&fence->ring == &fc->ring)
++ break;
++ diff =
++ (fc->highest_waiting_sequence -
++ fence->sequence) & fc->sequence_mask;
++ if (diff > fc->wrap_diff)
++ break;
++
++ fc->waiting_types |=
++ fence->waiting_types & ~fence->info.signaled_types;
++ }
++ }
++
++ if (wake)
++ wake_up_all(&fc->fence_queue);
++}
++
++static void ttm_fence_unring(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ list_del_init(&fence->ring);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
++{
++ unsigned long flags;
++ bool signaled;
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ mask &= fence->fence_type;
++ read_lock_irqsave(&fc->lock, flags);
++ signaled = (mask & fence->info.signaled_types) == mask;
++ read_unlock_irqrestore(&fc->lock, flags);
++ if (!signaled && driver->poll) {
++ write_lock_irqsave(&fc->lock, flags);
++ driver->poll(fence->fdev, fence->fence_class, mask);
++ signaled = (mask & fence->info.signaled_types) == mask;
++ write_unlock_irqrestore(&fc->lock, flags);
++ }
++ return signaled;
++}
++
++int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++ uint32_t saved_pending_flush;
++ uint32_t diff;
++ bool call_flush;
++
++ if (type & ~fence->fence_type) {
++ DRM_ERROR("Flush trying to extend fence type, "
++ "0x%x, 0x%x\n", type, fence->fence_type);
++ return -EINVAL;
++ }
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ fence->waiting_types |= type;
++ fc->waiting_types |= fence->waiting_types;
++ diff = (fence->sequence - fc->highest_waiting_sequence) &
++ fc->sequence_mask;
++
++ if (diff < fc->wrap_diff)
++ fc->highest_waiting_sequence = fence->sequence;
++
++ /*
++ * fence->waiting_types has changed. Determine whether
++ * we need to initiate some kind of flush as a result of this.
++ */
++
++ saved_pending_flush = fc->pending_flush;
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++
++ if (driver->poll)
++ driver->poll(fence->fdev, fence->fence_class,
++ fence->waiting_types);
++
++ call_flush = (fc->pending_flush != 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++ driver->flush(fence->fdev, fence->fence_class);
++
++ return 0;
++}
++
++/*
++ * Make sure old fence objects are signaled before their fence sequences are
++ * wrapped around and reused.
++ */
++
++void ttm_fence_flush_old(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t sequence)
++{
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
++ struct ttm_fence_object *fence;
++ unsigned long irq_flags;
++ const struct ttm_fence_driver *driver = fdev->driver;
++ bool call_flush;
++
++ uint32_t diff;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++
++ list_for_each_entry_reverse(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & fc->sequence_mask;
++ if (diff <= fc->flush_diff)
++ break;
++
++ fence->waiting_types = fence->fence_type;
++ fc->waiting_types |= fence->fence_type;
++
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++ }
++
++ if (driver->poll)
++ driver->poll(fdev, fence_class, fc->waiting_types);
++
++ call_flush = (fc->pending_flush != 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++ driver->flush(fdev, fence_class);
++
++ /*
++ * FIXME: Should we implement a wait here for really old fences?
++ */
++
++}
++
++int ttm_fence_object_wait(struct ttm_fence_object *fence,
++ bool lazy, bool interruptible, uint32_t mask)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ int ret = 0;
++ unsigned long timeout;
++ unsigned long cur_jiffies;
++ unsigned long to_jiffies;
++
++ if (mask & ~fence->fence_type) {
++ DRM_ERROR("Wait trying to extend fence type"
++ " 0x%08x 0x%08x\n", mask, fence->fence_type);
++ BUG();
++ return -EINVAL;
++ }
++
++ if (driver->wait)
++ return driver->wait(fence, lazy, interruptible, mask);
++
++ ttm_fence_object_flush(fence, mask);
++ retry:
++ if (!driver->has_irq ||
++ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
++
++ cur_jiffies = jiffies;
++ to_jiffies = fence->timeout_jiffies;
++
++ timeout = (time_after(to_jiffies, cur_jiffies)) ?
++ to_jiffies - cur_jiffies : 1;
++
++ if (interruptible)
++ ret = wait_event_interruptible_timeout
++ (fc->fence_queue,
++ ttm_fence_object_signaled(fence, mask), timeout);
++ else
++ ret = wait_event_timeout
++ (fc->fence_queue,
++ ttm_fence_object_signaled(fence, mask), timeout);
++
++ if (unlikely(ret == -ERESTARTSYS))
++ return -ERESTART;
++
++ if (unlikely(ret == 0)) {
++ if (driver->lockup)
++ driver->lockup(fence, mask);
++ else
++ ttm_fence_lockup(fence, mask);
++ goto retry;
++ }
++
++ return 0;
++ }
++
++ return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
++}
++
++int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
++ uint32_t fence_class, uint32_t type)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long flags;
++ uint32_t sequence;
++ unsigned long timeout;
++ int ret;
++
++ ttm_fence_unring(fence);
++ ret = driver->emit(fence->fdev,
++ fence_class, fence_flags, &sequence, &timeout);
++ if (ret)
++ return ret;
++
++ write_lock_irqsave(&fc->lock, flags);
++ fence->fence_class = fence_class;
++ fence->fence_type = type;
++ fence->waiting_types = 0;
++ fence->info.signaled_types = 0;
++ fence->info.error = 0;
++ fence->sequence = sequence;
++ fence->timeout_jiffies = timeout;
++ if (list_empty(&fc->ring))
++ fc->highest_waiting_sequence = sequence - 1;
++ list_add_tail(&fence->ring, &fc->ring);
++ fc->latest_queued_sequence = sequence;
++ write_unlock_irqrestore(&fc->lock, flags);
++ return 0;
++}
++
++int ttm_fence_object_init(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ void (*destroy) (struct ttm_fence_object *),
++ struct ttm_fence_object *fence)
++{
++ int ret = 0;
++
++ kref_init(&fence->kref);
++ fence->fence_class = fence_class;
++ fence->fence_type = type;
++ fence->info.signaled_types = 0;
++ fence->waiting_types = 0;
++ fence->sequence = 0;
++ fence->info.error = 0;
++ fence->fdev = fdev;
++ fence->destroy = destroy;
++ INIT_LIST_HEAD(&fence->ring);
++ atomic_inc(&fdev->count);
++
++ if (create_flags & TTM_FENCE_FLAG_EMIT) {
++ ret = ttm_fence_object_emit(fence, create_flags,
++ fence->fence_class, type);
++ }
++
++ return ret;
++}
++
++int ttm_fence_object_create(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ struct ttm_fence_object **c_fence)
++{
++ struct ttm_fence_object *fence;
++ int ret;
++
++ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), false, false, false);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Out of memory creating fence object\n");
++ return ret;
++ }
++
++ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
++ if (!fence) {
++ printk(KERN_ERR "Out of memory creating fence object\n");
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_fence_object_init(fdev, fence_class, type,
++ create_flags, NULL, fence);
++ if (ret) {
++ ttm_fence_object_unref(&fence);
++ return ret;
++ }
++ *c_fence = fence;
++
++ return 0;
++}
++
++static void ttm_fence_object_destroy(struct kref *kref)
++{
++ struct ttm_fence_object *fence =
++ container_of(kref, struct ttm_fence_object, kref);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ list_del_init(&fence->ring);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ atomic_dec(&fence->fdev->count);
++ if (fence->destroy)
++ fence->destroy(fence);
++ else {
++ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), false);
++ kfree(fence);
++ }
++}
++
++void ttm_fence_device_release(struct ttm_fence_device *fdev)
++{
++ kfree(fdev->fence_class);
++}
++
++int
++ttm_fence_device_init(int num_classes,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_fence_device *fdev,
++ const struct ttm_fence_class_init *init,
++ bool replicate_init, const struct ttm_fence_driver *driver)
++{
++ struct ttm_fence_class_manager *fc;
++ const struct ttm_fence_class_init *fci;
++ int i;
++
++ fdev->mem_glob = mem_glob;
++ fdev->fence_class = kcalloc(num_classes,
++ sizeof(*fdev->fence_class), GFP_KERNEL);
++
++ if (unlikely(!fdev->fence_class))
++ return -ENOMEM;
++
++ fdev->num_classes = num_classes;
++ atomic_set(&fdev->count, 0);
++ fdev->driver = driver;
++
++ for (i = 0; i < fdev->num_classes; ++i) {
++ fc = &fdev->fence_class[i];
++ fci = &init[(replicate_init) ? 0 : i];
++
++ fc->wrap_diff = fci->wrap_diff;
++ fc->flush_diff = fci->flush_diff;
++ fc->sequence_mask = fci->sequence_mask;
++
++ rwlock_init(&fc->lock);
++ INIT_LIST_HEAD(&fc->ring);
++ init_waitqueue_head(&fc->fence_queue);
++ }
++
++ return 0;
++}
++
++struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ struct ttm_fence_info tmp;
++ unsigned long irq_flags;
++
++ read_lock_irqsave(&fc->lock, irq_flags);
++ tmp = fence->info;
++ read_unlock_irqrestore(&fc->lock, irq_flags);
++
++ return tmp;
++}
++
++void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
++{
++ struct ttm_fence_object *fence = *p_fence;
++
++ *p_fence = NULL;
++ (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
++}
++
++/*
++ * Placement / BO sync object glue.
++ */
++
++bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_signaled(fence, fence_types);
++}
++
++int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
++}
++
++int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_flush(fence, fence_types);
++}
++
++void ttm_fence_sync_obj_unref(void **sync_obj)
++{
++ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
++}
++
++void *ttm_fence_sync_obj_ref(void *sync_obj)
++{
++ return (void *)
++ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_fence_driver.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,309 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_FENCE_DRIVER_H_
++#define _TTM_FENCE_DRIVER_H_
++
++#include <linux/kref.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include "ttm_fence_api.h"
++#include "ttm_memory.h"
++
++/** @file ttm_fence_driver.h
++ *
++ * Definitions needed for a driver implementing the
++ * ttm_fence subsystem.
++ */
++
++/**
++ * struct ttm_fence_class_manager:
++ *
++ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
++ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
++ * @flush_diff: Sequence difference to trigger fence flush.
++ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
++ * seqa as old and needing a flush.
++ * @sequence_mask: Mask of valid bits in a fence sequence.
++ * @lock: Lock protecting this struct as well as fence objects
++ * associated with this struct.
++ * @ring: Circular sequence-ordered list of fence objects.
++ * @pending_flush: Fence types currently needing a flush.
++ * @waiting_types: Fence types that are currently waited for.
++ * @fence_queue: Queue of waiters on fences belonging to this fence class.
++ * @highest_waiting_sequence: Sequence number of the fence with highest sequence
++ * number and that is waited for.
++ * @latest_queued_sequence: Sequence number of the fence latest queued on the ring.
++ */
++
++struct ttm_fence_class_manager {
++
++ /*
++ * Unprotected constant members.
++ */
++
++ uint32_t wrap_diff;
++ uint32_t flush_diff;
++ uint32_t sequence_mask;
++
++ /*
++ * The rwlock protects this structure as well as
++ * the data in all fence objects belonging to this
++ * class. This should be OK as most fence objects are
++ * only read from once they're created.
++ */
++
++ rwlock_t lock;
++ struct list_head ring;
++ uint32_t pending_flush;
++ uint32_t waiting_types;
++ wait_queue_head_t fence_queue;
++ uint32_t highest_waiting_sequence;
++ uint32_t latest_queued_sequence;
++};
++
++/**
++ * struct ttm_fence_device
++ *
++ * @fence_class: Array of fence class managers.
++ * @num_classes: Array dimension of @fence_class.
++ * @count: Current number of fence objects for statistics.
++ * @driver: Driver struct.
++ *
++ * Provided in the driver interface so that the driver can derive
++ * from this struct for its driver_private, and accordingly
++ * access the driver_private from the fence driver callbacks.
++ *
++ * All members except "count" are initialized at creation and
++ * never touched after that. No protection needed.
++ *
++ * This struct is private to the fence implementation and to the fence
++ * driver callbacks, and may otherwise be used by drivers only to
++ * obtain the derived device_private object using container_of().
++ */
++
++struct ttm_fence_device {
++ struct ttm_mem_global *mem_glob;
++ struct ttm_fence_class_manager *fence_class;
++ uint32_t num_classes;
++ atomic_t count;
++ const struct ttm_fence_driver *driver;
++};
++
++/**
++ * struct ttm_fence_class_init
++ *
++ * @wrap_diff: Fence sequence number wrap indicator. If
++ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
++ * considered to be older than sequence2.
++ * @flush_diff: Fence sequence number flush indicator.
++ * If a non-completely-signaled fence has a fence sequence number
++ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
++ * the fence is considered too old and it will be flushed upon the
++ * next call of ttm_fence_flush_old(), to make sure no fences with
++ * stale sequence numbers remain unsignaled. @flush_diff should
++ * be sufficiently less than @wrap_diff.
++ * @sequence_mask: Mask with valid bits of the fence sequence
++ * number set to 1.
++ *
++ * This struct is used as input to ttm_fence_device_init.
++ */
++
++struct ttm_fence_class_init {
++ uint32_t wrap_diff;
++ uint32_t flush_diff;
++ uint32_t sequence_mask;
++};
++
++/**
++ * struct ttm_fence_driver
++ *
++ * @has_irq: Called by a potential waiter. Should return 1 if a
++ * fence object with indicated parameters is expected to signal
++ * automatically, and 0 if the fence implementation needs to
++ * repeatedly call @poll to make it signal.
++ * @emit: Make sure a fence with the given parameters is
++ * present in the indicated command stream. Return its sequence number
++ * in "breadcrumb".
++ * @poll: Check and report sequences of the given "fence_class"
++ * that have signaled "types"
++ * @flush: Make sure that the types indicated by the bitfield
++ * ttm_fence_class_manager::pending_flush will eventually
++ * signal. These bits have been put together using the
++ * result from the needed_flush function described below.
++ * @needed_flush: Given the fence_class and fence_types indicated by
++ * "fence", and the last received fence sequence of this
++ * fence class, indicate what types need a fence flush to
++ * signal. Return as a bitfield.
++ * @wait: Set to non-NULL if the driver wants to override the fence
++ * wait implementation. Return 0 on success, -EBUSY on failure,
++ * and -ERESTART if interruptible and a signal is pending.
++ * @signaled: Driver callback that is called whenever a
++ * ttm_fence_object::signaled_types has changed status.
++ * This function is called from atomic context,
++ * with the ttm_fence_class_manager::lock held in write mode.
++ * @lockup: Driver callback that is called whenever a wait has exceeded
++ * the lifetime of a fence object.
++ * If there is a GPU lockup,
++ * this function should, if possible, reset the GPU,
++ * call the ttm_fence_handler with an error status, and
++ * return. If no lockup was detected, simply extend the
++ * fence timeout_jiffies and return. The driver might
++ * want to protect the lockup check with a mutex and cache a
++ * non-locked-up status for a while to avoid an excessive
++ * amount of lockup checks from every waiting thread.
++ */
++
++struct ttm_fence_driver {
++ bool (*has_irq) (struct ttm_fence_device * fdev,
++ uint32_t fence_class, uint32_t flags);
++ int (*emit) (struct ttm_fence_device * fdev,
++ uint32_t fence_class,
++ uint32_t flags,
++ uint32_t * breadcrumb, unsigned long *timeout_jiffies);
++ void (*flush) (struct ttm_fence_device * fdev, uint32_t fence_class);
++ void (*poll) (struct ttm_fence_device * fdev,
++ uint32_t fence_class, uint32_t types);
++ uint32_t(*needed_flush)
++ (struct ttm_fence_object * fence);
++ int (*wait) (struct ttm_fence_object * fence, bool lazy,
++ bool interruptible, uint32_t mask);
++ void (*signaled) (struct ttm_fence_object * fence);
++ void (*lockup) (struct ttm_fence_object * fence, uint32_t fence_types);
++};
++
++/**
++ * function ttm_fence_device_init
++ *
++ * @num_classes: Number of fence classes for this fence implementation.
++ * @mem_global: Pointer to the global memory accounting info.
++ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
++ * @init: Array of initialization info for each fence class.
++ * @replicate_init: Use the first @init initialization info for all classes.
++ * @driver: Driver callbacks.
++ *
++ * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if
++ * out-of-memory. Otherwise returns 0.
++ */
++extern int
++ttm_fence_device_init(int num_classes,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_fence_device *fdev,
++ const struct ttm_fence_class_init *init,
++ bool replicate_init,
++ const struct ttm_fence_driver *driver);
++
++/**
++ * function ttm_fence_device_release
++ *
++ * @fdev: Pointer to the fence device.
++ *
++ * Release all resources held by a fence device. Note that before
++ * this function is called, the caller must have made sure all fence
++ * objects belonging to this fence device are completely signaled.
++ */
++
++extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
++
++/**
++ * ttm_fence_handler - the fence handler.
++ *
++ * @fdev: Pointer to the fence device.
++ * @fence_class: Fence class that signals.
++ * @sequence: Signaled sequence.
++ * @type: Types that signal.
++ * @error: Error from the engine.
++ *
++ * This function signals all fences with a sequence previous to the
++ * @sequence argument, and belonging to @fence_class. The signaled fence
++ * types are provided in @type. If error is non-zero, the error member
++ * of the fence with sequence = @sequence is set to @error. This value
++ * may be reported back to user-space, indicating, for example an illegal
++ * 3D command or illegal mpeg data.
++ *
++ * This function is typically called from the driver::poll method when the
++ * command sequence preceding the fence marker has executed. It should be
++ * called with the ttm_fence_class_manager::lock held in write mode and
++ * may be called from interrupt context.
++ */
++
++extern void
++ttm_fence_handler(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t sequence, uint32_t type, uint32_t error);
++
++/**
++ * ttm_fence_driver_from_dev
++ *
++ * @fdev: The ttm fence device.
++ *
++ * Returns a pointer to the fence driver struct.
++ */
++
++static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(struct
++ ttm_fence_device
++ *fdev)
++{
++ return fdev->driver;
++}
++
++/**
++ * ttm_fence_driver
++ *
++ * @fence: Pointer to a ttm fence object.
++ *
++ * Returns a pointer to the fence driver struct.
++ */
++
++static inline const struct ttm_fence_driver *ttm_fence_driver(struct
++ ttm_fence_object
++ *fence)
++{
++ return ttm_fence_driver_from_dev(fence->fdev);
++}
++
++/**
++ * ttm_fence_fc
++ *
++ * @fence: Pointer to a ttm fence object.
++ *
++ * Returns a pointer to the struct ttm_fence_class_manager for the
++ * fence class of @fence.
++ */
++
++static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
++ ttm_fence_object
++ *fence)
++{
++ return &fence->fdev->fence_class[fence->fence_class];
++}
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_fence_user.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,242 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "ttm/ttm_fence_user.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_fence_driver.h"
++#include "ttm/ttm_userobj_api.h"
++
++/**
++ * struct ttm_fence_user_object
++ *
++ * @base: The base object used for user-space visibility and refcounting.
++ *
++ * @fence: The fence object itself.
++ *
++ */
++
++struct ttm_fence_user_object {
++ struct ttm_base_object base;
++ struct ttm_fence_object fence;
++};
++
++static struct ttm_fence_user_object *ttm_fence_user_object_lookup(struct
++ ttm_object_file
++ *tfile,
++ uint32_t
++ handle)
++{
++ struct ttm_base_object *base;
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (unlikely(base == NULL)) {
++ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ if (unlikely(base->object_type != ttm_fence_type)) {
++ ttm_base_object_unref(&base);
++ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ return container_of(base, struct ttm_fence_user_object, base);
++}
++
++/*
++ * The fence object destructor.
++ */
++
++static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_user_object *ufence =
++ container_of(fence, struct ttm_fence_user_object, fence);
++
++ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
++ kfree(ufence);
++}
++
++/*
++ * The base object destructor. We basically only unreference the
++ * attached fence object.
++ */
++
++static void ttm_fence_user_release(struct ttm_base_object **p_base)
++{
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base = *p_base;
++ struct ttm_fence_object *fence;
++
++ *p_base = NULL;
++
++ if (unlikely(base == NULL))
++ return;
++
++ ufence = container_of(base, struct ttm_fence_user_object, base);
++ fence = &ufence->fence;
++ ttm_fence_object_unref(&fence);
++}
++
++int
++ttm_fence_user_create(struct ttm_fence_device *fdev,
++ struct ttm_object_file *tfile,
++ uint32_t fence_class,
++ uint32_t fence_types,
++ uint32_t create_flags,
++ struct ttm_fence_object **fence, uint32_t * user_handle)
++{
++ int ret;
++ struct ttm_fence_object *tmp;
++ struct ttm_fence_user_object *ufence;
++
++ ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), false, false, false);
++ if (unlikely(ret != 0))
++ return -ENOMEM;
++
++ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
++ if (unlikely(ufence == NULL)) {
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_fence_object_init(fdev,
++ fence_class,
++ fence_types, create_flags,
++ &ttm_fence_user_destroy, &ufence->fence);
++
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ /*
++ * One fence ref is held by the fence ptr we return.
++ * The other one by the base object. Need to up the
++ * fence refcount before we publish this object to
++ * user-space.
++ */
++
++ tmp = ttm_fence_object_ref(&ufence->fence);
++ ret = ttm_base_object_init(tfile, &ufence->base,
++ false, ttm_fence_type,
++ &ttm_fence_user_release, NULL);
++
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ *fence = &ufence->fence;
++ *user_handle = ufence->base.hash.key;
++
++ return 0;
++ out_err1:
++ ttm_fence_object_unref(&tmp);
++ tmp = &ufence->fence;
++ ttm_fence_object_unref(&tmp);
++ return ret;
++ out_err0:
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
++ kfree(ufence);
++ return ret;
++}
++
++int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ int ret;
++ union ttm_fence_signaled_arg *arg = data;
++ struct ttm_fence_object *fence;
++ struct ttm_fence_info info;
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base;
++ ret = 0;
++
++ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
++ if (unlikely(ufence == NULL))
++ return -EINVAL;
++
++ fence = &ufence->fence;
++
++ if (arg->req.flush) {
++ ret = ttm_fence_object_flush(fence, arg->req.fence_type);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++
++ info = ttm_fence_get_info(fence);
++ arg->rep.signaled_types = info.signaled_types;
++ arg->rep.fence_error = info.error;
++
++ out:
++ base = &ufence->base;
++ ttm_base_object_unref(&base);
++ return ret;
++}
++
++int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ int ret;
++ union ttm_fence_finish_arg *arg = data;
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base;
++ struct ttm_fence_object *fence;
++ ret = 0;
++
++ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
++ if (unlikely(ufence == NULL))
++ return -EINVAL;
++
++ fence = &ufence->fence;
++
++ ret = ttm_fence_object_wait(fence,
++ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
++ true, arg->req.fence_type);
++ if (likely(ret == 0)) {
++ struct ttm_fence_info info = ttm_fence_get_info(fence);
++
++ arg->rep.signaled_types = info.signaled_types;
++ arg->rep.fence_error = info.error;
++ }
++
++ base = &ufence->base;
++ ttm_base_object_unref(&base);
++
++ return ret;
++}
++
++int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_fence_unref_arg *arg = data;
++ int ret = 0;
++
++ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
++ return ret;
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_fence_user.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_fence_user.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,147 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef TTM_FENCE_USER_H
++#define TTM_FENCE_USER_H
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#endif
++
++#define TTM_FENCE_MAJOR 0
++#define TTM_FENCE_MINOR 1
++#define TTM_FENCE_PL 0
++#define TTM_FENCE_DATE "080819"
++
++/**
++ * struct ttm_fence_signaled_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to flush. Input.
++ *
++ * @flush: Boolean. Flush the indicated fence_types. Input.
++ *
++ * Argument to the TTM_FENCE_SIGNALED ioctl.
++ */
++
++struct ttm_fence_signaled_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ int32_t flush;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_fence_rep
++ *
++ * @signaled_types: Fence type that has signaled.
++ *
++ * @fence_error: Command execution error.
++ * Hardware errors that are consequences of the execution
++ * of the command stream preceding the fence are reported
++ * here.
++ *
++ * Output argument to the TTM_FENCE_SIGNALED and
++ * TTM_FENCE_FINISH ioctls.
++ */
++
++struct ttm_fence_rep {
++ uint32_t signaled_types;
++ uint32_t fence_error;
++};
++
++union ttm_fence_signaled_arg {
++ struct ttm_fence_signaled_req req;
++ struct ttm_fence_rep rep;
++};
++
++/*
++ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
++ *
++ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
++#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_fence_finish_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to finish.
++ *
++ * @mode: Wait mode.
++ *
++ * Input to the TTM_FENCE_FINISH ioctl.
++ */
++
++struct ttm_fence_finish_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ uint32_t mode;
++ uint32_t pad64;
++};
++
++union ttm_fence_finish_arg {
++ struct ttm_fence_finish_req req;
++ struct ttm_fence_rep rep;
++};
++
++/**
++ * struct ttm_fence_unref_arg
++ *
++ * @handle: Handle to the fence object.
++ *
++ * Argument to the TTM_FENCE_UNREF ioctl.
++ */
++
++struct ttm_fence_unref_arg {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * Ioctl offsets from extension start.
++ */
++
++#define TTM_FENCE_SIGNALED 0x01
++#define TTM_FENCE_FINISH 0x02
++#define TTM_FENCE_UNREF 0x03
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_lock.c b/drivers/gpu/drm/psb/ttm/ttm_lock.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_lock.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_lock.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,162 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm/ttm_lock.h"
++#include <asm/atomic.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++void ttm_lock_init(struct ttm_lock *lock)
++{
++ init_waitqueue_head(&lock->queue);
++ atomic_set(&lock->write_lock_pending, 0);
++ atomic_set(&lock->readers, 0);
++ lock->kill_takers = false;
++ lock->signal = SIGKILL;
++}
++
++void ttm_read_unlock(struct ttm_lock *lock)
++{
++ if (atomic_dec_and_test(&lock->readers))
++ wake_up_all(&lock->queue);
++}
++
++int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
++{
++ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
++ int ret;
++
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->write_lock_pending) == 0);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
++ if (ret)
++ return -ERESTART;
++ }
++
++ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
++ int ret;
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->readers) != -1);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->readers) != -1);
++ if (ret)
++ return -ERESTART;
++ }
++
++ if (unlikely(lock->kill_takers)) {
++ send_sig(lock->signal, current, 0);
++ ttm_read_unlock(lock);
++ return -ERESTART;
++ }
++
++ return 0;
++}
++
++static int __ttm_write_unlock(struct ttm_lock *lock)
++{
++ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
++ return -EINVAL;
++ wake_up_all(&lock->queue);
++ return 0;
++}
++
++static void ttm_write_lock_remove(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
++ int ret;
++
++ *p_base = NULL;
++ ret = __ttm_write_unlock(lock);
++ BUG_ON(ret != 0);
++}
++
++int ttm_write_lock(struct ttm_lock *lock,
++ bool interruptible,
++ struct ttm_object_file *tfile)
++{
++ int ret = 0;
++
++ atomic_inc(&lock->write_lock_pending);
++
++ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->readers) == 0);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->readers) == 0);
++
++ if (ret) {
++ if (atomic_dec_and_test(&lock->write_lock_pending))
++ wake_up_all(&lock->queue);
++ return -ERESTART;
++ }
++ }
++
++ if (atomic_dec_and_test(&lock->write_lock_pending))
++ wake_up_all(&lock->queue);
++
++ if (unlikely(lock->kill_takers)) {
++ send_sig(lock->signal, current, 0);
++ __ttm_write_unlock(lock);
++ return -ERESTART;
++ }
++
++ /*
++ * Add a base-object, the destructor of which will
++ * make sure the lock is released if the client dies
++ * while holding it.
++ */
++
++ ret = ttm_base_object_init(tfile, &lock->base, false,
++ ttm_lock_type, &ttm_write_lock_remove, NULL);
++ if (ret)
++ (void)__ttm_write_unlock(lock);
++
++ return ret;
++}
++
++int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile)
++{
++ return ttm_ref_object_base_unref(tfile,
++ lock->base.hash.key, TTM_REF_USAGE);
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_lock.h b/drivers/gpu/drm/psb/ttm/ttm_lock.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_lock.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_lock.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,181 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++/** @file ttm_lock.h
++ * This file implements a simple replacement for the buffer manager use
++ * of the DRM heavyweight hardware lock.
++ * The lock is a read-write lock. Taking it in read mode is fast, and
++ * intended for in-kernel use only.
++ * Taking it in write mode is slow.
++ *
++ * The write mode is used only when there is a need to block all
++ * user-space processes from validating buffers.
++ * It's allowed to leave kernel space with the write lock held.
++ * If a user-space process dies while having the write-lock,
++ * it will be released during the file descriptor release.
++ *
++ * The read lock is typically placed at the start of an IOCTL- or
++ * user-space callable function that may end up allocating a memory area.
++ * This includes setstatus, super-ioctls and faults; the latter may move
++ * unmappable regions to mappable. It's a bug to leave kernel space with the
++ * read lock held.
++ *
++ * Both read- and write lock taking is interruptible for low signal-delivery
++ * latency. The locking functions will return -ERESTART if interrupted by a
++ * signal.
++ *
++ * Locking order: The lock should be taken BEFORE any TTM mutexes
++ * or spinlocks.
++ *
++ * Typical usages:
++ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
++ * stops it from being repopulated.
++ * b) out-of-VRAM or out-of-aperture space, in which case the process
++ * receiving the out-of-space notification may take the lock in write mode
++ * and evict all buffers prior to start validating its own buffers.
++ */
++
++#ifndef _TTM_LOCK_H_
++#define _TTM_LOCK_H_
++
++#include "ttm_object.h"
++#include <linux/wait.h>
++#include <asm/atomic.h>
++
++/**
++ * struct ttm_lock
++ *
++ * @base: ttm base object used solely to release the lock if the client
++ * holding the lock dies.
++ * @queue: Queue for processes waiting for lock change-of-status.
++ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
++ * write lock starvation.
++ * @readers: The lock status: A negative number indicates that a write lock is
++ * held. Positive values indicate number of concurrent readers.
++ */
++
++struct ttm_lock {
++ struct ttm_base_object base;
++ wait_queue_head_t queue;
++ atomic_t write_lock_pending;
++ atomic_t readers;
++ bool kill_takers;
++ int signal;
++};
++
++/**
++ * ttm_lock_init
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * Initializes the lock.
++ */
++extern void ttm_lock_init(struct ttm_lock *lock);
++
++/**
++ * ttm_read_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Releases a read lock.
++ */
++
++extern void ttm_read_unlock(struct ttm_lock *lock);
++
++/**
++ * ttm_read_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ *
++ * Takes the lock in read mode.
++ * Returns:
++ * -ERESTART If interrupted by a signal and interruptible is true.
++ */
++
++extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
++
++/**
++ * ttm_write_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
++ * application taking the lock.
++ *
++ * Takes the lock in write mode.
++ * Returns:
++ * -ERESTART If interrupted by a signal and interruptible is true.
++ * -ENOMEM: Out of memory when locking.
++ */
++extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
++ struct ttm_object_file *tfile);
++
++/**
++ * ttm_write_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
++ * application taking the lock.
++ *
++ * Releases a write lock.
++ * Returns:
++ * -EINVAL If the lock was not held.
++ */
++extern int ttm_write_unlock(struct ttm_lock *lock,
++ struct ttm_object_file *tfile);
++
++/**
++ * ttm_lock_set_kill
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @val: Boolean whether to kill processes taking the lock.
++ * @signal: Signal to send to the process taking the lock.
++ *
++ * The kill-when-taking-lock functionality is used to kill processes that keep
++ * on using the TTM functionality when its resources have been taken down, for
++ * example when the X server exits. A typical sequence would look like this:
++ * - X server takes lock in write mode.
++ * - ttm_lock_set_kill() is called with @val set to true.
++ * - As part of X server exit, TTM resources are taken down.
++ * - X server releases the lock on file release.
++ * - Another dri client wants to render, takes the lock and is killed.
++ *
++ */
++
++static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, int signal)
++{
++ lock->kill_takers = val;
++ if (val)
++ lock->signal = signal;
++}
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_memory.c b/drivers/gpu/drm/psb/ttm/ttm_memory.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_memory.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_memory.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,232 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "ttm/ttm_memory.h"
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/mm.h>
++
++#define TTM_MEMORY_ALLOC_RETRIES 4
++
++/**
++ * At this point we only support a single shrink callback.
++ * Extend this if needed, perhaps using a linked list of callbacks.
++ * Note that this function is reentrant:
++ * many threads may try to swap out at any given time.
++ */
++
++static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
++ uint64_t extra)
++{
++ int ret;
++ struct ttm_mem_shrink *shrink;
++ uint64_t target;
++ uint64_t total_target;
++
++ spin_lock(&glob->lock);
++ if (glob->shrink == NULL)
++ goto out;
++
++ if (from_workqueue) {
++ target = glob->swap_limit;
++ total_target = glob->total_memory_swap_limit;
++ } else if (capable(CAP_SYS_ADMIN)) {
++ total_target = glob->emer_total_memory;
++ target = glob->emer_memory;
++ } else {
++ total_target = glob->max_total_memory;
++ target = glob->max_memory;
++ }
++
++ total_target = (extra >= total_target) ? 0: total_target - extra;
++ target = (extra >= target) ? 0: target - extra;
++
++ while (glob->used_memory > target ||
++ glob->used_total_memory > total_target) {
++ shrink = glob->shrink;
++ spin_unlock(&glob->lock);
++ ret = shrink->do_shrink(shrink);
++ spin_lock(&glob->lock);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++ out:
++ spin_unlock(&glob->lock);
++}
++
++static void ttm_shrink_work(struct work_struct *work)
++{
++ struct ttm_mem_global *glob =
++ container_of(work, struct ttm_mem_global, work);
++
++ ttm_shrink(glob, true, 0ULL);
++}
++
++int ttm_mem_global_init(struct ttm_mem_global *glob)
++{
++ struct sysinfo si;
++ uint64_t mem;
++
++ spin_lock_init(&glob->lock);
++ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
++ INIT_WORK(&glob->work, ttm_shrink_work);
++ init_waitqueue_head(&glob->queue);
++
++ si_meminfo(&si);
++
++ mem = si.totalram - si.totalhigh;
++ mem *= si.mem_unit;
++
++ glob->max_memory = mem >> 1;
++ glob->emer_memory = glob->max_memory + (mem >> 2);
++ glob->swap_limit = glob->max_memory - (mem >> 5);
++ glob->used_memory = 0;
++ glob->used_total_memory = 0;
++ glob->shrink = NULL;
++
++ mem = si.totalram;
++ mem *= si.mem_unit;
++
++ glob->max_total_memory = mem >> 1;
++ glob->emer_total_memory = glob->max_total_memory + (mem >> 2);
++ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5);
++
++ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n",
++ glob->max_total_memory >> 20);
++ printk(KERN_INFO "TTM available object memory: %llu MiB\n",
++ glob->max_memory >> 20);
++ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n",
++ glob->swap_limit >> 20);
++
++ return 0;
++}
++
++void ttm_mem_global_release(struct ttm_mem_global *glob)
++{
++ printk(KERN_INFO "Used total memory is %llu bytes.\n",
++ (unsigned long long)glob->used_total_memory);
++ flush_workqueue(glob->swap_queue);
++ destroy_workqueue(glob->swap_queue);
++ glob->swap_queue = NULL;
++}
++
++static inline void ttm_check_swapping(struct ttm_mem_global *glob)
++{
++ bool needs_swapping;
++
++ spin_lock(&glob->lock);
++ needs_swapping = (glob->used_memory > glob->swap_limit ||
++ glob->used_total_memory >
++ glob->total_memory_swap_limit);
++ spin_unlock(&glob->lock);
++
++ if (unlikely(needs_swapping))
++ (void)queue_work(glob->swap_queue, &glob->work);
++
++}
++
++void ttm_mem_global_free(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem)
++{
++ spin_lock(&glob->lock);
++ glob->used_total_memory -= amount;
++ if (!himem)
++ glob->used_memory -= amount;
++ wake_up_all(&glob->queue);
++ spin_unlock(&glob->lock);
++}
++
++static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem, bool reserve)
++{
++ uint64_t limit;
++ uint64_t lomem_limit;
++ int ret = -ENOMEM;
++
++ spin_lock(&glob->lock);
++
++ if (capable(CAP_SYS_ADMIN)) {
++ limit = glob->emer_total_memory;
++ lomem_limit = glob->emer_memory;
++ } else {
++ limit = glob->max_total_memory;
++ lomem_limit = glob->max_memory;
++ }
++
++ if (unlikely(glob->used_total_memory + amount > limit))
++ goto out_unlock;
++ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
++ goto out_unlock;
++
++ if (reserve) {
++ glob->used_total_memory += amount;
++ if (!himem)
++ glob->used_memory += amount;
++ }
++ ret = 0;
++ out_unlock:
++ spin_unlock(&glob->lock);
++ ttm_check_swapping(glob);
++
++ return ret;
++}
++
++int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
++ bool no_wait, bool interruptible, bool himem)
++{
++ int count = TTM_MEMORY_ALLOC_RETRIES;
++
++ while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true) != 0)) {
++ if (no_wait)
++ return -ENOMEM;
++ if (unlikely(count-- == 0))
++ return -ENOMEM;
++ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
++ }
++
++ return 0;
++}
++
++size_t ttm_round_pot(size_t size)
++{
++ if ((size & (size - 1)) == 0)
++ return size;
++ else if (size > PAGE_SIZE)
++ return PAGE_ALIGN(size);
++ else {
++ size_t tmp_size = 4;
++
++ while (tmp_size < size)
++ tmp_size <<= 1;
++
++ return tmp_size;
++ }
++ return 0;
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_memory.h b/drivers/gpu/drm/psb/ttm/ttm_memory.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_memory.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_memory.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,154 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#ifndef TTM_MEMORY_H
++#define TTM_MEMORY_H
++
++#include <linux/workqueue.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++
++/**
++ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
++ *
++ * @do_shrink: The callback function.
++ *
++ * Arguments to the do_shrink functions are intended to be passed using
++ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
++ * and can be accessed using container_of().
++ */
++
++struct ttm_mem_shrink {
++ int (*do_shrink) (struct ttm_mem_shrink *);
++};
++
++/**
++ * struct ttm_mem_global - Global memory accounting structure.
++ *
++ * @shrink: A single callback to shrink TTM memory usage. Extend this
++ * to a linked list to be able to handle multiple callbacks when needed.
++ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
++ * need a separate workqueue since it will spend a lot of time waiting
++ * for the GPU, and this will otherwise block other workqueue tasks(?)
++ * At this point we use only a single-threaded workqueue.
++ * @work: The workqueue callback for the shrink queue.
++ * @queue: Wait queue for processes suspended waiting for memory.
++ * @lock: Lock to protect the @shrink - and the memory accounting members,
++ * that is, essentially the whole structure with some exceptions.
++ * @emer_memory: Lowmem memory limit available for root.
++ * @max_memory: Lowmem memory limit available for non-root.
++ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
++ * @used_memory: Currently used lowmem memory.
++ * @used_total_memory: Currently used total (lowmem + highmem) memory.
++ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
++ * kicks in.
++ * @max_total_memory: Total memory available to non-root processes.
++ * @emer_total_memory: Total memory available to root processes.
++ *
++ * Note that this structure is not per device. It should be global for all
++ * graphics devices.
++ */
++
++struct ttm_mem_global {
++ struct ttm_mem_shrink *shrink;
++ struct workqueue_struct *swap_queue;
++ struct work_struct work;
++ wait_queue_head_t queue;
++ spinlock_t lock;
++ uint64_t emer_memory;
++ uint64_t max_memory;
++ uint64_t swap_limit;
++ uint64_t used_memory;
++ uint64_t used_total_memory;
++ uint64_t total_memory_swap_limit;
++ uint64_t max_total_memory;
++ uint64_t emer_total_memory;
++};
++
++/**
++ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
++ *
++ * @shrink: The object to initialize.
++ * @func: The callback function.
++ */
++
++static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
++ int (*func) (struct ttm_mem_shrink *))
++{
++ shrink->do_shrink = func;
++}
++
++/**
++ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
++ *
++ * @glob: The struct ttm_mem_global object to register with.
++ * @shrink: An initialized struct ttm_mem_shrink object to register.
++ *
++ * Returns:
++ * -EBUSY: There's already a callback registered. (May change).
++ */
++
++static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
++ struct ttm_mem_shrink *shrink)
++{
++ spin_lock(&glob->lock);
++ if (glob->shrink != NULL) {
++ spin_unlock(&glob->lock);
++ return -EBUSY;
++ }
++ glob->shrink = shrink;
++ spin_unlock(&glob->lock);
++ return 0;
++}
++
++/**
++ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
++ *
++ * @glob: The struct ttm_mem_global object to unregister from.
++ * @shrink: A previously registered struct ttm_mem_shrink object.
++ *
++ */
++
++static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
++ struct ttm_mem_shrink *shrink)
++{
++ spin_lock(&glob->lock);
++ BUG_ON(glob->shrink != shrink);
++ glob->shrink = NULL;
++ spin_unlock(&glob->lock);
++}
++
++extern int ttm_mem_global_init(struct ttm_mem_global *glob);
++extern void ttm_mem_global_release(struct ttm_mem_global *glob);
++extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
++ bool no_wait, bool interruptible, bool himem);
++extern void ttm_mem_global_free(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem);
++extern size_t ttm_round_pot(size_t size);
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_object.c b/drivers/gpu/drm/psb/ttm/ttm_object.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_object.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_object.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,444 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++/** @file ttm_ref_object.c
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++/**
++ * struct ttm_object_file
++ *
++ * @tdev: Pointer to the ttm_object_device.
++ *
++ * @lock: Lock that protects the ref_list list and the
++ * ref_hash hash tables.
++ *
++ * @ref_list: List of ttm_ref_objects to be destroyed at
++ * file release.
++ *
++ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
++ * for fast lookup of ref objects given a base object.
++ */
++
++#include "ttm/ttm_object.h"
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <asm/atomic.h>
++
++struct ttm_object_file {
++ struct ttm_object_device *tdev;
++ rwlock_t lock;
++ struct list_head ref_list;
++ struct drm_open_hash ref_hash[TTM_REF_NUM];
++ struct kref refcount;
++};
++
++/**
++ * struct ttm_object_device
++ *
++ * @object_lock: lock that protects the object_hash hash table.
++ *
++ * @object_hash: hash table for fast lookup of object global names.
++ *
++ * @object_count: Per device object count.
++ *
++ * This is the per-device data structure needed for ttm object management.
++ */
++
++struct ttm_object_device {
++ rwlock_t object_lock;
++ struct drm_open_hash object_hash;
++ atomic_t object_count;
++ struct ttm_mem_global *mem_glob;
++};
++
++/**
++ * struct ttm_ref_object
++ *
++ * @hash: Hash entry for the per-file object reference hash.
++ *
++ * @head: List entry for the per-file list of ref-objects.
++ *
++ * @kref: Ref count.
++ *
++ * @obj: Base object this ref object is referencing.
++ *
++ * @ref_type: Type of ref object.
++ *
++ * This is similar to an idr object, but it also has a hash table entry
++ * that allows lookup with a pointer to the referenced object as a key. In
++ * that way, one can easily detect whether a base object is referenced by
++ * a particular ttm_object_file. It also carries a ref count to avoid creating
++ * multiple ref objects if a ttm_object_file references the same base object more
++ * than once.
++ */
++
++struct ttm_ref_object {
++ struct drm_hash_item hash;
++ struct list_head head;
++ struct kref kref;
++ struct ttm_base_object *obj;
++ enum ttm_ref_type ref_type;
++ struct ttm_object_file *tfile;
++};
++
++static inline struct ttm_object_file *
++ttm_object_file_ref(struct ttm_object_file *tfile)
++{
++ kref_get(&tfile->refcount);
++ return tfile;
++}
++
++static void ttm_object_file_destroy(struct kref *kref)
++{
++ struct ttm_object_file *tfile =
++ container_of(kref, struct ttm_object_file, refcount);
++
++// printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile);
++ kfree(tfile);
++}
++
++
++static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
++{
++ struct ttm_object_file *tfile = *p_tfile;
++
++ *p_tfile = NULL;
++ kref_put(&tfile->refcount, ttm_object_file_destroy);
++}
++
++
++int ttm_base_object_init(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ bool shareable,
++ enum ttm_object_type object_type,
++ void (*refcount_release) (struct ttm_base_object **),
++ void (*ref_obj_release) (struct ttm_base_object *,
++ enum ttm_ref_type ref_type))
++{
++ struct ttm_object_device *tdev = tfile->tdev;
++ int ret;
++
++ base->shareable = shareable;
++ base->tfile = ttm_object_file_ref(tfile);
++ base->refcount_release = refcount_release;
++ base->ref_obj_release = ref_obj_release;
++ base->object_type = object_type;
++ write_lock(&tdev->object_lock);
++ kref_init(&base->refcount);
++ ret = drm_ht_just_insert_please(&tdev->object_hash,
++ &base->hash,
++ (unsigned long)base, 31, 0, 0);
++ write_unlock(&tdev->object_lock);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ ttm_base_object_unref(&base);
++
++ return 0;
++ out_err1:
++ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++ out_err0:
++ return ret;
++}
++
++static void ttm_release_base(struct kref *kref)
++{
++ struct ttm_base_object *base =
++ container_of(kref, struct ttm_base_object, refcount);
++ struct ttm_object_device *tdev = base->tfile->tdev;
++
++ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++ write_unlock(&tdev->object_lock);
++ if (base->refcount_release) {
++ ttm_object_file_unref(&base->tfile);
++ base->refcount_release(&base);
++ }
++ write_lock(&tdev->object_lock);
++}
++
++void ttm_base_object_unref(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct ttm_object_device *tdev = base->tfile->tdev;
++
++ // printk(KERN_INFO "TTM base object unref.\n");
++ *p_base = NULL;
++
++ /*
++ * Need to take the lock here to avoid racing with
++ * users trying to look up the object.
++ */
++
++ write_lock(&tdev->object_lock);
++ (void)kref_put(&base->refcount, &ttm_release_base);
++ write_unlock(&tdev->object_lock);
++}
++
++struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
++ uint32_t key)
++{
++ struct ttm_object_device *tdev = tfile->tdev;
++ struct ttm_base_object *base;
++ struct drm_hash_item *hash;
++ int ret;
++
++ read_lock(&tdev->object_lock);
++ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
++
++ if (likely(ret == 0)) {
++ base = drm_hash_entry(hash, struct ttm_base_object, hash);
++ kref_get(&base->refcount);
++ }
++ read_unlock(&tdev->object_lock);
++
++ if (unlikely(ret != 0))
++ return NULL;
++
++ if (tfile != base->tfile && !base->shareable) {
++ printk(KERN_ERR "Attempted access of non-shareable object.\n");
++ ttm_base_object_unref(&base);
++ return NULL;
++ }
++
++ return base;
++}
++
++int ttm_ref_object_add(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ enum ttm_ref_type ref_type, bool *existed)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++ struct ttm_ref_object *ref;
++ struct drm_hash_item *hash;
++ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++ int ret = -EINVAL;
++
++ if (existed != NULL)
++ *existed = true;
++
++ while (ret == -EINVAL) {
++ read_lock(&tfile->lock);
++ ret = drm_ht_find_item(ht, base->hash.key, &hash);
++
++ if (ret == 0) {
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ kref_get(&ref->kref);
++ read_unlock(&tfile->lock);
++ break;
++ }
++
++ read_unlock(&tfile->lock);
++ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
++ if (unlikely(ref == NULL)) {
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ return -ENOMEM;
++ }
++
++ ref->hash.key = base->hash.key;
++ ref->obj = base;
++ ref->tfile = tfile;
++ ref->ref_type = ref_type;
++ kref_init(&ref->kref);
++
++ write_lock(&tfile->lock);
++ ret = drm_ht_insert_item(ht, &ref->hash);
++
++ if (likely(ret == 0)) {
++ list_add_tail(&ref->head, &tfile->ref_list);
++ kref_get(&base->refcount);
++ write_unlock(&tfile->lock);
++ if (existed != NULL)
++ *existed = false;
++ break;
++ }
++
++ write_unlock(&tfile->lock);
++ BUG_ON(ret != -EINVAL);
++
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ kfree(ref);
++ }
++
++ return ret;
++}
++
++static void ttm_ref_object_release(struct kref *kref)
++{
++ struct ttm_ref_object *ref =
++ container_of(kref, struct ttm_ref_object, kref);
++ struct ttm_base_object *base = ref->obj;
++ struct ttm_object_file *tfile = ref->tfile;
++ struct drm_open_hash *ht;
++ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++
++ ht = &tfile->ref_hash[ref->ref_type];
++ (void)drm_ht_remove_item(ht, &ref->hash);
++ list_del(&ref->head);
++ write_unlock(&tfile->lock);
++
++ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
++ base->ref_obj_release(base, ref->ref_type);
++
++ ttm_base_object_unref(&ref->obj);
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ kfree(ref);
++ write_lock(&tfile->lock);
++}
++
++int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++ unsigned long key, enum ttm_ref_type ref_type)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++ struct ttm_ref_object *ref;
++ struct drm_hash_item *hash;
++ int ret;
++
++ write_lock(&tfile->lock);
++ ret = drm_ht_find_item(ht, key, &hash);
++ if (unlikely(ret != 0)) {
++ write_unlock(&tfile->lock);
++ return -EINVAL;
++ }
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ kref_put(&ref->kref, ttm_ref_object_release);
++ write_unlock(&tfile->lock);
++ return 0;
++}
++
++void ttm_object_file_release(struct ttm_object_file **p_tfile)
++{
++ struct ttm_ref_object *ref;
++ struct list_head *list;
++ unsigned int i;
++ struct ttm_object_file *tfile = *p_tfile;
++
++ *p_tfile = NULL;
++ write_lock(&tfile->lock);
++
++ /*
++ * Since we release the lock within the loop, we have to
++ * restart it from the beginning each time.
++ */
++
++ while (!list_empty(&tfile->ref_list)) {
++ list = tfile->ref_list.next;
++ ref = list_entry(list, struct ttm_ref_object, head);
++ ttm_ref_object_release(&ref->kref);
++ }
++
++ for (i = 0; i < TTM_REF_NUM; ++i) {
++ drm_ht_remove(&tfile->ref_hash[i]);
++ }
++
++ write_unlock(&tfile->lock);
++ ttm_object_file_unref(&tfile);
++}
++
++struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
++ unsigned int hash_order)
++{
++ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
++ unsigned int i;
++ unsigned int j = 0;
++ int ret;
++
++ if (unlikely(tfile == NULL))
++ return NULL;
++
++ rwlock_init(&tfile->lock);
++ tfile->tdev = tdev;
++ kref_init(&tfile->refcount);
++ INIT_LIST_HEAD(&tfile->ref_list);
++
++ for (i = 0; i < TTM_REF_NUM; ++i) {
++ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
++ if (ret) {
++ j = i;
++ goto out_err;
++ }
++ }
++
++ return tfile;
++ out_err:
++ for (i = 0; i < j; ++i) {
++ drm_ht_remove(&tfile->ref_hash[i]);
++ }
++ kfree(tfile);
++
++ return NULL;
++}
++
++struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
++ *mem_glob,
++ unsigned int hash_order)
++{
++ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
++ int ret;
++
++ if (unlikely(tdev == NULL))
++ return NULL;
++
++ tdev->mem_glob = mem_glob;
++ rwlock_init(&tdev->object_lock);
++ atomic_set(&tdev->object_count, 0);
++ ret = drm_ht_create(&tdev->object_hash, hash_order);
++
++ if (likely(ret == 0))
++ return tdev;
++
++ kfree(tdev);
++ return NULL;
++}
++
++void ttm_object_device_release(struct ttm_object_device **p_tdev)
++{
++ struct ttm_object_device *tdev = *p_tdev;
++
++ *p_tdev = NULL;
++
++ write_lock(&tdev->object_lock);
++ drm_ht_remove(&tdev->object_hash);
++ write_unlock(&tdev->object_lock);
++
++ kfree(tdev);
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_object.h b/drivers/gpu/drm/psb/ttm/ttm_object.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_object.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_object.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,269 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++/** @file ttm_ref_object.h
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++#ifndef _TTM_OBJECT_H_
++#define _TTM_OBJECT_H_
++
++#include <linux/list.h>
++#include <drm/drm_hashtab.h>
++#include <linux/kref.h>
++#include <ttm/ttm_memory.h>
++
++/**
++ * enum ttm_ref_type
++ *
++ * Describes what type of reference a ref object holds.
++ *
++ * TTM_REF_USAGE is a simple refcount on a base object.
++ *
++ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
++ * buffer object.
++ *
++ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
++ * buffer object.
++ *
++ */
++
++enum ttm_ref_type {
++ TTM_REF_USAGE,
++ TTM_REF_SYNCCPU_READ,
++ TTM_REF_SYNCCPU_WRITE,
++ TTM_REF_NUM
++};
++
++/**
++ * enum ttm_object_type
++ *
++ * One entry per ttm object type.
++ * Device-specific types should use the
++ * ttm_driver_typex types.
++ */
++
++enum ttm_object_type {
++ ttm_fence_type,
++ ttm_buffer_type,
++ ttm_lock_type,
++ ttm_driver_type0 = 256,
++ ttm_driver_type1
++};
++
++struct ttm_object_file;
++struct ttm_object_device;
++
++/**
++ * struct ttm_base_object
++ *
++ * @hash: hash entry for the per-device object hash.
++ * @type: derived type this object is base class for.
++ * @shareable: Other ttm_object_files can access this object.
++ *
++ * @tfile: Pointer to ttm_object_file of the creator.
++ * NULL if the object was not created by a user request.
++ * (kernel object).
++ *
++ * @refcount: Number of references to this object, not
++ * including the hash entry. A reference to a base object can
++ * only be held by a ref object.
++ *
++ * @refcount_release: A function to be called when there are
++ * no more references to this object. This function should
++ * destroy the object (or make sure destruction eventually happens),
++ * and when it is called, the object has
++ * already been taken out of the per-device hash. The parameter
++ * "base" should be set to NULL by the function.
++ *
++ * @ref_obj_release: A function to be called when a reference object
++ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
++ * this function may, for example, release a lock held by a user-space
++ * process.
++ *
++ * This struct is intended to be used as a base struct for objects that
++ * are visible to user-space. It provides a global name, race-safe
++ * access and refcounting, minimal access contol and hooks for unref actions.
++ */
++
++struct ttm_base_object {
++ struct drm_hash_item hash;
++ enum ttm_object_type object_type;
++ bool shareable;
++ struct ttm_object_file *tfile;
++ struct kref refcount;
++ void (*refcount_release) (struct ttm_base_object ** base);
++ void (*ref_obj_release) (struct ttm_base_object * base,
++ enum ttm_ref_type ref_type);
++};
++
++/**
++ * ttm_base_object_init
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @base: The struct ttm_base_object to initialize.
++ * @shareable: This object is shareable with other applcations.
++ * (different @tfile pointers.)
++ * @type: The object type.
++ * @refcount_release: See the struct ttm_base_object description.
++ * @ref_obj_release: See the struct ttm_base_object description.
++ *
++ * Initializes a struct ttm_base_object.
++ */
++
++extern int ttm_base_object_init(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ bool shareable,
++ enum ttm_object_type type,
++ void (*refcount_release) (struct ttm_base_object
++ **),
++ void (*ref_obj_release) (struct ttm_base_object
++ *,
++ enum ttm_ref_type
++ ref_type));
++
++/**
++ * ttm_base_object_lookup
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @key: Hash key
++ *
++ * Looks up a struct ttm_base_object with the key @key.
++ * Also verifies that the object is visible to the application, by
++ * comparing the @tfile argument and checking the object shareable flag.
++ */
++
++extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
++ *tfile, uint32_t key);
++
++/**
++ * ttm_base_object_unref
++ *
++ * @p_base: Pointer to a pointer referncing a struct ttm_base_object.
++ *
++ * Decrements the base object refcount and clears the pointer pointed to by
++ * p_base.
++ */
++
++extern void ttm_base_object_unref(struct ttm_base_object **p_base);
++
++/**
++ * ttm_ref_object_add.
++ *
++ * @tfile: A struct ttm_object_file representing the application owning the
++ * ref_object.
++ * @base: The base object to reference.
++ * @ref_type: The type of reference.
++ * @existed: Upon completion, indicates that an identical reference object
++ * already existed, and the refcount was upped on that object instead.
++ *
++ * Adding a ref object to a base object is basically like referencing the
++ * base object, but a user-space application holds the reference. When the
++ * file corresponding to @tfile is closed, all its reference objects are
++ * deleted. A reference object can have different types depending on what
++ * it's intended for. It can be refcounting to prevent object destruction,
++ * When user-space takes a lock, it can add a ref object to that lock to
++ * make sure the lock is released if the application dies. A ref object
++ * will hold a single reference on a base object.
++ */
++extern int ttm_ref_object_add(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ enum ttm_ref_type ref_type, bool *existed);
++/**
++ * ttm_ref_object_base_unref
++ *
++ * @key: Key representing the base object.
++ * @ref_type: Ref type of the ref object to be dereferenced.
++ *
++ * Unreference a ref object with type @ref_type
++ * on the base object identified by @key. If there are no duplicate
++ * references, the ref object will be destroyed and the base object
++ * will be unreferenced.
++ */
++extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++ unsigned long key,
++ enum ttm_ref_type ref_type);
++
++/**
++ * ttm_object_file_init - initialize a struct ttm_object file
++ *
++ * @tdev: A struct ttm_object device this file is initialized on.
++ * @hash_order: Order of the hash table used to hold the reference objects.
++ *
++ * This is typically called by the file_ops::open function.
++ */
++
++extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
++ *tdev,
++ unsigned int hash_order);
++
++/**
++ * ttm_object_file_release - release data held by a ttm_object_file
++ *
++ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
++ * *p_tfile will be set to NULL by this function.
++ *
++ * Releases all data associated by a ttm_object_file.
++ * Typically called from file_ops::release. The caller must
++ * ensure that there are no concurrent users of tfile.
++ */
++
++extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
++
++/**
++ * ttm_object device init - initialize a struct ttm_object_device
++ *
++ * @hash_order: Order of hash table used to hash the base objects.
++ *
++ * This function is typically called on device initialization to prepare
++ * data structures needed for ttm base and ref objects.
++ */
++
++extern struct ttm_object_device *ttm_object_device_init
++ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
++
++/**
++ * ttm_object_device_release - release data held by a ttm_object_device
++ *
++ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
++ * *p_tdev will be set to NULL by this function.
++ *
++ * Releases all data associated by a ttm_object_device.
++ * Typically called from driver::unload before the destruction of the
++ * device private data structure.
++ */
++
++extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,178 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm/ttm_pat_compat.h"
++#include <linux/version.h>
++
++#include <linux/spinlock.h>
++#include <asm/pgtable.h>
++
++#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
++#include <asm/tlbflush.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++
++#ifndef MSR_IA32_CR_PAT
++#define MSR_IA32_CR_PAT 0x0277
++#endif
++
++#ifndef _PAGE_PAT
++#define _PAGE_PAT 0x080
++#endif
++
++static int ttm_has_pat = 0;
++
++/*
++ * Used at resume-time when CPU-s are fired up.
++ */
++
++static void ttm_pat_ipi_handler(void *notused)
++{
++ u32 v1, v2;
++
++ rdmsr(MSR_IA32_CR_PAT, v1, v2);
++ v2 &= 0xFFFFFFF8;
++ v2 |= 0x00000001;
++ wbinvd();
++ wrmsr(MSR_IA32_CR_PAT, v1, v2);
++ wbinvd();
++ __flush_tlb_all();
++}
++
++static void ttm_pat_enable(void)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
++ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1, 1) != 0) {
++#else
++ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0) {
++#endif
++ printk(KERN_ERR "Timed out setting up CPU PAT.\n");
++ }
++}
++
++void ttm_pat_resume(void)
++{
++ if (unlikely(!ttm_has_pat))
++ return;
++
++ ttm_pat_enable();
++}
++
++static int psb_cpu_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ if (action == CPU_ONLINE) {
++ ttm_pat_resume();
++ }
++
++ return 0;
++}
++
++static struct notifier_block psb_nb = {
++ .notifier_call = psb_cpu_callback,
++ .priority = 1
++};
++
++/*
++ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
++ */
++
++void ttm_pat_init(void)
++{
++ if (likely(ttm_has_pat))
++ return;
++
++ if (!boot_cpu_has(X86_FEATURE_PAT)) {
++ return;
++ }
++
++ ttm_pat_enable();
++
++ if (num_present_cpus() > 1)
++ register_cpu_notifier(&psb_nb);
++
++ ttm_has_pat = 1;
++}
++
++void ttm_pat_takedown(void)
++{
++ if (unlikely(!ttm_has_pat))
++ return;
++
++ if (num_present_cpus() > 1)
++ unregister_cpu_notifier(&psb_nb);
++
++ ttm_has_pat = 0;
++}
++
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ if (likely(ttm_has_pat)) {
++ pgprot_val(prot) |= _PAGE_PAT;
++ return prot;
++ } else {
++ return pgprot_noncached(prot);
++ }
++}
++
++#else
++
++void ttm_pat_init(void)
++{
++}
++
++void ttm_pat_takedown(void)
++{
++}
++
++void ttm_pat_resume(void)
++{
++}
++
++#ifdef CONFIG_X86
++#include <asm/pat.h>
++
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ uint32_t cache_bits = ((1) ? _PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS);
++
++ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits);
++}
++#else
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ BUG();
++}
++#endif
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_pat_compat.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,41 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PAT_COMPAT_
++#define _TTM_PAT_COMPAT_
++#include <asm/page.h>
++
++extern void ttm_pat_init(void);
++extern void ttm_pat_takedown(void);
++extern void ttm_pat_resume(void);
++extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_placement_common.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_placement_common.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,96 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PL_COMMON_H_
++#define _TTM_PL_COMMON_H_
++/*
++ * Memory regions for data placement.
++ */
++
++#define TTM_PL_SYSTEM 0
++#define TTM_PL_TT 1
++#define TTM_PL_VRAM 2
++#define TTM_PL_PRIV0 3
++#define TTM_PL_PRIV1 4
++#define TTM_PL_PRIV2 5
++#define TTM_PL_PRIV3 6
++#define TTM_PL_PRIV4 7
++#define TTM_PL_PRIV5 8
++#define TTM_PL_CI 9
++#define TTM_PL_SWAPPED 15
++
++#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
++#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
++#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
++#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
++#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
++#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
++#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
++#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
++#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
++#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
++#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
++#define TTM_PL_MASK_MEM 0x0000FFFF
++
++/*
++ * Other flags that affects data placement.
++ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
++ * if available.
++ * TTM_PL_FLAG_SHARED means that another application may
++ * reference the buffer.
++ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
++ * be evicted to make room for other buffers.
++ */
++
++#define TTM_PL_FLAG_CACHED (1 << 16)
++#define TTM_PL_FLAG_UNCACHED (1 << 17)
++#define TTM_PL_FLAG_WC (1 << 18)
++#define TTM_PL_FLAG_SHARED (1 << 20)
++#define TTM_PL_FLAG_NO_EVICT (1 << 21)
++
++#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
++ TTM_PL_FLAG_UNCACHED | \
++ TTM_PL_FLAG_WC)
++
++#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
++
++/*
++ * Access flags to be used for CPU- and GPU- mappings.
++ * The idea is that the TTM synchronization mechanism will
++ * allow concurrent READ access and exclusive write access.
++ * Currently GPU- and CPU accesses are exclusive.
++ */
++
++#define TTM_ACCESS_READ (1 << 0)
++#define TTM_ACCESS_WRITE (1 << 1)
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_placement_user.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,468 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm/ttm_placement_user.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_userobj_api.h"
++#include "ttm/ttm_lock.h"
++
++struct ttm_bo_user_object {
++ struct ttm_base_object base;
++ struct ttm_buffer_object bo;
++};
++
++static size_t pl_bo_size = 0;
++
++static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
++{
++ size_t page_array_size =
++ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
++
++ if (unlikely(pl_bo_size == 0)) {
++ pl_bo_size = bdev->ttm_bo_extra_size +
++ ttm_round_pot(sizeof(struct ttm_bo_user_object));
++ }
++
++ return bdev->ttm_bo_size + 2 * page_array_size;
++}
++
++static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
++ *tfile, uint32_t handle)
++{
++ struct ttm_base_object *base;
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (unlikely(base == NULL)) {
++ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ if (unlikely(base->object_type != ttm_buffer_type)) {
++ ttm_base_object_unref(&base);
++ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ return container_of(base, struct ttm_bo_user_object, base);
++}
++
++struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
++ *tfile, uint32_t handle)
++{
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_base_object *base;
++
++ user_bo = ttm_bo_user_lookup(tfile, handle);
++ if (unlikely(user_bo == NULL))
++ return NULL;
++
++ (void)ttm_bo_reference(&user_bo->bo);
++ base = &user_bo->base;
++ ttm_base_object_unref(&base);
++ return &user_bo->bo;
++}
++
++static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(bo, struct ttm_bo_user_object, bo);
++
++ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
++ kfree(user_bo);
++}
++
++static void ttm_bo_user_release(struct ttm_base_object **p_base)
++{
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_base_object *base = *p_base;
++ struct ttm_buffer_object *bo;
++
++ *p_base = NULL;
++
++ if (unlikely(base == NULL))
++ return;
++
++ user_bo = container_of(base, struct ttm_bo_user_object, base);
++ bo = &user_bo->bo;
++ ttm_bo_unref(&bo);
++}
++
++static void ttm_bo_user_ref_release(struct ttm_base_object *base,
++ enum ttm_ref_type ref_type)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(base, struct ttm_bo_user_object, base);
++ struct ttm_buffer_object *bo = &user_bo->bo;
++
++ switch (ref_type) {
++ case TTM_REF_SYNCCPU_WRITE:
++ ttm_bo_synccpu_write_release(bo);
++ break;
++ default:
++ BUG();
++ }
++}
++
++static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
++ struct ttm_pl_rep *rep)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(bo, struct ttm_bo_user_object, bo);
++
++ rep->gpu_offset = bo->offset;
++ rep->bo_size = bo->num_pages << PAGE_SHIFT;
++ rep->map_handle = bo->addr_space_offset;
++ rep->placement = bo->mem.flags;
++ rep->handle = user_bo->base.hash.key;
++ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
++}
++
++int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_create_arg *arg = data;
++ struct ttm_pl_create_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *tmp;
++ struct ttm_bo_user_object *user_bo;
++ uint32_t flags;
++ int ret = 0;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ size_t acc_size =
++ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ flags = req->placement;
++ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
++ if (unlikely(user_bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++
++ bo = &user_bo->bo;
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ kfree(user_bo);
++ return ret;
++ }
++
++ ret = ttm_buffer_object_init(bdev, bo, req->size,
++ ttm_bo_type_device, flags,
++ req->page_alignment, 0, true,
++ NULL, acc_size, &ttm_bo_user_destroy);
++ ttm_read_unlock(lock);
++
++ /*
++ * Note that the ttm_buffer_object_init function
++ * would've called the destroy function on failure!!
++ */
++
++ if (unlikely(ret != 0))
++ goto out;
++
++ tmp = ttm_bo_reference(bo);
++ ret = ttm_base_object_init(tfile, &user_bo->base,
++ flags & TTM_PL_FLAG_SHARED,
++ ttm_buffer_type,
++ &ttm_bo_user_release,
++ &ttm_bo_user_ref_release);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unref(&bo);
++ out:
++ return 0;
++ out_err:
++ ttm_bo_unref(&tmp);
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_create_ub_arg *arg = data;
++ struct ttm_pl_create_ub_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *tmp;
++ struct ttm_bo_user_object *user_bo;
++ uint32_t flags;
++ int ret = 0;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ size_t acc_size =
++ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ flags = req->placement;
++ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
++ if (unlikely(user_bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ kfree(user_bo);
++ return ret;
++ }
++ bo = &user_bo->bo;
++ ret = ttm_buffer_object_init(bdev, bo, req->size,
++ ttm_bo_type_user, flags,
++ req->page_alignment, req->user_address,
++ true, NULL, acc_size, &ttm_bo_user_destroy);
++
++ /*
++ * Note that the ttm_buffer_object_init function
++ * would've called the destroy function on failure!!
++ */
++ ttm_read_unlock(lock);
++ if (unlikely(ret != 0))
++ goto out;
++
++ tmp = ttm_bo_reference(bo);
++ ret = ttm_base_object_init(tfile, &user_bo->base,
++ flags & TTM_PL_FLAG_SHARED,
++ ttm_buffer_type,
++ &ttm_bo_user_release,
++ &ttm_bo_user_ref_release);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unref(&bo);
++ out:
++ return 0;
++ out_err:
++ ttm_bo_unref(&tmp);
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ union ttm_pl_reference_arg *arg = data;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_buffer_object *bo;
++ struct ttm_base_object *base;
++ int ret;
++
++ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
++ if (unlikely(user_bo == NULL)) {
++ printk(KERN_ERR "Could not reference buffer object.\n");
++ return -EINVAL;
++ }
++
++ bo = &user_bo->bo;
++ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR
++ "Could not add a reference to buffer object.\n");
++ goto out;
++ }
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++
++ out:
++ base = &user_bo->base;
++ ttm_base_object_unref(&base);
++ return ret;
++}
++
++int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_reference_req *arg = data;
++
++ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
++}
++
++int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_synccpu_arg *arg = data;
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_buffer_object *bo;
++ struct ttm_base_object *base;
++ bool existed;
++ int ret;
++
++ switch (arg->op) {
++ case TTM_PL_SYNCCPU_OP_GRAB:
++ user_bo = ttm_bo_user_lookup(tfile, arg->handle);
++ if (unlikely(user_bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for synccpu.\n");
++ return -EINVAL;
++ }
++ bo = &user_bo->bo;
++ base = &user_bo->base;
++ ret = ttm_bo_synccpu_write_grab(bo,
++ arg->access_mode &
++ TTM_PL_SYNCCPU_MODE_NO_BLOCK);
++ if (unlikely(ret != 0)) {
++ ttm_base_object_unref(&base);
++ goto out;
++ }
++ ret = ttm_ref_object_add(tfile, &user_bo->base,
++ TTM_REF_SYNCCPU_WRITE, &existed);
++ if (existed || ret != 0)
++ ttm_bo_synccpu_write_release(bo);
++ ttm_base_object_unref(&base);
++ break;
++ case TTM_PL_SYNCCPU_OP_RELEASE:
++ ret = ttm_ref_object_base_unref(tfile, arg->handle,
++ TTM_REF_SYNCCPU_WRITE);
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ out:
++ return ret;
++}
++
++int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_setstatus_arg *arg = data;
++ struct ttm_pl_setstatus_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_device *bdev;
++ int ret;
++
++ bo = ttm_buffer_object_lookup(tfile, req->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for setstatus.\n");
++ return -EINVAL;
++ }
++
++ bdev = bo->bdev;
++
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ ret = ttm_bo_wait_cpu(bo, false);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_check_placement(bo, req->set_placement,
++ req->clr_placement);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ bo->proposed_flags = (bo->proposed_flags | req->set_placement)
++ & ~req->clr_placement;
++ ret = ttm_buffer_object_validate(bo, true, false);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ ttm_pl_fill_rep(bo, rep);
++ out_err2:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ out_err1:
++ ttm_read_unlock(lock);
++ out_err0:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_waitidle_arg *arg = data;
++ struct ttm_buffer_object *bo;
++ int ret;
++
++ bo = ttm_buffer_object_lookup(tfile, arg->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR "Could not find buffer object for waitidle.\n");
++ return -EINVAL;
++ }
++
++ ret =
++ ttm_bo_block_reservation(bo, true,
++ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
++ if (unlikely(ret != 0))
++ goto out;
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo,
++ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
++ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unblock_reservation(bo);
++ out:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_verify_access(struct ttm_buffer_object *bo,
++ struct ttm_object_file *tfile)
++{
++ struct ttm_bo_user_object *ubo;
++
++ /*
++ * Check bo subclass.
++ */
++
++ if (unlikely(bo->destroy != &ttm_bo_user_destroy))
++ return -EPERM;
++
++ ubo = container_of(bo, struct ttm_bo_user_object, bo);
++ if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
++ return 0;
++
++ return -EPERM;
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_placement_user.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_placement_user.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,259 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PLACEMENT_USER_H_
++#define _TTM_PLACEMENT_USER_H_
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#else
++#include <linux/kernel.h>
++#endif
++
++#include "ttm/ttm_placement_common.h"
++
++#define TTM_PLACEMENT_MAJOR 0
++#define TTM_PLACEMENT_MINOR 1
++#define TTM_PLACEMENT_PL 0
++#define TTM_PLACEMENT_DATE "080819"
++
++/**
++ * struct ttm_pl_create_req
++ *
++ * @size: The buffer object size.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_BO_CREATE ioctl.
++ */
++
++struct ttm_pl_create_req {
++ uint64_t size;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_create_ub_req
++ *
++ * @size: The buffer object size.
++ * @user_address: User-space address of the memory area that
++ * should be used to back the buffer object cast to 64-bit.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_BO_CREATE_UB ioctl.
++ */
++
++struct ttm_pl_create_ub_req {
++ uint64_t size;
++ uint64_t user_address;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_rep
++ *
++ * @gpu_offset: The current offset into the memory region used.
++ * This can be used directly by the GPU if there are no
++ * additional GPU mapping procedures used by the driver.
++ *
++ * @bo_size: Actual buffer object size.
++ *
++ * @map_handle: Offset into the device address space.
++ * Used for map, seek, read, write. This will never change
++ * during the lifetime of an object.
++ *
++ * @placement: Flag indicating the placement status of
++ * the buffer object using the TTM_PL flags above.
++ *
++ * @sync_object_arg: Used for user-space synchronization and
++ * depends on the synchronization model used. If fences are
++ * used, this is the buffer_object::fence_type_mask
++ *
++ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and
++ * TTM_PL_SETSTATUS ioctls.
++ */
++
++struct ttm_pl_rep {
++ uint64_t gpu_offset;
++ uint64_t bo_size;
++ uint64_t map_handle;
++ uint32_t placement;
++ uint32_t handle;
++ uint32_t sync_object_arg;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_setstatus_req
++ *
++ * @set_placement: Placement flags to set.
++ *
++ * @clr_placement: Placement flags to clear.
++ *
++ * @handle: The object handle
++ *
++ * Input to the TTM_PL_SETSTATUS ioctl.
++ */
++
++struct ttm_pl_setstatus_req {
++ uint32_t set_placement;
++ uint32_t clr_placement;
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_reference_req
++ *
++ * @handle: The object to put a reference on.
++ *
++ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
++ */
++
++struct ttm_pl_reference_req {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * ACCESS mode flags for SYNCCPU.
++ *
++ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not
++ * writing to the buffer.
++ *
++ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
++ * accessing the buffer.
++ *
++ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
++ * for GPU accesses to finish but return -EBUSY.
++ *
++ * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable
++ * memory while synchronized for CPU.
++ */
++
++#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
++#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
++#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
++#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
++
++/**
++ * struct ttm_pl_synccpu_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @access_mode: access mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * @op: indicates whether to grab or release the
++ * buffer for cpu usage.
++ *
++ * Input to the TTM_PL_SYNCCPU ioctl.
++ */
++
++struct ttm_pl_synccpu_arg {
++ uint32_t handle;
++ uint32_t access_mode;
++ enum {
++ TTM_PL_SYNCCPU_OP_GRAB,
++ TTM_PL_SYNCCPU_OP_RELEASE
++ } op;
++ uint32_t pad64;
++};
++
++/*
++ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
++ *
++ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
++#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_waitidle_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @mode: wait mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * Argument to the TTM_BO_WAITIDLE ioctl.
++ */
++
++struct ttm_pl_waitidle_arg {
++ uint32_t handle;
++ uint32_t mode;
++};
++
++union ttm_pl_create_arg {
++ struct ttm_pl_create_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_reference_arg {
++ struct ttm_pl_reference_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_setstatus_arg {
++ struct ttm_pl_setstatus_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_create_ub_arg {
++ struct ttm_pl_create_ub_req req;
++ struct ttm_pl_rep rep;
++};
++
++/*
++ * Ioctl offsets.
++ */
++
++#define TTM_PL_CREATE 0x00
++#define TTM_PL_REFERENCE 0x01
++#define TTM_PL_UNREF 0x02
++#define TTM_PL_SYNCCPU 0x03
++#define TTM_PL_WAITIDLE 0x04
++#define TTM_PL_SETSTATUS 0x05
++#define TTM_PL_CREATE_UB 0x06
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_regman.h b/drivers/gpu/drm/psb/ttm/ttm_regman.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_regman.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_regman.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,74 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_REGMAN_H_
++#define _TTM_REGMAN_H_
++
++#include <linux/list.h>
++
++struct ttm_fence_object;
++
++struct ttm_reg {
++ struct list_head head;
++ struct ttm_fence_object *fence;
++ uint32_t fence_type;
++ uint32_t new_fence_type;
++};
++
++struct ttm_reg_manager {
++ struct list_head free;
++ struct list_head lru;
++ struct list_head unfenced;
++
++ int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
++ void (*reg_destroy)(struct ttm_reg *reg);
++};
++
++extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
++ const void *data,
++ uint32_t fence_class,
++ uint32_t fence_type,
++ int interruptible,
++ int no_wait,
++ struct ttm_reg **reg);
++
++extern void ttm_regs_fence(struct ttm_reg_manager *regs,
++ struct ttm_fence_object *fence);
++
++extern void ttm_regs_free(struct ttm_reg_manager *manager);
++extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
++extern void ttm_regs_init(struct ttm_reg_manager *manager,
++ int (*reg_reusable)(const struct ttm_reg *,
++ const void *),
++ void (*reg_destroy)(struct ttm_reg *));
++
++#endif
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_tt.c b/drivers/gpu/drm/psb/ttm/ttm_tt.c
+--- a/drivers/gpu/drm/psb/ttm/ttm_tt.c 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_tt.c 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,655 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <linux/version.h>
++#include <linux/vmalloc.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/file.h>
++#include <linux/swap.h>
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_placement_common.h"
++
++static int ttm_tt_swapin(struct ttm_tt *ttm);
++
++#if defined( CONFIG_X86 )
++static void ttm_tt_clflush_page(struct page *page)
++{
++ uint8_t *page_virtual;
++ unsigned int i;
++
++ if (unlikely(page == NULL))
++ return;
++
++ page_virtual = kmap_atomic(page, KM_USER0);
++
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ clflush(page_virtual + i);
++
++ kunmap_atomic(page_virtual, KM_USER0);
++}
++
++static void ttm_tt_cache_flush_clflush(struct page *pages[],
++ unsigned long num_pages)
++{
++ unsigned long i;
++
++ mb();
++ for (i = 0; i < num_pages; ++i)
++ ttm_tt_clflush_page(*pages++);
++ mb();
++}
++#else
++static void ttm_tt_ipi_handler(void *null)
++{
++ ;
++}
++#endif
++
++void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
++{
++
++#if defined( CONFIG_X86 )
++ if (cpu_has_clflush) {
++ ttm_tt_cache_flush_clflush(pages, num_pages);
++ return;
++ }
++#else
++ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0)
++ printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
++#endif
++}
++
++/**
++ * Allocates storage for pointers to the pages that back the ttm.
++ *
++ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
++ */
++static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
++{
++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
++ ttm->pages = NULL;
++
++ if (size <= PAGE_SIZE)
++ ttm->pages = kzalloc(size, GFP_KERNEL);
++
++ if (!ttm->pages) {
++ ttm->pages = vmalloc_user(size);
++ if (ttm->pages)
++ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
++ }
++}
++
++static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
++{
++ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
++ vfree(ttm->pages);
++ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
++ } else {
++ kfree(ttm->pages);
++ }
++ ttm->pages = NULL;
++}
++
++static struct page *ttm_tt_alloc_page(void)
++{
++ return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
++}
++
++static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
++{
++ int write;
++ int dirty;
++ struct page *page;
++ int i;
++ struct ttm_backend *be = ttm->be;
++
++ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
++ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
++ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
++
++ if (be)
++ be->func->clear(be);
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ page = ttm->pages[i];
++ if (page == NULL)
++ continue;
++
++ if (page == ttm->dummy_read_page) {
++ BUG_ON(write);
++ continue;
++ }
++
++ if (write && dirty && !PageReserved(page))
++ set_page_dirty_lock(page);
++
++ ttm->pages[i] = NULL;
++ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
++ put_page(page);
++ }
++ ttm->state = tt_unpopulated;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++}
++
++static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
++{
++ struct page *p;
++ struct ttm_bo_device *bdev = ttm->bdev;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ int ret;
++
++ while (NULL == (p = ttm->pages[index])) {
++ p = ttm_tt_alloc_page();
++
++ if (!p)
++ return NULL;
++
++ if (PageHighMem(p)) {
++ ret =
++ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, true);
++ if (unlikely(ret != 0))
++ goto out_err;
++ ttm->pages[--ttm->first_himem_page] = p;
++ } else {
++ ret =
++ ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, false);
++ if (unlikely(ret != 0))
++ goto out_err;
++ ttm->pages[++ttm->last_lomem_page] = p;
++ }
++ }
++ return p;
++ out_err:
++ put_page(p);
++ return NULL;
++}
++
++struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
++{
++ int ret;
++
++ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++ ret = ttm_tt_swapin(ttm);
++ if (unlikely(ret != 0))
++ return NULL;
++ }
++ return __ttm_tt_get_page(ttm, index);
++}
++
++int ttm_tt_populate(struct ttm_tt *ttm)
++{
++ struct page *page;
++ unsigned long i;
++ struct ttm_backend *be;
++ int ret;
++
++ if (ttm->state != tt_unpopulated)
++ return 0;
++
++ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++ ret = ttm_tt_swapin(ttm);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ be = ttm->be;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ page = __ttm_tt_get_page(ttm, i);
++ if (!page)
++ return -ENOMEM;
++ }
++
++ be->func->populate(be, ttm->num_pages, ttm->pages,
++ ttm->dummy_read_page);
++ ttm->state = tt_unbound;
++ return 0;
++}
++
++#ifdef CONFIG_X86
++static inline int ttm_tt_set_page_caching(struct page *p,
++ enum ttm_caching_state c_state)
++{
++ if (PageHighMem(p))
++ return 0;
++
++ switch (c_state) {
++ case tt_cached:
++ return set_pages_wb(p, 1);
++ case tt_wc:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++ return set_memory_wc((unsigned long) page_address(p), 1);
++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)) */
++ default:
++ return set_pages_uc(p, 1);
++ }
++}
++#else /* CONFIG_X86 */
++static inline int ttm_tt_set_page_caching(struct page *p,
++ enum ttm_caching_state c_state)
++{
++ return 0;
++}
++#endif /* CONFIG_X86 */
++
++/*
++ * Change caching policy for the linear kernel map
++ * for range of pages in a ttm.
++ */
++
++static int ttm_tt_set_caching(struct ttm_tt *ttm,
++ enum ttm_caching_state c_state)
++{
++ int i, j;
++ struct page *cur_page;
++ int ret;
++
++ if (ttm->caching_state == c_state)
++ return 0;
++
++ if (c_state != tt_cached) {
++ ret = ttm_tt_populate(ttm);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ if (ttm->caching_state == tt_cached)
++ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ cur_page = ttm->pages[i];
++ if (likely(cur_page != NULL)) {
++ ret = ttm_tt_set_page_caching(cur_page, c_state);
++ if (unlikely(ret != 0))
++ goto out_err;
++ }
++ }
++
++ ttm->caching_state = c_state;
++
++ return 0;
++
++ out_err:
++ for (j = 0; j < i; ++j) {
++ cur_page = ttm->pages[j];
++ if (likely(cur_page != NULL)) {
++ (void)ttm_tt_set_page_caching(cur_page,
++ ttm->caching_state);
++ }
++ }
++
++ return ret;
++}
++
++int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
++{
++ enum ttm_caching_state state;
++
++ if (placement & TTM_PL_FLAG_WC)
++ state = tt_wc;
++ else if (placement & TTM_PL_FLAG_UNCACHED)
++ state = tt_uncached;
++ else
++ state = tt_cached;
++
++ return ttm_tt_set_caching(ttm, state);
++}
++
++static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
++{
++ int i;
++ struct page *cur_page;
++ struct ttm_backend *be = ttm->be;
++
++ if (be)
++ be->func->clear(be);
++ (void)ttm_tt_set_caching(ttm, tt_cached);
++ for (i = 0; i < ttm->num_pages; ++i) {
++ cur_page = ttm->pages[i];
++ ttm->pages[i] = NULL;
++ if (cur_page) {
++ if (page_count(cur_page) != 1)
++ printk(KERN_ERR
++ "Erroneous page count. Leaking pages.\n");
++ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
++ PageHighMem(cur_page));
++ __free_page(cur_page);
++ }
++ }
++ ttm->state = tt_unpopulated;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++}
++
++void ttm_tt_destroy(struct ttm_tt *ttm)
++{
++ struct ttm_backend *be;
++
++ if (unlikely(ttm == NULL))
++ return;
++
++ be = ttm->be;
++ if (likely(be != NULL)) {
++ be->func->destroy(be);
++ ttm->be = NULL;
++ }
++
++ if (likely(ttm->pages != NULL)) {
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
++ ttm_tt_free_user_pages(ttm);
++ else
++ ttm_tt_free_alloced_pages(ttm);
++
++ ttm_tt_free_page_directory(ttm);
++ }
++
++ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
++ ttm->swap_storage)
++ fput(ttm->swap_storage);
++
++ kfree(ttm);
++}
++
++int ttm_tt_set_user(struct ttm_tt *ttm,
++ struct task_struct *tsk,
++ unsigned long start, unsigned long num_pages)
++{
++ struct mm_struct *mm = tsk->mm;
++ int ret;
++ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
++ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
++
++ BUG_ON(num_pages != ttm->num_pages);
++ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
++
++ /**
++ * Account user pages as lowmem pages for now.
++ */
++
++ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ down_read(&mm->mmap_sem);
++ ret = get_user_pages(tsk, mm, start, num_pages,
++ write, 0, ttm->pages, NULL);
++ up_read(&mm->mmap_sem);
++
++ if (ret != num_pages && write) {
++ ttm_tt_free_user_pages(ttm);
++ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
++ return -ENOMEM;
++ }
++
++ ttm->tsk = tsk;
++ ttm->start = start;
++ ttm->state = tt_unbound;
++
++ return 0;
++}
++
++struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
++ uint32_t page_flags, struct page *dummy_read_page)
++{
++ struct ttm_bo_driver *bo_driver = bdev->driver;
++ struct ttm_tt *ttm;
++
++ if (!bo_driver)
++ return NULL;
++
++ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
++ if (!ttm)
++ return NULL;
++
++ ttm->bdev = bdev;
++
++ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++ ttm->caching_state = tt_cached;
++ ttm->page_flags = page_flags;
++
++ ttm->dummy_read_page = dummy_read_page;
++
++ ttm_tt_alloc_page_directory(ttm);
++ if (!ttm->pages) {
++ ttm_tt_destroy(ttm);
++ printk(KERN_ERR "Failed allocating page table\n");
++ return NULL;
++ }
++ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
++ if (!ttm->be) {
++ ttm_tt_destroy(ttm);
++ printk(KERN_ERR "Failed creating ttm backend entry\n");
++ return NULL;
++ }
++ ttm->state = tt_unpopulated;
++ return ttm;
++}
++
++/**
++ * ttm_tt_unbind:
++ *
++ * @ttm: the object to unbind from the graphics device
++ *
++ * Unbind an object from the aperture. This removes the mappings
++ * from the graphics device and flushes caches if necessary.
++ */
++void ttm_tt_unbind(struct ttm_tt *ttm)
++{
++ int ret;
++ struct ttm_backend *be = ttm->be;
++
++ if (ttm->state == tt_bound) {
++ ret = be->func->unbind(be);
++ BUG_ON(ret);
++ }
++ ttm->state = tt_unbound;
++}
++
++/**
++ * ttm_tt_bind:
++ *
++ * @ttm: the ttm object to bind to the graphics device
++ *
++ * @bo_mem: the aperture memory region which will hold the object
++ *
++ * Bind a ttm object to the aperture. This ensures that the necessary
++ * pages are allocated, flushes CPU caches as needed and marks the
++ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
++ * modified by the GPU
++ */
++
++int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
++{
++ int ret = 0;
++ struct ttm_backend *be;
++
++ if (!ttm)
++ return -EINVAL;
++
++ if (ttm->state == tt_bound)
++ return 0;
++
++ be = ttm->be;
++
++ ret = ttm_tt_populate(ttm);
++ if (ret)
++ return ret;
++
++ ret = be->func->bind(be, bo_mem);
++ if (ret) {
++ printk(KERN_ERR "Couldn't bind backend.\n");
++ return ret;
++ }
++
++ ttm->state = tt_bound;
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
++ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
++ return 0;
++}
++
++static int ttm_tt_swapin(struct ttm_tt *ttm)
++{
++ struct address_space *swap_space;
++ struct file *swap_storage;
++ struct page *from_page;
++ struct page *to_page;
++ void *from_virtual;
++ void *to_virtual;
++ int i;
++ int ret;
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
++ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
++ ttm->num_pages);
++ if (unlikely(ret != 0))
++ return ret;
++
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
++ return 0;
++ }
++
++ swap_storage = ttm->swap_storage;
++ BUG_ON(swap_storage == NULL);
++
++ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ from_page = read_mapping_page(swap_space, i, NULL);
++ if (IS_ERR(from_page))
++ goto out_err;
++ to_page = __ttm_tt_get_page(ttm, i);
++ if (unlikely(to_page == NULL))
++ goto out_err;
++
++ preempt_disable();
++ from_virtual = kmap_atomic(from_page, KM_USER0);
++ to_virtual = kmap_atomic(to_page, KM_USER1);
++ memcpy(to_virtual, from_virtual, PAGE_SIZE);
++ kunmap_atomic(to_virtual, KM_USER1);
++ kunmap_atomic(from_virtual, KM_USER0);
++ preempt_enable();
++ page_cache_release(from_page);
++ }
++
++ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
++ fput(swap_storage);
++ ttm->swap_storage = NULL;
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
++
++ return 0;
++ out_err:
++ ttm_tt_free_alloced_pages(ttm);
++ return -ENOMEM;
++}
++
++int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
++{
++ struct address_space *swap_space;
++ struct file *swap_storage;
++ struct page *from_page;
++ struct page *to_page;
++ void *from_virtual;
++ void *to_virtual;
++ int i;
++
++ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
++ BUG_ON(ttm->caching_state != tt_cached);
++
++ /*
++ * For user buffers, just unpin the pages, as there should be
++ * vma references.
++ */
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
++ ttm_tt_free_user_pages(ttm);
++ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
++ ttm->swap_storage = NULL;
++ return 0;
++ }
++
++ if (!persistant_swap_storage) {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
++ swap_storage = shmem_file_setup("ttm swap",
++ ttm->num_pages << PAGE_SHIFT,
++ 0);
++ if (unlikely(IS_ERR(swap_storage))) {
++ printk(KERN_ERR "Failed allocating swap storage.\n");
++ return -ENOMEM;
++ }
++#else
++ return -ENOMEM;
++#endif
++ } else
++ swap_storage = persistant_swap_storage;
++
++ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ from_page = ttm->pages[i];
++ if (unlikely(from_page == NULL))
++ continue;
++ to_page = read_mapping_page(swap_space, i, NULL);
++ if (unlikely(to_page == NULL))
++ goto out_err;
++
++ preempt_disable();
++ from_virtual = kmap_atomic(from_page, KM_USER0);
++ to_virtual = kmap_atomic(to_page, KM_USER1);
++ memcpy(to_virtual, from_virtual, PAGE_SIZE);
++ kunmap_atomic(to_virtual, KM_USER1);
++ kunmap_atomic(from_virtual, KM_USER0);
++ preempt_enable();
++ set_page_dirty(to_page);
++ mark_page_accessed(to_page);
++// unlock_page(to_page);
++ page_cache_release(to_page);
++ }
++
++ ttm_tt_free_alloced_pages(ttm);
++ ttm->swap_storage = swap_storage;
++ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
++ if (persistant_swap_storage)
++ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
++
++ return 0;
++ out_err:
++ if (!persistant_swap_storage)
++ fput(swap_storage);
++
++ return -ENOMEM;
++}
+diff -uNr a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h
+--- a/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h 1969-12-31 16:00:00.000000000 -0800
++++ b/drivers/gpu/drm/psb/ttm/ttm_userobj_api.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,79 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_USEROBJ_API_H_
++#define _TTM_USEROBJ_API_H_
++
++#include "ttm/ttm_placement_user.h"
++#include "ttm/ttm_fence_user.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_fence_api.h"
++#include "ttm/ttm_bo_api.h"
++
++struct ttm_lock;
++
++/*
++ * User ioctls.
++ */
++
++extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
++
++extern int
++ttm_fence_user_create(struct ttm_fence_device *fdev,
++ struct ttm_object_file *tfile,
++ uint32_t fence_class,
++ uint32_t fence_types,
++ uint32_t create_flags,
++ struct ttm_fence_object **fence, uint32_t * user_handle);
++
++extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
++ *tfile,
++ uint32_t handle);
++
++extern int
++ttm_pl_verify_access(struct ttm_buffer_object *bo,
++ struct ttm_object_file *tfile);
++#endif
+diff -uNr a/include/drm/drm_compat.h b/include/drm/drm_compat.h
+--- a/include/drm/drm_compat.h 1969-12-31 16:00:00.000000000 -0800
++++ b/include/drm/drm_compat.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,238 @@
++/**
++ * \file drm_compat.h
++ * Backward compatibility definitions for Direct Rendering Manager
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _DRM_COMPAT_H_
++#define _DRM_COMPAT_H_
++
++#ifndef minor
++#define minor(x) MINOR((x))
++#endif
++
++#ifndef MODULE_LICENSE
++#define MODULE_LICENSE(x)
++#endif
++
++#ifndef preempt_disable
++#define preempt_disable()
++#define preempt_enable()
++#endif
++
++#ifndef pte_offset_map
++#define pte_offset_map pte_offset
++#define pte_unmap(pte)
++#endif
++
++#ifndef module_param
++#define module_param(name, type, perm)
++#endif
++
++/* older kernels had different irq args */
++
++#ifndef list_for_each_safe
++#define list_for_each_safe(pos, n, head) \
++ for (pos = (head)->next, n = pos->next; pos != (head); \
++ pos = n, n = pos->next)
++#endif
++
++#ifndef list_for_each_entry
++#define list_for_each_entry(pos, head, member) \
++ for (pos = list_entry((head)->next, typeof(*pos), member), \
++ prefetch(pos->member.next); \
++ &pos->member != (head); \
++ pos = list_entry(pos->member.next, typeof(*pos), member), \
++ prefetch(pos->member.next))
++#endif
++
++#ifndef list_for_each_entry_safe
++#define list_for_each_entry_safe(pos, n, head, member) \
++ for (pos = list_entry((head)->next, typeof(*pos), member), \
++ n = list_entry(pos->member.next, typeof(*pos), member); \
++ &pos->member != (head); \
++ pos = n, n = list_entry(n->member.next, typeof(*n), member))
++#endif
++
++#ifndef __user
++#define __user
++#endif
++
++#if !defined(__put_page)
++#define __put_page(p) atomic_dec(&(p)->count)
++#endif
++
++#if !defined(__GFP_COMP)
++#define __GFP_COMP 0
++#endif
++
++#if !defined(IRQF_SHARED)
++#define IRQF_SHARED SA_SHIRQ
++#endif
++
++
++
++#ifndef DEFINE_SPINLOCK
++#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
++#endif
++
++/* old architectures */
++#ifdef __AMD64__
++#define __x86_64__
++#endif
++
++/* sysfs __ATTR macro */
++#ifndef __ATTR
++#define __ATTR(_name,_mode,_show,_store) { \
++ .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
++ .show = _show, \
++ .store = _store, \
++}
++#endif
++
++
++#ifndef list_for_each_entry_safe_reverse
++#define list_for_each_entry_safe_reverse(pos, n, head, member) \
++ for (pos = list_entry((head)->prev, typeof(*pos), member), \
++ n = list_entry(pos->member.prev, typeof(*pos), member); \
++ &pos->member != (head); \
++ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
++#endif
++
++#include <linux/mm.h>
++#include <asm/page.h>
++
++
++#define DRM_FULL_MM_COMPAT
++
++
++/*
++ * Flush relevant caches and clear a VMA structure so that page references
++ * will cause a page fault. Don't flush tlbs.
++ */
++
++extern void drm_clear_vma(struct vm_area_struct *vma,
++ unsigned long addr, unsigned long end);
++
++/*
++ * Return the PTE protection map entries for the VMA flags given by
++ * flags. This is a functional interface to the kernel's protection map.
++ */
++
++extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
++
++#ifndef GFP_DMA32
++#define GFP_DMA32 GFP_KERNEL
++#endif
++#ifndef __GFP_DMA32
++#define __GFP_DMA32 GFP_KERNEL
++#endif
++
++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++/*
++ * These are too slow in earlier kernels.
++ */
++
++extern int drm_unmap_page_from_agp(struct page *page);
++extern int drm_map_page_into_agp(struct page *page);
++
++#define map_page_into_agp drm_map_page_into_agp
++#define unmap_page_from_agp drm_unmap_page_from_agp
++#endif
++
++
++
++
++
++/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
++#ifdef DRM_IDR_COMPAT_FN
++int idr_for_each(struct idr *idp,
++ int (*fn)(int id, void *p, void *data), void *data);
++void idr_remove_all(struct idr *idp);
++#endif
++
++
++
++
++
++
++#ifndef PM_EVENT_PRETHAW
++#define PM_EVENT_PRETHAW 3
++#endif
++
++
++#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM) && \
++ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)))
++#define DRM_KMAP_ATOMIC_PROT_PFN
++extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
++ pgprot_t protection);
++#endif
++
++#if !defined(flush_agp_mappings)
++#define flush_agp_mappings() do {} while(0)
++#endif
++
++#ifndef DMA_BIT_MASK
++#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
++#endif
++
++#ifndef VM_CAN_NONLINEAR
++#define DRM_VM_NOPAGE 1
++#endif
++
++#ifdef DRM_VM_NOPAGE
++
++extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
++ unsigned long address, int *type);
++
++extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
++ unsigned long address, int *type);
++
++extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
++ unsigned long address, int *type);
++
++extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
++ unsigned long address, int *type);
++#endif
++
++#define drm_on_each_cpu(handler, data, wait) \
++ on_each_cpu(handler, data, wait)
++
++
++#ifndef OS_HAS_GEM
++#define OS_HAS_GEM 1
++#endif
++
++#ifndef current_euid
++#define current_euid() (current->euid)
++#endif
++
++#endif
+diff -uNr a/include/drm/drm_internal.h b/include/drm/drm_internal.h
+--- a/include/drm/drm_internal.h 1969-12-31 16:00:00.000000000 -0800
++++ b/include/drm/drm_internal.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2007 Red Hat, Inc
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* This header file holds function prototypes and data types that are
++ * internal to the drm (not exported to user space) but shared across
++ * drivers and platforms */
++
++#ifndef __DRM_INTERNAL_H__
++#define __DRM_INTERNAL_H__
++
++/**
++ * Drawable information.
++ */
++struct drm_drawable_info {
++ unsigned int num_rects;
++ struct drm_clip_rect *rects;
++};
++
++#endif
+diff -uNr a/include/drm/ttm/ttm_fence_user.h b/include/drm/ttm/ttm_fence_user.h
+--- a/include/drm/ttm/ttm_fence_user.h 1969-12-31 16:00:00.000000000 -0800
++++ b/include/drm/ttm/ttm_fence_user.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,147 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef TTM_FENCE_USER_H
++#define TTM_FENCE_USER_H
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#endif
++
++#define TTM_FENCE_MAJOR 0
++#define TTM_FENCE_MINOR 1
++#define TTM_FENCE_PL 0
++#define TTM_FENCE_DATE "080819"
++
++/**
++ * struct ttm_fence_signaled_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to flush. Input.
++ *
++ * @flush: Boolean. Flush the indicated fence_types. Input.
++ *
++ * Argument to the TTM_FENCE_SIGNALED ioctl.
++ */
++
++struct ttm_fence_signaled_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ int32_t flush;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_fence_rep
++ *
++ * @signaled_types: Fence type that has signaled.
++ *
++ * @fence_error: Command execution error.
++ * Hardware errors that are consequences of the execution
++ * of the command stream preceding the fence are reported
++ * here.
++ *
++ * Output argument to the TTM_FENCE_SIGNALED and
++ * TTM_FENCE_FINISH ioctls.
++ */
++
++struct ttm_fence_rep {
++ uint32_t signaled_types;
++ uint32_t fence_error;
++};
++
++union ttm_fence_signaled_arg {
++ struct ttm_fence_signaled_req req;
++ struct ttm_fence_rep rep;
++};
++
++/*
++ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
++ *
++ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
++#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_fence_finish_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to finish.
++ *
++ * @mode: Wait mode.
++ *
++ * Input to the TTM_FENCE_FINISH ioctl.
++ */
++
++struct ttm_fence_finish_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ uint32_t mode;
++ uint32_t pad64;
++};
++
++union ttm_fence_finish_arg {
++ struct ttm_fence_finish_req req;
++ struct ttm_fence_rep rep;
++};
++
++/**
++ * struct ttm_fence_unref_arg
++ *
++ * @handle: Handle to the fence object.
++ *
++ * Argument to the TTM_FENCE_UNREF ioctl.
++ */
++
++struct ttm_fence_unref_arg {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * Ioctl offsets from extension start.
++ */
++
++#define TTM_FENCE_SIGNALED 0x01
++#define TTM_FENCE_FINISH 0x02
++#define TTM_FENCE_UNREF 0x03
++
++#endif
+diff -uNr a/include/drm/ttm/ttm_placement_common.h b/include/drm/ttm/ttm_placement_common.h
+--- a/include/drm/ttm/ttm_placement_common.h 1969-12-31 16:00:00.000000000 -0800
++++ b/include/drm/ttm/ttm_placement_common.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,96 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PL_COMMON_H_
++#define _TTM_PL_COMMON_H_
++/*
++ * Memory regions for data placement.
++ */
++
++#define TTM_PL_SYSTEM 0
++#define TTM_PL_TT 1
++#define TTM_PL_VRAM 2
++#define TTM_PL_PRIV0 3
++#define TTM_PL_PRIV1 4
++#define TTM_PL_PRIV2 5
++#define TTM_PL_PRIV3 6
++#define TTM_PL_PRIV4 7
++#define TTM_PL_PRIV5 8
++#define TTM_PL_CI 9
++#define TTM_PL_SWAPPED 15
++
++#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
++#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
++#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
++#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
++#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
++#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
++#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
++#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
++#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
++#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
++#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
++#define TTM_PL_MASK_MEM 0x0000FFFF
++
++/*
++ * Other flags that affects data placement.
++ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
++ * if available.
++ * TTM_PL_FLAG_SHARED means that another application may
++ * reference the buffer.
++ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
++ * be evicted to make room for other buffers.
++ */
++
++#define TTM_PL_FLAG_CACHED (1 << 16)
++#define TTM_PL_FLAG_UNCACHED (1 << 17)
++#define TTM_PL_FLAG_WC (1 << 18)
++#define TTM_PL_FLAG_SHARED (1 << 20)
++#define TTM_PL_FLAG_NO_EVICT (1 << 21)
++
++#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
++ TTM_PL_FLAG_UNCACHED | \
++ TTM_PL_FLAG_WC)
++
++#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
++
++/*
++ * Access flags to be used for CPU- and GPU- mappings.
++ * The idea is that the TTM synchronization mechanism will
++ * allow concurrent READ access and exclusive write access.
++ * Currently GPU- and CPU accesses are exclusive.
++ */
++
++#define TTM_ACCESS_READ (1 << 0)
++#define TTM_ACCESS_WRITE (1 << 1)
++
++#endif
+diff -uNr a/include/drm/ttm/ttm_placement_user.h b/include/drm/ttm/ttm_placement_user.h
+--- a/include/drm/ttm/ttm_placement_user.h 1969-12-31 16:00:00.000000000 -0800
++++ b/include/drm/ttm/ttm_placement_user.h 2009-04-07 13:28:38.000000000 -0700
+@@ -0,0 +1,259 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PLACEMENT_USER_H_
++#define _TTM_PLACEMENT_USER_H_
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#else
++#include <linux/kernel.h>
++#endif
++
++#include "ttm/ttm_placement_common.h"
++
++#define TTM_PLACEMENT_MAJOR 0
++#define TTM_PLACEMENT_MINOR 1
++#define TTM_PLACEMENT_PL 0
++#define TTM_PLACEMENT_DATE "080819"
++
++/**
++ * struct ttm_pl_create_req
++ *
++ * @size: The buffer object size.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_BO_CREATE ioctl.
++ */
++
++struct ttm_pl_create_req {
++ uint64_t size;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_create_ub_req
++ *
++ * @size: The buffer object size.
++ * @user_address: User-space address of the memory area that
++ * should be used to back the buffer object cast to 64-bit.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_BO_CREATE_UB ioctl.
++ */
++
++struct ttm_pl_create_ub_req {
++ uint64_t size;
++ uint64_t user_address;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_rep
++ *
++ * @gpu_offset: The current offset into the memory region used.
++ * This can be used directly by the GPU if there are no
++ * additional GPU mapping procedures used by the driver.
++ *
++ * @bo_size: Actual buffer object size.
++ *
++ * @map_handle: Offset into the device address space.
++ * Used for map, seek, read, write. This will never change
++ * during the lifetime of an object.
++ *
++ * @placement: Flag indicating the placement status of
++ * the buffer object using the TTM_PL flags above.
++ *
++ * @sync_object_arg: Used for user-space synchronization and
++ * depends on the synchronization model used. If fences are
++ * used, this is the buffer_object::fence_type_mask
++ *
++ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and
++ * TTM_PL_SETSTATUS ioctls.
++ */
++
++struct ttm_pl_rep {
++ uint64_t gpu_offset;
++ uint64_t bo_size;
++ uint64_t map_handle;
++ uint32_t placement;
++ uint32_t handle;
++ uint32_t sync_object_arg;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_setstatus_req
++ *
++ * @set_placement: Placement flags to set.
++ *
++ * @clr_placement: Placement flags to clear.
++ *
++ * @handle: The object handle
++ *
++ * Input to the TTM_PL_SETSTATUS ioctl.
++ */
++
++struct ttm_pl_setstatus_req {
++ uint32_t set_placement;
++ uint32_t clr_placement;
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_reference_req
++ *
++ * @handle: The object to put a reference on.
++ *
++ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
++ */
++
++struct ttm_pl_reference_req {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * ACCESS mode flags for SYNCCPU.
++ *
++ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not
++ * writing to the buffer.
++ *
++ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
++ * accessing the buffer.
++ *
++ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
++ * for GPU accesses to finish but return -EBUSY.
++ *
++ * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable
++ * memory while synchronized for CPU.
++ */
++
++#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
++#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
++#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
++#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
++
++/**
++ * struct ttm_pl_synccpu_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @access_mode: access mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * @op: indicates whether to grab or release the
++ * buffer for cpu usage.
++ *
++ * Input to the TTM_PL_SYNCCPU ioctl.
++ */
++
++struct ttm_pl_synccpu_arg {
++ uint32_t handle;
++ uint32_t access_mode;
++ enum {
++ TTM_PL_SYNCCPU_OP_GRAB,
++ TTM_PL_SYNCCPU_OP_RELEASE
++ } op;
++ uint32_t pad64;
++};
++
++/*
++ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
++ *
++ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
++#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_waitidle_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @mode: wait mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * Argument to the TTM_BO_WAITIDLE ioctl.
++ */
++
++struct ttm_pl_waitidle_arg {
++ uint32_t handle;
++ uint32_t mode;
++};
++
++union ttm_pl_create_arg {
++ struct ttm_pl_create_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_reference_arg {
++ struct ttm_pl_reference_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_setstatus_arg {
++ struct ttm_pl_setstatus_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_create_ub_arg {
++ struct ttm_pl_create_ub_req req;
++ struct ttm_pl_rep rep;
++};
++
++/*
++ * Ioctl offsets.
++ */
++
++#define TTM_PL_CREATE 0x00
++#define TTM_PL_REFERENCE 0x01
++#define TTM_PL_UNREF 0x02
++#define TTM_PL_SYNCCPU 0x03
++#define TTM_PL_WAITIDLE 0x04
++#define TTM_PL_SETSTATUS 0x05
++#define TTM_PL_CREATE_UB 0x06
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0001-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch
index 8f34a0f3f..8f34a0f3f 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0001-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-retry-root-mount.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0007-acer-error-msg.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch
index 7bf897ab5..7bf897ab5 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0007-acer-error-msg.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-silence-acer-message.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0004-superreadahead-patch.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch
index e4e200110..e4e200110 100644
--- a/meta-moblin/packages/linux/linux-moblin-2.6.28+2.6.29-rc2/0004-superreadahead-patch.patch
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-sreadahead.patch
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch
new file mode 100644
index 000000000..c36e5ba4a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-timberdale.patch
@@ -0,0 +1,6095 @@
+Patch provided by Mocean in order to enable the timberdale subsystem of the Russelville board.
+
+Signed-off-by: Joel Clark <joel.clark@intel.com>
+Acked-by: Arjan van de Ven <arjan@infradead.org>
+Signed-off-by: Todd Brandt todd.e.brandt@intel.com
+
+
+diff -uNr linux-2.6.29-clean/drivers/gpio/Kconfig linux-2.6.29/drivers/gpio/Kconfig
+--- linux-2.6.29-clean/drivers/gpio/Kconfig 2009-04-01 09:20:23.000000000 -0700
++++ linux-2.6.29/drivers/gpio/Kconfig 2009-04-06 13:51:47.000000000 -0700
+@@ -161,6 +161,12 @@
+
+ If unsure, say N.
+
++config GPIO_TIMBERDALE
++ tristate "Support for timberdale GPIO"
++ depends on MFD_TIMBERDALE && GPIOLIB
++ ---help---
++ Add support for GPIO usage of some pins of the timberdale FPGA.
++
+ comment "SPI GPIO expanders:"
+
+ config GPIO_MAX7301
+diff -uNr linux-2.6.29-clean/drivers/gpio/Makefile linux-2.6.29/drivers/gpio/Makefile
+--- linux-2.6.29-clean/drivers/gpio/Makefile 2009-04-01 09:20:23.000000000 -0700
++++ linux-2.6.29/drivers/gpio/Makefile 2009-04-06 13:51:47.000000000 -0700
+@@ -12,3 +12,4 @@
+ obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
+ obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
+ obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
++obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
+diff -uNr linux-2.6.29-clean/drivers/gpio/timbgpio.c linux-2.6.29/drivers/gpio/timbgpio.c
+--- linux-2.6.29-clean/drivers/gpio/timbgpio.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/gpio/timbgpio.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,275 @@
++/*
++ * timbgpio.c timberdale FPGA GPIO driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA GPIO
++ */
++
++#include <linux/module.h>
++#include <linux/gpio.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++
++#include "timbgpio.h"
++
++static u32 timbgpio_configure(struct gpio_chip *gpio, unsigned nr,
++ unsigned off, unsigned val)
++{
++ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
++
++ u32 config, oldconfig, wconfig;
++
++ mutex_lock(&tgpio->lock);
++ config = ioread32(tgpio->membase + off);
++ oldconfig = config;
++
++ if (val)
++ config |= (1 << nr);
++ else
++ config &= ~(1 << nr);
++
++ iowrite32(config, tgpio->membase + off);
++ wconfig = ioread32(tgpio->membase + off);
++ mutex_unlock(&tgpio->lock);
++
++ return oldconfig;
++}
++
++static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
++{
++ timbgpio_configure(gpio, nr, TGPIODIR, 1);
++ return 0;
++}
++
++static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
++{
++ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
++ u32 value;
++
++ value = ioread32(tgpio->membase + TGPIOVAL);
++ return (value & (1 << nr)) ? 1 : 0;
++}
++
++static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
++ unsigned nr, int val)
++{
++ timbgpio_configure(gpio, nr, TGPIODIR, 0);
++ return 0;
++}
++
++
++
++static void timbgpio_gpio_set(struct gpio_chip *gpio,
++ unsigned nr, int val)
++{
++ timbgpio_configure(gpio, nr, TGPIOVAL, val);
++}
++
++/*
++ * Function to control flank or level triggered GPIO pin
++ * @nr - pin
++ * @ val - 1: flank, 0: level
++ *
++ */
++static void timbgpio_gpio_flnk_lvl_ctrl(struct gpio_chip *gpio,
++ unsigned nr, int val)
++{
++ timbgpio_configure(gpio, nr, TGPIOFLK, val);
++}
++EXPORT_SYMBOL(timbgpio_gpio_flnk_lvl_ctrl);
++
++/*
++ * Enable or disable interrupt
++ *
++ */
++static void timbgpio_gpio_int_ctrl(struct gpio_chip *gpio,
++ unsigned nr, int val)
++{
++ timbgpio_configure(gpio, nr, TGPIOINT, val);
++}
++EXPORT_SYMBOL(timbgpio_gpio_int_ctrl);
++
++/*
++ * @val - 1: Asserted high or on positive flank, 0: Asserted low or on negative flank
++ *
++ */
++static void timbgpio_gpio_lvl_ctrl(struct gpio_chip *gpio,
++ unsigned nr, int val)
++{
++ timbgpio_configure(gpio, nr, TGPIOLVL, val);
++}
++EXPORT_SYMBOL(timbgpio_gpio_lvl_ctrl);
++
++static void timbgpio_gpio_int_clr(struct gpio_chip *gpio,
++ unsigned nr, int val)
++{
++ timbgpio_configure(gpio, nr, TGPIOINT_CLR, val);
++}
++EXPORT_SYMBOL(timbgpio_gpio_int_clr);
++
++
++static irqreturn_t timbgpio_handleinterrupt(int irq, void *devid)
++{
++ struct timbgpio *tgpio = (struct timbgpio *)devid;
++
++ iowrite32(0xffffffff, tgpio->membase + TGPIOINT_CLR);
++
++ return IRQ_HANDLED;
++}
++
++static int timbgpio_probe(struct platform_device *dev)
++{
++ int err, irq;
++ struct gpio_chip *gc;
++ struct timbgpio *tgpio;
++ struct resource *iomem, *rscr;
++
++ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
++ if (!tgpio) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ mutex_init(&tgpio->lock);
++
++ rscr = &tgpio->rscr;
++ rscr->name = "timb-gpio";
++ rscr->start = iomem->start;
++ rscr->end = iomem->end;
++ rscr->flags = IORESOURCE_MEM;
++
++ err = request_resource(iomem, rscr);
++ if (err)
++ goto err_request;
++
++ tgpio->membase = ioremap(rscr->start, resource_size(rscr));
++ if (!tgpio->membase) {
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ gc = &tgpio->gpio;
++
++ gc->label = "timbgpio";
++ gc->owner = THIS_MODULE;
++ gc->direction_input = timbgpio_gpio_direction_input;
++ gc->get = timbgpio_gpio_get;
++ gc->direction_output = timbgpio_gpio_direction_output;
++ gc->set = timbgpio_gpio_set;
++ gc->dbg_show = NULL;
++ gc->base = 0;
++ gc->ngpio = TIMB_NR_GPIOS;
++ gc->can_sleep = 0;
++
++ err = gpiochip_add(gc);
++ if (err)
++ goto err_chipadd;
++
++ platform_set_drvdata(dev, tgpio);
++
++ /* register interrupt */
++ irq = platform_get_irq(dev, 0);
++ if (irq < 0)
++ goto err_get_irq;
++
++ /* clear pending interrupts */
++ iowrite32(0xffffffff, tgpio->membase + TGPIOINT_CLR);
++ iowrite32(0x0, tgpio->membase + TGPIOINT);
++
++ /* request IRQ */
++ err = request_irq(irq, timbgpio_handleinterrupt, IRQF_SHARED,
++ "timb-gpio", tgpio);
++ if (err) {
++ printk(KERN_ERR "timbgpio: Failed to request IRQ\n");
++ goto err_get_irq;
++ }
++
++ return err;
++
++err_get_irq:
++ err = gpiochip_remove(&tgpio->gpio);
++ if (err)
++ printk(KERN_ERR "timbgpio: failed to remove gpio_chip\n");
++err_chipadd:
++ iounmap(tgpio->membase);
++err_ioremap:
++ release_resource(&tgpio->rscr);
++err_request:
++ kfree(tgpio);
++err_mem:
++ printk(KERN_ERR "timberdale: Failed to register GPIOs: %d\n", err);
++
++ return err;
++}
++
++static int timbgpio_remove(struct platform_device *dev)
++{
++ int err;
++ struct timbgpio *tgpio = platform_get_drvdata(dev);
++
++ /* disable interrupts */
++ iowrite32(0x0, tgpio->membase + TGPIOINT);
++
++ free_irq(platform_get_irq(dev, 0), tgpio);
++ err = gpiochip_remove(&tgpio->gpio);
++ if (err)
++ printk(KERN_ERR "timbgpio: failed to remove gpio_chip\n");
++
++ iounmap(tgpio->membase);
++ release_resource(&tgpio->rscr);
++ kfree(tgpio);
++
++ return 0;
++}
++
++static struct platform_driver timbgpio_platform_driver = {
++ .driver = {
++ .name = "timb-gpio",
++ .owner = THIS_MODULE,
++ },
++ .probe = timbgpio_probe,
++ .remove = timbgpio_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbgpio_init(void)
++{
++ return platform_driver_register(&timbgpio_platform_driver);
++}
++
++static void __exit timbgpio_exit(void)
++{
++ platform_driver_unregister(&timbgpio_platform_driver);
++}
++
++module_init(timbgpio_init);
++module_exit(timbgpio_exit);
++
++MODULE_DESCRIPTION("Timberdale GPIO driver");
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Mocean Laboratories");
++MODULE_ALIAS("platform:timb-gpio");
++
+diff -uNr linux-2.6.29-clean/drivers/gpio/timbgpio.h linux-2.6.29/drivers/gpio/timbgpio.h
+--- linux-2.6.29-clean/drivers/gpio/timbgpio.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/gpio/timbgpio.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,48 @@
++/*
++ * timbgpio.h timberdale FPGA GPIO driver defines
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA GPIO
++ */
++
++#ifndef _TIMBGPIO_H_
++#define _TIMBGPIO_H_
++
++#include <linux/mutex.h>
++#include <linux/gpio.h>
++
++#define TIMB_NR_GPIOS 16
++
++#define TGPIOVAL 0
++#define TGPIODIR 0x04
++#define TGPIOINT 0x08
++#define TGPIOINT_STATUS 0x0c
++#define TGPIOINT_PENDING 0x10
++#define TGPIOINT_CLR 0x14
++#define TGPIOFLK 0x18
++#define TGPIOLVL 0x1c
++
++struct timbgpio {
++ void __iomem *membase;
++ struct resource rscr;
++ struct mutex lock; /* mutual exclusion */
++ struct pci_dev *pdev;
++ struct gpio_chip gpio;
++};
++
++#endif
+diff -uNr linux-2.6.29-clean/drivers/i2c/busses/i2c-ocores.c linux-2.6.29/drivers/i2c/busses/i2c-ocores.c
+--- linux-2.6.29-clean/drivers/i2c/busses/i2c-ocores.c 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/i2c/busses/i2c-ocores.c 2009-04-06 13:51:47.000000000 -0700
+@@ -216,6 +216,7 @@
+ struct ocores_i2c_platform_data *pdata;
+ struct resource *res, *res2;
+ int ret;
++ u8 i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+@@ -271,6 +272,10 @@
+ goto add_adapter_failed;
+ }
+
++ /* add in known devices to the bus */
++ for (i = 0; i < pdata->num_devices; i++)
++ i2c_new_device(&i2c->adap, pdata->devices + i);
++
+ return 0;
+
+ add_adapter_failed:
+diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/Kconfig linux-2.6.29/drivers/input/touchscreen/Kconfig
+--- linux-2.6.29-clean/drivers/input/touchscreen/Kconfig 2009-04-01 09:20:23.000000000 -0700
++++ linux-2.6.29/drivers/input/touchscreen/Kconfig 2009-04-06 13:51:47.000000000 -0700
+@@ -397,6 +397,17 @@
+ To compile this driver as a module, choose M here: the
+ module will be called touchit213.
+
++config TOUCHSCREEN_TSC2003
++ tristate "TSC2003 based touchscreens"
++ depends on I2C
++ help
++ Say Y here if you have a TSC2003 based touchscreen.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called tsc2003.
++
+ config TOUCHSCREEN_TSC2007
+ tristate "TSC2007 based touchscreens"
+ depends on I2C
+diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/Makefile linux-2.6.29/drivers/input/touchscreen/Makefile
+--- linux-2.6.29-clean/drivers/input/touchscreen/Makefile 2009-04-01 09:20:23.000000000 -0700
++++ linux-2.6.29/drivers/input/touchscreen/Makefile 2009-04-06 13:51:47.000000000 -0700
+@@ -25,6 +25,7 @@
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
++obj-$(CONFIG_TOUCHSCREEN_TSC2003) += tsc2003.o
+ obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
+ obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
+ obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
+diff -uNr linux-2.6.29-clean/drivers/input/touchscreen/tsc2003.c linux-2.6.29/drivers/input/touchscreen/tsc2003.c
+--- linux-2.6.29-clean/drivers/input/touchscreen/tsc2003.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/input/touchscreen/tsc2003.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,387 @@
++/*
++ * tsc2003.c Driver for TI TSC2003 touch screen controller
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * TI TSC2003
++ *
++ * Inspired by tsc2007, Copyright (c) 2008 MtekVision Co., Ltd.
++ */
++#include <linux/module.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <linux/i2c.h>
++#include <linux/i2c/tsc2007.h>
++#include <linux/kthread.h>
++#include <linux/semaphore.h>
++
++#define TSC2003_DRIVER_NAME "tsc2003"
++
++#define TS_POLL_PERIOD 20 /* ms delay between samples */
++
++#define TSC2003_MEASURE_TEMP0 (0x0 << 4)
++#define TSC2003_MEASURE_AUX (0x2 << 4)
++#define TSC2003_MEASURE_TEMP1 (0x4 << 4)
++#define TSC2003_ACTIVATE_XN (0x8 << 4)
++#define TSC2003_ACTIVATE_YN (0x9 << 4)
++#define TSC2003_ACTIVATE_YP_XN (0xa << 4)
++#define TSC2003_SETUP (0xb << 4)
++#define TSC2003_MEASURE_X (0xc << 4)
++#define TSC2003_MEASURE_Y (0xd << 4)
++#define TSC2003_MEASURE_Z1 (0xe << 4)
++#define TSC2003_MEASURE_Z2 (0xf << 4)
++
++#define TSC2003_POWER_OFF_IRQ_EN (0x0 << 2)
++#define TSC2003_ADC_ON_IRQ_DIS0 (0x1 << 2)
++#define TSC2003_ADC_OFF_IRQ_EN (0x2 << 2)
++#define TSC2003_ADC_ON_IRQ_DIS1 (0x3 << 2)
++
++#define TSC2003_12BIT (0x0 << 1)
++#define TSC2003_8BIT (0x1 << 1)
++
++#define MAX_12BIT ((1 << 12) - 1)
++
++#define ADC_ON_12BIT (TSC2003_12BIT | TSC2003_ADC_ON_IRQ_DIS0)
++
++#define READ_Y (ADC_ON_12BIT | TSC2003_MEASURE_Y)
++#define READ_Z1 (ADC_ON_12BIT | TSC2003_MEASURE_Z1)
++#define READ_Z2 (ADC_ON_12BIT | TSC2003_MEASURE_Z2)
++#define READ_X (ADC_ON_12BIT | TSC2003_MEASURE_X)
++#define PWRDOWN (TSC2003_12BIT | TSC2003_POWER_OFF_IRQ_EN)
++
++struct ts_event {
++ int x;
++ int y;
++ int z1, z2;
++};
++
++struct tsc2003 {
++ struct input_dev *input;
++ char phys[32];
++ struct task_struct *task;
++ struct ts_event tc;
++ struct completion penirq_completion;
++
++ struct i2c_client *client;
++
++ u16 model;
++ u16 x_plate_ohms;
++
++ unsigned pendown;
++};
++
++static inline int tsc2003_xfer(struct tsc2003 *tsc, u8 cmd)
++{
++ s32 data;
++ u16 val;
++
++ data = i2c_smbus_read_word_data(tsc->client, cmd);
++ if (data < 0) {
++ dev_err(&tsc->client->dev, "i2c io error: %d\n", data);
++ return data;
++ }
++
++ /* The protocol and raw data format from i2c interface:
++ * S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
++ * Where DataLow has [D11-D4], DataHigh has [D3-D0 << 4 | Dummy 4bit].
++ */
++ val = swab16(data) >> 4;
++
++ dev_dbg(&tsc->client->dev, "data: 0x%x, val: 0x%x\n", data, val);
++
++ return val;
++}
++
++static void tsc2003_send_event(void *tsc)
++{
++ struct tsc2003 *ts = tsc;
++ struct input_dev *input = ts->input;
++ u32 rt = 0;
++ u16 x, y, z1, z2;
++
++ x = ts->tc.x;
++ y = ts->tc.y;
++ z1 = ts->tc.z1;
++ z2 = ts->tc.z2;
++
++ /* range filtering */
++ if (x == MAX_12BIT)
++ x = 0;
++
++ if (likely(x && z1)) {
++ /* compute touch pressure resistance using equation #1 */
++ rt = z2;
++ rt -= z1;
++ rt *= x;
++ rt *= ts->x_plate_ohms;
++ rt /= z1;
++ rt = (rt + 2047) >> 12;
++ }
++
++ /* Sample found inconsistent by debouncing or pressure is beyond
++ * the maximum. Don't report it to user space, repeat at least
++ * once more the measurement
++ */
++ if (rt > MAX_12BIT)
++ return;
++
++ /* NOTE: We can't rely on the pressure to determine the pen down
++ * state, even this controller has a pressure sensor. The pressure
++ * value can fluctuate for quite a while after lifting the pen and
++ * in some cases may not even settle at the expected value.
++ *
++ * The only safe way to check for the pen up condition is in the
++ * timer by reading the pen signal state (it's a GPIO _and_ IRQ).
++ */
++ if (rt) {
++ if (!ts->pendown) {
++ dev_dbg(&ts->client->dev, "DOWN\n");
++
++ input_report_key(input, BTN_TOUCH, 1);
++ ts->pendown = 1;
++ }
++
++ input_report_abs(input, ABS_X, x);
++ input_report_abs(input, ABS_Y, y);
++ input_report_abs(input, ABS_PRESSURE, rt);
++
++ input_sync(input);
++
++ dev_dbg(&ts->client->dev, "point(%4d,%4d), pressure (%4u)\n",
++ x, y, rt);
++ } else if (ts->pendown) {
++ /* pen up */
++ dev_dbg(&ts->client->dev, "UP\n");
++ input_report_key(input, BTN_TOUCH, 0);
++ input_report_abs(input, ABS_PRESSURE, 0);
++ input_sync(input);
++
++ ts->pendown = 0;
++ }
++}
++
++static int tsc2003_power_off_irq_en(struct tsc2003 *tsc)
++{
++ /* power down */
++ return tsc2003_xfer(tsc, PWRDOWN);
++}
++
++static int tsc2003_read_values(struct tsc2003 *tsc)
++{
++ /* y- still on; turn on only y+ (and ADC) */
++ tsc->tc.y = tsc2003_xfer(tsc, READ_Y);
++ if (tsc->tc.y < 0)
++ return tsc->tc.y;
++
++ /* turn y- off, x+ on, then leave in lowpower */
++ tsc->tc.x = tsc2003_xfer(tsc, READ_X);
++ if (tsc->tc.x < 0)
++ return tsc->tc.x;
++
++ /* turn y+ off, x- on; we'll use formula #1 */
++ tsc->tc.z1 = tsc2003_xfer(tsc, READ_Z1);
++ if (tsc->tc.z1 < 0)
++ return tsc->tc.z1;
++
++ tsc->tc.z2 = tsc2003_xfer(tsc, READ_Z2);
++ if (tsc->tc.z2 < 0)
++ return tsc->tc.z2;
++
++ return 0;
++}
++
++
++static irqreturn_t tsc2003_irq(int irq, void *handle)
++{
++ struct tsc2003 *ts = handle;
++
++ /* do not call the synced version -> deadlock */
++ disable_irq_nosync(irq);
++ /* signal the thread to continue */
++ complete(&ts->penirq_completion);
++
++ return IRQ_HANDLED;
++}
++
++static int tsc2003_thread(void *d)
++{
++ struct tsc2003 *ts = (struct tsc2003 *)d;
++ int ret;
++
++ allow_signal(SIGKILL);
++
++ while (!signal_pending(current)) {
++ /* power down and wait for interrupt */
++ do {
++ /* loop because the I2C bus might be busy */
++ ret = msleep_interruptible(TS_POLL_PERIOD);
++ if (!ret)
++ ret = tsc2003_power_off_irq_en(ts);
++ } while (ret == -EAGAIN && !signal_pending(current));
++
++ if (signal_pending(current))
++ break;
++
++ ret = wait_for_completion_interruptible(&ts->penirq_completion);
++ if (!ret) {
++ int first = 1;
++ /* got IRQ, start poll, until pen is up */
++ while (!ret && !signal_pending(current)
++ && (first || ts->pendown)) {
++ ret = tsc2003_read_values(ts);
++ if (!ret)
++ tsc2003_send_event(ts);
++ ret = msleep_interruptible(TS_POLL_PERIOD);
++ first = 0;
++ }
++
++ /* we re enable the interrupt */
++ if (!signal_pending(current))
++ enable_irq(ts->client->irq);
++ }
++ }
++
++ return 0;
++}
++
++static int tsc2003_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct tsc2003 *ts;
++ struct tsc2007_platform_data *pdata = client->dev.platform_data;
++ struct input_dev *input_dev;
++ int err;
++
++ if (!pdata) {
++ dev_err(&client->dev, "platform data is required!\n");
++ return -EINVAL;
++ }
++
++ if (!i2c_check_functionality(client->adapter,
++ I2C_FUNC_SMBUS_READ_WORD_DATA))
++ return -EIO;
++
++ ts = kzalloc(sizeof(struct tsc2003), GFP_KERNEL);
++ input_dev = input_allocate_device();
++ if (!ts || !input_dev) {
++ err = -ENOMEM;
++ goto err_free_mem;
++ }
++
++ ts->client = client;
++ i2c_set_clientdata(client, ts);
++
++ ts->input = input_dev;
++
++ ts->model = pdata->model;
++ ts->x_plate_ohms = pdata->x_plate_ohms;
++
++ snprintf(ts->phys, sizeof(ts->phys),
++ "%s/input0", dev_name(&client->dev));
++
++ input_dev->name = TSC2003_DRIVER_NAME" Touchscreen";
++ input_dev->phys = ts->phys;
++ input_dev->id.bustype = BUS_I2C;
++
++ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
++ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
++
++ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
++ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
++ input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
++
++ init_completion(&ts->penirq_completion);
++
++ ts->task = kthread_run(tsc2003_thread, ts, TSC2003_DRIVER_NAME);
++ if (IS_ERR(ts->task)) {
++ err = PTR_ERR(ts->task);
++ goto err_free_mem;
++ }
++
++ err = request_irq(client->irq, tsc2003_irq, 0,
++ client->dev.driver->name, ts);
++ if (err < 0) {
++ dev_err(&client->dev, "irq %d busy?\n", client->irq);
++ goto err_free_thread;
++ }
++
++ err = input_register_device(input_dev);
++ if (err)
++ goto err_free_irq;
++
++ dev_info(&client->dev, "registered with irq (%d)\n", client->irq);
++
++ return 0;
++
++ err_free_irq:
++ free_irq(client->irq, ts);
++ err_free_thread:
++ kthread_stop(ts->task);
++ err_free_mem:
++ input_free_device(input_dev);
++ kfree(ts);
++ return err;
++}
++
++static int tsc2003_remove(struct i2c_client *client)
++{
++ struct tsc2003 *ts = i2c_get_clientdata(client);
++
++ free_irq(client->irq, ts);
++ send_sig(SIGKILL, ts->task, 1);
++ kthread_stop(ts->task);
++ input_unregister_device(ts->input);
++ kfree(ts);
++
++ return 0;
++}
++
++static struct i2c_device_id tsc2003_idtable[] = {
++ { TSC2003_DRIVER_NAME, 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, tsc2003_idtable);
++
++static struct i2c_driver tsc2003_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = TSC2003_DRIVER_NAME,
++ .bus = &i2c_bus_type,
++ },
++ .id_table = tsc2003_idtable,
++ .probe = tsc2003_probe,
++ .remove = tsc2003_remove,
++};
++
++static int __init tsc2003_init(void)
++{
++ return i2c_add_driver(&tsc2003_driver);
++}
++
++static void __exit tsc2003_exit(void)
++{
++ i2c_del_driver(&tsc2003_driver);
++}
++
++module_init(tsc2003_init);
++module_exit(tsc2003_exit);
++
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_DESCRIPTION("TSC2003 TouchScreen Driver");
++MODULE_LICENSE("GPL v2");
++
+diff -uNr linux-2.6.29-clean/drivers/media/video/adv7180.c linux-2.6.29/drivers/media/video/adv7180.c
+--- linux-2.6.29-clean/drivers/media/video/adv7180.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/media/video/adv7180.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,361 @@
++/*
++ * adv7180.c Analog Devices ADV7180 video decoder driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/major.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/signal.h>
++#include <linux/types.h>
++#include <linux/io.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <linux/uaccess.h>
++
++#include <linux/i2c-ocores.h>
++#include <linux/platform_device.h>
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/videodev.h>
++#include <linux/video_decoder.h>
++#include <media/v4l2-ioctl.h>
++#include <media/adv7180.h>
++
++
++MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver");
++MODULE_AUTHOR("Mocean Laboratories");
++MODULE_LICENSE("GPL v2");
++
++static inline int adv7180_write(struct i2c_client *client,
++ u8 reg, u8 value)
++{
++ struct adv7180 *decoder = i2c_get_clientdata(client);
++
++ decoder->reg[reg] = value;
++ return i2c_smbus_write_byte_data(client, reg, value);
++}
++
++static inline int adv7180_read(struct i2c_client *client, u8 reg)
++{
++ return i2c_smbus_read_byte_data(client, reg);
++}
++
++static int adv7180_write_block(struct i2c_client *client,
++ const u8 *data, unsigned int len)
++{
++ int ret = -1;
++ u8 reg;
++
++ /* the adv7180 has an autoincrement function, use it if
++ * the adapter understands raw I2C */
++ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
++ /* do raw I2C, not smbus compatible */
++ struct adv7180 *decoder = i2c_get_clientdata(client);
++ u8 block_data[32];
++ int block_len;
++
++ while (len >= 2) {
++ block_len = 0;
++ reg = data[0];
++ block_data[block_len++] = reg;
++ do {
++ block_data[block_len++] =
++ decoder->reg[reg++] = data[1];
++ len -= 2;
++ data += 2;
++ } while (len >= 2 && data[0] == reg &&
++ block_len < 32);
++
++ ret = i2c_master_send(client, block_data, block_len);
++ if (ret < 0)
++ break;
++ }
++ } else {
++ /* do some slow I2C emulation kind of thing */
++ while (len >= 2) {
++ reg = *data++;
++ ret = adv7180_write(client, reg, *data++);
++ if (ret < 0)
++ break;
++
++ len -= 2;
++ }
++ }
++
++ return ret;
++}
++#ifdef CONFIG_MFD_TIMBERDALE
++static irqreturn_t adv7180_irq(int irq, void *dev_id)
++{
++ struct adv7180 *decoder = (struct adv7180 *) dev_id;
++
++ /* Activate access to sub-regs */
++ adv7180_write(decoder->client, ADV7180_ADI_CTRL, ADI_ENABLE);
++
++ /* TODO: implement a real interrupt handler
++ * for now just
++ * clear all four regs
++ */
++ adv7180_write_block(decoder->client, reset_icr, sizeof(reset_icr));
++
++ return IRQ_HANDLED;
++}
++#endif
++static int adv7180_command(struct i2c_client *client,
++ unsigned int cmd, void *arg)
++{
++ struct adv7180 *decoder = i2c_get_clientdata(client);
++ int *iarg = (int *)arg;
++ int status;
++
++ switch (cmd) {
++
++ case DECODER_INIT:
++ adv7180_write(client, 0x0f, 0x80); /* Reset */
++ break;
++
++ case DECODER_GET_CAPABILITIES:
++ {
++ struct video_decoder_capability *cap = arg;
++ cap->flags = VIDEO_DECODER_PAL |
++ VIDEO_DECODER_NTSC |
++ VIDEO_DECODER_SECAM |
++ VIDEO_DECODER_AUTO;
++ cap->inputs = 3;
++ cap->outputs = 1;
++ }
++ break;
++
++ case DECODER_GET_STATUS:
++ {
++ *iarg = 0;
++ status = adv7180_read(client, ADV7180_SR);
++ if ((status & ADV7180_STATUS_PAL))
++ *iarg = (*iarg | DECODER_STATUS_PAL);
++
++ if ((status & ADV7180_STATUS_NTSC))
++ *iarg = (*iarg | DECODER_STATUS_NTSC);
++
++ if ((status & ADV7180_STATUS_SECAM))
++ *iarg = (*iarg | DECODER_STATUS_SECAM);
++ }
++ break;
++
++ case DECODER_SET_NORM:
++ {
++ int v = *(int *) arg;
++ if (decoder->norm != v) {
++ decoder->norm = v;
++ switch (v) {
++ case VIDEO_MODE_NTSC:
++ adv7180_write(client, ADV7180_IN_CTRL, 0x40);
++ break;
++ case VIDEO_MODE_PAL:
++ adv7180_write(client, ADV7180_IN_CTRL, 0x70);
++ break;
++ case VIDEO_MODE_SECAM:
++ adv7180_write(client, ADV7180_IN_CTRL, 0x90);
++ break;
++ case VIDEO_MODE_AUTO:
++ adv7180_write(client, ADV7180_IN_CTRL, 0x00);
++ break;
++ default:
++ return -EPERM;
++ }
++ }
++ }
++ break;
++
++ case DECODER_SET_INPUT:
++ {
++ int v = *(int *) arg;
++ if (decoder->input != v) {
++ decoder->input = v;
++
++ switch (v) {
++ case CVBS:
++ adv7180_write_block(client, init_cvbs_64,
++ sizeof(init_cvbs_64));
++ break;
++ case SVIDEO:
++ adv7180_write_block(client, init_svideo_64,
++ sizeof(init_svideo_64));
++ break;
++ case YPbPr:
++ adv7180_write_block(client, init_ypbpr_64,
++ sizeof(init_ypbpr_64));
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++ }
++ break;
++
++ case DECODER_SET_OUTPUT:
++ {
++ }
++ break;
++
++ case DECODER_ENABLE_OUTPUT:
++ {
++ }
++ break;
++
++ case DECODER_SET_PICTURE:
++ {
++ }
++ break;
++
++ case DECODER_DUMP:
++ {
++ adv7180_write(client, 1, 0x88);
++ }
++ break;
++
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++/* ----------------------------------------------------------------------- */
++
++/*
++ * Generic i2c probe
++ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
++ */
++static unsigned short normal_i2c[] = {
++ 0x40 >> 1, 0x41 >> 1,
++ I2C_ADV7180 >> 1, 0x43 >> 1,
++ I2C_CLIENT_END
++};
++
++I2C_CLIENT_INSMOD;
++
++static int adv7180_detect(struct i2c_client *client, int kind,
++ struct i2c_board_info *info)
++{
++ struct i2c_adapter *adapter = client->adapter;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE
++ | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
++ return -ENODEV;
++
++ /* Is chip alive ? */
++ if (adv7180_read(client, 0x11) != 0x1b)
++ return -ENODEV;
++
++ strlcpy(info->type, DRIVER_NAME, I2C_NAME_SIZE);
++
++ return 0;
++}
++
++static int adv7180_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int err = 0;
++ struct adv7180 *decoder;
++
++ printk(KERN_INFO DRIVER_NAME" chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++
++ decoder = kzalloc(sizeof(struct adv7180), GFP_KERNEL);
++ if (decoder == NULL)
++ return -ENOMEM;
++
++ decoder->norm = VIDEO_MODE_PAL | VIDEO_MODE_NTSC |
++ VIDEO_MODE_SECAM |
++ VIDEO_MODE_AUTO;
++ decoder->input = CVBS;
++ decoder->enable = 1;
++ decoder->client = client;
++ i2c_set_clientdata(client, decoder);
++#ifdef CONFIG_MFD_TIMBERDALE
++ err = request_irq(client->irq, adv7180_irq, 0,
++ client->dev.driver->name, decoder);
++ if (err < 0) {
++ dev_err(&client->dev, "irq %d busy?\n", client->irq);
++ goto err_free_dec;
++ }
++ dev_info(&client->dev, "registered with irq (%d)\n", client->irq);
++#endif
++ adv7180_command(client, DECODER_INIT, NULL); /* Reset */
++
++ return 0;
++#ifdef CONFIG_MFD_TIMBERDALE
++err_free_dec:
++ kfree(decoder);
++
++ return err;
++#endif
++}
++
++static int adv7180_remove(struct i2c_client *client)
++{
++ struct adv7180 *decoder = i2c_get_clientdata(client);
++#ifdef CONFIG_MFD_TIMBERDALE
++ free_irq(client->irq, decoder);
++#endif
++ kfree(decoder);
++ return 0;
++}
++
++/* ----------------------------------------------------------------------- */
++static const struct i2c_device_id adv7180_id[] = {
++ { DRIVER_NAME, 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, adv7180_id);
++
++static struct i2c_driver i2c_driver_adv7180 = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = DRIVER_NAME,
++ .bus = &i2c_bus_type,
++ },
++
++ .id_table = adv7180_id,
++ .probe = adv7180_probe,
++ .remove = adv7180_remove,
++
++ .class = 0xffffffff,
++ .detect = adv7180_detect,
++ .address_data = &addr_data,
++
++ .command = adv7180_command,
++};
++
++static int __init adv7180_init(void)
++{
++ return i2c_add_driver(&i2c_driver_adv7180);
++}
++
++static void __exit adv7180_exit(void)
++{
++ i2c_del_driver(&i2c_driver_adv7180);
++}
++
++module_init(adv7180_init);
++module_exit(adv7180_exit);
+diff -uNr linux-2.6.29-clean/drivers/media/video/Kconfig linux-2.6.29/drivers/media/video/Kconfig
+--- linux-2.6.29-clean/drivers/media/video/Kconfig 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/media/video/Kconfig 2009-04-06 13:51:47.000000000 -0700
+@@ -251,6 +251,15 @@
+
+ comment "Video decoders"
+
++config VIDEO_ADV7180
++ tristate "Analog Devices ADV7180 decoder"
++ depends on VIDEO_V4L1 && I2C
++ ---help---
++ Support for the Analog Devices ADV7180 video decoder.
++
++ To compile this driver as a module, choose M here: the
++ module will be called adv7180.
++
+ config VIDEO_BT819
+ tristate "BT819A VideoStream decoder"
+ depends on VIDEO_V4L1 && I2C
+@@ -800,6 +809,12 @@
+ ---help---
+ This is a v4l2 driver for the TI OMAP2 camera capture interface
+
++config VIDEO_TIMBERDALE
++ tristate "Support for timberdale Video In/LogiWIN"
++ depends on VIDEO_V4L2 && MFD_TIMBERDALE_DMA
++ ---help---
++ Add support for the Video In peripheral of the timberdale FPGA.
++
+ #
+ # USB Multimedia device configuration
+ #
+diff -uNr linux-2.6.29-clean/drivers/media/video/Makefile linux-2.6.29/drivers/media/video/Makefile
+--- linux-2.6.29-clean/drivers/media/video/Makefile 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/media/video/Makefile 2009-04-06 13:51:47.000000000 -0700
+@@ -52,6 +52,7 @@
+ obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
+ obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
+ obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
++obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
+ obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
+ obj-$(CONFIG_VIDEO_BT819) += bt819.o
+ obj-$(CONFIG_VIDEO_BT856) += bt856.o
+@@ -148,6 +149,8 @@
+
+ obj-$(CONFIG_VIDEO_AU0828) += au0828/
+
++obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
++
+ obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
+
+ EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+diff -uNr linux-2.6.29-clean/drivers/media/video/timblogiw.c linux-2.6.29/drivers/media/video/timblogiw.c
+--- linux-2.6.29-clean/drivers/media/video/timblogiw.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/media/video/timblogiw.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,930 @@
++/*
++ * timblogiw.c timberdale FPGA LogiWin Video In driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA LogiWin Video In
++ */
++
++#include <linux/list.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/dma-mapping.h>
++#include <media/v4l2-common.h>
++#include <media/v4l2-ioctl.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include "timblogiw.h"
++#include <linux/mfd/timbdma.h>
++
++
++#define TIMBLOGIW_CTRL 0x40
++
++#define TIMBLOGIW_H_SCALE 0x20
++#define TIMBLOGIW_V_SCALE 0x28
++
++#define TIMBLOGIW_X_CROP 0x58
++#define TIMBLOGIW_Y_CROP 0x60
++
++#define TIMBLOGIW_W_CROP 0x00
++#define TIMBLOGIW_H_CROP 0x08
++
++#define TIMBLOGIW_VERSION_CODE 0x02
++
++#define TIMBLOGIW_FRAME 0x10
++#define TIMBLOGIW_DROP 0x20
++
++#define TIMBLOGIW_BUF 0x04
++#define TIMBLOGIW_TBI 0x2c
++#define TIMBLOGIW_BPL 0x30
++
++#define dbg(...)
++
++const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
++ {
++ .v4l2_id = V4L2_STD_PAL,
++ .name = "PAL",
++ .swidth = 720,
++ .sheight = 576
++ },
++ {
++ .v4l2_id = V4L2_STD_NTSC_M,
++ .name = "NTSC",
++ .swidth = 720,
++ .sheight = 480
++ }
++};
++
++static void timblogiw_handleframe(unsigned long arg)
++{
++ struct timblogiw_frame *f;
++ struct timblogiw *lw = (struct timblogiw *)arg;
++
++ spin_lock_bh(&lw->queue_lock);
++ if (!list_empty(&lw->inqueue)) {
++ /* put the entry in the outqueue */
++ f = list_entry(lw->inqueue.next, struct timblogiw_frame, frame);
++
++ /* copy data from the DMA buffer */
++ memcpy(f->bufmem, lw->dma.filled->buf, f->buf.length);
++ /* buffer consumed */
++ lw->dma.filled = NULL;
++
++ do_gettimeofday(&f->buf.timestamp);
++ f->buf.sequence = ++lw->frame_count;
++ f->buf.field = V4L2_FIELD_NONE;
++ f->state = F_DONE;
++ f->buf.bytesused = lw->frame_size;
++ list_move_tail(&f->frame, &lw->outqueue);
++ /* wake up any waiter */
++ wake_up(&lw->wait_frame);
++ }
++ spin_unlock_bh(&lw->queue_lock);
++}
++
++static int timblogiw_isr(u32 flag, void *pdev)
++{
++ struct timblogiw *lw = (struct timblogiw *)pdev;
++
++ if (!lw->dma.filled) {
++ /* no stored transfer so far, store this, and flip to next */
++ lw->dma.filled = lw->dma.transfer + lw->dma.curr;
++ lw->dma.curr = !lw->dma.curr;
++ }
++
++ if (lw->stream == STREAM_ON)
++ timb_start_dma(DMA_IRQ_VIDEO_RX,
++ lw->dma.transfer[lw->dma.curr].handle, lw->frame_size,
++ lw->bytesperline);
++
++ if (flag & DMA_IRQ_VIDEO_DROP)
++ dbg("%s: frame dropped\n", __func__);
++ if (flag & DMA_IRQ_VIDEO_RX) {
++ dbg("%s: frame RX\n", __func__);
++ tasklet_schedule(&lw->tasklet);
++ }
++ return 0;
++}
++
++static void timblogiw_empty_framequeues(struct timblogiw *lw)
++{
++ u32 i;
++
++ dbg("%s\n", __func__);
++
++ INIT_LIST_HEAD(&lw->inqueue);
++ INIT_LIST_HEAD(&lw->outqueue);
++
++ for (i = 0; i < lw->num_frames; i++) {
++ lw->frame[i].state = F_UNUSED;
++ lw->frame[i].buf.bytesused = 0;
++ }
++}
++
++u32 timblogiw_request_buffers(struct timblogiw *lw, u32 count)
++{
++ /* needs to be page aligned cause the */
++ /* buffers can be mapped individually! */
++ const size_t imagesize = PAGE_ALIGN(lw->frame_size);
++ void *buff = NULL;
++ u32 i;
++
++ dbg("%s - request of %i buffers of size %zi\n",
++ __func__, count, lw->frame_size);
++
++ lw->dma.transfer[0].buf = pci_alloc_consistent(lw->dev, imagesize,
++ &lw->dma.transfer[0].handle);
++ lw->dma.transfer[1].buf = pci_alloc_consistent(lw->dev, imagesize,
++ &lw->dma.transfer[1].handle);
++ if ((lw->dma.transfer[0].buf == NULL) ||
++ (lw->dma.transfer[1].buf == NULL)) {
++ printk(KERN_ALERT "alloc failed\n");
++ if (lw->dma.transfer[0].buf != NULL)
++ pci_free_consistent(lw->dev, imagesize,
++ lw->dma.transfer[0].buf,
++ lw->dma.transfer[0].handle);
++ if (lw->dma.transfer[1].buf != NULL)
++ pci_free_consistent(lw->dev, imagesize,
++ lw->dma.transfer[1].buf,
++ lw->dma.transfer[1].handle);
++ return 0;
++ }
++
++ if (count > TIMBLOGIW_NUM_FRAMES)
++ count = TIMBLOGIW_NUM_FRAMES;
++
++ lw->num_frames = count;
++ while (lw->num_frames > 0) {
++ buff = vmalloc_32(lw->num_frames * imagesize);
++ if (buff) {
++ memset(buff, 0, lw->num_frames * imagesize);
++ break;
++ }
++ lw->num_frames--;
++ }
++
++ for (i = 0; i < lw->num_frames; i++) {
++ lw->frame[i].bufmem = buff + i * imagesize;
++ lw->frame[i].buf.index = i;
++ lw->frame[i].buf.m.offset = i * imagesize;
++ lw->frame[i].buf.length = lw->frame_size;
++ lw->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ lw->frame[i].buf.sequence = 0;
++ lw->frame[i].buf.field = V4L2_FIELD_NONE;
++ lw->frame[i].buf.memory = V4L2_MEMORY_MMAP;
++ lw->frame[i].buf.flags = 0;
++ }
++
++ lw->dma.curr = 0;
++ lw->dma.filled = NULL;
++ return lw->num_frames;
++}
++
++void timblogiw_release_buffers(struct timblogiw *lw)
++{
++ dbg("%s\n", __func__);
++
++ if (lw->frame[0].bufmem != NULL) {
++ vfree(lw->frame[0].bufmem);
++ lw->frame[0].bufmem = NULL;
++ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
++ pci_free_consistent(lw->dev, lw->frame_size,
++ lw->dma.transfer[0].buf, lw->dma.transfer[0].handle);
++ pci_free_consistent(lw->dev, lw->frame_size,
++ lw->dma.transfer[1].buf, lw->dma.transfer[1].handle);
++ }
++}
++
++/* IOCTL functions */
++
++static int timblogiw_g_fmt(struct timblogiw *lw, struct v4l2_format *format)
++{
++ dbg("%s -\n", __func__);
++
++ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ format->fmt.pix.width = lw->width;
++ format->fmt.pix.height = lw->height;
++ format->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
++ format->fmt.pix.bytesperline = lw->bytesperline;
++ format->fmt.pix.sizeimage = lw->frame_size;
++ format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
++ format->fmt.pix.field = V4L2_FIELD_NONE;
++ return 0;
++}
++
++static int timblogiw_s_fmt(struct timblogiw *lw, struct v4l2_format *format)
++{
++ struct v4l2_pix_format *pix = &format->fmt.pix;
++ dbg("%s - type: %d\n", __func__, format->type);
++
++ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ if ((lw->height != pix->height) || (lw->width != lw->width))
++ return -EINVAL;
++
++ if (format->fmt.pix.field != V4L2_FIELD_NONE)
++ return -EINVAL;
++
++ dbg("%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
++ "bytes per line %d, size image: %d, colorspace: %d\n",
++ __func__,
++ pix->width, pix->height, pix->pixelformat, pix->field,
++ pix->bytesperline, pix->sizeimage, pix->colorspace);
++
++ return 0;
++}
++
++static int timblogiw_querycap(struct timblogiw *lw,
++ struct v4l2_capability *cap)
++{
++ memset(cap, 0, sizeof(*cap));
++ strncpy(cap->card, "Timberdale Video", sizeof(cap->card)-1);
++ strncpy(cap->driver, "Timblogiw", sizeof(cap->card)-1);
++ cap->version = TIMBLOGIW_VERSION_CODE;
++ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
++ V4L2_CAP_STREAMING;
++
++ return 0;
++}
++
++static int timblogiw_enum_fmt(struct timblogiw *lw, struct v4l2_fmtdesc *fmt)
++{
++ dbg("%s - VIDIOC_ENUM_FMT\n", __func__);
++
++ if (fmt->index != 0)
++ return -EINVAL;
++ memset(fmt, 0, sizeof(*fmt));
++ fmt->index = 0;
++ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ strncpy(fmt->description, "4:2:2, packed, YUYV",
++ sizeof(fmt->description)-1);
++ fmt->pixelformat = V4L2_PIX_FMT_YUYV;
++ memset(fmt->reserved, 0, sizeof(fmt->reserved));
++
++ return 0;
++}
++
++static int timblogiw_reqbufs(struct timblogiw *lw,
++ struct v4l2_requestbuffers *rb)
++{
++ if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ rb->memory != V4L2_MEMORY_MMAP)
++ return -EINVAL;
++
++ timblogiw_empty_framequeues(lw);
++
++ timblogiw_release_buffers(lw);
++ if (rb->count)
++ rb->count = timblogiw_request_buffers(lw, rb->count);
++
++ dbg("%s - VIDIOC_REQBUFS: io method is mmap. num bufs %i\n",
++ __func__, rb->count);
++
++ return 0;
++}
++
++static int timblogiw_querybuf(struct timblogiw *lw, struct v4l2_buffer *b)
++{
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ b->index >= lw->num_frames)
++ return -EINVAL;
++
++ memcpy(b, &lw->frame[b->index].buf, sizeof(*b));
++
++ if (lw->frame[b->index].vma_use_count)
++ b->flags |= V4L2_BUF_FLAG_MAPPED;
++
++ if (lw->frame[b->index].state == F_DONE)
++ b->flags |= V4L2_BUF_FLAG_DONE;
++ else if (lw->frame[b->index].state != F_UNUSED)
++ b->flags |= V4L2_BUF_FLAG_QUEUED;
++
++ return 0;
++}
++
++static int timblogiw_qbuf(struct timblogiw *lw, struct v4l2_buffer *b)
++{
++ unsigned long lock_flags;
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ b->index >= lw->num_frames)
++ return -EINVAL;
++
++ if (lw->frame[b->index].state != F_UNUSED)
++ return -EAGAIN;
++
++ if (b->memory != V4L2_MEMORY_MMAP)
++ return -EINVAL;
++
++ lw->frame[b->index].state = F_QUEUED;
++
++ spin_lock_irqsave(&lw->queue_lock, lock_flags);
++ list_add_tail(&lw->frame[b->index].frame, &lw->inqueue);
++ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
++
++ return 0;
++}
++
++static int timblogiw_dqbuf(struct timblogiw *lw, struct file *file,
++ struct v4l2_buffer *b)
++{
++ struct timblogiw_frame *f;
++ unsigned long lock_flags;
++ int ret = 0;
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ dbg("%s - VIDIOC_DQBUF, illegal buf type!\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ if (list_empty(&lw->outqueue)) {
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ ret = wait_event_interruptible(lw->wait_frame,
++ !list_empty(&lw->outqueue));
++ if (ret)
++ return ret;
++ }
++
++ spin_lock_irqsave(&lw->queue_lock, lock_flags);
++ f = list_entry(lw->outqueue.next,
++ struct timblogiw_frame, frame);
++ list_del(lw->outqueue.next);
++ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
++
++ f->state = F_UNUSED;
++ memcpy(b, &f->buf, sizeof(*b));
++
++ if (f->vma_use_count)
++ b->flags |= V4L2_BUF_FLAG_MAPPED;
++
++ return 0;
++}
++
++static int timblogiw_enumstd(struct timblogiw *lw, struct v4l2_standard *std)
++{
++ if (std->index != 0)
++ return -EINVAL;
++
++ memset(std, 0, sizeof(*std));
++ std->index = 0;
++
++ std->id = V4L2_STD_PAL;
++ strncpy(std->name, "PAL", sizeof(std->name)-1);
++
++ return 0;
++}
++
++static int timblogiw_g_std(struct timblogiw *lw, v4l2_std_id *std)
++{
++ *std = V4L2_STD_PAL;
++ return 0;
++}
++
++static int timblogiw_s_std(struct timblogiw *lw, v4l2_std_id *std)
++{
++ if (!(*std & V4L2_STD_PAL))
++ return -EINVAL;
++ return 0;
++}
++
++static int timblogiw_enuminput(struct timblogiw *lw, struct v4l2_input *inp)
++{
++ if (inp->index != 0)
++ return -EINVAL;
++
++ memset(inp, 0, sizeof(*inp));
++ inp->index = 0;
++
++ strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
++ inp->type = V4L2_INPUT_TYPE_CAMERA;
++ inp->std = V4L2_STD_ALL;
++
++ return 0;
++}
++
++static int timblogiw_g_input(struct timblogiw *lw, int *input)
++{
++ *input = 0;
++
++ return 0;
++}
++
++static int timblogiw_s_input(struct timblogiw *lw, int *input)
++{
++ if (*input != 0)
++ return -EINVAL;
++ return 0;
++}
++
++static int timblogiw_streamon(struct timblogiw *lw, int *type)
++{
++ struct timblogiw_frame *f;
++
++ if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ dbg("%s - No capture device\n", __func__);
++ return -EINVAL;
++ }
++
++ if (list_empty(&lw->inqueue)) {
++ dbg("%s - inqueue is empty\n", __func__);
++ return -EINVAL;
++ }
++
++ if (lw->stream == STREAM_ON)
++ return 0;
++
++ lw->stream = STREAM_ON;
++
++ f = list_entry(lw->inqueue.next,
++ struct timblogiw_frame, frame);
++
++ dbg("%s - f size: %d, bpr: %d, dma addr: %x\n", __func__,
++ lw->frame_size, lw->bytesperline,
++ (unsigned int)lw->dma.transfer[lw->dma.curr].handle);
++ timb_start_dma(DMA_IRQ_VIDEO_RX,
++ lw->dma.transfer[lw->dma.curr].handle,
++ lw->frame_size, lw->bytesperline);
++
++ return 0;
++}
++
++static int timblogiw_streamoff(struct timblogiw *lw, int *type)
++{
++ if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ if (lw->stream == STREAM_ON) {
++ unsigned long lock_flags;
++ spin_lock_irqsave(&lw->queue_lock, lock_flags);
++ timb_stop_dma(DMA_IRQ_VIDEO_RX);
++ lw->stream = STREAM_OFF;
++ spin_unlock_irqrestore(&lw->queue_lock, lock_flags);
++ }
++ timblogiw_empty_framequeues(lw);
++
++ return 0;
++}
++
++static int timblogiw_querystd(struct timblogiw *lw, v4l2_std_id *std)
++{
++ /* TODO: Ask encoder */
++ *std = V4L2_STD_PAL;
++ return 0;
++}
++
++static int timblogiw_enum_framsizes(struct timblogiw *lw,
++ struct v4l2_frmsizeenum *fsize)
++{
++ if ((fsize->index != 0) ||
++ (fsize->pixel_format != V4L2_PIX_FMT_YUYV))
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = lw->width;
++ fsize->discrete.height = lw->height;
++
++ return 0;
++}
++
++static int timblogiw_g_parm(struct timblogiw *lw, struct v4l2_streamparm *sp)
++{
++ if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ sp->parm.capture.extendedmode = 0;
++ sp->parm.capture.readbuffers = lw->num_frames;
++ return 0;
++}
++
++/*******************************
++ * Device Operations functions *
++ *******************************/
++
++static int timblogiw_open(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dbg("%s -\n", __func__);
++
++ mutex_init(&lw->fileop_lock);
++ spin_lock_init(&lw->queue_lock);
++ init_waitqueue_head(&lw->wait_frame);
++
++ mutex_lock(&lw->lock);
++
++ lw->width = 720; /* TODO: Should depend on tv norm */
++ lw->height = 576;
++ lw->frame_size = lw->width * lw->height * 2;
++ lw->bytesperline = lw->width * 2;
++
++ file->private_data = lw;
++ lw->stream = STREAM_OFF;
++ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
++
++ timblogiw_empty_framequeues(lw);
++
++ timb_set_dma_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP,
++ timblogiw_isr, (void *)lw);
++
++ mutex_unlock(&lw->lock);
++
++ return 0;
++}
++
++static int timblogiw_close(struct file *file)
++{
++ struct timblogiw *lw = file->private_data;
++
++ dbg("%s - entry\n", __func__);
++
++ mutex_lock(&lw->lock);
++
++ timb_stop_dma(DMA_IRQ_VIDEO_RX);
++ timb_set_dma_interruptcb(DMA_IRQ_VIDEO_RX | DMA_IRQ_VIDEO_DROP, NULL,
++ NULL);
++ timblogiw_release_buffers(lw);
++
++ mutex_unlock(&lw->lock);
++ return 0;
++}
++
++static ssize_t timblogiw_read(struct file *file, char __user *data,
++ size_t count, loff_t *ppos)
++{
++ dbg("%s - read request\n", __func__);
++ return -EINVAL;
++}
++
++static void timblogiw_vm_open(struct vm_area_struct *vma)
++{
++ struct timblogiw_frame *f = vma->vm_private_data;
++ f->vma_use_count++;
++}
++
++static void timblogiw_vm_close(struct vm_area_struct *vma)
++{
++ struct timblogiw_frame *f = vma->vm_private_data;
++ f->vma_use_count--;
++}
++
++static struct vm_operations_struct timblogiw_vm_ops = {
++ .open = timblogiw_vm_open,
++ .close = timblogiw_vm_close,
++};
++
++static int timblogiw_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start;
++ void *pos;
++ u32 i;
++ int ret = -EINVAL;
++
++ struct timblogiw *lw = filp->private_data;
++ dbg("%s\n", __func__);
++
++ if (mutex_lock_interruptible(&lw->fileop_lock))
++ return -ERESTARTSYS;
++
++ if (!(vma->vm_flags & VM_WRITE) ||
++ size != PAGE_ALIGN(lw->frame[0].buf.length))
++ goto error_unlock;
++
++ for (i = 0; i < lw->num_frames; i++)
++ if ((lw->frame[i].buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
++ break;
++
++ if (i == lw->num_frames) {
++ dbg("%s - user supplied mapping address is out of range\n",
++ __func__);
++ goto error_unlock;
++ }
++
++ vma->vm_flags |= VM_IO;
++ vma->vm_flags |= VM_RESERVED; /* Do not swap out this VMA */
++
++ pos = lw->frame[i].bufmem;
++ while (size > 0) { /* size is page-aligned */
++ if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
++ dbg("%s - vm_insert_page failed\n", __func__);
++ ret = -EAGAIN;
++ goto error_unlock;
++ }
++ start += PAGE_SIZE;
++ pos += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++
++ vma->vm_ops = &timblogiw_vm_ops;
++ vma->vm_private_data = &lw->frame[i];
++ timblogiw_vm_open(vma);
++ ret = 0;
++
++error_unlock:
++ mutex_unlock(&lw->fileop_lock);
++ return ret;
++}
++
++static long
++timblogiw_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct timblogiw *lw = file->private_data;
++
++ switch (cmd) {
++
++ case VIDIOC_QUERYCAP:
++ {
++ dbg("%s - VIDIOC_QUERYCAP\n", __func__);
++ return timblogiw_querycap(lw, (struct v4l2_capability *)arg);
++ }
++
++ case VIDIOC_ENUM_FMT:
++ {
++ dbg("%s - VIDIOC_ENUM_FMT\n", __func__);
++ return timblogiw_enum_fmt(lw, (struct v4l2_fmtdesc *)arg);
++ }
++
++ case VIDIOC_G_FMT:
++ {
++ dbg("%s - VIDIOC_G_FMT\n", __func__);
++ return timblogiw_g_fmt(lw, (struct v4l2_format *) arg);
++ }
++
++ case VIDIOC_TRY_FMT:
++ case VIDIOC_S_FMT:
++ {
++ dbg("%s - VIDIOC_S_FMT\n", __func__);
++ return timblogiw_s_fmt(lw, (struct v4l2_format *)arg);
++ }
++
++ case VIDIOC_REQBUFS:
++ {
++ dbg("%s - VIDIOC_REQBUFS\n", __func__);
++ return timblogiw_reqbufs(lw, (struct v4l2_requestbuffers *)arg);
++ }
++
++ case VIDIOC_QUERYBUF:
++ {
++ dbg("%s - VIDIOC_QUERYBUF\n", __func__);
++ return timblogiw_querybuf(lw, (struct v4l2_buffer *)arg);
++ }
++
++ case VIDIOC_QBUF:
++ {
++ return timblogiw_qbuf(lw, (struct v4l2_buffer *)arg);
++ }
++
++ case VIDIOC_DQBUF:
++ {
++ return timblogiw_dqbuf(lw, file, (struct v4l2_buffer *)arg);
++ }
++
++ case VIDIOC_ENUMSTD:
++ {
++ dbg("%s - VIDIOC_ENUMSTD\n", __func__);
++ return timblogiw_enumstd(lw, (struct v4l2_standard *)arg);
++ }
++
++ case VIDIOC_G_STD:
++ {
++ dbg("%s - VIDIOC_G_STD\n", __func__);
++ return timblogiw_g_std(lw, (v4l2_std_id *)arg);
++ }
++
++ case VIDIOC_S_STD:
++ {
++ dbg("%s - VIDIOC_S_STD\n", __func__);
++ return timblogiw_s_std(lw, (v4l2_std_id *)arg);
++ }
++
++ case VIDIOC_ENUMINPUT:
++ {
++ dbg("%s - VIDIOC_ENUMINPUT\n", __func__);
++ return timblogiw_enuminput(lw, (struct v4l2_input *)arg);
++ }
++
++ case VIDIOC_G_INPUT:
++ {
++ dbg("%s - VIDIOC_G_INPUT\n", __func__);
++ return timblogiw_g_input(lw, (int *)arg);
++ }
++
++ case VIDIOC_S_INPUT:
++ {
++ dbg("%s - VIDIOC_S_INPUT\n", __func__);
++ return timblogiw_s_input(lw, (int *)arg);
++ }
++
++ case VIDIOC_STREAMON:
++ {
++ dbg("%s - VIDIOC_STREAMON\n", __func__);
++ return timblogiw_streamon(lw, (int *)arg);
++ }
++
++ case VIDIOC_STREAMOFF:
++ {
++ dbg("%s - VIDIOC_STREAMOFF\n", __func__);
++ return timblogiw_streamoff(lw, (int *)arg);
++ }
++
++ case VIDIOC_QUERYSTD:
++ {
++ dbg("%s - VIDIOC_QUERYSTD\n", __func__);
++ return timblogiw_querystd(lw, (v4l2_std_id *)arg);
++ }
++
++ case VIDIOC_ENUM_FRAMESIZES:
++ {
++ dbg("%s - VIDIOC_ENUM_FRAMESIZES\n", __func__);
++ return timblogiw_enum_framsizes(lw,
++ (struct v4l2_frmsizeenum *)arg);
++ }
++
++ case VIDIOC_G_PARM:
++ {
++ dbg("%s - VIDIOC_G_PARM\n", __func__);
++ return timblogiw_g_parm(lw, (struct v4l2_streamparm *)arg);
++ }
++
++ default:
++ {
++ dbg("%s Unknown command, dir: %x, type: %x, nr: %x, size: %x\n",
++ __func__,
++ _IOC_DIR(cmd),
++ _IOC_TYPE(cmd),
++ _IOC_NR(cmd),
++ _IOC_SIZE(cmd));
++ break;
++ }
++ }
++
++ return -EINVAL;
++}
++
++void timblogiw_vdev_release(struct video_device *vdev)
++{
++ kfree(vdev);
++}
++
++static const struct v4l2_file_operations timblogiw_fops = {
++ .owner = THIS_MODULE,
++ .open = timblogiw_open,
++ .release = timblogiw_close,
++ .ioctl = timblogiw_ioctl,
++ .mmap = timblogiw_mmap,
++ .read = timblogiw_read,
++};
++
++static const struct video_device timblogiw_template = {
++ .name = TIMBLOGIWIN_NAME,
++ .fops = &timblogiw_fops,
++ .release = &timblogiw_vdev_release,
++ .minor = -1
++};
++
++static int timblogiw_probe(struct platform_device *dev)
++{
++ int err;
++ struct timblogiw *lw;
++ struct resource *iomem;
++
++ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ lw = kzalloc(sizeof(*lw), GFP_KERNEL);
++ if (!lw) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ /* find the PCI device from the parent... */
++ if (!dev->dev.parent) {
++ printk(KERN_ERR "timblogwi: No parent device found??\n");
++ err = -ENODEV;
++ goto err_mem;
++ }
++
++ lw->dev = container_of(dev->dev.parent, struct pci_dev, dev);
++
++ mutex_init(&lw->lock);
++
++ lw->video_dev = video_device_alloc();
++ if (!lw->video_dev) {
++ err = -ENOMEM;
++ goto err_video_req;
++ }
++ *lw->video_dev = timblogiw_template;
++
++ err = video_register_device(lw->video_dev, VFL_TYPE_GRABBER, 0);
++ if (err) {
++ video_device_release(lw->video_dev);
++ printk(KERN_ALERT "Error reg video\n");
++ goto err_video_req;
++ }
++
++ tasklet_init(&lw->tasklet, timblogiw_handleframe, (unsigned long)lw);
++
++ if (!request_mem_region(iomem->start, resource_size(iomem),
++ "timb-video")) {
++ err = -EBUSY;
++ goto err_request;
++ }
++
++ lw->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!lw->membase) {
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ platform_set_drvdata(dev, lw);
++ video_set_drvdata(lw->video_dev, lw);
++
++ return 0;
++
++err_ioremap:
++ release_mem_region(iomem->start, resource_size(iomem));
++err_request:
++ if (-1 != lw->video_dev->minor)
++ video_unregister_device(lw->video_dev);
++ else
++ video_device_release(lw->video_dev);
++err_video_req:
++ kfree(lw);
++err_mem:
++ printk(KERN_ERR
++ "timberdale: Failed to register Timberdale Video In: %d\n",
++ err);
++
++ return err;
++}
++
++static int timblogiw_remove(struct platform_device *dev)
++{
++ struct timblogiw *lw = platform_get_drvdata(dev);
++ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++
++ if (-1 != lw->video_dev->minor)
++ video_unregister_device(lw->video_dev);
++ else
++ video_device_release(lw->video_dev);
++
++ tasklet_kill(&lw->tasklet);
++ iounmap(lw->membase);
++ release_mem_region(iomem->start, resource_size(iomem));
++ kfree(lw);
++
++ return 0;
++}
++
++static struct platform_driver timblogiw_platform_driver = {
++ .driver = {
++ .name = "timb-video",
++ .owner = THIS_MODULE,
++ },
++ .probe = timblogiw_probe,
++ .remove = timblogiw_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timblogiw_init(void)
++{
++ return platform_driver_register(&timblogiw_platform_driver);
++}
++
++static void __exit timblogiw_exit(void)
++{
++ platform_driver_unregister(&timblogiw_platform_driver);
++}
++
++module_init(timblogiw_init);
++module_exit(timblogiw_exit);
++
++MODULE_DESCRIPTION("Timberdale Video In driver");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:timb-video");
++
+diff -uNr linux-2.6.29-clean/drivers/media/video/timblogiw.h linux-2.6.29/drivers/media/video/timblogiw.h
+--- linux-2.6.29-clean/drivers/media/video/timblogiw.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/media/video/timblogiw.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,95 @@
++/*
++ * timblogiw.h timberdale FPGA LogiWin Video In driver defines
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA LogiWin Video In
++ */
++
++#ifndef _TIMBLOGIW_H
++#define _TIMBLOGIW_H
++
++#include <linux/interrupt.h>
++
++#define TIMBLOGIWIN_NAME "Timberdale Video-In"
++
++#define TIMBLOGIW_NUM_FRAMES 10
++
++
++enum timblogiw_stream_state {
++ STREAM_OFF,
++ STREAM_ON,
++};
++
++enum timblogiw_frame_state {
++ F_UNUSED = 0,
++ F_QUEUED,
++ F_GRABBING,
++ F_DONE,
++ F_ERROR,
++};
++
++struct timblogiw_frame {
++ void *bufmem;
++ struct v4l2_buffer buf;
++ enum timblogiw_frame_state state;
++ struct list_head frame;
++ unsigned long vma_use_count;
++};
++
++struct timblogiw_tvnorm {
++ int v4l2_id;
++ char *name;
++ u16 swidth;
++ u16 sheight;
++};
++
++
++struct timbdma_transfer {
++ dma_addr_t handle;
++ void *buf;
++};
++
++struct timbdma_control {
++ struct timbdma_transfer transfer[2];
++ struct timbdma_transfer *filled;
++ int curr;
++};
++
++struct timblogiw {
++ struct i2c_client *decoder;
++ struct timblogiw_frame frame[TIMBLOGIW_NUM_FRAMES];
++ int num_frames;
++ unsigned int frame_count;
++ struct list_head inqueue, outqueue;
++ spinlock_t queue_lock; /* mutual exclusion */
++ enum timblogiw_stream_state stream;
++ struct video_device *video_dev;
++ struct mutex lock, fileop_lock;
++ wait_queue_head_t wait_frame;
++ int width;
++ int height;
++ u32 frame_size;
++ int bytesperline;
++ struct pci_dev *dev;
++ struct timbdma_control dma;
++ void __iomem *membase;
++ struct tasklet_struct tasklet;
++};
++
++#endif /* _TIMBLOGIW_H */
++
+diff -uNr linux-2.6.29-clean/drivers/mfd/Kconfig linux-2.6.29/drivers/mfd/Kconfig
+--- linux-2.6.29-clean/drivers/mfd/Kconfig 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/mfd/Kconfig 2009-04-06 13:51:47.000000000 -0700
+@@ -240,6 +240,27 @@
+ Say yes here if you want to include support GPIO for pins on
+ the PCF50633 chip.
+
++config MFD_TIMBERDALE
++ bool "Support for Timberdale"
++ select MFD_CORE
++ ---help---
++ This is the core driver for the timberdale FPGA. This device is a
++ multifunctional device which may provide numerous interfaces.
++
++config MFD_TIMBERDALE_DMA
++ tristate "Support for timberdale DMA"
++ depends on MFD_TIMBERDALE
++ ---help---
++ Add support for the DMA block inside the timberdale FPGA. This is
++ needed to do DMA transfers directly to some of the blocks in the FPGA
++
++config MFD_TIMBERDALE_I2S
++ tristate "Support for timberdale I2S bus"
++ depends on MFD_TIMBERDALE
++ ---help---
++ Add support for the I2S bus handled by timberdale FPGA.
++ I2S RX and TX instances are then available for other devices to make use of.
++
+ endmenu
+
+ menu "Multimedia Capabilities Port drivers"
+diff -uNr linux-2.6.29-clean/drivers/mfd/Makefile linux-2.6.29/drivers/mfd/Makefile
+--- linux-2.6.29-clean/drivers/mfd/Makefile 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/mfd/Makefile 2009-04-06 13:51:47.000000000 -0700
+@@ -40,4 +40,8 @@
+
+ obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
+ obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
+-obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
+\ No newline at end of file
++obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
++
++obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
++obj-$(CONFIG_MFD_TIMBERDALE_DMA) += timbdma.o
++obj-$(CONFIG_MFD_TIMBERDALE_I2S) += timbi2s.o
+diff -uNr linux-2.6.29-clean/drivers/mfd/timbdma.c linux-2.6.29/drivers/mfd/timbdma.c
+--- linux-2.6.29-clean/drivers/mfd/timbdma.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/mfd/timbdma.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,301 @@
++/*
++ * timbdma.c timberdale FPGA DMA driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA DMA engine
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++
++#include <linux/mfd/timbdma.h>
++
++static struct timbdma_dev *self_g;
++
++static irqreturn_t timbdma_handleinterrupt(int irq, void *devid)
++{
++ struct timbdma_dev *dev = (struct timbdma_dev *)devid;
++ int ipr;
++ int i;
++
++ ipr = ioread32(dev->membase + timbdma_ctrlmap_TIMBPEND);
++
++ /* ack */
++ iowrite32(ipr, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
++
++ /* call the callbacks */
++ for (i = 0; i < DMA_IRQS; i++) {
++ int mask = 1 << i;
++ if ((ipr & mask) && dev->callbacks[i])
++ dev->callbacks[i](mask, dev->callback_data[i]);
++ }
++
++ if (ipr)
++ return IRQ_HANDLED;
++ else
++ return IRQ_NONE;
++}
++
++
++void timb_start_dma(u32 flag, unsigned long buf, int len, int bytes_per_row)
++{
++ int i;
++ unsigned long irqflags;
++ struct timbdma_dev *dev = self_g;
++
++ spin_lock_irqsave(&dev->lock, irqflags);
++
++ /* now enable the DMA transfer */
++ for (i = 0; i < DMA_IRQS; i++)
++ if (flag & (1 << i)) {
++ u32 offset = i / 2 * 0x40;
++
++ if (!(i % 2)) {
++ /* RX */
++ /* bytes per row */
++ iowrite32(bytes_per_row, dev->membase + offset +
++ timbdma_dmacfg_BPERROW);
++ /* address high */
++ iowrite32(0, dev->membase + offset +
++ timbdma_dmacfg_RXSTARTH);
++ /* address low */
++ iowrite32(buf, dev->membase + offset +
++ timbdma_dmacfg_RXSTARTL);
++ /* Length */
++ iowrite32(len, dev->membase + offset +
++ timbdma_dmacfg_RXLENGTH);
++ /* Clear rx sw read pointer */
++ iowrite32(0, dev->membase + offset +
++ timbdma_dmacfg_RXSWRP);
++ /* enable the transfer */
++ iowrite32(1, dev->membase + offset +
++ timbdma_dmacfg_RXENABLE);
++ } else {
++ /* TX */
++ /* address high */
++ iowrite32(0, dev->membase + offset +
++ timbdma_dmacfg_TXSTARTH);
++ /* address low */
++ iowrite32(buf, dev->membase + offset +
++ timbdma_dmacfg_TXSTARTL);
++ /* Length */
++ iowrite32(len, dev->membase + offset +
++ timbdma_dmacfg_TXLENGTH);
++ /* Set tx sw write pointer */
++ iowrite32(len, dev->membase + offset +
++ timbdma_dmacfg_TXSWWP);
++ }
++
++ /* only allow one bit in the flag field */
++ break;
++ }
++ spin_unlock_irqrestore(&dev->lock, irqflags);
++}
++EXPORT_SYMBOL(timb_start_dma);
++
++void *timb_stop_dma(u32 flags)
++{
++ int i;
++ unsigned long irqflags;
++ struct timbdma_dev *dev = self_g;
++ void *result = 0;
++
++ spin_lock_irqsave(&dev->lock, irqflags);
++
++ /* now disable the DMA transfers */
++ for (i = 0; i < DMA_IRQS; i++)
++ if (flags & (1 << i)) {
++ /*
++ RX enable registers are located at:
++ 0x14
++ 0x54
++ 0x94
++
++ TX SW pointer registers are located at:
++ 0x24
++ 0x64
++ */
++ u32 offset = i / 2 * 0x40;
++ u32 result_offset = offset;
++ if (!(i % 2)) {
++ /* even -> RX enable */
++ offset += timbdma_dmacfg_RXENABLE;
++ result_offset += timbdma_dmacfg_RXFPGAWP;
++ } else {
++ /* odd -> TX SW pointer reg */
++ offset += timbdma_dmacfg_TXSWWP;
++ result_offset = timbdma_dmacfg_TXFPGARP;
++ }
++
++ iowrite32(0, dev->membase + offset);
++ /* check how far the FPGA has written/read */
++ result = (void *)ioread32(dev->membase + result_offset);
++ }
++
++ /* ack any pending IRQs */
++ iowrite32(flags, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
++
++ spin_unlock_irqrestore(&dev->lock, irqflags);
++
++ return result;
++}
++EXPORT_SYMBOL(timb_stop_dma);
++
++void timb_set_dma_interruptcb(u32 flags, timbdma_interruptcb icb, void *data)
++{
++ int i;
++ unsigned long irqflags;
++ struct timbdma_dev *dev = self_g;
++ u32 ier;
++
++ spin_lock_irqsave(&dev->lock, irqflags);
++
++ for (i = 0; i < DMA_IRQS; i++)
++ if (flags & (1 << i)) {
++ dev->callbacks[i] = icb;
++ dev->callback_data[i] = data;
++ }
++
++ /* Ack any pending IRQ */
++ iowrite32(flags, dev->membase + timbdma_ctrlmap_TIMBSTATUS);
++
++ /* if a null callback is given -> clear interrupt, else -> enable */
++ ier = ioread32(dev->membase + timbdma_ctrlmap_TIMBENABLE);
++ if (icb != NULL)
++ ier |= flags;
++ else
++ ier &= ~flags;
++ iowrite32(ier, dev->membase + timbdma_ctrlmap_TIMBENABLE);
++
++ spin_unlock_irqrestore(&dev->lock, irqflags);
++}
++EXPORT_SYMBOL(timb_set_dma_interruptcb);
++
++static int timbdma_probe(struct platform_device *dev)
++{
++ int err, irq;
++ struct timbdma_dev *self;
++ struct resource *iomem;
++
++ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ self = kzalloc(sizeof(*self), GFP_KERNEL);
++ if (!self) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ spin_lock_init(&self->lock);
++
++ if (!request_mem_region(iomem->start,
++ resource_size(iomem), "timb-dma")) {
++ err = -EBUSY;
++ goto err_request;
++ }
++
++ self->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!self->membase) {
++ printk(KERN_ERR "timbdma: Failed to remap I/O memory\n");
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ /* register interrupt */
++ irq = platform_get_irq(dev, 0);
++ if (irq < 0) {
++ err = irq;
++ goto err_get_irq;
++ }
++
++ /* request IRQ */
++ err = request_irq(irq, timbdma_handleinterrupt, IRQF_SHARED,
++ "timb-dma", self);
++ if (err) {
++ printk(KERN_ERR "timbdma: Failed to request IRQ\n");
++ goto err_get_irq;
++ }
++
++ platform_set_drvdata(dev, self);
++
++ /* assign the global pointer */
++ self_g = self;
++
++ return 0;
++
++err_get_irq:
++ iounmap(self->membase);
++err_ioremap:
++ release_mem_region(iomem->start, resource_size(iomem));
++err_request:
++ kfree(self);
++err_mem:
++ printk(KERN_ERR "timberdale: Failed to register Timberdale DMA: %d\n",
++ err);
++
++ return err;
++}
++
++static int timbdma_remove(struct platform_device *dev)
++{
++ struct timbdma_dev *self = platform_get_drvdata(dev);
++ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++
++ free_irq(platform_get_irq(dev, 0), self);
++ iounmap(self->membase);
++ release_mem_region(iomem->start, resource_size(iomem));
++ kfree(self);
++ self_g = NULL;
++ return 0;
++}
++
++static struct platform_driver timbdma_platform_driver = {
++ .driver = {
++ .name = "timb-dma",
++ .owner = THIS_MODULE,
++ },
++ .probe = timbdma_probe,
++ .remove = timbdma_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbdma_init(void)
++{
++ self_g = NULL;
++ return platform_driver_register(&timbdma_platform_driver);
++}
++
++static void __exit timbdma_exit(void)
++{
++ platform_driver_unregister(&timbdma_platform_driver);
++}
++
++module_init(timbdma_init);
++module_exit(timbdma_exit);
++
++MODULE_DESCRIPTION("Timberdale DMA driver");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:timb-dma");
++
+diff -uNr linux-2.6.29-clean/drivers/mfd/timberdale.c linux-2.6.29/drivers/mfd/timberdale.c
+--- linux-2.6.29-clean/drivers/mfd/timberdale.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/mfd/timberdale.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,599 @@
++/*
++ * timberdale.c timberdale FPGA mfd shim driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/msi.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/mfd/core.h>
++#include <linux/irq.h>
++
++#include <linux/i2c.h>
++#include <linux/i2c-ocores.h>
++#include <linux/i2c/tsc2007.h>
++#include <linux/spi/xilinx_spi.h>
++#include "timberdale.h"
++
++struct timberdale_device {
++ resource_size_t intc_mapbase;
++ resource_size_t ctl_mapbase;
++ unsigned char __iomem *intc_membase;
++ unsigned char __iomem *ctl_membase;
++ int irq_base;
++ u32 irq_ack_mask;
++ /* locking from interrupts while modifiying registers */
++ spinlock_t lock;
++};
++
++/*--------------------------------------------------------------------------*/
++
++struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
++ .model = 2003,
++ .x_plate_ohms = 100
++};
++
++struct i2c_board_info timberdale_i2c_board_info[] = {
++ {
++ I2C_BOARD_INFO("tsc2003", 0x48),
++ .platform_data = &timberdale_tsc2007_platform_data,
++ .irq = IRQ_TIMBERDALE_TSC_INT
++ },
++ {
++ I2C_BOARD_INFO("adv7180", 0x42 >> 1),
++ .irq = IRQ_TIMBERDALE_ADV7180
++ }
++};
++
++static __devinitdata struct ocores_i2c_platform_data
++timberdale_i2c_platform_data = {
++ .regstep = 4,
++ .clock_khz = 62500,
++ .devices = timberdale_i2c_board_info,
++ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
++};
++
++const static __devinitconst struct resource timberdale_i2c_resources[] = {
++ {
++ .start = I2COFFSET,
++ .end = I2CEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_I2C,
++ .end = IRQ_TIMBERDALE_I2C,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct xspi_platform_data timberdale_xspi_platorm_data = {
++ .bus_num = -1,
++ /* according to spec. we can have up to 32 slaves however,
++ * as of current(2009-03-06) revision of
++ * Timberdale we can only handle 3 right now
++ */
++ .num_chipselect = 3,
++ .speed_hz = 1953125, /* hardcoded value in IP, for now */
++ .cr_offset = 0x60,
++ .sr_offset = 0x64,
++ .txd_offset = 0x68,
++ .rxd_offset = 0x6c,
++ .ssr_offset = 0x70
++};
++
++const static __devinitconst struct resource timberdale_spi_resources[] = {
++ {
++ .start = SPIOFFSET,
++ .end = SPIEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_SPI,
++ .end = IRQ_TIMBERDALE_SPI,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_eth_resources[] = {
++ {
++ .start = ETHOFFSET,
++ .end = ETHEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_ETHSW_IF,
++ .end = IRQ_TIMBERDALE_ETHSW_IF,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_gpio_resources[] = {
++ {
++ .start = GPIOOFFSET,
++ .end = GPIOEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_GPIO,
++ .end = IRQ_TIMBERDALE_GPIO,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++
++const static __devinitconst struct resource timberdale_most_resources[] = {
++ {
++ .start = MOSTOFFSET,
++ .end = MOSTEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_MLB,
++ .end = IRQ_TIMBERDALE_MLB,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_uart_resources[] = {
++ {
++ .start = UARTOFFSET,
++ .end = UARTEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_UART,
++ .end = IRQ_TIMBERDALE_UART,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_i2s_resources[] = {
++ {
++ .start = I2SOFFSET,
++ .end = I2SEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_I2S,
++ .end = IRQ_TIMBERDALE_I2S,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_video_resources[] = {
++ {
++ .start = LOGIWOFFSET,
++ .end = LOGIWEND,
++ .flags = IORESOURCE_MEM,
++ },
++ /*
++ note that the "frame buffer" is located in DMA area
++ starting at 0x1200000
++ */
++};
++
++const static __devinitconst struct resource timberdale_dma_resources[] = {
++ {
++ .start = DMAOFFSET,
++ .end = DMAEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_DMA,
++ .end = IRQ_TIMBERDALE_DMA,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar0[] = {
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "ocores-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_i2c_resources),
++ .resources = timberdale_i2c_resources,
++ .platform_data = &timberdale_i2c_platform_data,
++ .data_size = sizeof(timberdale_i2c_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ },
++ {
++ .name = "timb-i2s",
++ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
++ .resources = timberdale_i2s_resources,
++ },
++ {
++ .name = "timb-most",
++ .num_resources = ARRAY_SIZE(timberdale_most_resources),
++ .resources = timberdale_most_resources,
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platorm_data,
++ .data_size = sizeof(timberdale_xspi_platorm_data),
++ },
++ {
++ .name = "ks884x",
++ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
++ .resources = timberdale_eth_resources,
++ },
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ },
++};
++
++static const __devinitconst struct resource timberdale_sdhc_resources_bar1[] = {
++ {
++ .start = SDHC0OFFSET,
++ .end = SDHC0END,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_SDHC,
++ .end = IRQ_TIMBERDALE_SDHC,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
++ {
++ .name = "sdhci",
++ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources_bar1),
++ .resources = timberdale_sdhc_resources_bar1,
++ },
++};
++
++/*--------------------------------------------------------------------------*/
++
++
++/* Handle the timberdale interrupt mux */
++static void timberdale_irq(unsigned int irq, struct irq_desc *desc)
++{
++ struct timberdale_device *priv = get_irq_data(irq);
++ unsigned int i, ipr;
++
++ desc->chip->ack(irq);
++
++ while ((ipr = ioread32(priv->intc_membase + IPR))) {
++ priv->irq_ack_mask = 0;
++ for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
++ if (ipr & (1 << i))
++ generic_handle_irq(priv->irq_base + i);
++ if (priv->irq_ack_mask)
++ iowrite32(priv->irq_ack_mask, priv->intc_membase + IAR);
++ }
++}
++
++static void timberdale_irq_mask(unsigned int irq)
++{
++ struct timberdale_device *priv = get_irq_chip_data(irq);
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->lock, flags);
++ iowrite32(1 << (irq - priv->irq_base), priv->intc_membase + CIE);
++ spin_unlock_irqrestore(&priv->lock, flags);
++}
++
++static void timberdale_irq_unmask(unsigned int irq)
++{
++ struct timberdale_device *priv = get_irq_chip_data(irq);
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->lock, flags);
++ iowrite32(1 << (irq - priv->irq_base), priv->intc_membase + SIE);
++ spin_unlock_irqrestore(&priv->lock, flags);
++}
++
++static void timberdale_irq_ack(unsigned int irq)
++{
++ struct timberdale_device *priv = get_irq_chip_data(irq);
++ unsigned long flags;
++ u32 ack_mask = 1 << (irq - priv->irq_base);
++
++ spin_lock_irqsave(&priv->lock, flags);
++ /* if edge triggered, ack directly. Otherwhise ack in the end of
++ * irq handler
++ */
++ if (ack_mask & IRQ_TIMBERDALE_EDGE_MASK)
++ iowrite32(ack_mask, priv->intc_membase + IAR);
++ else
++ priv->irq_ack_mask |= ack_mask;
++ spin_unlock_irqrestore(&priv->lock, flags);
++}
++
++static struct irq_chip timberdale_chip = {
++ .name = "timberdale",
++ .ack = timberdale_irq_ack,
++ .mask = timberdale_irq_mask,
++ .unmask = timberdale_irq_unmask,
++ .disable = timberdale_irq_mask,
++ .enable = timberdale_irq_unmask,
++};
++
++/*--------------------------------------------------------------------------*/
++
++/* Install the IRQ handler */
++static void timberdale_attach_irq(struct pci_dev *dev)
++{
++ struct timberdale_device *priv = pci_get_drvdata(dev);
++ unsigned int irq, irq_base;
++
++ irq_base = priv->irq_base;
++ for (irq = irq_base; irq < irq_base + TIMBERDALE_NR_IRQS; irq++) {
++ set_irq_chip_and_handler_name(irq, &timberdale_chip,
++ handle_edge_irq, "mux");
++
++ set_irq_chip_data(irq, priv);
++
++#ifdef CONFIG_ARM
++ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
++#endif
++ }
++
++ set_irq_data(dev->irq, priv);
++ set_irq_chained_handler(dev->irq, timberdale_irq);
++}
++
++static void timberdale_detach_irq(struct pci_dev *dev)
++{
++ struct timberdale_device *priv = pci_get_drvdata(dev);
++ unsigned int irq, irq_base;
++
++ irq_base = priv->irq_base;
++
++ set_irq_chained_handler(dev->irq, NULL);
++ set_irq_data(dev->irq, NULL);
++
++ for (irq = irq_base; irq < irq_base + TIMBERDALE_NR_IRQS; irq++) {
++#ifdef CONFIG_ARM
++ set_irq_flags(irq, 0);
++#endif
++ set_irq_chip(irq, NULL);
++ set_irq_chip_data(irq, NULL);
++ }
++}
++
++static int __devinit timb_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ struct timberdale_device *priv;
++ int err, i;
++ u16 ver;
++ resource_size_t mapbase;
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ spin_lock_init(&priv->lock);
++ pci_set_drvdata(dev, priv);
++
++ err = pci_enable_device(dev);
++ if (err)
++ goto err_enable;
++
++ mapbase = pci_resource_start(dev, 0);
++ if (!mapbase) {
++ printk(KERN_ERR "timberdale: No resource\n");
++ goto err_start;
++ }
++
++ /* create a resource for the Interrupt controller registers */
++ priv->intc_mapbase = mapbase + INTCOFFSET;
++ if (!request_mem_region(priv->intc_mapbase, INTCSIZE, "timb-intc")) {
++ printk(KERN_ERR "timberdale: Failed to request intc mem\n");
++ goto err_request;
++ }
++
++ /* create a resource for the PCI master register */
++ priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
++ if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-intc")) {
++ printk(KERN_ERR "timberdale: Failed to request ctl mem\n");
++ goto err_request_ctl;
++ }
++
++ priv->intc_membase = ioremap(priv->intc_mapbase, INTCSIZE);
++ if (!priv->intc_membase) {
++ printk(KERN_ALERT "timberdale: Map error, intc\n");
++ goto err_ioremap;
++ }
++
++ priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
++ if (!priv->ctl_membase) {
++ printk(KERN_ALERT "timberdale: Map error, ctl\n");
++ goto err_ioremap_ctl;
++ }
++
++ err = pci_enable_msi(dev);
++ if (err) {
++ printk(KERN_WARNING "timberdale: MSI init failed: %d\n", err);
++ goto err_msi;
++ }
++
++ /* Reset all FPGA PLB peripherals */
++ iowrite32(0x1, priv->ctl_membase + MAYSVILLERST);
++
++ /* at this stage the FPGA does not generate a
++ * unique interrupt per function, to emulate real interrupts
++ * we assign them a faked interrupt which we issue in the
++ * interrupt handler. For now just hard code a base number
++ */
++ priv->irq_base = NR_IRQS - TIMBERDALE_NR_IRQS - 1;
++ if (priv->irq_base < dev->irq)
++ /* ops the device itself got the IRQ in the end... */
++ priv->irq_base = 400;
++
++ timberdale_attach_irq(dev);
++
++ /* update IRQ offsets in I2C board info */
++ for (i = 0; i < ARRAY_SIZE(timberdale_i2c_board_info); i++)
++ timberdale_i2c_board_info[i].irq += priv->irq_base;
++
++ /* don't leave platform_data empty on any device */
++ for (i = 0; i < ARRAY_SIZE(timberdale_cells_bar0); i++)
++ if (timberdale_cells_bar0[i].platform_data == NULL) {
++ timberdale_cells_bar0[i].platform_data =
++ timberdale_cells_bar0 + i;
++ timberdale_cells_bar0[i].data_size =
++ sizeof(timberdale_cells_bar0[i]);
++ }
++
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0, ARRAY_SIZE(timberdale_cells_bar0),
++ &dev->resource[0], priv->irq_base);
++ if (err)
++ printk(KERN_WARNING
++ "timberdale: mfd_add_devices failed: %d\n", err);
++ else {
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar1,
++ ARRAY_SIZE(timberdale_cells_bar1),
++ &dev->resource[1], priv->irq_base);
++
++ if (err)
++ printk(KERN_WARNING
++ "timberdale: timb_add_sdhci failed: %d\n", err);
++ }
++
++ if (err)
++ goto err_mfd;
++
++ ver = ioread16(priv->ctl_membase + TIMB_REV);
++
++ printk(KERN_INFO "Found Maysville Card. Rev: %d\n", ver);
++
++ /* Enable interrupts and wire the hardware interrupts */
++ iowrite32(0x3, priv->intc_membase + MER);
++
++ return 0;
++err_mfd:
++ timberdale_detach_irq(dev);
++ pci_disable_msi(dev);
++err_msi:
++ iounmap(priv->ctl_membase);
++err_ioremap_ctl:
++ iounmap(priv->intc_membase);
++err_ioremap:
++ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
++err_request_ctl:
++ release_mem_region(priv->intc_mapbase, INTCSIZE);
++err_request:
++ pci_set_drvdata(dev, NULL);
++err_start:
++ pci_disable_device(dev);
++err_enable:
++ kfree(priv);
++ pci_set_drvdata(dev, NULL);
++ return -ENODEV;
++}
++
++static void __devexit timb_remove(struct pci_dev *dev)
++{
++ /* clean up any allocated resources and stuff here.
++ * like call release_region();
++ */
++ struct timberdale_device *priv;
++
++ priv = pci_get_drvdata(dev);
++
++ mfd_remove_devices(&dev->dev);
++
++ timberdale_detach_irq(dev);
++
++ iowrite32(0xffffffff, priv->intc_membase + IAR);
++ iowrite32(0, priv->intc_membase + MER);
++ iowrite32(0, priv->intc_membase + IER);
++
++ iounmap(priv->ctl_membase);
++ iounmap(priv->intc_membase);
++ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
++ release_mem_region(priv->intc_mapbase, INTCSIZE);
++
++ pci_disable_msi(dev);
++ pci_disable_device(dev);
++ pci_set_drvdata(dev, NULL);
++ kfree(priv);
++}
++
++static struct pci_device_id timberdale_pci_tbl[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
++ { 0 }
++};
++MODULE_DEVICE_TABLE(pci, timberdale_pci_tbl);
++
++static struct pci_driver timberdale_pci_driver = {
++ .name = "timberdale",
++ .id_table = timberdale_pci_tbl,
++ .probe = timb_probe,
++ .remove = timb_remove,
++};
++
++static int __init timberdale_init(void)
++{
++ int err;
++
++ err = pci_register_driver(&timberdale_pci_driver);
++ if (err < 0) {
++ printk(KERN_ERR
++ "Failed to register PCI driver for %s device.\n",
++ timberdale_pci_driver.name);
++ return -ENODEV;
++ }
++
++ printk(KERN_INFO "Driver for %s has been successfully registered.\n",
++ timberdale_pci_driver.name);
++
++ return 0;
++}
++
++static void __exit timberdale_exit(void)
++{
++ pci_unregister_driver(&timberdale_pci_driver);
++
++ printk(KERN_INFO "Driver for %s has been successfully unregistered.\n",
++ timberdale_pci_driver.name);
++}
++
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(DRV_VERSION);
++MODULE_AUTHOR("Richard Rojfors");
++
++module_init(timberdale_init);
++module_exit(timberdale_exit);
++
+diff -uNr linux-2.6.29-clean/drivers/mfd/timberdale.h linux-2.6.29/drivers/mfd/timberdale.h
+--- linux-2.6.29-clean/drivers/mfd/timberdale.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/mfd/timberdale.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,114 @@
++/*
++ * timberdale.h timberdale FPGA mfd shim driver defines
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA
++ */
++
++#ifndef MFD_TIMBERDALE_H
++#define MFD_TIMBERDALE_H
++
++/* Registers of the interrupt controller */
++#define ISR 0x00
++#define IPR 0x04
++#define IER 0x08
++#define IAR 0x0c
++#define SIE 0x10
++#define CIE 0x14
++#define MER 0x1c
++
++/* Registers of the control area */
++#define TIMB_REV 0x00
++#define MAYSVILLERST 0x40
++
++
++#define I2COFFSET 0x0
++#define I2CEND 0x1f
++
++#define SPIOFFSET 0x80
++#define SPIEND 0xff
++
++#define ETHOFFSET 0x300
++#define ETHEND 0x30f
++
++#define GPIOOFFSET 0x400
++#define GPIOEND 0x7ff
++
++#define CHIPCTLOFFSET 0x800
++#define CHIPCTLEND 0x8ff
++#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
++
++#define INTCOFFSET 0xc00
++#define INTCEND 0xfff
++#define INTCSIZE (INTCEND - INTCOFFSET)
++
++#define MOSTOFFSET 0x1000
++#define MOSTEND 0x13ff
++
++#define UARTOFFSET 0x1400
++#define UARTEND 0x17ff
++
++#define I2SOFFSET 0x1C00
++#define I2SEND 0x1fff
++
++#define LOGIWOFFSET 0x30000
++#define LOGIWEND 0x37fff
++
++#define DMAOFFSET 0x01000000
++#define DMAEND 0x013fffff
++
++/* SDHC0 is placed in PCI bar 1 */
++#define SDHC0OFFSET 0x00
++#define SDHC0END 0xff
++
++/* SDHC1 is placed in PCI bar 2 */
++#define SDHC1OFFSET 0x00
++#define SDHC1END 0xff
++
++#define PCI_VENDOR_ID_TIMB 0x10ee
++#define PCI_DEVICE_ID_TIMB 0xa123
++#define DRV_VERSION "0.1"
++
++
++#define IRQ_TIMBERDALE_INIC 0
++#define IRQ_TIMBERDALE_MLB 1
++#define IRQ_TIMBERDALE_GPIO 2
++#define IRQ_TIMBERDALE_I2C 3
++#define IRQ_TIMBERDALE_UART 4
++#define IRQ_TIMBERDALE_DMA 5
++#define IRQ_TIMBERDALE_I2S 6
++#define IRQ_TIMBERDALE_TSC_INT 7
++#define IRQ_TIMBERDALE_SDHC 8
++#define IRQ_TIMBERDALE_ADV7180 9
++#define IRQ_TIMBERDALE_ETHSW_IF 10
++#define IRQ_TIMBERDALE_SPI 11
++
++#define TIMBERDALE_NR_IRQS 12
++
++/* Some of the interrupts are level triggered, some are edge triggered */
++#define IRQ_TIMBERDALE_EDGE_MASK ((1 << IRQ_TIMBERDALE_ADV7180) | \
++ (1 << IRQ_TIMBERDALE_TSC_INT) | (1 << IRQ_TIMBERDALE_DMA) | \
++ (1 << IRQ_TIMBERDALE_MLB) | (1 << IRQ_TIMBERDALE_INIC))
++
++#define IRQ_TIMBERDALE_LEVEL_MASK ((1 << IRQ_TIMBERDALE_SPI) | \
++ (1 << IRQ_TIMBERDALE_ETHSW_IF) | (1 << IRQ_TIMBERDALE_SDHC) | \
++ (1 << IRQ_TIMBERDALE_I2S) | (1 << IRQ_TIMBERDALE_UART) | \
++ (1 << IRQ_TIMBERDALE_I2C) | (1 << IRQ_TIMBERDALE_GPIO))
++
++#endif
++
+diff -uNr linux-2.6.29-clean/drivers/mfd/timbi2s.c linux-2.6.29/drivers/mfd/timbi2s.c
+--- linux-2.6.29-clean/drivers/mfd/timbi2s.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/mfd/timbi2s.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,597 @@
++/*
++ * timbi2s.c timberdale FPGA I2S driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA I2S
++ *
++ * As of 2009-03-23 I2S instances
++ * are not configured as masters
++ *
++ * TODO: implement switching between master and slave
++ */
++
++#include <linux/io.h>
++#include <linux/fs.h>
++#include <linux/module.h>
++#include <linux/circ_buf.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++
++#include <linux/mfd/timbi2s.h>
++
++#define DRIVER_NAME "timb-i2s"
++
++#define I2S_CLOCK_SPEED 62500000 /* 62,5MHz */
++
++#define FIFO_FILL_SIZE 127
++#define I2S_BUFFER_SIZE PAGE_SIZE
++
++#define ALMOST_FULL 170
++#define ALMOST_EMPTY 85
++
++/* As of 2009-03-16, IP can instanciate max. 4 RX and 4 TX */
++#define MAX_TX_NR 4
++#define MAX_RX_NR 4
++/* and actually up and running only 4.
++ * 1 TX and 3 RX
++ */
++#define IP_I2S_NR 4
++#define REGSTEP 0x04
++
++#define VERSION 0x00
++#define I2S_UIR 0x04 /* Unit Interrupt Register */
++
++/* Registers for all possible I2S IP instances
++ * are the same as for first one (from 0x08 to 0x20)
++ */
++#define I2S_PRESCALE 0x08 /* Holds prescale value, if clock master */
++#define I2S_ICR 0x0c /* Interrupt Clear Register */
++# define ICR_F 0x01 /* Full */
++# define ICR_AF 0x02 /* Almost full */
++# define ICR_AE 0x04 /* Almost empty */
++# define ICR_RX_D 0x08 /* Data present, RX only */
++# define ICR_TX_E 0x08 /* Epmty, TX only */
++
++#define I2S_IPR 0x10 /* Interrupt Pending Register */
++#define I2S_ISR 0x14 /* Interrupt Status Register */
++
++#define I2S_IER 0x18 /* Interrupt Enable Register */
++# define IER_FF 0x01 /* RX/TX FIFO Full */
++# define IER_FAF 0x02 /* RX/TX FIFO Almost Full */
++# define IER_FAE 0x04 /* RX/TX FIFO Almost Empty */
++# define IER_RX_DATA 0x08 /* RX. Data Present */
++# define IER_TX_FE 0x08 /* TX. FIFO Empty */
++
++#define I2S_CTRL 0x1c /* Control Register */
++# define CTRL_TX_ENABLE 0x01 /* Enable TX */
++# define CTRL_RX_ENABLE 0x02 /* Enable RX */
++# define CTRL_NONE 0x04 /* Not used */
++# define CTRL_FIFO_CLR 0x08 /* FIFO Clear */
++# define CTRL_SWR 0x10 /* Soft reset */
++# define CTRL_CLKMASTER 0x1000 /* IP I2S instance is master */
++# define CTRL_IS_TX 0x40000000 /* IP I2S is an TX-instance */
++# define CTRL_IS_RX 0x20000000 /* IP I2S is an RX-instance */
++
++#define I2S_FIFO 0x20 /* read/write FIFO */
++
++#define INC_HEAD(buf, size) \
++ (buf->head = (buf->head + 1) & (size-1))
++
++#define INC_TAIL(buf, size) \
++ (buf->tail = (buf->tail + 1) & (size-1))
++
++
++/* circular buffer */
++static struct circ_buf *timbi2s_buf_alloc(void);
++static void timbi2s_buf_free(struct circ_buf *cb);
++static void timbi2s_buf_clear(struct circ_buf *cb);
++
++static int timbi2s_fifo_read(struct circ_buf *cb, ssize_t count, long add);
++static int timbi2s_fifo_write(struct circ_buf *cb, ssize_t count, long add);
++
++static int timbi2s_ioctrl(struct timbi2s_dev *);
++
++static struct timbi2s_bus *bus_p;
++
++static int timbi2s_is_tx(struct timbi2s_dev *i2sdev)
++{
++ return (ioread32(i2sdev->membase + i2sdev->ctrl_offset)
++ & CTRL_IS_TX) ? 1 : 0;
++}
++
++static int timbi2s_is_rx(struct timbi2s_dev *i2sdev)
++{
++ return (ioread32(i2sdev->membase + i2sdev->ctrl_offset)
++ & CTRL_IS_RX) ? 1 : 0;
++}
++
++/* Return unused TX-instance */
++static struct timbi2s_dev *timbi2s_get_tx(void)
++{
++ struct timbi2s_dev *tdev, *tmp;
++
++ if (bus_p == NULL)
++ return NULL;
++
++ list_for_each_entry_safe(tdev, tmp, &bus_p->control->list, item) {
++ if (!tdev->in_use && timbi2s_is_tx(tdev)) {
++ tdev->in_use = 1;
++ return tdev;
++ }
++
++ }
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(timbi2s_get_tx);
++
++/* Return unused RX-instance */
++static struct timbi2s_dev *timbi2s_get_rx(void)
++{
++ struct timbi2s_dev *tdev, *tmp;
++
++ if (bus_p == NULL)
++ return NULL;
++
++ list_for_each_entry_safe(tdev, tmp, &bus_p->control->list, item) {
++ if (!tdev->in_use && timbi2s_is_rx(tdev)) {
++ tdev->in_use = 1;
++ return tdev;
++ }
++
++ }
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(timbi2s_get_rx);
++
++/* Flag TX/RX as unused and reset it */
++static void timbi2s_put(struct timbi2s_dev *tdev)
++{
++ if (tdev->in_use) {
++ tdev->in_use = 0;
++ timbi2s_ioctrl(tdev);
++ }
++}
++EXPORT_SYMBOL_GPL(timbi2s_put);
++
++/*
++ * Write data to the FIFO
++ */
++static void timbi2s_tx_handler(struct timbi2s_dev *i2sdev)
++{
++ u32 pend;
++
++ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
++
++ if (pend & IER_FAE) {
++ timbi2s_fifo_write(i2sdev->buffer,
++ ALMOST_FULL - ALMOST_EMPTY,
++ (unsigned long)i2sdev->membase +
++ i2sdev->fifo);
++ /* clear interrupt */
++ iowrite32(ICR_AE, i2sdev->membase + i2sdev->icr_offset);
++ }
++}
++
++/*
++ * Read data from the FIFO
++ */
++static void timbi2s_rx_handler(struct timbi2s_dev *i2sdev)
++{
++ u32 pend;
++ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
++
++ if (pend & IER_FAE) {
++ timbi2s_fifo_read(i2sdev->buffer,
++ ALMOST_EMPTY,
++ (unsigned long)i2sdev->membase +
++ i2sdev->fifo);
++
++ /* clear interrupt */
++ iowrite32(ICR_AE | ICR_AF,
++ i2sdev->membase + i2sdev->icr_offset);
++ }
++}
++
++void timbi2s_int_handler(struct work_struct *workp)
++{
++ u32 pend, stat, i2stype;
++ unsigned long flags;
++ struct timbi2s_dev *i2sdev = container_of(workp,
++ struct timbi2s_dev,
++ work);
++
++ pend = ioread32(i2sdev->membase + i2sdev->ipr_offset);
++ stat = ioread32(i2sdev->membase + i2sdev->isr_offset);
++ i2stype = ioread32(i2sdev->membase + i2sdev->ctrl_offset);
++
++ spin_lock_irqsave(&i2sdev->lock, flags);
++
++ if (i2stype & CTRL_IS_RX) {
++ /* Enable Almost Empty Almost Full interrupt */
++ iowrite32(IER_FAE | IER_FAF,
++ i2sdev->membase + i2sdev->ier_offset);
++ /* Enable RX */
++ iowrite32(CTRL_RX_ENABLE,
++ i2sdev->membase + i2sdev->ctrl_offset);
++ timbi2s_rx_handler(i2sdev);
++ } else if (i2stype & CTRL_IS_TX) {
++ /* Enable Almost Empty interrupt */
++ iowrite32(IER_FAE, i2sdev->membase + i2sdev->ier_offset);
++ /* Enable TX */
++ iowrite32(CTRL_TX_ENABLE,
++ i2sdev->membase + i2sdev->ctrl_offset);
++ timbi2s_tx_handler(i2sdev);
++ }
++
++ spin_unlock_irqrestore(&i2sdev->lock, flags);
++}
++
++static int timbi2s_ioctrl(struct timbi2s_dev *i2sdev)
++{
++ u32 i2stype;
++
++ /* Reset */
++ iowrite8(CTRL_SWR, i2sdev->membase + i2sdev->ctrl_offset);
++ /* Clear IER */
++ iowrite32(0x00000000, i2sdev->membase + i2sdev->ier_offset);
++ /* Clear ICR */
++ iowrite32(0xffffffff, i2sdev->membase + i2sdev->icr_offset);
++
++ i2stype = ioread32(i2sdev->membase + i2sdev->ctrl_offset);
++
++ if (i2stype & CTRL_IS_TX)
++ printk(KERN_INFO DRIVER_NAME": found active I2S Transmitter\n");
++ else if (i2stype & CTRL_IS_RX)
++ printk(KERN_INFO DRIVER_NAME": found active I2S Receiver\n");
++
++ return 1;
++}
++EXPORT_SYMBOL_GPL(timbi2s_ioctrl);
++
++static struct circ_buf *timbi2s_buf_alloc(void) /* allocate an empty circ_buf with an I2S_BUFFER_SIZE backing store */
++{
++ struct circ_buf *cb;
++
++ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
++ if (cb == NULL)
++ return NULL;
++
++ cb->buf = kzalloc(I2S_BUFFER_SIZE, GFP_KERNEL);
++ if (cb->buf == NULL) { /* free the descriptor if the data buffer cannot be allocated */
++ kfree(cb);
++ return NULL;
++ }
++
++ timbi2s_buf_clear(cb); /* head = tail = 0: buffer starts empty */
++
++ return cb;
++}
++
++static void timbi2s_buf_free(struct circ_buf *cb) /* release a buffer from timbi2s_buf_alloc(); cb must be non-NULL */
++{
++ kfree(cb->buf);
++ kfree(cb);
++}
++
++static void timbi2s_buf_clear(struct circ_buf *cb) /* discard contents: head == tail means empty */
++{
++ cb->head = 0;
++ cb->tail = cb->head;
++}
++
++/*
++ * Read data from the FIFO and write it to the given circular buffer
++ */
++static int timbi2s_fifo_read(struct circ_buf *cb, ssize_t count, long add)
++{
++ int c, ret = 0; /* returns 1 when there is no room, otherwise 0 */
++
++ unsigned char *hi = (unsigned char *)ioread32((void *)(add >> 16)); /* NOTE(review): using a register VALUE as a CPU pointer looks wrong — confirm against HW spec */
++ unsigned char *lo = (unsigned char *)ioread32((void *)(add & 0xFFFF));
++
++ c = CIRC_SPACE_TO_END(cb->head, cb->tail, I2S_BUFFER_SIZE); /* contiguous free space up to buffer end */
++ if (count < c)
++ c = count;
++
++ if (c <= 0)
++ return 1;
++
++ while (c > 0) { /* fix: was "c >= 0" — c was never decremented, so the loop never terminated */
++ memcpy(cb->buf + cb->head, hi, 2);
++ INC_HEAD(cb, I2S_BUFFER_SIZE);
++
++ memcpy(cb->buf + cb->head, lo, 2);
++ INC_HEAD(cb, I2S_BUFFER_SIZE);
++ c -= 4; /* fix: consume the 4 bytes just copied (was "count -= 4") */
++ }
++ return ret;
++}
++
++/*
++ * Get data from the circular buffer and write it to the given FIFO address
++ */
++static int timbi2s_fifo_write(struct circ_buf *cb, ssize_t count, long add)
++{
++ int c, ret = 0; /* returns 1 when the buffer is empty, otherwise 0 */
++
++ c = CIRC_CNT_TO_END(cb->head, cb->tail, I2S_BUFFER_SIZE); /* contiguous data up to buffer end */
++ if (count < c)
++ c = count;
++
++ if (c <= 0)
++ return 1;
++
++ while (c > 0) { /* fix: was "c >= 0" — c was never decremented, so the loop never terminated */
++ iowrite32(*(s16 *)(cb->buf + cb->tail), (void *)(add >> 16)); /* NOTE(review): shifted/masked address as iomem pointer — confirm against HW spec */
++ INC_TAIL(cb, I2S_BUFFER_SIZE);
++
++ iowrite32(*(s16 *)(cb->buf + cb->tail), (void *)(add & 0xFFFF));
++ INC_TAIL(cb, I2S_BUFFER_SIZE);
++ c -= 4; /* fix: consume the 4 bytes just written (was "count -= 4") */
++ }
++
++ return ret;
++}
++
++static void timbi2s_control_destroy(struct timbi2s_bus_control *control) /* free the bus-wide control structure */
++{
++ kfree(control);
++ control = NULL; /* NOTE(review): no effect — only nulls the local parameter copy, not the caller's pointer */
++}
++
++static void timbi2s_control_add_dev(struct timbi2s_dev *i2sdev) /* register an instance on its bus's control list */
++{
++ list_add(&i2sdev->item, &i2sdev->bus->control->list);
++}
++
++static void timbi2s_control_del_dev(struct timbi2s_dev *i2sdev) /* unlink an instance; last one out frees the control struct */
++{
++ list_del(&i2sdev->item);
++ if (list_empty(&i2sdev->bus->control->list))
++ timbi2s_control_destroy(i2sdev->bus->control); /* bus->control is left dangling afterwards — callers must not reuse it */
++}
++
++static irqreturn_t timbi2s_irq(int irq, void *dev_id) /* top half: ack per-instance irqs and defer work to the workqueue */
++{
++ u8 pend;
++ u32 iunit;
++ int i;
++
++ struct timbi2s_bus *tbus = dev_id;
++ queue_work(tbus->workqueue, &tbus->work); /* NOTE(review): work queued BEFORE status is read/acked — confirm ordering is intended */
++
++ iunit = ioread32(tbus->membase + I2S_UIR); /* unit interrupt register: one bit per instance */
++ /* Find out which I2S instance is interrupting */
++ for (i = 0; i < 32; i++) {
++ if ((1 << i) & iunit) {
++ pend = ioread8(tbus->membase +
++ (I2S_IPR + (i * REGSTEP * 7))); /* 7 registers per instance, REGSTEP apart */
++ iowrite8(pend, tbus->membase +
++ (I2S_ICR + (i * REGSTEP * 7))); /* write pending mask back to ICR to ack */
++ }
++ }
++
++ return IRQ_HANDLED; /* NOTE(review): returns handled even when iunit == 0 */
++}
++
++static int __init timbi2s_probe(struct platform_device *dev) /* NOTE(review): platform probe marked __init — should be __devinit, else section mismatch if bound late */
++{
++ int err = 0;
++ struct timbi2s_dev *tdev, *tmp;
++ struct timbi2s_bus *tbus;
++ struct resource *iomem;
++ int i;
++
++ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ tbus = kzalloc(sizeof(*tbus), GFP_KERNEL);
++ if (!tbus) {
++ err = -EINVAL; /* NOTE(review): allocation failure should be -ENOMEM */
++ goto err_mem;
++ }
++
++ /* Init bus_control */
++ tbus->control = kzalloc(sizeof(struct timbi2s_bus_control), GFP_KERNEL);
++ if (!tbus->control) {
++ printk(KERN_ERR DRIVER_NAME
++ ": Failed to allocate timbi2s_bus_control.\n");
++ err = -ENOMEM;
++ goto err_free;
++ }
++ INIT_LIST_HEAD(&tbus->control->list);
++
++ /* Init workqueue */
++ tbus->workqueue = create_singlethread_workqueue("timbi2s");
++ if (tbus->workqueue == NULL) {
++ printk(KERN_ERR DRIVER_NAME
++ ": unable to create workqueue\n");
++ err = -ENOMEM;
++ goto err_control;
++ }
++ INIT_WORK(&tbus->work, timbi2s_int_handler);
++
++ if (!request_mem_region(iomem->start,
++ resource_size(iomem), DRIVER_NAME)) {
++ printk(KERN_EMERG DRIVER_NAME
++ ": Mem region is already in use\n");
++ err = -ENXIO;
++ goto err_control;
++ }
++
++ tbus->membase = ioremap(iomem->start, resource_size(iomem));
++ if (tbus->membase == NULL) {
++ err = -ENOMEM;
++ goto err_request;
++ }
++
++ bus_p = tbus; /* presumably a file-scope singleton pointer — TODO confirm single-instance assumption */
++
++
++
++ /* For now we have only 4 I2S instances in IP : 3 RX and 1 TX */
++ /* Note: TX'es are always on top */
++ /* TODO: auto-check how many are alive and bring them into control */
++ for (i = 0; i < IP_I2S_NR; i++) {
++ tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
++ if (!tdev) {
++ err = -EINVAL; /* NOTE(review): should be -ENOMEM */
++ goto clean_list;
++ }
++
++ /* Allocate circ_buf */
++ tdev->buffer = timbi2s_buf_alloc();
++ if (tdev->buffer == NULL) {
++ printk(KERN_ERR "timbi2s: unable to allocate buffer\n");
++ goto clean_list; /* NOTE(review): err still 0 here — probe would return "success"; tdev also leaks (not on list yet) */
++ }
++
++ INIT_LIST_HEAD(&tdev->item);
++ spin_lock_init(&tdev->lock);
++
++ /* set up offsets for each instance of I2S */
++ tdev->bus = tbus; /* ptr to our bus */
++ tdev->membase = tbus->membase;
++ tdev->in_use = 0;
++ tdev->pscale_offset = I2S_PRESCALE + (i * REGSTEP * 7);
++ tdev->icr_offset = I2S_ICR + (i * REGSTEP * 7);
++ tdev->isr_offset = I2S_ISR + (i * REGSTEP * 7);
++ tdev->ipr_offset = I2S_IPR + (i * REGSTEP * 7);
++ tdev->ier_offset = I2S_IER + (i * REGSTEP * 7);
++ tdev->ctrl_offset = I2S_CTRL + (i * REGSTEP * 7);
++ tdev->fifo = I2S_FIFO + (i * REGSTEP * 7);
++
++ /* Try to check and reset hardware */
++ if (timbi2s_ioctrl(tdev))
++ timbi2s_control_add_dev(tdev); /* ioctrl always returns 1, so every instance is added */
++
++ tdev = NULL;
++ }
++
++ tbus->irq = platform_get_irq(dev, 0);
++ if (tbus->irq < 0) {
++ err = -EINVAL;
++ goto clean_list;
++ }
++
++ err = request_irq(tbus->irq, timbi2s_irq, 0, DRIVER_NAME, tbus);
++ if (err != 0)
++ goto clean_list;
++
++ platform_set_drvdata(dev, tbus);
++
++ dev_info(&dev->dev, "Driver for Timberdale I2S (ver: %d)"
++ " has been successfully registered.\n",
++ ioread32(tbus->membase + 0x00)); /* offset 0x00 presumably a version register — TODO confirm */
++ return 0;
++
++clean_list:
++ list_for_each_entry_safe(tdev, tmp, &tbus->control->list, item) {
++ if (tdev->workqueue != NULL) { /* NOTE(review): per-device workqueue is never created in this probe — always NULL here */
++ flush_workqueue(tdev->workqueue);
++ destroy_workqueue(tdev->workqueue);
++ }
++
++ if (tdev->buffer != NULL)
++ timbi2s_buf_free(tdev->buffer);
++
++ timbi2s_control_del_dev(tdev);
++ kfree(tdev);
++ }
++ free_irq(tbus->irq, tbus); /* NOTE(review): reached from paths where the irq was never requested — confirm free_irq is safe here */
++ iounmap(tbus->membase);
++err_request:
++ release_mem_region(iomem->start, resource_size(iomem));
++err_control:
++ if (tbus->control != NULL) /* may already have been freed by the last timbi2s_control_del_dev above — TODO confirm */
++ timbi2s_control_destroy(tbus->control);
++err_free:
++ kfree(tbus);
++err_mem:
++ printk(KERN_ERR
++ DRIVER_NAME": Failed to register Timberdale I2S: %d\n", err);
++
++ return err;
++}
++
++static int __devexit timbi2s_remove(struct platform_device *dev) /* undo timbi2s_probe: free instances, irq, mappings */
++{
++ struct timbi2s_bus *tbus;
++ struct timbi2s_dev *tdev, *tmp;
++ struct resource *r;
++
++ tbus = platform_get_drvdata(dev);
++ free_irq(tbus->irq, tbus);
++
++ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
++
++ list_for_each_entry_safe(tdev, tmp, &tbus->control->list, item) {
++ if (tdev->workqueue != NULL) {
++ flush_workqueue(tdev->workqueue);
++ destroy_workqueue(tdev->workqueue);
++ }
++
++ if (tdev->buffer != NULL)
++ timbi2s_buf_free(tdev->buffer);
++
++ kfree(tdev);
++ }
++
++ iounmap(tbus->membase); /* fix: was tdev->membase — tdev is freed/stale after the loop (use-after-free) */
++ if (r)
++ release_mem_region(r->start, resource_size(r));
++ kfree(tbus->control); /* fix: control struct was leaked (entries freed above without del_dev) */
++ dev_info(&dev->dev, "Driver for Timberdale I2S has been"
++ " successfully unregistered.\n");
++ kfree(tbus); /* fix: bus structure was leaked */
++ platform_set_drvdata(dev, NULL); /* fix: use NULL, not 0, for a pointer */
++ return 0;
++}
++
++static struct platform_driver timbi2s_platform_driver = { /* binds by name to the timberdale MFD cell */
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timbi2s_probe, /* NOTE(review): probe is __init — referencing it here risks a section mismatch */
++ .remove = __devexit_p(timbi2s_remove),
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbi2s_init(void) /* module entry: register the platform driver */
++{
++ return platform_driver_register(&timbi2s_platform_driver);
++}
++
++static void __exit timbi2s_exit(void) /* module exit: unregister the platform driver */
++{
++ platform_driver_unregister(&timbi2s_platform_driver);
++}
++
++module_init(timbi2s_init);
++module_exit(timbi2s_exit);
++
++MODULE_AUTHOR("Mocean Laboratories");
++MODULE_DESCRIPTION("Timberdale I2S bus driver");
++MODULE_LICENSE("GPL v2");
+diff -uNr linux-2.6.29-clean/drivers/mmc/host/Kconfig linux-2.6.29/drivers/mmc/host/Kconfig
+--- linux-2.6.29-clean/drivers/mmc/host/Kconfig 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/mmc/host/Kconfig 2009-04-06 13:51:47.000000000 -0700
+@@ -65,6 +65,16 @@
+
+ If unsure, say Y.
+
++config MMC_SDHCI_PLTFM
++ tristate "SDHCI support on platform devices"
++ depends on MMC_SDHCI
++ help
++ This selects the Secure Digital Host Controller Interface.
++
++ If you have a controller with this interface, say Y or M here.
++
++ If unsure, say N.
++
+ config MMC_OMAP
+ tristate "TI OMAP Multimedia Card Interface support"
+ depends on ARCH_OMAP
+diff -uNr linux-2.6.29-clean/drivers/mmc/host/Makefile linux-2.6.29/drivers/mmc/host/Makefile
+--- linux-2.6.29-clean/drivers/mmc/host/Makefile 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/mmc/host/Makefile 2009-04-06 13:51:47.000000000 -0700
+@@ -13,6 +13,7 @@
+ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
+ obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
+ obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
++obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
+ obj-$(CONFIG_MMC_WBSD) += wbsd.o
+ obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
+ obj-$(CONFIG_MMC_OMAP) += omap.o
+diff -uNr linux-2.6.29-clean/drivers/mmc/host/sdhci-pltfm.c linux-2.6.29/drivers/mmc/host/sdhci-pltfm.c
+--- linux-2.6.29-clean/drivers/mmc/host/sdhci-pltfm.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/mmc/host/sdhci-pltfm.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,262 @@
++/*
++ * sdhci-pltfm.c Support for SDHCI platform devices
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * SDHCI platform devices
++ *
++ * Inspired by sdhci-pci.c, by Pierre Ossman
++ */
++
++#include <linux/delay.h>
++#include <linux/highmem.h>
++#include <linux/platform_device.h>
++
++#include <linux/mmc/host.h>
++
++#include <linux/io.h>
++
++#include "sdhci.h"
++
++
++#define MAX_SLOTS 8 /* upper bound on IORESOURCE_MEM entries (one slot per resource) */
++
++struct sdhci_pltfm_chip;
++
++struct sdhci_pltfm_slot { /* per-slot private data, stored via sdhci_priv() */
++ struct sdhci_pltfm_chip *chip;
++ struct sdhci_host *host;
++
++ int pltfm_resource; /* index of the platform mem resource backing this slot */
++};
++
++struct sdhci_pltfm_chip { /* per-platform-device data, stored as drvdata */
++ struct platform_device *pdev;
++
++ unsigned int quirks; /* NOTE(review): never assigned in this file — always 0 */
++
++ int num_slots; /* Slots on controller */
++ struct sdhci_pltfm_slot *slots[MAX_SLOTS]; /* Pointers to host slots */
++};
++
++
++/*****************************************************************************\
++ * *
++ * SDHCI core callbacks *
++ * *
++\*****************************************************************************/
++
++static struct sdhci_ops sdhci_pltfm_ops = { /* intentionally empty: default sdhci behaviour */
++};
++
++/*****************************************************************************\
++ * *
++ * Device probing/removal *
++ * *
++\*****************************************************************************/
++
++
++static struct sdhci_pltfm_slot * __devinit sdhci_pltfm_probe_slot(
++ struct platform_device *pdev, struct sdhci_pltfm_chip *chip,
++ int resource) /* map one mem resource and register it as an sdhci host; returns slot or ERR_PTR */
++{
++ struct sdhci_pltfm_slot *slot;
++ struct sdhci_host *host;
++ struct resource *iomem;
++ int ret;
++
++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, resource);
++ if (!iomem)
++ return ERR_PTR(-ENODEV);
++
++ if (resource_size(iomem) != 0x100) { /* standard SDHCI register window is 256 bytes */
++ dev_err(&pdev->dev, "Invalid iomem size. You may "
++ "experience problems.\n");
++ }
++
++ if (!pdev->dev.parent) {
++ dev_err(&pdev->dev, "The parent device must be a PCI device\n"); /* fix: message was missing "must" */
++ return ERR_PTR(-ENODEV);
++ }
++
++ host = sdhci_alloc_host(pdev->dev.parent,
++ sizeof(struct sdhci_pltfm_slot));
++ if (IS_ERR(host))
++ return ERR_CAST(host); /* fix: idiomatic ERR_CAST instead of ERR_PTR(PTR_ERR()) */
++
++ slot = sdhci_priv(host);
++
++ slot->chip = chip;
++ slot->host = host;
++ slot->pltfm_resource = resource;
++
++ host->hw_name = "PLTFM";
++ host->ops = &sdhci_pltfm_ops;
++ host->quirks = chip->quirks;
++
++ host->irq = platform_get_irq(pdev, 0);
++
++ if (!request_mem_region(iomem->start, resource_size(iomem),
++ mmc_hostname(host->mmc))) {
++ dev_err(&pdev->dev, "cannot request region\n");
++ ret = -EBUSY;
++ goto free;
++ }
++
++ host->ioaddr = ioremap(iomem->start, resource_size(iomem));
++ if (!host->ioaddr) {
++ dev_err(&pdev->dev, "failed to remap registers\n");
++ ret = -ENOMEM; /* fix: ret was used uninitialized on this path */
++ goto release;
++ }
++ ret = sdhci_add_host(host);
++ if (ret)
++ goto unmap;
++
++ return slot;
++
++unmap:
++ iounmap(host->ioaddr);
++release:
++ release_mem_region(iomem->start, resource_size(iomem));
++free:
++ sdhci_free_host(host);
++
++ return ERR_PTR(ret);
++}
++
++static void sdhci_pltfm_remove_slot(struct sdhci_pltfm_slot *slot) /* tear down one slot created by sdhci_pltfm_probe_slot */
++{
++ int dead;
++ u32 scratch;
++ struct resource *iomem;
++
++ dead = 0;
++ scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
++ if (scratch == (u32)-1) /* all-ones read: controller presumably gone (device removed) */
++ dead = 1;
++
++ sdhci_remove_host(slot->host, dead);
++
++ iounmap(slot->host->ioaddr);
++
++ iomem = platform_get_resource(slot->chip->pdev, IORESOURCE_MEM,
++ slot->pltfm_resource);
++ release_mem_region(iomem->start, resource_size(iomem));
++
++ sdhci_free_host(slot->host); /* frees the slot too (slot lives in host's priv area) */
++}
++
++static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) /* probe every mem resource as one sdhci slot */
++{
++ struct sdhci_pltfm_chip *chip;
++ struct sdhci_pltfm_slot *slot;
++ u8 slots;
++ int ret, i;
++
++ BUG_ON(pdev == NULL);
++
++ for (slots = 0; slots <= MAX_SLOTS; slots++) /* count consecutive mem resources; <= allows detecting overflow below */
++ if (!platform_get_resource(pdev, IORESOURCE_MEM, slots))
++ break;
++
++ BUG_ON(slots > MAX_SLOTS || slots == 0); /* NOTE(review): BUG_ON for a bad platform config is harsh — could return -ENODEV */
++
++ chip = kzalloc(sizeof(struct sdhci_pltfm_chip), GFP_KERNEL);
++ if (!chip) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ chip->pdev = pdev;
++ chip->num_slots = slots;
++ platform_set_drvdata(pdev, chip);
++
++ for (i = 0; i < slots; i++) {
++ slot = sdhci_pltfm_probe_slot(pdev, chip, i);
++ if (IS_ERR(slot)) {
++ for (i--; i >= 0; i--) /* unwind the slots that did succeed */
++ sdhci_pltfm_remove_slot(chip->slots[i]);
++ ret = PTR_ERR(slot);
++ goto free;
++ }
++
++ chip->slots[i] = slot;
++ }
++
++ return 0;
++
++free:
++ platform_set_drvdata(pdev, NULL);
++ kfree(chip);
++
++err:
++ printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret);
++ return ret;
++}
++
++static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) /* tear down all slots and free the chip */
++{
++ int i;
++ struct sdhci_pltfm_chip *chip;
++
++ chip = platform_get_drvdata(pdev);
++
++ if (chip) { /* tolerate remove after a failed probe */
++ for (i = 0; i < chip->num_slots; i++)
++ sdhci_pltfm_remove_slot(chip->slots[i]);
++
++ platform_set_drvdata(pdev, NULL);
++ kfree(chip);
++ }
++
++ return 0;
++}
++
++static struct platform_driver sdhci_pltfm_driver = { /* matches platform devices named "sdhci" (see MODULE_ALIAS) */
++ .driver = {
++ .name = "sdhci",
++ .owner = THIS_MODULE,
++ },
++ .probe = sdhci_pltfm_probe,
++ .remove = __devexit_p(sdhci_pltfm_remove),
++};
++
++/*****************************************************************************\
++ * *
++ * Driver init/exit *
++ * *
++\*****************************************************************************/
++
++static int __init sdhci_drv_init(void) /* module entry: register the platform driver */
++{
++ return platform_driver_register(&sdhci_pltfm_driver);
++}
++
++static void __exit sdhci_drv_exit(void) /* module exit: unregister the platform driver */
++{
++ platform_driver_unregister(&sdhci_pltfm_driver);
++}
++
++module_init(sdhci_drv_init);
++module_exit(sdhci_drv_exit);
++
++MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:sdhci");
++
+diff -uNr linux-2.6.29-clean/drivers/serial/Kconfig linux-2.6.29/drivers/serial/Kconfig
+--- linux-2.6.29-clean/drivers/serial/Kconfig 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/serial/Kconfig 2009-04-06 13:51:47.000000000 -0700
+@@ -1412,4 +1412,11 @@
+ default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
+ default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
+
++config SERIAL_TIMBERDALE
++ tristate "Support for timberdale UART"
++ depends on MFD_TIMBERDALE
++ select SERIAL_CORE
++ ---help---
++ Add support for UART controller on timberdale.
++
+ endmenu
+diff -uNr linux-2.6.29-clean/drivers/serial/Makefile linux-2.6.29/drivers/serial/Makefile
+--- linux-2.6.29-clean/drivers/serial/Makefile 2009-04-01 09:20:24.000000000 -0700
++++ linux-2.6.29/drivers/serial/Makefile 2009-04-06 13:51:47.000000000 -0700
+@@ -76,3 +76,4 @@
+ obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
+ obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
+ obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
++obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
+diff -uNr linux-2.6.29-clean/drivers/serial/timbuart.c linux-2.6.29/drivers/serial/timbuart.c
+--- linux-2.6.29-clean/drivers/serial/timbuart.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/serial/timbuart.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,519 @@
++/*
++ * timbuart.c timberdale FPGA UART driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA UART
++ */
++
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <linux/serial_core.h>
++#include <linux/kernel.h>
++#include <linux/platform_device.h>
++#include <linux/ioport.h>
++
++#include "timbuart.h"
++
++struct timbuart_port { /* driver state: embeds the uart_port so container_of() recovers it */
++ struct uart_port port;
++ struct tasklet_struct tasklet; /* bottom half doing all TX/RX work */
++ int usedma; /* always 0 in this driver (no DMA path wired up) */
++ u8 last_ier; /* IER snapshot taken in the hard irq, consumed by the tasklet */
++ struct platform_device *dev;
++};
++
++static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
++ 921600, 1843200, 3250000}; /* index into this table is written to TIMBUART_BAUDRATE */
++
++static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
++
++static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
++
++static void timbuart_stop_rx(struct uart_port *port) /* uart_ops.stop_rx */
++{
++ /* spin lock held by upper layer, disable all RX interrupts */
++ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
++ iowrite8(ier, port->membase + TIMBUART_IER);
++}
++
++static void timbuart_stop_tx(struct uart_port *port) /* uart_ops.stop_tx */
++{
++ /* spinlock held by upper layer, disable TX interrupt */
++ u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE; /* only TX-almost-empty is masked; TXBF stays as-is */
++ iowrite8(ier, port->membase + TIMBUART_IER);
++}
++
++static void timbuart_start_tx(struct uart_port *port) /* uart_ops.start_tx: defer actual transmission to the tasklet */
++{
++ struct timbuart_port *uart =
++ container_of(port, struct timbuart_port, port);
++
++ /* do not transfer anything here -> fire off the tasklet */
++ tasklet_schedule(&uart->tasklet);
++}
++
++static void timbuart_flush_buffer(struct uart_port *port) /* uart_ops.flush_buffer: drop hardware TX FIFO contents */
++{
++ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
++
++ iowrite8(ctl, port->membase + TIMBUART_CTRL);
++ iowrite8(TXBF, port->membase + TIMBUART_ISR); /* ack any TX-buffer-full status left over */
++}
++
++static void timbuart_rx_chars(struct uart_port *port) /* drain RX FIFO into the tty layer; called with port->lock held */
++{
++ struct tty_struct *tty = port->info->port.tty; /* 2.6.29-era API: port->info (later renamed port->state) */
++
++ while (ioread8(port->membase + TIMBUART_ISR) & RXDP) { /* RXDP: data present */
++ u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
++ /* ack */
++ iowrite8(RXDP, port->membase + TIMBUART_ISR);
++ port->icount.rx++;
++ tty_insert_flip_char(tty, ch, TTY_NORMAL);
++ }
++
++ spin_unlock(&port->lock); /* drop lock: tty_flip_buffer_push may re-enter the driver */
++ tty_flip_buffer_push(port->info->port.tty);
++ spin_lock(&port->lock);
++
++ dev_dbg(port->dev, "%s - total read %d bytes\n",
++ __func__, port->icount.rx);
++}
++
++static void timbuart_tx_chars(struct uart_port *port) /* push xmit ring into HW FIFO until full or ring empty */
++{
++ struct circ_buf *xmit = &port->info->xmit;
++
++ while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) && /* TXBF: hardware TX buffer full */
++ !uart_circ_empty(xmit)) {
++ iowrite8(xmit->buf[xmit->tail],
++ port->membase + TIMBUART_TXFIFO);
++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); /* ring size is a power of two */
++ port->icount.tx++;
++ }
++
++ dev_dbg(port->dev,
++ "%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n",
++ __func__,
++ port->icount.tx,
++ ioread8(port->membase + TIMBUART_CTRL),
++ port->mctrl & TIOCM_RTS,
++ ioread8(port->membase + TIMBUART_BAUDRATE));
++}
++
++static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier) /* tasklet TX path: send data, pick IER bits to re-enable */
++{
++ struct timbuart_port *uart =
++ container_of(port, struct timbuart_port, port);
++ struct circ_buf *xmit = &port->info->xmit;
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
++ return;
++
++ if (port->x_char) /* NOTE(review): x_char is never actually transmitted anywhere in this driver — confirm */
++ return;
++
++ if (isr & TXFLAGS) {
++ timbuart_tx_chars(port);
++ /* clear all TX interrupts */
++ iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
++
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(port);
++ } else
++ /* Re-enable any tx interrupt */
++ *ier |= uart->last_ier & TXFLAGS;
++
++ /* enable interrupts if there are chars in the transmit buffer,
++ * Or if we delivered some bytes and want the almost empty interrupt
++ * we wake up the upper layer later when we got the interrupt
++ * to give it some time to go out...
++ */
++ if (!uart_circ_empty(xmit))
++ *ier |= TXBAE;
++
++ dev_dbg(port->dev, "%s - leaving\n", __func__);
++}
++
++void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier) /* tasklet RX path; NOTE(review): not static — confirm intended linkage */
++{
++ if (isr & RXFLAGS) {
++ /* Some RX status is set */
++ if (isr & RXBF) { /* RX buffer full: overrun — flush HW FIFO and count it */
++ u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
++ TIMBUART_CTRL_FLSHRX;
++ iowrite8(ctl, port->membase + TIMBUART_CTRL);
++ port->icount.overrun++;
++ } else if (isr & (RXDP))
++ timbuart_rx_chars(port);
++
++ /* ack all RX interrupts */
++ iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
++ }
++
++ /* always have the RX interrupts enabled */
++ *ier |= RXBAF | RXBF | RXTT;
++
++ dev_dbg(port->dev, "%s - leaving\n", __func__);
++}
++
++void timbuart_tasklet(unsigned long arg) /* bottom half: all TX/RX/modem work; re-enables IER at the end */
++{
++ struct timbuart_port *uart = (struct timbuart_port *)arg;
++ u8 isr, ier = 0; /* ier accumulates the interrupt bits to re-enable (irq handler zeroed IER) */
++
++ spin_lock(&uart->port.lock);
++
++ isr = ioread8(uart->port.membase + TIMBUART_ISR);
++ dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
++
++ if (!uart->usedma)
++ timbuart_handle_tx_port(&uart->port, isr, &ier);
++
++ timbuart_mctrl_check(&uart->port, isr, &ier);
++
++ if (!uart->usedma)
++ timbuart_handle_rx_port(&uart->port, isr, &ier);
++
++ iowrite8(ier, uart->port.membase + TIMBUART_IER);
++
++ spin_unlock(&uart->port.lock);
++ dev_dbg(uart->port.dev, "%s leaving\n", __func__);
++}
++
++static unsigned int timbuart_tx_empty(struct uart_port *port) /* uart_ops.tx_empty: TXBAE (almost empty) treated as empty */
++{
++ u8 isr = ioread8(port->membase + TIMBUART_ISR);
++
++ return (isr & TXBAE) ? TIOCSER_TEMT : 0;
++}
++
++static unsigned int timbuart_get_mctrl(struct uart_port *port) /* uart_ops.get_mctrl: only CTS is wired; DSR/CD reported always on */
++{
++ u8 cts = ioread8(port->membase + TIMBUART_CTRL);
++ dev_dbg(port->dev, "%s - cts %x\n", __func__, cts);
++
++ if (cts & TIMBUART_CTRL_CTS)
++ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
++ else
++ return TIOCM_DSR | TIOCM_CAR;
++}
++
++static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl) /* uart_ops.set_mctrl: only RTS is controllable */
++{
++ dev_dbg(port->dev, "%s - %x\n", __func__, mctrl);
++
++ if (mctrl & TIOCM_RTS)
++ iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
++ else
++ iowrite8(0, port->membase + TIMBUART_CTRL); /* fix: both branches wrote RTS, so it could never be deasserted */
++}
++
++static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier) /* propagate CTS changes to the serial core */
++{
++ unsigned int cts;
++
++ if (isr & CTS_DELTA) {
++ /* ack */
++ iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
++ cts = timbuart_get_mctrl(port);
++ uart_handle_cts_change(port, cts & TIOCM_CTS);
++ wake_up_interruptible(&port->info->delta_msr_wait);
++ }
++
++ *ier |= CTS_DELTA; /* keep CTS-change interrupt enabled */
++}
++
++static void timbuart_enable_ms(struct uart_port *port) /* uart_ops.enable_ms: no modem-status irq beyond CTS_DELTA */
++{
++ /* N/A */
++}
++
++static void timbuart_break_ctl(struct uart_port *port, int ctl) /* uart_ops.break_ctl: hardware has no break support */
++{
++ /* N/A */
++}
++
++static int timbuart_startup(struct uart_port *port) /* uart_ops.startup: flush RX, clear status, enable irqs, grab irq line */
++{
++ struct timbuart_port *uart =
++ container_of(port, struct timbuart_port, port);
++
++ dev_dbg(port->dev, "%s\n", __func__);
++
++ iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
++ iowrite8(0xff, port->membase + TIMBUART_ISR); /* ack everything */
++ /* Enable all but TX interrupts */
++ iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
++ port->membase + TIMBUART_IER); /* NOTE(review): IER enabled before request_irq — confirm ordering is safe */
++
++ return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
++ "timb-uart", uart);
++}
++
++static void timbuart_shutdown(struct uart_port *port) /* uart_ops.shutdown: release irq and mask all interrupts */
++{
++ struct timbuart_port *uart =
++ container_of(port, struct timbuart_port, port);
++ dev_dbg(port->dev, "%s\n", __func__);
++ free_irq(port->irq, uart); /* NOTE(review): irq freed before IER is masked — a shared irq could still fire in between */
++ iowrite8(0, port->membase + TIMBUART_IER);
++}
++
++static int get_bindex(int baud) /* map a baud rate to its index in baudrates[]; -1 if unsupported */
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(baudrates); i++)
++ if (baud == baudrates[i])
++ return i;
++
++ return -1;
++}
++
++static void timbuart_set_termios(struct uart_port *port, /* uart_ops.set_termios: only the baud rate is programmable */
++ struct ktermios *termios,
++ struct ktermios *old)
++{
++ unsigned int baud;
++ short bindex;
++ unsigned long flags;
++
++ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
++ bindex = get_bindex(baud);
++ dev_dbg(port->dev, "%s - bindex %d\n", __func__, bindex);
++
++ if (bindex < 0) {
++ printk(KERN_ALERT "timbuart: Unsupported baud rate\n"); /* NOTE(review): rate left unchanged; timeout not updated */
++ } else {
++ spin_lock_irqsave(&port->lock, flags);
++ iowrite8((u8)bindex, port->membase + TIMBUART_BAUDRATE); /* HW takes the table index, not the rate itself */
++ uart_update_timeout(port, termios->c_cflag, baud);
++ spin_unlock_irqrestore(&port->lock, flags);
++ }
++}
++
++static const char *timbuart_type(struct uart_port *port) /* uart_ops.type: name shown in /proc/tty and boot logs */
++{
++ return port->type == PORT_TIMBUART ? "timbuart" : NULL; /* fix: compared against PORT_UNKNOWN, so the configured port always got NULL */
++}
++
++/* We do not request/release mappings of the registers here,
++ * currently it's done in the probe function.
++ */
++static void timbuart_release_port(struct uart_port *port) /* uart_ops.release_port: unmap and release the register window */
++{
++ struct platform_device *pdev = to_platform_device(port->dev);
++ int size =
++ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
++
++ if (port->flags & UPF_IOREMAP) { /* probe sets UPF_IOREMAP, so the mapping is ours to undo */
++ iounmap(port->membase);
++ port->membase = NULL;
++ }
++
++ release_mem_region(port->mapbase, size);
++}
++
++static int timbuart_request_port(struct uart_port *port) /* uart_ops.request_port: claim and (optionally) map registers */
++{
++ struct platform_device *pdev = to_platform_device(port->dev);
++ int size =
++ resource_size(platform_get_resource(pdev, IORESOURCE_MEM, 0));
++
++ if (!request_mem_region(port->mapbase, size, "timb-uart"))
++ return -EBUSY;
++
++ if (port->flags & UPF_IOREMAP) {
++ port->membase = ioremap(port->mapbase, size);
++ if (port->membase == NULL) {
++ release_mem_region(port->mapbase, size); /* undo the claim on mapping failure */
++ return -ENOMEM;
++ }
++ }
++
++ return 0;
++}
++
++static irqreturn_t timbuart_handleinterrupt(int irq, void *devid) /* hard irq: mask IER, remember it, defer to tasklet */
++{
++ struct timbuart_port *uart = (struct timbuart_port *)devid;
++
++ uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER); /* tasklet uses this to restore TX enables */
++
++ /* disable interrupts, let the tasklet enable them again if needed */
++ iowrite8(0, uart->port.membase + TIMBUART_IER);
++
++ /* fire off bottom half */
++ tasklet_schedule(&uart->tasklet);
++
++ return IRQ_HANDLED; /* NOTE(review): registered IRQF_SHARED but never returns IRQ_NONE — confirm for shared lines */
++}
++
++/*
++ * Configure/autoconfigure the port.
++ */
++static void timbuart_config_port(struct uart_port *port, int flags)
++{
++ if (flags & UART_CONFIG_TYPE) { /* set by the core because probe used UPF_BOOT_AUTOCONF */
++ port->type = PORT_TIMBUART;
++ timbuart_request_port(port); /* NOTE(review): return value ignored — a failed claim goes unnoticed */
++ }
++}
++
++static int timbuart_verify_port(struct uart_port *port,
++ struct serial_struct *ser)
++{
++ /* we don't want the core code to modify any port params */
++ return -EINVAL;
++}
++
++static struct uart_ops timbuart_ops = { /* serial-core callback table for this port */
++ .tx_empty = timbuart_tx_empty,
++ .set_mctrl = timbuart_set_mctrl,
++ .get_mctrl = timbuart_get_mctrl,
++ .stop_tx = timbuart_stop_tx,
++ .start_tx = timbuart_start_tx,
++ .flush_buffer = timbuart_flush_buffer,
++ .stop_rx = timbuart_stop_rx,
++ .enable_ms = timbuart_enable_ms,
++ .break_ctl = timbuart_break_ctl,
++ .startup = timbuart_startup,
++ .shutdown = timbuart_shutdown,
++ .set_termios = timbuart_set_termios,
++ .type = timbuart_type,
++ .release_port = timbuart_release_port,
++ .request_port = timbuart_request_port,
++ .config_port = timbuart_config_port,
++ .verify_port = timbuart_verify_port
++};
++
++static struct uart_driver timbuart_driver = { /* single-port driver: devices appear as /dev/ttyTU0 */
++ .owner = THIS_MODULE,
++ .driver_name = "timberdale_uart",
++ .dev_name = "ttyTU",
++ .major = TIMBUART_MAJOR,
++ .minor = TIMBUART_MINOR,
++ .nr = 1
++};
++
++static int timbuart_probe(struct platform_device *dev) /* allocate port state, register the uart driver and one port */
++{
++ int err;
++ struct timbuart_port *uart;
++ struct resource *iomem;
++
++ dev_dbg(&dev->dev, "%s\n", __func__);
++
++ uart = kzalloc(sizeof(*uart), GFP_KERNEL);
++ if (!uart) {
++ err = -EINVAL; /* NOTE(review): allocation failure should be -ENOMEM */
++ goto err_mem;
++ }
++
++ uart->usedma = 0;
++
++ uart->port.uartclk = 3250000 * 16; /* max table rate * 16, matching uart_get_baud_rate's clk/16 cap */
++ uart->port.fifosize = TIMBUART_FIFO_SIZE;
++ uart->port.regshift = 2;
++ uart->port.iotype = UPIO_MEM;
++ uart->port.ops = &timbuart_ops;
++ uart->port.irq = 0;
++ uart->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
++ uart->port.line = 0;
++ uart->port.dev = &dev->dev;
++
++ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -ENOMEM; /* NOTE(review): a missing resource is arguably -EINVAL/-ENODEV, not -ENOMEM */
++ goto err_register;
++ }
++ uart->port.mapbase = iomem->start;
++ uart->port.membase = NULL; /* mapped later by request_port (UPF_IOREMAP) */
++
++ uart->port.irq = platform_get_irq(dev, 0);
++ if (uart->port.irq < 0) { /* NOTE(review): port.irq is unsigned in 2.6.29 uart_port — this check may never trigger; confirm */
++ err = -EINVAL;
++ goto err_register;
++ }
++
++ tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
++
++ err = uart_register_driver(&timbuart_driver); /* NOTE(review): driver registered per probe; second device would fail */
++ if (err)
++ goto err_register;
++
++ err = uart_add_one_port(&timbuart_driver, &uart->port);
++ if (err)
++ goto err_add_port;
++
++ platform_set_drvdata(dev, uart);
++
++ return 0;
++
++err_add_port:
++ uart_unregister_driver(&timbuart_driver);
++err_register:
++ kfree(uart);
++err_mem:
++ printk(KERN_ERR "timberdale: Failed to register Timberdale UART: %d\n",
++ err);
++
++ return err;
++}
++
++static int timbuart_remove(struct platform_device *dev) /* undo timbuart_probe */
++{
++ struct timbuart_port *uart = platform_get_drvdata(dev);
++
++ tasklet_kill(&uart->tasklet);
++ uart_remove_one_port(&timbuart_driver, &uart->port);
++ uart_unregister_driver(&timbuart_driver); /* paired with the per-probe uart_register_driver */
++ kfree(uart);
++
++ return 0;
++}
++
++static struct platform_driver timbuart_platform_driver = { /* matches the "timb-uart" cell of the timberdale MFD */
++ .driver = {
++ .name = "timb-uart",
++ .owner = THIS_MODULE,
++ },
++ .probe = timbuart_probe,
++ .remove = timbuart_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbuart_init(void) /* module entry: register the platform driver */
++{
++ return platform_driver_register(&timbuart_platform_driver);
++}
++
++static void __exit timbuart_exit(void) /* module exit: unregister the platform driver */
++{
++ platform_driver_unregister(&timbuart_platform_driver);
++}
++
++module_init(timbuart_init);
++module_exit(timbuart_exit);
++
++MODULE_DESCRIPTION("Timberdale UART driver");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:timb-uart");
++
+diff -uNr linux-2.6.29-clean/drivers/serial/timbuart.h linux-2.6.29/drivers/serial/timbuart.h
+--- linux-2.6.29-clean/drivers/serial/timbuart.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/serial/timbuart.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,57 @@
++/*
++ * timbuart.h timberdale FPGA UART driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA UART
++ */
++
++#ifndef _TIMBUART_H
++#define _TIMBUART_H
++
++#define TIMBUART_FIFO_SIZE 2048
++
++#define TIMBUART_RXFIFO 0x08
++#define TIMBUART_TXFIFO 0x0c
++#define TIMBUART_IER 0x10
++#define TIMBUART_IPR 0x14
++#define TIMBUART_ISR 0x18
++#define TIMBUART_CTRL 0x1c
++#define TIMBUART_BAUDRATE 0x20
++
++#define TIMBUART_CTRL_RTS 0x01
++#define TIMBUART_CTRL_CTS 0x02
++#define TIMBUART_CTRL_FLSHTX 0x40
++#define TIMBUART_CTRL_FLSHRX 0x80
++
++#define TXBF 0x01
++#define TXBAE 0x02
++#define CTS_DELTA 0x04
++#define RXDP 0x08
++#define RXBAF 0x10
++#define RXBF 0x20
++#define RXTT 0x40
++#define RXBNAE 0x80
++
++#define RXFLAGS (RXDP | RXBAF | RXBF | RXTT | RXBNAE)
++#define TXFLAGS (TXBF | TXBAE)
++
++#define TIMBUART_MAJOR 204
++#define TIMBUART_MINOR 192
++
++#endif /* _TIMBUART_H */
++
+diff -uNr linux-2.6.29-clean/drivers/spi/Kconfig linux-2.6.29/drivers/spi/Kconfig
+--- linux-2.6.29-clean/drivers/spi/Kconfig 2009-04-01 09:20:25.000000000 -0700
++++ linux-2.6.29/drivers/spi/Kconfig 2009-04-06 13:51:47.000000000 -0700
+@@ -211,8 +211,8 @@
+ SPI driver for Toshiba TXx9 MIPS SoCs
+
+ config SPI_XILINX
+- tristate "Xilinx SPI controller"
+- depends on XILINX_VIRTEX && EXPERIMENTAL
++ tristate "Xilinx SPI controller common module"
++ depends on EXPERIMENTAL
+ select SPI_BITBANG
+ help
+ This exposes the SPI controller IP from the Xilinx EDK.
+@@ -220,6 +220,25 @@
+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
+ Product Specification document (DS464) for hardware details.
+
++config SPI_XILINX_OF
++ tristate "Xilinx SPI controller OF device"
++ depends on SPI_XILINX && XILINX_VIRTEX
++ help
++ This exposes the SPI controller IP from the Xilinx EDK.
++
++ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
++ Product Specification document (DS464) for hardware details.
++
++config SPI_XILINX_PLTFM
++ tristate "Xilinx SPI controller platform device"
++ depends on SPI_XILINX
++ help
++ This exposes the SPI controller IP from the Xilinx EDK.
++
++ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
++ Product Specification document (DS464) for hardware details.
++
++
+ #
+ # Add new SPI master controllers in alphabetical order above this line
+ #
+diff -uNr linux-2.6.29-clean/drivers/spi/Makefile linux-2.6.29/drivers/spi/Makefile
+--- linux-2.6.29-clean/drivers/spi/Makefile 2009-04-01 09:20:25.000000000 -0700
++++ linux-2.6.29/drivers/spi/Makefile 2009-04-06 13:51:47.000000000 -0700
+@@ -29,6 +29,8 @@
+ obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
+ obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
+ obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
++obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
++obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o
+ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
+ # ... add above this line ...
+
+diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi.c linux-2.6.29/drivers/spi/xilinx_spi.c
+--- linux-2.6.29-clean/drivers/spi/xilinx_spi.c 2009-04-01 09:20:25.000000000 -0700
++++ linux-2.6.29/drivers/spi/xilinx_spi.c 2009-04-06 13:51:47.000000000 -0700
+@@ -14,22 +14,28 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+-#include <linux/platform_device.h>
+-
+-#include <linux/of_platform.h>
+-#include <linux/of_device.h>
+-#include <linux/of_spi.h>
+
+ #include <linux/spi/spi.h>
+ #include <linux/spi/spi_bitbang.h>
+ #include <linux/io.h>
+
+-#define XILINX_SPI_NAME "xilinx_spi"
++#include "xilinx_spi.h"
++
++#ifndef CONFIG_PPC
++#define in_8(addr) ioread8(addr)
++#define in_be16(addr) ioread16(addr)
++#define in_be32(addr) ioread32(addr)
++
++#define out_8(addr, b) iowrite8(b, addr)
++#define out_be16(addr, w) iowrite16(w, addr)
++#define out_be32(addr, l) iowrite32(l, addr)
++#endif
++
+
+ /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
+ * Product Specification", DS464
+ */
+-#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
++#define XSPI_CR_OFFSET_DEF 0x62 /* 16-bit Control Register */
+
+ #define XSPI_CR_ENABLE 0x02
+ #define XSPI_CR_MASTER_MODE 0x04
+@@ -41,7 +47,7 @@
+ #define XSPI_CR_MANUAL_SSELECT 0x80
+ #define XSPI_CR_TRANS_INHIBIT 0x100
+
+-#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
++#define XSPI_SR_OFFSET_DEF 0x67 /* 8-bit Status Register */
+
+ #define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
+ #define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
+@@ -49,10 +55,10 @@
+ #define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
+ #define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
+
+-#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
+-#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
++#define XSPI_TXD_OFFSET_DEF 0x6b /* 8-bit Data Transmit Register */
++#define XSPI_RXD_OFFSET_DEF 0x6f /* 8-bit Data Receive Register */
+
+-#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
++#define XSPI_SSR_OFFSET_DEF 0x70 /* 32-bit Slave Select Register */
+
+ /* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
+ * IPIF registers are 32 bit
+@@ -74,24 +80,10 @@
+ #define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
+ #define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
+
+-struct xilinx_spi {
+- /* bitbang has to be first */
+- struct spi_bitbang bitbang;
+- struct completion done;
+-
+- void __iomem *regs; /* virt. address of the control registers */
+-
+- u32 irq;
+-
+- u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
+-
+- u8 *rx_ptr; /* pointer in the Tx buffer */
+- const u8 *tx_ptr; /* pointer in the Rx buffer */
+- int remaining_bytes; /* the number of bytes left to transfer */
+-};
+
+-static void xspi_init_hw(void __iomem *regs_base)
++void xspi_init_hw(struct xilinx_spi *xspi)
+ {
++ void __iomem *regs_base = xspi->regs;
+ /* Reset the SPI device */
+ out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
+ XIPIF_V123B_RESET_MASK);
+@@ -101,30 +93,31 @@
+ out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
+ XIPIF_V123B_GINTR_ENABLE);
+ /* Deselect the slave on the SPI bus */
+- out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
++ out_be32(regs_base + xspi->ssr_offset, 0xffff);
+ /* Disable the transmitter, enable Manual Slave Select Assertion,
+ * put SPI controller into master mode, and enable it */
+- out_be16(regs_base + XSPI_CR_OFFSET,
++ out_be16(regs_base + xspi->cr_offset,
+ XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
+ | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
+ }
++EXPORT_SYMBOL(xspi_init_hw);
+
+-static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
++void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+ {
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+ if (is_on == BITBANG_CS_INACTIVE) {
+ /* Deselect the slave on the SPI bus */
+- out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
++ out_be32(xspi->regs + xspi->ssr_offset, 0xffff);
+ } else if (is_on == BITBANG_CS_ACTIVE) {
+ /* Set the SPI clock phase and polarity */
+- u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
++ u16 cr = in_be16(xspi->regs + xspi->cr_offset)
+ & ~XSPI_CR_MODE_MASK;
+ if (spi->mode & SPI_CPHA)
+ cr |= XSPI_CR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ cr |= XSPI_CR_CPOL;
+- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
++ out_be16(xspi->regs + xspi->cr_offset, cr);
+
+ /* We do not check spi->max_speed_hz here as the SPI clock
+ * frequency is not software programmable (the IP block design
+@@ -132,10 +125,11 @@
+ */
+
+ /* Activate the chip select */
+- out_be32(xspi->regs + XSPI_SSR_OFFSET,
++ out_be32(xspi->regs + xspi->ssr_offset,
+ ~(0x0001 << spi->chip_select));
+ }
+ }
++EXPORT_SYMBOL(xilinx_spi_chipselect);
+
+ /* spi_bitbang requires custom setup_transfer() to be defined if there is a
+ * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
+@@ -143,8 +137,7 @@
+ * Check for 8 bits per word. Chip select delay calculations could be
+ * added here as soon as bitbang_work() can be made aware of the delay value.
+ */
+-static int xilinx_spi_setup_transfer(struct spi_device *spi,
+- struct spi_transfer *t)
++int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+ {
+ u8 bits_per_word;
+
+@@ -157,11 +150,12 @@
+
+ return 0;
+ }
++EXPORT_SYMBOL(xilinx_spi_setup_transfer);
+
+ /* the spi->mode bits understood by this driver: */
+ #define MODEBITS (SPI_CPOL | SPI_CPHA)
+
+-static int xilinx_spi_setup(struct spi_device *spi)
++int xilinx_spi_setup(struct spi_device *spi)
+ {
+ struct spi_bitbang *bitbang;
+ struct xilinx_spi *xspi;
+@@ -188,25 +182,25 @@
+
+ return 0;
+ }
++EXPORT_SYMBOL(xilinx_spi_setup);
+
+ static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
+ {
+ u8 sr;
+
+ /* Fill the Tx FIFO with as many bytes as possible */
+- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
++ sr = in_8(xspi->regs + xspi->sr_offset);
+ while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
+- if (xspi->tx_ptr) {
+- out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
+- } else {
+- out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
+- }
++ if (xspi->tx_ptr)
++ out_8(xspi->regs + xspi->txd_offset, *xspi->tx_ptr++);
++ else
++ out_8(xspi->regs + xspi->txd_offset, 0);
+ xspi->remaining_bytes--;
+- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
++ sr = in_8(xspi->regs + xspi->sr_offset);
+ }
+ }
+
+-static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
++int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+ {
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ipif_ier;
+@@ -229,8 +223,8 @@
+ ipif_ier | XSPI_INTR_TX_EMPTY);
+
+ /* Start the transfer by not inhibiting the transmitter any longer */
+- cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
+- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
++ cr = in_be16(xspi->regs + xspi->cr_offset) & ~XSPI_CR_TRANS_INHIBIT;
++ out_be16(xspi->regs + xspi->cr_offset, cr);
+
+ wait_for_completion(&xspi->done);
+
+@@ -239,14 +233,14 @@
+
+ return t->len - xspi->remaining_bytes;
+ }
+-
++EXPORT_SYMBOL(xilinx_spi_txrx_bufs);
+
+ /* This driver supports single master mode only. Hence Tx FIFO Empty
+ * is the only interrupt we care about.
+ * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
+ * Fault are not to happen.
+ */
+-static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
++irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+ {
+ struct xilinx_spi *xspi = dev_id;
+ u32 ipif_isr;
+@@ -264,20 +258,19 @@
+ * transmitter while the Isr refills the transmit register/FIFO,
+ * or make sure it is stopped if we're done.
+ */
+- cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
+- out_be16(xspi->regs + XSPI_CR_OFFSET,
++ cr = in_be16(xspi->regs + xspi->cr_offset);
++ out_be16(xspi->regs + xspi->cr_offset,
+ cr | XSPI_CR_TRANS_INHIBIT);
+
+ /* Read out all the data from the Rx FIFO */
+- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
++ sr = in_8(xspi->regs + xspi->sr_offset);
+ while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ u8 data;
+
+- data = in_8(xspi->regs + XSPI_RXD_OFFSET);
+- if (xspi->rx_ptr) {
++ data = in_8(xspi->regs + xspi->rxd_offset);
++ if (xspi->rx_ptr)
+ *xspi->rx_ptr++ = data;
+- }
+- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
++ sr = in_8(xspi->regs + xspi->sr_offset);
+ }
+
+ /* See if there is more data to send */
+@@ -286,7 +279,7 @@
+ /* Start the transfer by not inhibiting the
+ * transmitter any longer
+ */
+- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
++ out_be16(xspi->regs + xspi->cr_offset, cr);
+ } else {
+ /* No more data to send.
+ * Indicate the transfer is completed.
+@@ -297,167 +290,18 @@
+
+ return IRQ_HANDLED;
+ }
++EXPORT_SYMBOL(xilinx_spi_irq);
+
+-static int __init xilinx_spi_of_probe(struct of_device *ofdev,
+- const struct of_device_id *match)
+-{
+- struct spi_master *master;
+- struct xilinx_spi *xspi;
+- struct resource r_irq_struct;
+- struct resource r_mem_struct;
+-
+- struct resource *r_irq = &r_irq_struct;
+- struct resource *r_mem = &r_mem_struct;
+- int rc = 0;
+- const u32 *prop;
+- int len;
+-
+- /* Get resources(memory, IRQ) associated with the device */
+- master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
+-
+- if (master == NULL) {
+- return -ENOMEM;
+- }
+-
+- dev_set_drvdata(&ofdev->dev, master);
+-
+- rc = of_address_to_resource(ofdev->node, 0, r_mem);
+- if (rc) {
+- dev_warn(&ofdev->dev, "invalid address\n");
+- goto put_master;
+- }
+-
+- rc = of_irq_to_resource(ofdev->node, 0, r_irq);
+- if (rc == NO_IRQ) {
+- dev_warn(&ofdev->dev, "no IRQ found\n");
+- goto put_master;
+- }
+-
+- xspi = spi_master_get_devdata(master);
+- xspi->bitbang.master = spi_master_get(master);
+- xspi->bitbang.chipselect = xilinx_spi_chipselect;
+- xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
+- xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
+- xspi->bitbang.master->setup = xilinx_spi_setup;
+- init_completion(&xspi->done);
+-
+- xspi->irq = r_irq->start;
+-
+- if (!request_mem_region(r_mem->start,
+- r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
+- rc = -ENXIO;
+- dev_warn(&ofdev->dev, "memory request failure\n");
+- goto put_master;
+- }
+-
+- xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
+- if (xspi->regs == NULL) {
+- rc = -ENOMEM;
+- dev_warn(&ofdev->dev, "ioremap failure\n");
+- goto put_master;
+- }
+- xspi->irq = r_irq->start;
+-
+- /* dynamic bus assignment */
+- master->bus_num = -1;
+-
+- /* number of slave select bits is required */
+- prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
+- if (!prop || len < sizeof(*prop)) {
+- dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
+- goto put_master;
+- }
+- master->num_chipselect = *prop;
+-
+- /* SPI controller initializations */
+- xspi_init_hw(xspi->regs);
+-
+- /* Register for SPI Interrupt */
+- rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+- if (rc != 0) {
+- dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
+- goto unmap_io;
+- }
+-
+- rc = spi_bitbang_start(&xspi->bitbang);
+- if (rc != 0) {
+- dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
+- goto free_irq;
+- }
+-
+- dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
+- (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
+-
+- /* Add any subnodes on the SPI bus */
+- of_register_spi_devices(master, ofdev->node);
+-
+- return rc;
+-
+-free_irq:
+- free_irq(xspi->irq, xspi);
+-unmap_io:
+- iounmap(xspi->regs);
+-put_master:
+- spi_master_put(master);
+- return rc;
+-}
+-
+-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
++void xilinx_spi_set_default_reg_offsets(struct xilinx_spi *xspi)
+ {
+- struct xilinx_spi *xspi;
+- struct spi_master *master;
+-
+- master = platform_get_drvdata(ofdev);
+- xspi = spi_master_get_devdata(master);
+-
+- spi_bitbang_stop(&xspi->bitbang);
+- free_irq(xspi->irq, xspi);
+- iounmap(xspi->regs);
+- dev_set_drvdata(&ofdev->dev, 0);
+- spi_master_put(xspi->bitbang.master);
+-
+- return 0;
+-}
+-
+-/* work with hotplug and coldplug */
+-MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+-
+-static int __exit xilinx_spi_of_remove(struct of_device *op)
+-{
+- return xilinx_spi_remove(op);
++ xspi->cr_offset = XSPI_CR_OFFSET_DEF;
++ xspi->sr_offset = XSPI_SR_OFFSET_DEF;
++ xspi->txd_offset = XSPI_TXD_OFFSET_DEF;
++ xspi->rxd_offset = XSPI_RXD_OFFSET_DEF;
++ xspi->ssr_offset = XSPI_SSR_OFFSET_DEF;
+ }
++EXPORT_SYMBOL(xilinx_spi_set_default_reg_offsets);
+
+-static struct of_device_id xilinx_spi_of_match[] = {
+- { .compatible = "xlnx,xps-spi-2.00.a", },
+- { .compatible = "xlnx,xps-spi-2.00.b", },
+- {}
+-};
+-
+-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+-
+-static struct of_platform_driver xilinx_spi_of_driver = {
+- .owner = THIS_MODULE,
+- .name = "xilinx-xps-spi",
+- .match_table = xilinx_spi_of_match,
+- .probe = xilinx_spi_of_probe,
+- .remove = __exit_p(xilinx_spi_of_remove),
+- .driver = {
+- .name = "xilinx-xps-spi",
+- .owner = THIS_MODULE,
+- },
+-};
+-
+-static int __init xilinx_spi_init(void)
+-{
+- return of_register_platform_driver(&xilinx_spi_of_driver);
+-}
+-module_init(xilinx_spi_init);
+-
+-static void __exit xilinx_spi_exit(void)
+-{
+- of_unregister_platform_driver(&xilinx_spi_of_driver);
+-}
+-module_exit(xilinx_spi_exit);
+ MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+ MODULE_DESCRIPTION("Xilinx SPI driver");
+ MODULE_LICENSE("GPL");
+diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi.h linux-2.6.29/drivers/spi/xilinx_spi.h
+--- linux-2.6.29-clean/drivers/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/spi/xilinx_spi.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,52 @@
++/*
++ * xilinx_spi.h
++ *
++ * Xilinx SPI controller driver (master mode only)
++ *
++ * Author: MontaVista Software, Inc.
++ * source@mvista.com
++ *
++ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
++ * terms of the GNU General Public License version 2. This program is licensed
++ * "as is" without any warranty of any kind, whether express or implied.
++ */
++
++#ifndef _XILINX_SPI_H_
++#define _XILINX_SPI_H_ 1
++
++#include <linux/spi/spi.h>
++#include <linux/spi/spi_bitbang.h>
++
++#define XILINX_SPI_NAME "xilinx_spi"
++
++
++struct xilinx_spi {
++ /* bitbang has to be first */
++ struct spi_bitbang bitbang;
++ struct completion done;
++
++ void __iomem *regs; /* virt. address of the control registers */
++
++ u32 irq;
++
++ u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
++
++ u8 *rx_ptr; /* pointer in the Rx buffer */
++ const u8 *tx_ptr; /* pointer in the Tx buffer */
++ int remaining_bytes; /* the number of bytes left to transfer */
++ /* offset to the XSPI regs, these might vary... */
++ u8 cr_offset;
++ u8 sr_offset;
++ u8 txd_offset;
++ u8 rxd_offset;
++ u8 ssr_offset;
++};
++
++void xspi_init_hw(struct xilinx_spi *xspi);
++void xilinx_spi_set_default_reg_offsets(struct xilinx_spi *xspi);
++void xilinx_spi_chipselect(struct spi_device *spi, int is_on);
++int xilinx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t);
++int xilinx_spi_setup(struct spi_device *spi);
++int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t);
++irqreturn_t xilinx_spi_irq(int irq, void *dev_id);
++#endif
+diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi_of.c linux-2.6.29/drivers/spi/xilinx_spi_of.c
+--- linux-2.6.29-clean/drivers/spi/xilinx_spi_of.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/spi/xilinx_spi_of.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,193 @@
++/*
++ * xilinx_spi_of.c
++ *
++ * Xilinx SPI controller driver (master mode only)
++ *
++ * Author: MontaVista Software, Inc.
++ * source@mvista.com
++ *
++ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
++ * terms of the GNU General Public License version 2. This program is licensed
++ * "as is" without any warranty of any kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++
++#include <linux/of_platform.h>
++#include <linux/of_device.h>
++#include <linux/of_spi.h>
++
++#include <linux/spi/spi.h>
++#include <linux/spi/spi_bitbang.h>
++
++#include "xilinx_spi.h"
++
++
++static int __init xilinx_spi_of_probe(struct of_device *ofdev,
++ const struct of_device_id *match)
++{
++ struct spi_master *master;
++ struct xilinx_spi *xspi;
++ struct resource r_irq_struct;
++ struct resource r_mem_struct;
++
++ struct resource *r_irq = &r_irq_struct;
++ struct resource *r_mem = &r_mem_struct;
++ int rc = 0;
++ const u32 *prop;
++ int len;
++
++ /* Get resources(memory, IRQ) associated with the device */
++ master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
++
++ if (master == NULL)
++ return -ENOMEM;
++
++ dev_set_drvdata(&ofdev->dev, master);
++
++ rc = of_address_to_resource(ofdev->node, 0, r_mem);
++ if (rc) {
++ dev_warn(&ofdev->dev, "invalid address\n");
++ goto put_master;
++ }
++
++ rc = of_irq_to_resource(ofdev->node, 0, r_irq);
++ if (rc == NO_IRQ) {
++ dev_warn(&ofdev->dev, "no IRQ found\n");
++ goto put_master;
++ }
++
++ xspi = spi_master_get_devdata(master);
++ xspi->bitbang.master = spi_master_get(master);
++ xspi->bitbang.chipselect = xilinx_spi_chipselect;
++ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
++ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
++ xspi->bitbang.master->setup = xilinx_spi_setup;
++ init_completion(&xspi->done);
++
++ xspi->irq = r_irq->start;
++
++ if (!request_mem_region(r_mem->start,
++ r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
++ rc = -ENXIO;
++ dev_warn(&ofdev->dev, "memory request failure\n");
++ goto put_master;
++ }
++
++ xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
++ if (xspi->regs == NULL) {
++ rc = -ENOMEM;
++ dev_warn(&ofdev->dev, "ioremap failure\n");
++ goto put_master;
++ }
++ xspi->irq = r_irq->start;
++
++ /* dynamic bus assignment */
++ master->bus_num = -1;
++
++ /* number of slave select bits is required */
++ prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
++ if (!prop || len < sizeof(*prop)) {
++ dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
++ goto put_master;
++ }
++ master->num_chipselect = *prop;
++
++ xilinx_spi_set_default_reg_offsets(xspi);
++
++ /* SPI controller initializations */
++ xspi_init_hw(xspi);
++
++ /* Register for SPI Interrupt */
++ rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
++ if (rc != 0) {
++ dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
++ goto unmap_io;
++ }
++
++ rc = spi_bitbang_start(&xspi->bitbang);
++ if (rc != 0) {
++ dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
++ goto free_irq;
++ }
++
++ dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
++ (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
++
++ /* Add any subnodes on the SPI bus */
++ of_register_spi_devices(master, ofdev->node);
++
++ return rc;
++
++free_irq:
++ free_irq(xspi->irq, xspi);
++unmap_io:
++ iounmap(xspi->regs);
++put_master:
++ spi_master_put(master);
++ return rc;
++}
++
++static int __devexit xilinx_spi_remove(struct of_device *ofdev)
++{
++ struct xilinx_spi *xspi;
++ struct spi_master *master;
++
++ master = platform_get_drvdata(ofdev);
++ xspi = spi_master_get_devdata(master);
++
++ spi_bitbang_stop(&xspi->bitbang);
++ free_irq(xspi->irq, xspi);
++ iounmap(xspi->regs);
++ dev_set_drvdata(&ofdev->dev, 0);
++ spi_master_put(xspi->bitbang.master);
++
++ return 0;
++}
++
++/* work with hotplug and coldplug */
++MODULE_ALIAS("platform:" XILINX_SPI_NAME);
++
++static int __exit xilinx_spi_of_remove(struct of_device *op)
++{
++ return xilinx_spi_remove(op);
++}
++
++static struct of_device_id xilinx_spi_of_match[] = {
++ { .compatible = "xlnx,xps-spi-2.00.a", },
++ { .compatible = "xlnx,xps-spi-2.00.b", },
++ {}
++};
++
++MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
++
++static struct of_platform_driver xilinx_spi_of_driver = {
++ .owner = THIS_MODULE,
++ .name = "xilinx-xps-spi",
++ .match_table = xilinx_spi_of_match,
++ .probe = xilinx_spi_of_probe,
++ .remove = __exit_p(xilinx_spi_of_remove),
++ .driver = {
++ .name = "xilinx-xps-spi",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init xilinx_spi_init(void)
++{
++ return of_register_platform_driver(&xilinx_spi_of_driver);
++}
++module_init(xilinx_spi_init);
++
++static void __exit xilinx_spi_exit(void)
++{
++ of_unregister_platform_driver(&xilinx_spi_of_driver);
++}
++module_exit(xilinx_spi_exit);
++MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
++MODULE_DESCRIPTION("Xilinx SPI driver");
++MODULE_LICENSE("GPL");
+diff -uNr linux-2.6.29-clean/drivers/spi/xilinx_spi_pltfm.c linux-2.6.29/drivers/spi/xilinx_spi_pltfm.c
+--- linux-2.6.29-clean/drivers/spi/xilinx_spi_pltfm.c 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/drivers/spi/xilinx_spi_pltfm.c 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,184 @@
++/*
++ * xilinx_spi_pltfm.c Support for Xilinx SPI platform devices
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Xilinx SPI devices as platform devices
++ *
++ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++
++#include <linux/spi/spi.h>
++#include <linux/spi/spi_bitbang.h>
++#include <linux/spi/xilinx_spi.h>
++
++#include "xilinx_spi.h"
++
++static int __init xilinx_spi_probe(struct platform_device *dev)
++{
++ int ret = 0;
++ struct spi_master *master;
++ struct xilinx_spi *xspi;
++ struct xspi_platform_data *pdata;
++ struct resource *r;
++
++ master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
++
++ if (master == NULL)
++ return -ENOMEM;
++
++
++ platform_set_drvdata(dev, master);
++ pdata = dev->dev.platform_data;
++ if (pdata == NULL) {
++ ret = -ENODEV;
++ goto put_master;
++ }
++
++ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (r == NULL) {
++ ret = -ENODEV;
++ goto put_master;
++ }
++
++ xspi = spi_master_get_devdata(master);
++ xspi->bitbang.master = spi_master_get(master);
++ xspi->bitbang.chipselect = xilinx_spi_chipselect;
++ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
++ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
++ xspi->bitbang.master->setup = xilinx_spi_setup;
++ init_completion(&xspi->done);
++
++ if (!request_mem_region(r->start, resource_size(r), XILINX_SPI_NAME)) {
++ ret = -ENXIO;
++ goto put_master;
++ }
++
++ xspi->regs = ioremap(r->start, resource_size(r));
++ if (xspi->regs == NULL) {
++ ret = -ENOMEM;
++ goto map_failed;
++ }
++
++ ret = platform_get_irq(dev, 0);
++ if (ret < 0) {
++ ret = -ENXIO;
++ goto unmap_io;
++ }
++ xspi->irq = ret;
++
++ master->bus_num = pdata->bus_num;
++ master->num_chipselect = pdata->num_chipselect;
++ xspi->speed_hz = pdata->speed_hz;
++ xilinx_spi_set_default_reg_offsets(xspi);
++ if (pdata->cr_offset)
++ xspi->cr_offset = pdata->cr_offset;
++ if (pdata->sr_offset)
++ xspi->sr_offset = pdata->sr_offset;
++ if (pdata->txd_offset)
++ xspi->txd_offset = pdata->txd_offset;
++ if (pdata->rxd_offset)
++ xspi->rxd_offset = pdata->rxd_offset;
++ if (pdata->ssr_offset)
++ xspi->ssr_offset = pdata->ssr_offset;
++
++ /* SPI controller initializations */
++ xspi_init_hw(xspi);
++
++ /* Register for SPI Interrupt */
++ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
++ if (ret != 0)
++ goto unmap_io;
++
++ ret = spi_bitbang_start(&xspi->bitbang);
++ if (ret != 0) {
++ dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
++ goto free_irq;
++ }
++
++ dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
++ (u32)r->start, (u32)xspi->regs, xspi->irq);
++ return ret;
++
++free_irq:
++ free_irq(xspi->irq, xspi);
++unmap_io:
++ iounmap(xspi->regs);
++map_failed:
++ release_mem_region(r->start, resource_size(r));
++put_master:
++ spi_master_put(master);
++ return ret;
++}
++
++static int __devexit xilinx_spi_remove(struct platform_device *dev)
++{
++ struct xilinx_spi *xspi;
++ struct spi_master *master;
++ struct resource *r;
++
++ master = platform_get_drvdata(dev);
++ xspi = spi_master_get_devdata(master);
++ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
++
++ spi_bitbang_stop(&xspi->bitbang);
++ free_irq(xspi->irq, xspi);
++ iounmap(xspi->regs);
++
++ if (r)
++ release_mem_region(r->start, resource_size(r));
++
++ platform_set_drvdata(dev, 0);
++ spi_master_put(xspi->bitbang.master);
++
++ return 0;
++}
++
++/* work with hotplug and coldplug */
++MODULE_ALIAS("platform:" XILINX_SPI_NAME);
++
++static struct platform_driver xilinx_spi_driver = {
++ .probe = xilinx_spi_probe,
++ .remove = __devexit_p(xilinx_spi_remove),
++ .driver = {
++ .name = XILINX_SPI_NAME,
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init xilinx_spi_init(void)
++{
++ return platform_driver_register(&xilinx_spi_driver);
++}
++module_init(xilinx_spi_init);
++
++static void __exit xilinx_spi_exit(void)
++{
++ platform_driver_unregister(&xilinx_spi_driver);
++}
++module_exit(xilinx_spi_exit);
++
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_DESCRIPTION("Xilinx SPI platform driver");
++MODULE_LICENSE("GPL v2");
++
+diff -uNr linux-2.6.29-clean/include/linux/i2c-ocores.h linux-2.6.29/include/linux/i2c-ocores.h
+--- linux-2.6.29-clean/include/linux/i2c-ocores.h 2009-04-01 09:20:20.000000000 -0700
++++ linux-2.6.29/include/linux/i2c-ocores.h 2009-04-06 13:51:47.000000000 -0700
+@@ -14,6 +14,8 @@
+ struct ocores_i2c_platform_data {
+ u32 regstep; /* distance between registers */
+ u32 clock_khz; /* input clock in kHz */
++ u8 num_devices; /* number of devices in the devices list */
++ struct i2c_board_info const *devices; /* devices connected to the bus */
+ };
+
+ #endif /* _LINUX_I2C_OCORES_H */
+diff -uNr linux-2.6.29-clean/include/linux/mfd/timbdma.h linux-2.6.29/include/linux/mfd/timbdma.h
+--- linux-2.6.29-clean/include/linux/mfd/timbdma.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/include/linux/mfd/timbdma.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,80 @@
++/*
++ * timbdma.h timberdale FPGA DMA driver defines
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA DMA engine
++ */
++
++#ifndef _TIMBDMA_H
++#define _TIMBDMA_H
++
++#include <linux/spinlock.h>
++
++
++#define DMA_IRQ_UART_RX 0x01
++#define DMA_IRQ_UART_TX 0x02
++#define DMA_IRQ_MLB_RX 0x04
++#define DMA_IRQ_MLB_TX 0x08
++#define DMA_IRQ_VIDEO_RX 0x10
++#define DMA_IRQ_VIDEO_DROP 0x20
++#define DMA_IRQS 6
++
++
++typedef int (*timbdma_interruptcb)(u32 flag, void *data);
++
++enum timbdma_ctrlmap {
++ timbdma_ctrlmap_DMACFGBTUART = 0x000000,
++ timbdma_ctrlmap_DMACFGMLBSY = 0x000040,
++ timbdma_ctrlmap_DMACFGVIDEO = 0x000080,
++ timbdma_ctrlmap_TIMBSTATUS = 0x080000,
++ timbdma_ctrlmap_TIMBPEND = 0x080004,
++ timbdma_ctrlmap_TIMBENABLE = 0x080008,
++ timbdma_ctrlmap_VIDEOBUFFER = 0x200000
++};
++
++enum timbdma_dmacfg {
++ timbdma_dmacfg_RXSTARTH = 0x00,
++ timbdma_dmacfg_RXSTARTL = 0x04,
++ timbdma_dmacfg_RXLENGTH = 0x08,
++ timbdma_dmacfg_RXFPGAWP = 0x0C,
++ timbdma_dmacfg_RXSWRP = 0x10,
++ timbdma_dmacfg_RXENABLE = 0x14,
++ timbdma_dmacfg_TXSTARTH = 0x18,
++ timbdma_dmacfg_TXSTARTL = 0x1C,
++ timbdma_dmacfg_TXLENGTH = 0x20,
++ timbdma_dmacfg_TXSWWP = 0x24,
++ timbdma_dmacfg_TXFPGARP = 0x28,
++ timbdma_dmacfg_TXBEFINT = 0x2C,
++ timbdma_dmacfg_BPERROW = 0x30
++};
++
++struct timbdma_dev {
++ void __iomem *membase;
++ timbdma_interruptcb callbacks[DMA_IRQS];
++ void *callback_data[DMA_IRQS];
++ spinlock_t lock; /* mutual exclusion */
++};
++
++void timb_start_dma(u32 flag, unsigned long buf, int len, int bytes_per_row);
++
++void *timb_stop_dma(u32 flags);
++
++void timb_set_dma_interruptcb(u32 flags, timbdma_interruptcb icb, void *data);
++
++#endif /* _TIMBDMA_H */
++
+diff -uNr linux-2.6.29-clean/include/linux/mfd/timbi2s.h linux-2.6.29/include/linux/mfd/timbi2s.h
+--- linux-2.6.29-clean/include/linux/mfd/timbi2s.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/include/linux/mfd/timbi2s.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,66 @@
++/*
++ * timbi2s.h timberdale FPGA I2S driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA I2S
++ */
++
++struct timbi2s_bus_control {
++ struct list_head list;
++};
++
++struct timbi2s_bus {
++ void __iomem *membase;
++ u32 irq;
++ struct timbi2s_bus_control *control;
++ struct workqueue_struct *workqueue;
++ struct work_struct work;
++};
++
++struct timbi2s_dev {
++ void __iomem *membase;
++ u32 irq;
++ struct timbi2s_bus *bus;
++ struct workqueue_struct *workqueue;
++ struct work_struct work;
++ u32 ioctrl;
++ u32 devid;
++ u8 timbi2s_rx;
++ u8 timbi2s_tx;
++ struct circ_buf *buffer;
++ /* Register access */
++ spinlock_t lock;
++
++ int in_use;
++ u8 pscale_offset; /* Prescale */
++ u8 icr_offset; /* Clear register */
++ u8 isr_offset; /* Status */
++ u8 ipr_offset; /* Pending register */
++ u8 ier_offset; /* Interrupt Enable register */
++ u8 ctrl_offset;
++ u8 fifo;
++
++ struct list_head item;
++};
++
++static struct timbi2s_dev *timbi2s_get_tx(void);
++static struct timbi2s_dev *timbi2s_get_rx(void);
++static void timbi2s_put(struct timbi2s_dev *tdev);
++
++static int timbi2s_ioctrl(struct timbi2s_dev *i2sdev);
++
+diff -uNr linux-2.6.29-clean/include/linux/serial_core.h linux-2.6.29/include/linux/serial_core.h
+--- linux-2.6.29-clean/include/linux/serial_core.h 2009-04-01 09:20:20.000000000 -0700
++++ linux-2.6.29/include/linux/serial_core.h 2009-04-06 13:51:47.000000000 -0700
+@@ -164,6 +164,9 @@
+ /* NWPSERIAL */
+ #define PORT_NWPSERIAL 85
+
++/* Timberdale UART */
++#define PORT_TIMBUART 86
++
+ #ifdef __KERNEL__
+
+ #include <linux/compiler.h>
+diff -uNr linux-2.6.29-clean/include/linux/spi/xilinx_spi.h linux-2.6.29/include/linux/spi/xilinx_spi.h
+--- linux-2.6.29-clean/include/linux/spi/xilinx_spi.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/include/linux/spi/xilinx_spi.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,17 @@
++#ifndef __LINUX_SPI_XILINX_SPI_H
++#define __LINUX_SPI_XILINX_SPI_H
++
++/* SPI Controller IP */
++struct xspi_platform_data {
++ s16 bus_num;
++ u16 num_chipselect;
++ u32 speed_hz;
++ u8 cr_offset;
++ u8 sr_offset;
++ u8 txd_offset;
++ u8 rxd_offset;
++ u8 ssr_offset;
++};
++
++#endif /* __LINUX_SPI_XILINX_SPI_H */
++
+diff -uNr linux-2.6.29-clean/include/media/adv7180.h linux-2.6.29/include/media/adv7180.h
+--- linux-2.6.29-clean/include/media/adv7180.h 1969-12-31 16:00:00.000000000 -0800
++++ linux-2.6.29/include/media/adv7180.h 2009-04-06 13:51:47.000000000 -0700
+@@ -0,0 +1,127 @@
++/*
++ * adv7180.h Analog Devices ADV7180 video decoder driver defines
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#define DRIVER_NAME "adv7180"
++
++#define I2C_ADV7180 0x42
++#define ADV7180_NR_REG 0xfc
++
++#define ADV7180_IN_CTRL 0x00 /* Input CR */
++#define ADV7180_OUT_CTRL 0x03 /* Output CR */
++#define ADV7180_EXT_OUT_CTRL 0x04 /* Extended Output CR */
++
++#define ADV7180_ADI_CTRL 0x0e /* ADI CR */
++# define ADI_ENABLE 0x20 /* Enable access to sub-regs */
++
++#define ADV7180_SR_1 0x10 /* Status Register 1 */
++#define ADV7180_SR_2 0x12
++#define ADV7180_SR_3 0x13
++
++/* Interrupt and VDP sub-registers */
++#define ADV7180_ISR_1 0x42 /* Interrupt Status Register 1 */
++#define ADV7180_ICR_1 0x43 /* Interrupt Clear Register 1 */
++
++#define ADV7180_ISR_2 0x46
++#define ADV7180_ICR_2 0x47
++
++#define ADV7180_ISR_3 0x4a
++#define ADV7180_ICR_3 0x4b
++
++#define ADV7180_ISR_4 0x4e
++#define ADV7180_ICR_4 0x4f
++/* */
++
++#define ADV7180_SR 0x10
++#define ADV7180_STATUS_NTSM 0x00 /* NTSM M/J */
++#define ADV7180_STATUS_NTSC 0x10 /* NTSC 4.43 */
++#define ADV7180_STATUS_PAL_M 0x20 /* PAL M */
++#define ADV7180_STATUS_PAL_60 0x30 /* PAL 60 */
++#define ADV7180_STATUS_PAL 0x40 /* PAL B/G/H/I/D */
++#define ADV7180_STATUS_SECAM 0x50 /* SECAM */
++#define ADV7180_STATUS_PAL_N 0x60 /* PAL Combination N */
++#define ADV7180_STATUS_SECAM_525 0x70 /* SECAM 525 */
++
++enum input_mode {
++ CVBS, /* Composite */
++ SVIDEO, /* S-video */
++ YPbPr, /* Component */
++};
++
++struct adv7180 {
++ unsigned char reg[ADV7180_NR_REG];
++ int norm;
++ enum input_mode input;
++ int enable;
++ struct i2c_client *client;
++};
++
++static const unsigned char reset_icr[] = {
++ ADV7180_ICR_1, 0x00,
++ ADV7180_ICR_2, 0x00,
++ ADV7180_ICR_3, 0x00,
++ ADV7180_ICR_4, 0x00,
++};
++
++/* ADV7180 LQFP-64. ADV7180.pdf, page 104 */
++static const unsigned char init_cvbs_64[] = {
++ 0x00, 0x01, /* INSEL = CVBS in on Ain2 */
++ 0x04, 0x57, /* Enable SFL */
++ 0x17, 0x41, /* Select SH1 */
++
++ 0x31, 0x02, /* Clear NEWAV_MODE, SAV/EAV to
++ * suit ADV video encoders
++ */
++ 0x3d, 0xa2, /* MWE enable manual window,
++ * color kill threshold to 2
++ */
++ 0x3e, 0x6a, /* BLM optimization */
++ 0x3f, 0xa0, /* BGB optimization */
++ 0x0e, 0x80, /* Hidden space */
++ 0x55, 0x81, /* ADC configuration */
++ 0x0e, 0x00, /* User space */
++};
++
++static const unsigned char init_svideo_64[] = {
++ 0x00, 0x08, /* Insel = Y/C, Y = AIN3, C = AIN6 */
++ 0x04, 0x57, /* Enable SFL */
++ 0x31, 0x02, /* Clear NEWAV_MODE, SAV/EAV to
++ * suit ADV video encoders
++ */
++ 0x3d, 0xa2, /* MWE enable manual window,
++ * color kill threshold to 2
++ */
++ 0x3e, 0x6a, /* BLM optimization */
++ 0x3f, 0xa0, /* BGB optimization */
++ 0x58, 0x04, /* Mandatory write. This must be
++ * performed for correct operation.
++ */
++ 0x0e, 0x80, /* Hidden space */
++ 0x55, 0x81, /* ADC configuration */
++ 0x0e, 0x00, /* User space */
++};
++
++static const unsigned char init_ypbpr_64[] = {
++ 0x00, 0x09, /* INSEL = YPrPb, Y = AIN1, Pr = AIN4, Pb = AIN5 */
++ 0x31, 0x02, /* Clear NEWAV_MODE, SAV/EAV to suit ADV video encoders */
++ 0x3d, 0xa2, /* MWE enable manual window */
++ 0x3e, 0x6a, /* BLM optimization */
++ 0x3f, 0xa0, /* ADI recommended */
++ 0x0e, 0x80, /* Hidden space */
++ 0x55, 0x81, /* ADC configuration */
++ 0x0e, 0x00, /* User space */
++};
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch
new file mode 100644
index 000000000..92e71fa31
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.29-touchkit.patch
@@ -0,0 +1,130 @@
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index f8f86de..5d4cea2 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -676,6 +676,9 @@ static int psmouse_extensions(struct psmouse *psmouse,
+
+ if (touchkit_ps2_detect(psmouse, set_properties) == 0)
+ return PSMOUSE_TOUCHKIT_PS2;
++
++ if (elftouch_ps2_detect(psmouse, set_properties) == 0)
++ return PSMOUSE_ELFTOUCH_PS2;
+ }
+
+ /*
+@@ -786,6 +789,12 @@ static const struct psmouse_protocol psmouse_protocols[] = {
+ .alias = "trackpoint",
+ .detect = trackpoint_detect,
+ },
++ {
++ .type = PSMOUSE_ELFTOUCH_PS2,
++ .name = "elftouchPS2",
++ .alias = "elftouch",
++ .detect = elftouch_ps2_detect,
++ },
+ #endif
+ #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
+ {
+diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
+index 54ed267..8d1ba79 100644
+--- a/drivers/input/mouse/psmouse.h
++++ b/drivers/input/mouse/psmouse.h
+@@ -89,6 +89,7 @@ enum psmouse_type {
+ PSMOUSE_TRACKPOINT,
+ PSMOUSE_TOUCHKIT_PS2,
+ PSMOUSE_CORTRON,
++ PSMOUSE_ELFTOUCH_PS2,
+ PSMOUSE_HGPK,
+ PSMOUSE_ELANTECH,
+ PSMOUSE_AUTO /* This one should always be last */
+diff --git a/drivers/input/mouse/touchkit_ps2.c b/drivers/input/mouse/touchkit_ps2.c
+index 3fadb2a..e9c27f1 100644
+--- a/drivers/input/mouse/touchkit_ps2.c
++++ b/drivers/input/mouse/touchkit_ps2.c
+@@ -51,6 +51,11 @@
+ #define TOUCHKIT_GET_X(packet) (((packet)[1] << 7) | (packet)[2])
+ #define TOUCHKIT_GET_Y(packet) (((packet)[3] << 7) | (packet)[4])
+
++#define ELFTOUCH_MAX_XC 0x0fff
++#define ELFTOUCH_MAX_YC 0x0fff
++#define ELFTOUCH_GET_X(packet) (((packet)[3] << 7) | (packet)[4])
++#define ELFTOUCH_GET_Y(packet) (((packet)[1] << 7) | (packet)[2])
++
+ static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
+ {
+ unsigned char *packet = psmouse->packet;
+@@ -59,9 +64,15 @@ static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse)
+ if (psmouse->pktcnt != 5)
+ return PSMOUSE_GOOD_DATA;
+
+- input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
+- input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
++ if(psmouse->type==PSMOUSE_ELFTOUCH_PS2) {
++ input_report_abs(dev, ABS_X, ELFTOUCH_GET_X(packet));
++ input_report_abs(dev, ABS_Y, ELFTOUCH_GET_Y(packet));
++ } else {
++ input_report_abs(dev, ABS_X, TOUCHKIT_GET_X(packet));
++ input_report_abs(dev, ABS_Y, TOUCHKIT_GET_Y(packet));
++ }
+ input_report_key(dev, BTN_TOUCH, TOUCHKIT_GET_TOUCHED(packet));
++
+ input_sync(dev);
+
+ return PSMOUSE_FULL_PACKET;
+@@ -98,3 +109,33 @@ int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties)
+
+ return 0;
+ }
++
++int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties)
++{
++ struct input_dev *dev = psmouse->dev;
++ unsigned char param[16];
++ int command, res;
++
++ param[0]=0x0f4;
++ command = TOUCHKIT_SEND_PARMS(1, 0, TOUCHKIT_CMD);
++ res=ps2_command(&psmouse->ps2dev, param, command);
++ if(res) { return -ENODEV; }
++
++ param[0]=0x0b0;
++ command = TOUCHKIT_SEND_PARMS(1, 1, TOUCHKIT_CMD);
++ res=ps2_command(&psmouse->ps2dev, param, command);
++ if(res) { return -ENODEV; }
++
++ if (set_properties) {
++ dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
++ set_bit(BTN_TOUCH, dev->keybit);
++ input_set_abs_params(dev, ABS_X, 0, ELFTOUCH_MAX_XC, 0, 0);
++ input_set_abs_params(dev, ABS_Y, 0, ELFTOUCH_MAX_YC, 0, 0);
++
++ psmouse->vendor = "ElfTouch";
++ psmouse->name = "Touchscreen";
++ psmouse->protocol_handler = touchkit_ps2_process_byte;
++ psmouse->pktsize = 5;
++ }
++ return 0;
++}
+diff --git a/drivers/input/mouse/touchkit_ps2.h b/drivers/input/mouse/touchkit_ps2.h
+index 8a0dd35..f32ef4c 100644
+--- a/drivers/input/mouse/touchkit_ps2.h
++++ b/drivers/input/mouse/touchkit_ps2.h
+@@ -14,12 +14,18 @@
+
+ #ifdef CONFIG_MOUSE_PS2_TOUCHKIT
+ int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties);
++int elftouch_ps2_detect(struct psmouse *psmouse, int set_properties);
+ #else
+ static inline int touchkit_ps2_detect(struct psmouse *psmouse,
+ int set_properties)
+ {
+ return -ENOSYS;
+ }
++static inline int elftouch_ps2_detect(struct psmouse *psmouse,
++ int set_properties)
++{
++ return -ENOSYS;
++}
+ #endif /* CONFIG_MOUSE_PS2_TOUCHKIT */
+
+ #endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch
new file mode 100644
index 000000000..a489339cb
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-async.patch
@@ -0,0 +1,69 @@
+
+Gitweb: http://git.kernel.org/linus/d6de2c80e9d758d2e36c21699117db6178c0f517
+Commit: d6de2c80e9d758d2e36c21699117db6178c0f517
+Parent: 7933a3cfba017330ebb25f9820cb25ec9cdd67cc
+Author: Linus Torvalds <torvalds@linux-foundation.org>
+AuthorDate: Fri Apr 10 12:17:41 2009 -0700
+Committer: Linus Torvalds <torvalds@linux-foundation.org>
+CommitDate: Sat Apr 11 12:44:49 2009 -0700
+
+ async: Fix module loading async-work regression
+
+ Several drivers use asynchronous work to do device discovery, and we
+ synchronize with them in the compiled-in case before we actually try to
+ mount root filesystems etc.
+
+ However, when compiled as modules, that synchronization is missing - the
+ module loading completes, but the driver hasn't actually finished
+ probing for devices, and that means that any user mode that expects to
+ use the devices after the 'insmod' is now potentially broken.
+
+ We already saw one case of a similar issue in the ACPI battery code,
+ where the kernel itself expected the module to be all done, and unmapped
+ the init memory - but the async device discovery was still running.
+ That got hacked around by just removing the "__init" (see commit
+ 5d38258ec026921a7b266f4047ebeaa75db358e5 "ACPI battery: fix async boot
+ oops"), but the real fix is to just make the module loading wait for all
+ async work to be completed.
+
+ It will slow down module loading, but since common devices should be
+ built in anyway, and since the bug is really annoying and hard to handle
+ from user space (and caused several S3 resume regressions), the simple
+ fix to wait is the right one.
+
+ This fixes at least
+
+ http://bugzilla.kernel.org/show_bug.cgi?id=13063
+
+ but probably a few other bugzilla entries too (12936, for example), and
+ is confirmed to fix Rafael's storage driver breakage after resume bug
+ report (no bugzilla entry).
+
+ We should also be able to now revert that ACPI battery fix.
+
+ Reported-and-tested-by: Rafael J. Wysocki <rjw@suse.com>
+ Tested-by: Heinz Diehl <htd@fancy-poultry.org>
+ Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ kernel/module.c | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/kernel/module.c b/kernel/module.c
+index 05f014e..e797812 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2388,6 +2388,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_LIVE, mod);
+
++ /* We need to finish all async code before the module init sequence is done */
++ async_synchronize_full();
++
+ mutex_lock(&module_mutex);
+ /* Drop initial reference. */
+ module_put(mod);
+--
+To unsubscribe from this list: send the line "unsubscribe git-commits-head" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch
new file mode 100644
index 000000000..3932a51ae
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.29.1/linux-2.6.30-fix-suspend.patch
@@ -0,0 +1,139 @@
+From: Rafael J. Wysocki <rjw@suse.com>
+Organization: SUSE
+To: Arjan van de Ven <arjan@linux.intel.com>
+CC: Linus Torvalds <torvalds@linux-foundation.org>
+
+
+OK, updated patch follows, with a changelog.
+
+I've added this check to user.c too, because that code can be called
+independently of the one in disk.c . Also, if resume is user space-driven,
+it's a good idea to wait for all of the device probes to complete before
+continuing.
+
+Thanks,
+Rafael
+
+---
+From: Rafael J. Wysocki <rjw@sisk.pl>
+Subject: PM/Hibernate: Wait for SCSI devices scan to complete during resume
+
+There is a race between resume from hibernation and the asynchronous
+scanning of SCSI devices and to prevent it from happening we need to
+call scsi_complete_async_scans() during resume from hibernation.
+
+In addition, if the resume from hibernation is userland-driven, it's
+better to wait for all device probes in the kernel to complete before
+attempting to open the resume device.
+
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+---
+ drivers/scsi/scsi_priv.h | 3 ---
+ drivers/scsi/scsi_wait_scan.c | 2 +-
+ include/scsi/scsi_scan.h | 11 +++++++++++
+ kernel/power/disk.c | 8 ++++++++
+ kernel/power/user.c | 9 +++++++++
+ 5 files changed, 29 insertions(+), 4 deletions(-)
+
+Index: linux-2.6/include/scsi/scsi_scan.h
+===================================================================
+--- /dev/null
++++ linux-2.6/include/scsi/scsi_scan.h
+@@ -0,0 +1,11 @@
++#ifndef _SCSI_SCSI_SCAN_H
++#define _SCSI_SCSI_SCAN_H
++
++#ifdef CONFIG_SCSI
++/* drivers/scsi/scsi_scan.c */
++extern int scsi_complete_async_scans(void);
++#else
++static inline int scsi_complete_async_scans(void) { return 0; }
++#endif
++
++#endif /* _SCSI_SCSI_SCAN_H */
+Index: linux-2.6/drivers/scsi/scsi_priv.h
+===================================================================
+--- linux-2.6.orig/drivers/scsi/scsi_priv.h
++++ linux-2.6/drivers/scsi/scsi_priv.h
+@@ -38,9 +38,6 @@ static inline void scsi_log_completion(s
+ { };
+ #endif
+
+-/* scsi_scan.c */
+-int scsi_complete_async_scans(void);
+-
+ /* scsi_devinfo.c */
+ extern int scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+Index: linux-2.6/drivers/scsi/scsi_wait_scan.c
+===================================================================
+--- linux-2.6.orig/drivers/scsi/scsi_wait_scan.c
++++ linux-2.6/drivers/scsi/scsi_wait_scan.c
+@@ -11,7 +11,7 @@
+ */
+
+ #include <linux/module.h>
+-#include "scsi_priv.h"
++#include <scsi/scsi_scan.h>
+
+ static int __init wait_scan_init(void)
+ {
+Index: linux-2.6/kernel/power/disk.c
+===================================================================
+--- linux-2.6.orig/kernel/power/disk.c
++++ linux-2.6/kernel/power/disk.c
+@@ -22,5 +22,6 @@
+ #include <linux/console.h>
+ #include <linux/cpu.h>
+ #include <linux/freezer.h>
++#include <scsi/scsi_scan.h>
+
+ #include "power.h"
+@@ -645,6 +646,13 @@ static int software_resume(void)
+ return 0;
+
+ /*
++ * We can't depend on SCSI devices being available after loading one of
++ * their modules if scsi_complete_async_scans() is not called and the
++ * resume device usually is a SCSI one.
++ */
++ scsi_complete_async_scans();
++
++ /*
+ * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
+ * is configured into the kernel. Since the regular hibernate
+ * trigger path is via sysfs which takes a buffer mutex before
+Index: linux-2.6/kernel/power/user.c
+===================================================================
+--- linux-2.6.orig/kernel/power/user.c
++++ linux-2.6/kernel/power/user.c
+@@ -24,6 +24,7 @@
+ #include <linux/cpu.h>
+ #include <linux/freezer.h>
+ #include <linux/smp_lock.h>
++#include <scsi/scsi_scan.h>
+
+ #include <asm/uaccess.h>
+
+@@ -92,6 +93,7 @@ static int snapshot_open(struct inode *i
+ filp->private_data = data;
+ memset(&data->handle, 0, sizeof(struct snapshot_handle));
+ if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
++ /* Hibernating. The image device should be accessible. */
+ data->swap = swsusp_resume_device ?
+ swap_type_of(swsusp_resume_device, 0, NULL) : -1;
+ data->mode = O_RDONLY;
+@@ -99,6 +101,13 @@ static int snapshot_open(struct inode *i
+ if (error)
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
+ } else {
++ /*
++ * Resuming. We may need to wait for the image device to
++ * appear.
++ */
++ wait_for_device_probe();
++ scsi_complete_async_scans();
++
+ data->swap = -1;
+ data->mode = O_WRONLY;
+ error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.27.bb b/meta-moblin/packages/linux/linux-moblin_2.6.27.bb
deleted file mode 100644
index 82f7b435e..000000000
--- a/meta-moblin/packages/linux/linux-moblin_2.6.27.bb
+++ /dev/null
@@ -1,59 +0,0 @@
-require linux-moblin.inc
-
-PR = "r8"
-PE = "1"
-
-DEFAULT_PREFERENCE = "-1"
-DEFAULT_PREFERENCE_netbook = "1"
-DEFAULT_PREFERENCE_menlow = "1"
-
-SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.27.tar.bz2 \
- file://0001-drm-remove-define-for-non-linux-systems.patch;patch=1 \
- file://0002-i915-remove-settable-use_mi_batchbuffer_start.patch;patch=1 \
- file://0003-i915-Ignore-X-server-provided-mmio-address.patch;patch=1 \
- file://0004-i915-Use-more-consistent-names-for-regs-and-store.patch;patch=1 \
- file://0005-i915-Add-support-for-MSI-and-interrupt-mitigation.patch;patch=1 \
- file://0006-i915-Track-progress-inside-of-batchbuffers-for-dete.patch;patch=1 \
- file://0007-i915-Initialize-hardware-status-page-at-device-load.patch;patch=1 \
- file://0008-Add-Intel-ACPI-IGD-OpRegion-support.patch;patch=1 \
- file://0009-drm-fix-sysfs-error-path.patch;patch=1 \
- file://0010-i915-separate-suspend-resume-functions.patch;patch=1 \
- file://0011-drm-vblank-rework.patch;patch=1 \
- file://0012-Export-shmem_file_setup-for-DRM-GEM.patch;patch=1 \
- file://0013-Export-kmap_atomic_pfn-for-DRM-GEM.patch;patch=1 \
- file://0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch;patch=1 \
- file://0015-i915-Add-chip-set-ID-param.patch;patch=1 \
- file://0016-i915-Use-struct_mutex-to-protect-ring-in-GEM-mode.patch;patch=1 \
- file://0017-i915-Make-use-of-sarea_priv-conditional.patch;patch=1 \
- file://0018-i915-gem-install-and-uninstall-irq-handler-in-enter.patch;patch=1 \
- file://0019-DRM-Return-EBADF-on-bad-object-in-flink-and-retur.patch;patch=1 \
- file://0020-drm-Avoid-oops-in-GEM-execbuffers-with-bad-argument.patch;patch=1 \
- file://0021-drm-G33-class-hardware-has-a-newer-965-style-MCH-n.patch;patch=1 \
- file://0022-drm-use-ioremap_wc-in-i915-instead-of-ioremap.patch;patch=1 \
- file://0023-drm-clean-up-many-sparse-warnings-in-i915.patch;patch=1 \
- file://0024-fastboot-create-a-asynchronous-initlevel.patch;patch=1 \
- file://0025-fastboot-turn-the-USB-hostcontroller-initcalls-into.patch;patch=1 \
- file://0026-fastboot-convert-a-few-non-critical-ACPI-drivers-to.patch;patch=1 \
- file://0027-fastboot-hold-the-BKL-over-the-async-init-call-sequ.patch;patch=1 \
- file://0028-fastboot-sync-the-async-execution-before-late_initc.patch;patch=1 \
- file://0029-fastboot-make-fastboot-a-config-option.patch;patch=1 \
- file://0030-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch;patch=1 \
- file://0031-fastboot-make-the-raid-autodetect-code-wait-for-all.patch;patch=1 \
- file://0032-fastboot-remove-wait-for-all-devices-before-mounti.patch;patch=1 \
- file://0033-fastboot-make-the-RAID-autostart-code-print-a-messa.patch;patch=1 \
- file://0034-fastboot-fix-typo-in-init-Kconfig-text.patch;patch=1 \
- file://0035-fastboot-remove-duplicate-unpack_to_rootfs.patch;patch=1 \
- file://0036-warning-fix-init-do_mounts_md-c.patch;patch=1 \
- file://0037-init-initramfs.c-unused-function-when-compiling-wit.patch;patch=1 \
- file://0038-fastboot-fix-blackfin-breakage-due-to-vmlinux.lds-c.patch;patch=1 \
- file://0039-Add-a-script-to-visualize-the-kernel-boot-process.patch;patch=1 \
- file://0040-fastboot-fix-issues-and-improve-output-of-bootgraph.patch;patch=1 \
- file://0041-r8169-8101e.patch;patch=1 \
- file://0042-intelfb-945gme.patch;patch=1 \
- file://0043-superreadahead-patch.patch;patch=1 \
- file://defconfig-menlow \
- file://defconfig-netbook"
-
-SRC_URI_append_menlow = " file://psb-driver.patch;patch=1"
-
-S = "${WORKDIR}/linux-2.6.27"
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.28+2.6.29-rc2.bb b/meta-moblin/packages/linux/linux-moblin_2.6.28+2.6.29-rc2.bb
deleted file mode 100644
index bb807b365..000000000
--- a/meta-moblin/packages/linux/linux-moblin_2.6.28+2.6.29-rc2.bb
+++ /dev/null
@@ -1,24 +0,0 @@
-require linux-moblin.inc
-
-PR = "r2"
-PE = "1"
-
-DEFAULT_PREFERENCE = "-1"
-DEFAULT_PREFERENCE_netbook = "1"
-DEFAULT_PREFERENCE_menlow = "1"
-
-SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.28.tar.bz2 \
- ${KERNELORG_MIRROR}pub/linux/kernel/v2.6/testing/patch-2.6.29-rc2.bz2;patch=1 \
- file://0001-fastboot-retry-mounting-the-root-fs-if-we-can-t-fin.patch;patch=1 \
- file://0002-fastboot-remove-wait-for-all-devices-before-mounti.patch;patch=1 \
- file://0003-fastboot-remove-duplicate-unpack_to_rootfs.patch;patch=1 \
- file://0004-superreadahead-patch.patch;patch=1 \
- file://0005-fastboot-async-enable-default.patch;patch=1 \
- file://0006-Revert-drm-i915-GEM-on-PAE-has-problems-disable.patch;patch=1 \
- file://0007-acer-error-msg.patch;patch=1 \
- file://defconfig-menlow \
- file://defconfig-netbook"
-
-SRC_URI_append_menlow = " file://i915_split.patch;patch=1 file://psb-driver.patch;patch=1"
-
-S = "${WORKDIR}/linux-2.6.28"
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb b/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb
new file mode 100644
index 000000000..d72c96f9c
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin_2.6.29.1.bb
@@ -0,0 +1,46 @@
+require linux-moblin.inc
+
+PR = "r10"
+
+DEFAULT_PREFERENCE = "-1"
+DEFAULT_PREFERENCE_netbook = "1"
+DEFAULT_PREFERENCE_menlow = "1"
+
+SRC_URI = "${KERNELORG_MIRROR}pub/linux/kernel/v2.6/linux-2.6.29.1.tar.bz2 \
+ file://linux-2.6-build-nonintconfig.patch;patch=1 \
+ file://linux-2.6.29-retry-root-mount.patch;patch=1 \
+ file://linux-2.6.29-dont-wait-for-mouse.patch;patch=1 \
+ file://linux-2.6.29-fast-initrd.patch;patch=1 \
+ file://linux-2.6.29-sreadahead.patch;patch=1 \
+ file://linux-2.6.29-enable-async-by-default.patch;patch=1 \
+ file://linux-2.6.29-drm-revert.patch;patch=1 \
+ file://linux-2.6.19-modesetting-by-default.patch;patch=1 \
+ file://linux-2.6.29-fast-kms.patch;patch=1 \
+ file://linux-2.6.29-even-faster-kms.patch;patch=1 \
+ file://linux-2.6.29-silence-acer-message.patch;patch=1 \
+ file://linux-2.6.29-input-introduce-a-tougher-i8042.reset.patch;patch=1 \
+ file://linux-2.6.29-msiwind.patch;patch=1 \
+ file://linux-2.6.29-flip-ide-net.patch;patch=1 \
+ file://linux-2.6.29-kms-after-sata.patch;patch=1 \
+ file://linux-2.6.29-jbd-longer-commit-interval.patch;patch=1 \
+ file://linux-2.6.29-touchkit.patch;patch=1 \
+ file://linux-2.6.30-fix-async.patch;patch=1 \
+ file://linux-2.6.30-fix-suspend.patch;patch=1 \
+ file://0001-drm-Split-out-the-mm-declarations-in-a-separate-hea.patch;patch=1 \
+ file://0002-drm-Add-a-tracker-for-global-objects.patch;patch=1 \
+ file://0003-drm-Export-hash-table-functionality.patch;patch=1 \
+ file://0007-drm-Add-unlocked-IOCTL-functionality-from-the-drm-r.patch;patch=1 \
+ file://linux-2.6.29-psb-driver.patch;patch=1 \
+ file://linux-2.6.29-psb-S0i1_and_S0i3_OSPM_support.patch;patch=1 \
+ file://linux-2.6.29-e100-add-support-for-82552-10-100-adapter.patch;patch=1 \
+ file://linux-2.6.29-pnv-agp.patch;patch=1 \
+ file://linux-2.6.29-pnv-drm.patch;patch=1 \
+ file://linux-2.6.29-pnv-fix-gtt-size.patch;patch=1 \
+ file://linux-2.6.29-pnv-fix-i2c.patch;patch=1 \
+ file://linux-2.6.29-drm-i915-Fix-LVDS-dither-setting.patch;patch=1 \
+ file://linux-2.6.29-timberdale.patch;patch=1 \
+# file://i915_split.patch;patch=1 \
+ file://defconfig-menlow \
+ file://defconfig-netbook"
+
+S = "${WORKDIR}/linux-2.6.29.1"
diff --git a/meta-moblin/packages/mojito/mojito_git.bb b/meta-moblin/packages/mojito/mojito_git.bb
index d8ac5c3fb..808020c80 100644
--- a/meta-moblin/packages/mojito/mojito_git.bb
+++ b/meta-moblin/packages/mojito/mojito_git.bb
@@ -1,7 +1,7 @@
SRC_URI = "git://git.moblin.org/${PN}.git;protocol=git"
PV = "0.0+git${SRCREV}"
-PR = "r1"
+PR = "r2"
DEPENDS = "libsoup-2.4 gconf-dbus librest glib-2.0 twitter-glib sqlite3"
@@ -9,7 +9,10 @@ S = "${WORKDIR}/git"
inherit autotools_stage
-FILES_${PN}-dbg += "${libdir}/mojito/sources/.debug/*"
+FILES_${PN} += "${datadir}/dbus-1/services"
+FILES_${PN}-dbg += "${libdir}/mojito/sources/.debug/* ${libdir}/mojito/services/.debug/"
+
+PARALLEL_MAKE = ""
pkg_postinst_${PN} () {
#!/bin/sh -e
diff --git a/meta-openmoko/packages/pulseaudio/libatomics-ops/fedora/libatomic_ops-1.2-ppclwzfix.patch b/meta-moblin/packages/pulseaudio/libatomics-ops/fedora/libatomic_ops-1.2-ppclwzfix.patch
index 51161dbe0..51161dbe0 100644
--- a/meta-openmoko/packages/pulseaudio/libatomics-ops/fedora/libatomic_ops-1.2-ppclwzfix.patch
+++ b/meta-moblin/packages/pulseaudio/libatomics-ops/fedora/libatomic_ops-1.2-ppclwzfix.patch
diff --git a/meta-openmoko/packages/pulseaudio/libatomics-ops_1.2.bb b/meta-moblin/packages/pulseaudio/libatomics-ops_1.2.bb
index 0563eb638..0563eb638 100644
--- a/meta-openmoko/packages/pulseaudio/libatomics-ops_1.2.bb
+++ b/meta-moblin/packages/pulseaudio/libatomics-ops_1.2.bb
diff --git a/meta-moblin/packages/pulseaudio/libcanberra/autoconf_version.patch b/meta-moblin/packages/pulseaudio/libcanberra/autoconf_version.patch
new file mode 100644
index 000000000..73a2ea6e4
--- /dev/null
+++ b/meta-moblin/packages/pulseaudio/libcanberra/autoconf_version.patch
@@ -0,0 +1,38 @@
+Allow a slightly older autoconf and disable the inbuild libltdl
+
+RP - 16/4/09
+
+Index: libcanberra-0.10/Makefile.am
+===================================================================
+--- libcanberra-0.10.orig/Makefile.am 2009-04-16 14:38:53.000000000 +0100
++++ libcanberra-0.10/Makefile.am 2009-04-16 14:39:04.000000000 +0100
+@@ -19,7 +19,7 @@
+ dist_doc_DATA = README
+
+ EXTRA_DIST = bootstrap.sh autogen.sh LGPL libcanberra.schemas
+-SUBDIRS = src gtkdoc libltdl doc
++SUBDIRS = src gtkdoc doc
+
+ MAINTAINERCLEANFILES = README
+ noinst_DATA = README
+Index: libcanberra-0.10/configure.ac
+===================================================================
+--- libcanberra-0.10.orig/configure.ac 2009-04-16 14:32:33.000000000 +0100
++++ libcanberra-0.10/configure.ac 2009-04-16 14:37:50.000000000 +0100
+@@ -19,7 +19,7 @@
+ # License along with libcanberra. If not, see
+ # <http://www.gnu.org/licenses/>.
+
+-AC_PREREQ(2.62)
++AC_PREREQ(2.61)
+
+ AC_INIT([libcanberra], 0.10, [mzyvopnaoreen (at) 0pointer (dot) de])
+ AC_CONFIG_SRCDIR([src/common.c])
+@@ -70,7 +70,6 @@
+ AC_PROG_LIBTOOL
+ AC_SUBST(LTDLINCL)
+ AC_SUBST(LIBLTDL)
+-AC_CONFIG_SUBDIRS(libltdl)
+
+ #### Determine build environment ####
+
diff --git a/meta-moblin/packages/pulseaudio/libcanberra_0.10.bb b/meta-moblin/packages/pulseaudio/libcanberra_0.10.bb
new file mode 100644
index 000000000..ad44b0f1a
--- /dev/null
+++ b/meta-moblin/packages/pulseaudio/libcanberra_0.10.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "Libcanberra is an implementation of the XDG Sound Theme and Name Specifications, for generating event sounds on free desktops."
+LICENSE = "LGPL"
+DEPENDS = "gtk+ pulseaudio alsa-lib"
+PR = "r1"
+
+inherit gconf autotools
+
+SRC_URI = "http://0pointer.de/lennart/projects/libcanberra/libcanberra-${PV}.tar.gz \
+ file://autoconf_version.patch;patch=1"
+
+EXTRA_OECONF = " --disable-oss "
+
+do_configure_prepend () {
+ rm -f ${S}/libltdl/configure*
+}
+
+FILES_${PN} += "${libdir}/gtk-2.0/modules/ ${datadir}/gnome"
+FILES_${PN}-dbg += "${libdir}/gtk-2.0/modules/.debug"
+
+AUTOTOOLS_STAGE_PKGCONFIG = "1"
+
+do_stage() {
+ autotools_stage_all
+}
+
+
+
+
diff --git a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/2113.diff b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/2113.diff
index 666ed34ad..666ed34ad 100644
--- a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/2113.diff
+++ b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/2113.diff
diff --git a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/2114.diff b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/2114.diff
index cbd82eeb8..cbd82eeb8 100644
--- a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/2114.diff
+++ b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/2114.diff
diff --git a/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/autoconf_version.patch b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/autoconf_version.patch
new file mode 100644
index 000000000..a3bc3c43c
--- /dev/null
+++ b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/autoconf_version.patch
@@ -0,0 +1,13 @@
+Index: pulseaudio-0.9.12/configure.ac
+===================================================================
+--- pulseaudio-0.9.12.orig/configure.ac 2009-04-16 14:52:38.000000000 +0100
++++ pulseaudio-0.9.12/configure.ac 2009-04-16 14:52:46.000000000 +0100
+@@ -20,7 +20,7 @@
+ # along with PulseAudio; if not, write to the Free Software Foundation,
+ # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+-AC_PREREQ(2.62)
++AC_PREREQ(2.61)
+
+ m4_define(PA_MAJOR, [0])
+ m4_define(PA_MINOR, [9])
diff --git a/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/buildfix.patch b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/buildfix.patch
new file mode 100644
index 000000000..ca01e0d80
--- /dev/null
+++ b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/buildfix.patch
@@ -0,0 +1,13 @@
+Index: pulseaudio-0.9.11/src/pulsecore/atomic.h
+===================================================================
+--- pulseaudio-0.9.11.orig/src/pulsecore/atomic.h
++++ pulseaudio-0.9.11/src/pulsecore/atomic.h
+@@ -40,6 +40,8 @@
+ #error "Please include config.h before including this file!"
+ #endif
+
++#include "macro.h"
++
+ #ifdef HAVE_ATOMIC_BUILTINS
+
+ /* __sync based implementation */
diff --git a/meta-openmoko/packages/pulseaudio/files/gcc4-compile-fix.patch b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/gcc4-compile-fix.patch
index 34ad026e4..34ad026e4 100644
--- a/meta-openmoko/packages/pulseaudio/files/gcc4-compile-fix.patch
+++ b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/gcc4-compile-fix.patch
diff --git a/meta-openmoko/packages/pulseaudio/files/volatiles.04_pulse b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/volatiles.04_pulse
index 5b1998032..5b1998032 100644
--- a/meta-openmoko/packages/pulseaudio/files/volatiles.04_pulse
+++ b/meta-moblin/packages/pulseaudio/pulseaudio-0.9.12/volatiles.04_pulse
diff --git a/meta-openmoko/packages/pulseaudio/pulse.inc b/meta-moblin/packages/pulseaudio/pulseaudio.inc
index 4b13980cc..4b13980cc 100644
--- a/meta-openmoko/packages/pulseaudio/pulse.inc
+++ b/meta-moblin/packages/pulseaudio/pulseaudio.inc
diff --git a/meta-moblin/packages/pulseaudio/pulseaudio_0.9.12.bb b/meta-moblin/packages/pulseaudio/pulseaudio_0.9.12.bb
new file mode 100644
index 000000000..6ef666995
--- /dev/null
+++ b/meta-moblin/packages/pulseaudio/pulseaudio_0.9.12.bb
@@ -0,0 +1,17 @@
+require pulseaudio.inc
+
+DEPENDS += "dbus gdbm speex"
+PR = "r0"
+
+SRC_URI += "\
+ file://buildfix.patch;patch=1 \
+ file://autoconf_version.patch;patch=1 \
+ file://2113.diff;patch=1;pnum=0 \
+ file://2114.diff;patch=1;pnum=0 \
+"
+
+do_compile_prepend() {
+ cd ${S}
+ mkdir -p ${S}/libltdl
+ cp ${STAGING_LIBDIR}/libltdl* ${S}/libltdl
+}
diff --git a/meta-moblin/packages/tasks/task-moblin-x11-netbook.bb b/meta-moblin/packages/tasks/task-moblin-x11-netbook.bb
index f57ddbace..7d090c56d 100644
--- a/meta-moblin/packages/tasks/task-moblin-x11-netbook.bb
+++ b/meta-moblin/packages/tasks/task-moblin-x11-netbook.bb
@@ -3,7 +3,7 @@
#
DESCRIPTION = "Netbook GUI Tasks for Moblin"
-PR = "r5"
+PR = "r6"
PACKAGES = "\
task-moblin-x11-netbook \
@@ -18,7 +18,7 @@ ALLOW_EMPTY = "1"
NETWORK_MANAGER ?= "networkmanager-applet"
EXTRA_MOBLIN_PACKAGES ?= ""
RDEPENDS_task-moblin-x11-netbook = "\
- metacity-clutter \
+ mutter \
matchbox-session-netbook \
matchbox-config-gtk \
xcursor-transparent-theme \
diff --git a/meta-openmoko/packages/pulseaudio/files/disable-using-glibc-tls.patch b/meta-openmoko/packages/pulseaudio/files/disable-using-glibc-tls.patch
deleted file mode 100644
index 31d147be7..000000000
--- a/meta-openmoko/packages/pulseaudio/files/disable-using-glibc-tls.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Index: pulseaudio-0.9.9/configure.ac
-===================================================================
---- pulseaudio-0.9.9.orig/configure.ac 2008-01-24 01:06:06.000000000 +0000
-+++ pulseaudio-0.9.9/configure.ac 2008-02-05 17:45:59.000000000 +0000
-@@ -139,8 +139,8 @@
- ret=$?
- rm -f conftest.o conftest
- if test $ret -eq 0 ; then
-- AC_DEFINE([HAVE_TLS_BUILTIN], 1, [Have __thread().])
-- AC_MSG_RESULT([yes])
-+dnl AC_DEFINE([HAVE_TLS_BUILTIN], 1, [Have __thread().])
-+ AC_MSG_RESULT([no])
- else
- AC_MSG_RESULT([no])
- fi
-@@ -325,8 +325,7 @@
- rm -f conftest.o conftest
-
- if test $ret -eq 0 ; then
-- AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], 1, [Have PTHREAD_PRIO_INHERIT.])
-- AC_MSG_RESULT([yes])
-+ AC_MSG_RESULT([no])
- else
- AC_MSG_RESULT([no])
- fi
diff --git a/meta-openmoko/packages/pulseaudio/files/fix-dbus-without-hal.patch b/meta-openmoko/packages/pulseaudio/files/fix-dbus-without-hal.patch
deleted file mode 100644
index f8ac5674a..000000000
--- a/meta-openmoko/packages/pulseaudio/files/fix-dbus-without-hal.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-Index: pulseaudio-0.9.8/src/Makefile.am
-===================================================================
---- pulseaudio-0.9.8.orig/src/Makefile.am
-+++ pulseaudio-0.9.8/src/Makefile.am
-@@ -1424,8 +1424,8 @@
- # HAL
- libdbus_util_la_SOURCES = modules/dbus-util.c modules/dbus-util.h
- libdbus_util_la_LDFLAGS = -avoid-version
--libdbus_util_la_LIBADD = $(AM_LIBADD) $(HAL_LIBS) libpulsecore.la
--libdbus_util_la_CFLAGS = $(AM_CFLAGS) $(HAL_CFLAGS)
-+libdbus_util_la_LIBADD = $(AM_LIBADD) $(HAL_LIBS) $(DBUS_LIBS) libpulsecore.la
-+libdbus_util_la_CFLAGS = $(AM_CFLAGS) $(HAL_CFLAGS) $(DBUS_CFLAGS)
-
- module_hal_detect_la_SOURCES = modules/module-hal-detect.c
- module_hal_detect_la_LDFLAGS = -module -avoid-version
diff --git a/meta-openmoko/packages/pulseaudio/files/fix-shm.patch b/meta-openmoko/packages/pulseaudio/files/fix-shm.patch
deleted file mode 100644
index 3bf61d0d3..000000000
--- a/meta-openmoko/packages/pulseaudio/files/fix-shm.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-# fixes building against uclibc
-# fixed upstream, see http://www.pulseaudio.org/ticket/200
-Index: pulseaudio-0.9.9/src/pulsecore/shm.c
-===================================================================
---- pulseaudio-0.9.9/src/pulsecore/shm.c (revision 1971)
-+++ pulseaudio-0.9.9/src/pulsecore/shm.c (revision 2110)
-@@ -319,4 +319,5 @@
- int pa_shm_cleanup(void) {
-
-+#ifdef HAVE_SHM_OPEN
- #ifdef SHM_PATH
- DIR *d;
-@@ -376,5 +377,6 @@
-
- closedir(d);
--#endif
-+#endif /* SHM_PATH */
-+#endif /* HAVE_SHM_OPEN */
-
- return 0;
diff --git a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/libpulsedsp-references-libpulsecore.patch b/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/libpulsedsp-references-libpulsecore.patch
deleted file mode 100644
index 4234af5f5..000000000
--- a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/libpulsedsp-references-libpulsecore.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-upstream: http://pulseaudio.org/ticket/287
-
-Index: pulseaudio-0.9.10/src/Makefile.am
-===================================================================
---- pulseaudio-0.9.10.orig/src/Makefile.am 2008-03-30 03:43:05.000000000 +0200
-+++ pulseaudio-0.9.10/src/Makefile.am 2008-05-03 11:55:46.000000000 +0200
-@@ -605,7 +605,7 @@
-
- libpulsedsp_la_SOURCES = utils/padsp.c
- libpulsedsp_la_CFLAGS = $(AM_CFLAGS)
--libpulsedsp_la_LIBADD = $(AM_LIBADD) libpulse.la
-+libpulsedsp_la_LIBADD = $(AM_LIBADD) libpulse.la libpulsecore.la
- libpulsedsp_la_LDFLAGS = -avoid-version
-
- ###################################
-@@ -888,7 +888,7 @@
-
- libpstream_util_la_SOURCES = pulsecore/pstream-util.c pulsecore/pstream-util.h
- libpstream_util_la_LDFLAGS = -avoid-version
--libpstream_util_la_LIBADD = $(AM_LIBADD) libpacket.la libpstream.la libtagstruct.la
-+libpstream_util_la_LIBADD = $(AM_LIBADD) libpacket.la libpstream.la libtagstruct.la libpulsecore.la
-
- libpdispatch_la_SOURCES = pulsecore/pdispatch.c pulsecore/pdispatch.h
- libpdispatch_la_LDFLAGS = -avoid-version
diff --git a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/libtool2.patch b/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/libtool2.patch
deleted file mode 100644
index 9e9e5f55d..000000000
--- a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/libtool2.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-Tell where is libltdl from libtool - was needed to get it built with libtool 2.2.4
-
----
- configure.ac | 26 +++++++++++++++++++-------
- 1 file changed, 19 insertions(+), 7 deletions(-)
-
---- pulseaudio-0.9.10.orig/configure.ac
-+++ pulseaudio-0.9.10/configure.ac
-@@ -242,20 +242,32 @@ if test $ret -eq 0 ; then
- else
- AC_MSG_RESULT([no])
- fi
-
- #### libtool stuff ####
--
--AC_LTDL_ENABLE_INSTALL
--AC_LIBLTDL_INSTALLABLE
--AC_LIBTOOL_DLOPEN
--AC_LIBTOOL_WIN32_DLL
-+LT_PREREQ([2.2])
-+LT_INIT([dlopen win32-dll])
- AC_PROG_LIBTOOL
--AC_SUBST(LTDLINCL)
--AC_SUBST(LIBLTDL)
-+#AC_SUBST(LTDLINCL)
-+#AC_SUBST(LIBLTDL)
- AC_CONFIG_SUBDIRS(libltdl)
-
-+AC_ARG_WITH(libltdl-libs,
-+ [ --with-libltdl-libs=DIR path to libltdl libs], libltdl_libs_prefix=$withval)
-+if test x$libltdl_libs_prefix != x; then
-+ LIBLTDL="-L${libltdl_libs_prefix}/libltdl.so -lltdl"
-+fi
-+AC_SUBST(LIBLTDL)
-+
-+AC_ARG_WITH(libltdl-includes,
-+ [ --with-libltdl-includes=DIR path to libltdl includes], libltdl_prefix=$withval)
-+if test x$libltdl_prefix != x; then
-+ LTDLINCL="-I${libltdl_prefix} ${libltdl_libs_prefix}/libltdl.so"
-+fi
-+AC_SUBST(LTDLINCL)
-+
-+
- old_LIBS=$LIBS
- LIBS="$LIBS $LIBLTDL"
- AC_CHECK_FUNCS([lt_dlmutex_register])
- LIBS=$old_LIBS
- AC_CHECK_TYPES([struct lt_user_dlloader, lt_dladvise], , , [#include <ltdl.h>])
diff --git a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/pa-drop-caps-returns-void.patch b/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/pa-drop-caps-returns-void.patch
deleted file mode 100644
index 74ea6ac06..000000000
--- a/meta-openmoko/packages/pulseaudio/pulseaudio-0.9.10/pa-drop-caps-returns-void.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-upstream: http://pulseaudio.org/changeset/2213
-
-Index: pulseaudio-0.9.10/src/daemon/caps.c
-===================================================================
---- pulseaudio-0.9.10.orig/src/daemon/caps.c 2008-05-03 12:10:26.000000000 +0200
-+++ pulseaudio-0.9.10/src/daemon/caps.c 2008-05-03 12:10:30.000000000 +0200
-@@ -131,9 +131,8 @@
- return 0;
- }
-
--int pa_drop_caps(void) {
-+void pa_drop_caps(void) {
- pa_drop_root();
-- return 0;
- }
-
- #endif
diff --git a/meta-openmoko/packages/pulseaudio/pulseaudio_0.9.10.bb b/meta-openmoko/packages/pulseaudio/pulseaudio_0.9.10.bb
deleted file mode 100644
index 671fb66a9..000000000
--- a/meta-openmoko/packages/pulseaudio/pulseaudio_0.9.10.bb
+++ /dev/null
@@ -1,21 +0,0 @@
-require pulse.inc
-
-DEPENDS += "dbus"
-PR = "r2"
-
-# this is not correct (see below)
-SRC_URI += "\
- file://disable-using-glibc-tls.patch;patch=1 \
- file://libpulsedsp-references-libpulsecore.patch;patch=1 \
- file://pa-drop-caps-returns-void.patch;patch=1 \
- file://2113.diff;patch=1;pnum=0 \
- file://2114.diff;patch=1;pnum=0 \
- file://libtool2.patch;patch=1 \
-"
-
-EXTRA_OECONF += "--with-libltdl-includes=${STAGING_INCDIR}/libltdl/ --with-libltdl-libs=${STAGING_LIBDIR}"
-
-# problems w/ pulseaudio 0.9.10 atm:
-# 1.) needs libltdl >= 1.5.24 (yes, any older version will NOT work at runtime)
-# 2.) doesn't build w/ glibc TLS support (hence patched out)
-# 3.) fails with hierarchical pthread stuff w/ gst-pulse (hence patched out)
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 1f280a0dc..e801fd12a 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -681,9 +681,20 @@ def oe_unpack_file(file, data, url = None):
if os.path.samefile(file, dest):
return True
+ # Change to subdir before executing command
+ save_cwd = os.getcwd();
+ parm = bb.decodeurl(url)[5]
+ if 'subdir' in parm:
+ newdir = ("%s/%s" % (os.getcwd(), parm['subdir']))
+ bb.mkdirhier(newdir)
+ os.chdir(newdir)
+
cmd = "PATH=\"%s\" %s" % (bb.data.getVar('PATH', data, 1), cmd)
bb.note("Unpacking %s to %s/" % (file, os.getcwd()))
ret = os.system(cmd)
+
+ os.chdir(save_cwd)
+
return ret == 0
addtask unpack after do_fetch
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index fa53b1358..df870142f 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -23,7 +23,7 @@ def legitimize_package_name(s):
# Remaining package name validity fixes
return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
-def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None):
+def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False):
"""
Used in .bb files to split up dynamically generated subpackages of a
given package, usually plugins or modules.
@@ -72,7 +72,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
continue
f = os.path.join(dvar + root, o)
mode = os.lstat(f).st_mode
- if not (stat.S_ISREG(mode) or (allow_dirs and stat.S_ISDIR(mode))):
+ if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
continue
on = legitimize_package_name(m.group(1))
pkg = output_pattern % on
diff --git a/meta/conf/bitbake.conf b/meta/conf/bitbake.conf
index 692d9b162..7f0da1b96 100644
--- a/meta/conf/bitbake.conf
+++ b/meta/conf/bitbake.conf
@@ -295,7 +295,7 @@ IMAGE_ROOTFS = "${TMPDIR}/rootfs"
IMAGE_BASENAME = "${PN}"
IMAGE_NAME = "${IMAGE_BASENAME}-${MACHINE}-${DATETIME}"
IMAGE_LINK_NAME = "${IMAGE_BASENAME}-${MACHINE}"
-IMAGE_EXTRA_SPACE = 10240
+IMAGE_EXTRA_SPACE = 40240
IMAGE_CMD = ""
IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 ${EXTRA_IMAGECMD}"
@@ -424,7 +424,7 @@ EXTRA_OEMAKE_prepend_task-compile = "${PARALLEL_MAKE} "
# Optimization flags.
##################################################################
-FULL_OPTIMIZATION = "-fexpensive-optimizations -fomit-frame-pointer -frename-registers -O2"
+FULL_OPTIMIZATION = "-fexpensive-optimizations -fomit-frame-pointer -frename-registers -O2 -ggdb -feliminate-unused-debug-types"
DEBUG_OPTIMIZATION = "-O -fno-omit-frame-pointer -g"
SELECTED_OPTIMIZATION = "${@bb.data.getVar(['FULL_OPTIMIZATION', 'DEBUG_OPTIMIZATION'][bb.data.getVar('DEBUG_BUILD', d, 1) == '1'], d, 1)}"
BUILD_OPTIMIZATION = "-O2"
diff --git a/meta/conf/checksums.ini b/meta/conf/checksums.ini
index daf64cfd0..f45e5560d 100644
--- a/meta/conf/checksums.ini
+++ b/meta/conf/checksums.ini
@@ -702,6 +702,10 @@ sha256=140828e8aa7d690d8f2a9a54faeac38bcbf40ea18c1f44a186af51bd6964cd13
md5=1ef201f29283179c8e5ab618529cac78
sha256=744d8f3a8206fbc45a5558d92163d5ef7e5e0cc0700283bb6a617fb1201629f9
+[http://archive.apache.org/dist/tomcat/tomcat-4/v4.1.37/src/apache-tomcat-4.1.37-src.tar.gz]
+md5=fe50177a25a084ad1abf4a201d08c2a6
+sha256=65e4cc9d3ea4a7f30044bfd6c660c772adb75ac152d775342deb6049a0d19d74
+
[ftp://ftp.buici.com/pub/apex/apex-1.4.11.tar.gz]
md5=7259a49d93b1853b1b3435753893ab7c
sha256=bce6638d95fa8d32b109176a7cef6aa71e40c130068fac71b0df369d1a36c82c
@@ -946,6 +950,10 @@ sha256=e25caa0e9639ea54dd7c4f21e8146ac9859a61fa126f397edf874b5fdc147430
md5=4510391e6b3edaa4cffb3ced87c9560c
sha256=b26a3499672b0eddcbd87c8e61dfb60ae74d9ee0cc181a5d924fb73db62a25ec
+[ftp://ftp.gnu.org/gnu/automake/automake-1.10.2.tar.bz2]
+md5=1498208ab1c8393dcbd9afb7d06df6d5
+sha256=c38ac9fe53d9492b0a0a02a8e691cd2c96d7f4cc13aedeedc6d08613b77e640f
+
[ftp://ftp.gnu.org/gnu/automake/automake-1.10.tar.bz2]
md5=0e2e0f757f9e1e89b66033905860fded
sha256=2efebda5dd64fd52599a19564041d63dcaba68fafacc0080ffa6e8f0df8da697
@@ -1310,6 +1318,10 @@ sha256=ac6a5c3f4aa88f39e8f04fe43f00972f0235674ac3edb994c70ac802a1ed0219
md5=5a6228512bcce7d9fabe8fc2d66269bf
sha256=5cd1a5aeace83a9305f7221e9ec95127b5a26870c619fd00ae48f1962d59f48b
+[ftp://ftp.debian.org/debian/pool/main/b/blktool/blktool_4.orig.tar.gz]
+md5=62edc09c9908107e69391c87f4f3fd40
+sha256=b1e6d5912546d2a4b704ec65c2b9664aa3b4663e7d800e06803330335a2cb764
+
[ftp://ftp.pl.debian.org/pub/debian/pool/main/b/blktool/blktool_4.orig.tar.gz]
md5=62edc09c9908107e69391c87f4f3fd40
sha256=b1e6d5912546d2a4b704ec65c2b9664aa3b4663e7d800e06803330335a2cb764
@@ -1594,6 +1606,14 @@ sha256=f7bf5368309d76e5daf3a89d4d1bea688dac7780742e7a0ae1af19be9316fe22
md5=badb4563a02d4188b478df31fa1b657d
sha256=5c4594f71eab62e24d12c67ae6a6aead306c537cf71e946c4e9fbebbe9786243
+[http://www.complang.tuwien.ac.at/cacaojvm/download/cacao-0.98/cacao-0.98.tar.bz2]
+md5=8b8907c8b925761c9410bcadb9705346
+sha256=cb9363add825cedf77764fc49a223aaf43f0a9f485b711ba8c92f16b13fff188
+
+[http://www.complang.tuwien.ac.at/cacaojvm/download/cacao-0.99.3/cacao-0.99.3.tar.bz2]
+md5=db93ab31c6d1b7f1e213771bb81bde58
+sha256=1ea5bd257f755ffcae2c7a1935c37147c7392478922410e0870361eea08b6c27
+
[http://cairographics.org/releases/cairo-1.2.2.tar.gz]
md5=2460487b1257a5583c889b1b9d2033ec
sha256=9d53b8dd0bf76dd0da7cdbe65a6c6934be49bd3942dc5f7ef7bbcc7529d9ee02
@@ -1806,6 +1826,18 @@ sha256=6a068f70f908ee4de0ba4d7e1d5127b12ad548da371494c668966d1ff98e05a2
md5=cf5aaaf07fe43567fa605b33165355d7
sha256=4f2824c42062dea269e489c1acb2769c015bf78cdaa929ee75441d412f10a03b
+[ftp://ftp.gnu.org/gnu/classpath/classpath-0.93.tar.gz]
+md5=ffa9e9cac31c5acbf0ea9eff9efa923d
+sha256=df2d093612abd23fe67e9409d89bb2a8e79b1664fe2b2da40e1c8ed693e32945
+
+[ftp://ftp.gnu.org/gnu/classpath/classpath-0.96.1.tar.gz]
+md5=a2ffb40f13fc76c2dda8f8311b30add9
+sha256=ee464d20ef9df2196e238302d5d06954cb96a11e73c4c44c6ef376859de2a078
+
+[ftp://ftp.gnu.org/gnu/classpath/classpath-0.97.2.tar.gz]
+md5=6a35347901ace03c31cc49751b338f31
+sha256=001fee5ad3ddd043783d59593153510f09064b0d9b5aea82f535266f62f02db4
+
[http://downloads.sourceforge.net/sylpheed-claws/claws-mail-2.7.2.tar.bz2]
md5=079f167fba6e17ae2c688a0dae858b0f
sha256=a47a079f583581c2d049195f12a26524390e272c820b2c7233a37582e887a5b5
@@ -2182,6 +2214,10 @@ sha256=4be82f4ef8f4657f997a44b3daddba3f95dbb6518487509767d803dd72bf529e
md5=c552b9bc4b69e4c602644abc21b7661e
sha256=6f1fca18af9214e896c5c0a11f94fe05a0345c641ed57da886a93a4e3b33147e
+[http://dbus.freedesktop.org/releases/dbus/dbus-1.2.4.tar.gz]
+md5=2e643910a09f44b000a0d76038637999
+sha256=ab3c24d8028a5792e62e8e3b40a9733b10b21327dc4c578e34b092a1793afbb8
+
[http://freedesktop.org/software/dbus/releases/dbus-glib-0.71.tar.gz]
md5=4e1e7348b26ee8b6485452113f4221cc
sha256=b58a489fdd35a70d241f512bc08f3b6d9d8e05110f4a1d5341cff6a0b7d854b7
@@ -2418,6 +2454,10 @@ sha256=cc2c86cbd0ec020a4486ac9d195e6a848a0d11a280c3194e095719f60c9751a9
md5=ab066a97c226066485ad20e5ad5ce424
sha256=54f3e9ac2bd9f622b28ab0d5149e9bc13d62d8826be085c32abc929bc30dda6a
+[http://www.thekelleys.org.uk/dnsmasq/dnsmasq-2.46.tar.gz]
+md5=79ec740d1a10ee75f13efa4ff36d0250
+sha256=9888d96d7d91e518c96a034dc863f8f7d961acd9334a1acde849c6022e540212
+
[ftp://sources.redhat.com/pub/docbook-tools/new-trials/SOURCES/docbook-utils-0.6.13.tar.gz]
md5=af785716b207a7a562551851298a3b1a
sha256=f74e9485f1e1f7484be432f4979415afbd9d8c20d0bdf6eab42eb2931b9b5d97
@@ -2506,6 +2546,22 @@ sha256=c4e482687d0cff240d02a70fcf423cc14296b6a7869cd8dd42d5404d098e0bb7
md5=1c1c5177aea9a23b45b9b3f5b3241819
sha256=635d6d07e5a4d615e599c051166af965baa754f9ff7435e746de1a5d2a5c0d56
+[http://downloads.sourceforge.net/e2fsprogs/e2fsprogs-libs-1.33.tar.gz]
+md5=c7545bb05e0c2fc59174f0dc455c6519
+sha256=a65d064bee3e97c164d07a490692db0e73da8da10019ad9a89866706c93ae703
+
+[http://downloads.sourceforge.net/e2fsprogs/e2fsprogs-libs-1.34.tar.gz]
+md5=b51824f1c34be88e8581d96fc418d224
+sha256=2c505a14157fe591bd540b9a0dca11f5d842e3b6c34a23dbbbd9eafcc9fa33fa
+
+[http://downloads.sourceforge.net/e2fsprogs/e2fsprogs-libs-1.35.tar.gz]
+md5=d31301cc2756dc9b0254b6aac03c376e
+sha256=236f1c173fe378b1306db797dc31f1884208ceac3ab81ad52f69e01f7bf61277
+
+[http://downloads.sourceforge.net/e2fsprogs/e2fsprogs-libs-1.39.tar.gz]
+md5=32a2f67223feb2d70233eb3f41e73311
+sha256=fe3c72d861f2a3791f31ea33043d25e29e9a4e3d6a3833b151237a62c5c703f9
+
[http://www.pobox.com/~sheff/sw/e2tools/e2tools-0.0.16.tar.gz]
md5=1829b2b261e0e0d07566066769b5b28b
sha256=4e3c8e17786ccc03fc9fb4145724edf332bb50e1b3c91b6f33e0e3a54861949b
@@ -2530,6 +2586,10 @@ sha256=6b5a71790120977a96d5a468ed69987107c5079f14b0a4081f460b3b14fbf952
md5=c3c60c83f6df30021e11da50a699dec9
sha256=2382f315fda4241a0043bac1dfc669f006d8e93e87fa382b263b1672972f4077
+[http://mirrors.ibiblio.org/pub/mirrors/eclipse/eclipse/downloads/drops/R-3.4-200806172000/ecjsrc-3.4.zip]
+md5=f7b84d912e61dfdd77b7173065845250
+sha256=a6ce5f0682462e85e5e47229e5b16555f74c60188fa0bb17536beb1138c537b4
+
[http://enlightenment.freedesktop.org/files/ecore-0.9.9.037.tar.gz]
md5=ec6d00a39ca6648f12f58f8d4f2208cc
sha256=2c31372ccfc9b411edc73785450c5212dc4a2fe286de9f49f38a02dd24895aea
@@ -2822,6 +2882,10 @@ sha256=52b31fa70151b97ef871d44ad9214e90279f7cbd95144c6b40da49079ed0f1ce
md5=3654bbbf01deeb0d804df194e1b1a2b3
sha256=52b31fa70151b97ef871d44ad9214e90279f7cbd95144c6b40da49079ed0f1ce
+[ftp://ftp.debian.org/debian/pool/main/f/fakeroot/fakeroot_1.9.4.tar.gz]
+md5=c324eb1e157edad17d98b1b2c8b99e25
+sha256=8c55b1640c726bd421b68a1119f20b002eb29d95ae58d480a1d5d7d8afba1e81
+
[ftp://ftp.pl.debian.org/pub/debian/pool/main/f/fakeroot/fakeroot_1.9.4.tar.gz]
md5=c324eb1e157edad17d98b1b2c8b99e25
sha256=8c55b1640c726bd421b68a1119f20b002eb29d95ae58d480a1d5d7d8afba1e81
@@ -2838,6 +2902,10 @@ sha256=1e0aa136693a3e9f4b43ebd71e7bd934cea31817a4a6cba2edc7aac353b8a93f
md5=2aaf871471a9ec037763c5dc7c193c57
sha256=6b9785167934948a582839f9723e37214cab1607a9764c35f10d555f8e662575
+[http://download.savannah.nongnu.org/releases/fastjar/fastjar-0.95.tar.gz]
+md5=92a70f9e56223b653bce0f58f90cf950
+sha256=ca03811490c965abc9f466e75418ae48bb71f14d8e8ed4c43e8109207b85ce54
+
[http://ftp3.ie.freebsd.org/pub/gentoo/distfiles/fbgetty-0.1.698.tar.gz]
md5=1705bc0f8f1e03fe50d324ba84ac4e56
sha256=332cbffa7c489b39a7d13d12d581c27dfc57ba098041431a6845b44785cf2d35
@@ -3498,6 +3566,10 @@ sha256=63dc1c03a107ec73871151bb765da7d5dfc1d699c0d0d6a3d244cf5ccb030913
md5=b594ff4ea4fbef4ba9220887de713dfe
sha256=3444179840638cb8664e8e53604900c4521d29d57785a5091202ee4937d8d0fd
+[ftp://ftp.gnu.org/gnu/gcc/gcc-3.4.6/gcc-3.4.6.tar.bz2]
+md5=4a21ac777d4b5617283ce488b808da7b
+sha256=7791a601878b765669022b8b3409fba33cc72f9e39340fec8af6d0e6f72dec39
+
[ftp://ftp.gnu.org/gnu/gcc/gcc-4.0.0/gcc-4.0.0.tar.bz2]
md5=55ee7df1b29f719138ec063c57b89db6
sha256=38a9a01e195000976dcd04ec854c398478ada839510b1de384ffbd5f99791bdc
@@ -5598,6 +5670,10 @@ sha256=ef6b1c8def236f16ea914eccbb050c84ee314c0028b03c560ed20ff96776f74c
md5=641ec45fe377529c7fd914f77b11b44f
sha256=9ff8360375432a7a5c476cc6d55b3fdea9d6f3edc080d295a60421d8f47b1834
+[http://jalimo.evolvis.org/repository/sources/icepick-0.0+hg20080118.tar.bz2]
+md5=ce7b1827e6f4cbe73b9ffa90b0d45a6a
+sha256=63170836095f8451498c813a1f93a9ec70a1164d82aa30650c0f2216ca4377ab
+
[http://ftp.debian.org/debian/pool/main/i/iceweasel/iceweasel_3.0.1-1.diff.gz]
md5=7124ea3c13369ff27bcff0080aca678f
sha256=1d70e21e1c20e8d88bf3d2590de75c3e65b12da335097716adc58712cea41dad
@@ -7870,6 +7946,10 @@ sha256=e69f4cc9baee9be87400371cbdca1cb03428394c624640e64397089d090dbf0d
md5=c779f84c4add124e704e6ea3ccc4039c
sha256=d98203f017c6e1de7dca2037c533000705fc86b12f29694ddb4db17854e5498a
+[http://ftp.gnome.org/pub/GNOME/sources/libsoup/2.4/libsoup-2.4.1.tar.bz2]
+md5=d0fc91ccb9da401e9e40d2f4612bdac9
+sha256=774094746748fb0c8985210de0994accdc7095c222fba42c5623e2137443b6cd
+
[http://www.libspf2.org/spf/libspf2-1.0.4.tar.gz]
md5=5fe69ba13bf35d505b733247032a8a64
sha256=222803a98d1e86ac7eee9491beb5fbf30e259a3c74cd4166bda1796374c26cd1
@@ -8538,6 +8618,10 @@ sha256=e08067d89e306fad7f4be56821998c073fc457d9444a18f6eb55dc7c0710a9d5
md5=dea74646b7e5c621fef7174df83c34b1
sha256=a6c85d85f912e1c321723084389d63dee7660b81b8292452b190ea7190dd73bc
+[http://www.lua.org/ftp/lua-5.1.4.tar.gz]
+md5=d0870f2de55d59c1c8419f36e8fac150
+sha256=b038e225eaf2a5b57c9bcc35cd13aa8c6c8288ef493d52970c9545074098af3a
+
[http://luaforge.net/frs/download.php/989/lua-gtk2-0.3.tar.gz]
md5=a2788c45d60ef8ce30168811d7e72334
sha256=b3dd85e34b22cf757eafb6ef15c5505d5ec5e71803caef4b69ddc7fd5d46fabe
@@ -8626,6 +8710,10 @@ sha256=d7ee2e668455f9a092418e5475f32676eb0b37c54ae38a7fcdf2d14e0fb80c91
md5=be2790a34349ab452dddbcfe4c95606a
sha256=c1c67839d4af290aecd8f7743312aacc0a39ae99757f0910918531ef9a63e14f
+[ftp://ftp.gnu.org/gnu/m4/m4-1.4.12.tar.gz]
+md5=0499a958a561781b125c740f18061ea8
+sha256=47e8f9a33ba06fa6710b42d6f6ded41f45027f6f4039b0a3ed887c5116bc2173
+
[ftp://ftp.gnu.org/gnu/m4/m4-1.4.4.tar.gz]
md5=8d1d64dbecf1494690a0f3ba8db4482a
sha256=a116c52d314c8e3365756cb1e14c6b460d6bd28769121f92373a362497359d88
@@ -9594,6 +9682,10 @@ sha256=e4c0875794a74ac627b7b74b6098e75c8413bd156856dc434a49c4c112a68af2
md5=7df692e3186109cc00db6825b777201e
sha256=1d2d7996cc94f9b87d0c51cf0e028070ac177c4123ecbfd7ac1cb8d0b7d322d1
+[http://jalimo.evolvis.org/repository/sources/openjdk-langtools-jdk7-b31.tar.bz2]
+md5=670931f67b2e4ac46c6e0cd15418f2fa
+sha256=f8b8820e410c137d279d14dec7e7a93217cc371acdfe1b3343b2278d1728932e
+
[ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-2.2.29.tgz]
md5=6c4c72a1336aa45b463e738034c078d6
sha256=82ed5a27d2b340826b2e10625e687627ccefc883a426314952e4a253d5a6af29
@@ -10082,6 +10174,10 @@ sha256=3d087c88f6e2cc8fde8d2ab12ff2f85ca3ecb2c67754d0483d158f2e5802b015
md5=5f09df47a16e83462384b44b75310539
sha256=72f3cdad93b5436070dcc63b67764a06c594f324ccc001e8bfb974a8d1a86f36
+[http://distro.ibiblio.org/pub/linux/distributions/gentoo/distfiles/pax-utils-0.1.19.tar.bz2]
+md5=98f6b9fe17a740a8cc577255422c6103
+sha256=3918628e9f2508708a1a28f5ed4cb39d07cbd5711747bbb3ddf63816d056c11e
+
[http://the.earth.li/pub/e3/pbltool-0.2.c]
md5=c8fd507f831d2b017dfecf60bc55e28a
sha256=2a1e31d4ae859e530895882273fad0170374a2d5b838bb4558303e59236e9cb6
@@ -10178,6 +10274,10 @@ sha256=3e9510ce715f28b7e6b803fcdadb73b37c83792c0b5e1e9bcb066ab972649996
md5=e6029fafcee029edcfa2ceed7a005333
sha256=e105b6aad55589aecf20ae70ab7aa81eb202296fc52531f62e546af23077cca1
+[http://us2.php.net/distributions/php-5.2.6.tar.bz2]
+md5=7380ffecebd95c6edb317ef861229ebd
+sha256=1892b2dd50b56ae2c9aec027fcd9035b76673f113555bc2bc1007bab8ae4db81
+
[http://efault.net/npat/hacks/picocom/dist/picocom-1.3.tar.gz]
md5=21865bf2891222082afc44afdd80aeaa
sha256=ed3e0190a1940cf08a167429aa3fd25b3ae7313fdf126f851a9abccc89845ee6
@@ -11046,6 +11146,10 @@ sha256=06b32d6f8fe7065b4c2c8142a244374e1be963757125bac20ba016b92231b5bf
md5=5f8dd5a041ca7c700b16620228f922aa
sha256=de7290f334baf631a14d031df304ee94cfb550aa69e7512aa589d21c2381c7d8
+[http://download.savannah.gnu.org/releases/quilt/quilt-0.47.tar.gz]
+md5=d33d2442bd34387260b1c1db3e623af0
+sha256=100a6a7d5012d36590e607ebc87535f62318cfe7368f4cd44cacb94a2cf938a2
+
[http://downloads.sourceforge.net/qwt/qwt-4.2.0rc1.tgz]
md5=142b10ab27e837c3c4603cf9a7e9343b
sha256=3b6db68d53441119dced27e5bad26ec087294cb9d878d37bcea61e1f1e4849a1
@@ -11286,6 +11390,10 @@ sha256=59cc003bab753335b3ce14a908e663ea782514b3531dc7030379ff753ef1a78c
md5=3fbb02294a8ca33d4684055adba5ed6f
sha256=19590e972b80333e26a6514c34d976c2037138361481a16f27b75e5d33f33a58
+[ftp://ftp.ruby-lang.org/pub/ruby/1.8/ruby-1.8.6-p286.tar.gz]
+md5=797ea136fe43e4286c9362ee4516674e
+sha256=1774de918b156c360843c1b68690f5f57532ee48ff079d4d05c51dace8d523ed
+
[http://dist.schmorp.de/rxvt-unicode/Attic/rxvt-unicode-4.8.tar.bz2]
md5=b8f69ee5a4a2cd195892107be3a3cb29
sha256=de644640cf71a6ce72f27ac6489b65434105fc6c94538c471925a15e2259b796
@@ -11826,10 +11934,18 @@ sha256=0ad86c4b26cd687122b158bf848f0a07d7cd644033d15c4f43b6d643bd74cd81
md5=b4f631268d811189cfca1d5259cb5dd8
sha256=af0b4ca361435e54316ddf715737b5b94c737c41e3c0b0e6d66ee8a8938f4500
+[http://www.sqlite.org/sqlite-3.5.4.tar.gz]
+md5=f17da840eed792e896c3408d0ce97718
+sha256=47daba209bd3bcffa1c5fcd5fdfc4f524eae619b4fa855aeeb1bbbc8bd2bb04f
+
[http://www.sqlite.org/sqlite-3.5.6.tar.gz]
md5=903c9e935c538af392364a9172a3d98d
sha256=1673b9218ec318067a662315c161ae123088cad319231ec079018acb4717e8bb
+[http://www.sqlite.org/sqlite-3.6.7.tar.gz]
+md5=5223d1f459b608ed8c2c54f8847f8e1a
+sha256=55464820daf6d0559ac6355e9c873b6f111f48a18a9751261a86c650291a65de
+
[http://downloads.sourceforge.net/squashfs/squashfs2.0-r2.tar.gz]
md5=0b7fcaab8e634ae2385984885c731082
sha256=353b768294783959219f1203e76c128fb4c75fa37f3a3bb139603c5db55c6974
@@ -11954,6 +12070,10 @@ sha256=8f2275d9667909d3418213522d5d4ef3312c158190429062f79da6b982b9ce6b
md5=157d31c9dc02aa22b5f27323e5a203fc
sha256=c31d3bbee984c7971d2b24cddc279d8ad65edff8216778d617484c147ba3ae3d
+[http://swfdec.freedesktop.org/download/swfdec/0.7/swfdec-0.7.4.tar.gz]
+md5=08fcda4a46454bfc66b97fba2d385f8c
+sha256=4345da2a4790125bd7205bd10a3326bf94e36f97229850b99ec9e8f050a9ab4a
+
[http://downloads.sourceforge.net/swig/swig-1.3.29.tar.gz]
md5=44c6f6d8d724cf7fa4a5109e6f63c9b1
sha256=68b1b032cdc6297f068ef8c3c09abdd46a08512e17705d477cc0bf7b80a550e8
@@ -13282,6 +13402,10 @@ sha256=41c091d05943817c0b667192dab8c8f3a3a59c04a66dd311a9573f48a7b70b3b
md5=c0e88fc3483d90a7fea6a399298d90ea
sha256=41c091d05943817c0b667192dab8c8f3a3a59c04a66dd311a9573f48a7b70b3b
+[http://xorg.freedesktop.org/releases/individual/app/xeyes-1.0.1.tar.bz2]
+md5=033f14f7c4e30d1f4edbb22d5ef86883
+sha256=530902e8732130e37d9b46e73eecff90d6dbc5d5628620fded74a4e054aa8029
+
[http://xorg.freedesktop.org/releases/X11R7.0/src/app/xeyes-X11R7.0-1.0.1.tar.bz2]
md5=3ffafa7f222ea799bcd9fcd85c60ab98
sha256=e93bb322fe96ceabd0581bb70101aaa1001b0b8f894b3ae0e1fa2e4b38f41af2
diff --git a/meta/conf/distro/include/poky-fixed-revisions.inc b/meta/conf/distro/include/poky-fixed-revisions.inc
index 21b632a61..97c8da689 100644
--- a/meta/conf/distro/include/poky-fixed-revisions.inc
+++ b/meta/conf/distro/include/poky-fixed-revisions.inc
@@ -33,7 +33,7 @@ SRCREV_pn-clutter-gtk-0.8 ?= "7d3c3230376e731c06c21afa9d8c1d44dbea27cd"
SRCREV_pn-clutter-gtk ?= "70f4b0cbd568dfa265484a03be3bd08ad15ed12e"
SRCREV_pn-clutter-0.6 ?= "62e852a51e29dd42c84d6b34fe23b80b2542be59"
SRCREV_pn-clutter-0.8 ?= "3ba37ff115ed3fbe6c94195775941091a3ac2b30"
-SRCREV_pn-clutter ?= "4e0073cd508ad55c8d6b6d031f060ae9644620c2"
+SRCREV_pn-clutter ?= "269598a2cfa147b4d8361789356aba8318bde434"
SRCREV_pn-connman ?= "e952851502277cc41cd2a54ef5386cca7a8843dd"
SRCREV_pn-connman-gnome ?= "b589f4f89bff20d54682cc6b49efa86feac8adb4"
SRCREV_pn-dialer ?= "42a2b4dd62cf94dc905caeb087f5e7e9f418bf93"
@@ -115,7 +115,7 @@ SRCREV_pn-ubootchart ?= "10"
SRCREV_pn-dfu-util-native = "3139"
SRCREV_pn-dfu-util = "3139"
SRCREV_pn-libgsmd ?= "4335"
-SRCREV_pn-libjana = "732"
+SRCREV_pn-libjana = "749"
SRCREV_pn-libmokogsmd2 ?= "4334"
SRCREV_pn-libmokojournal2 ?= "3473"
SRCREV_pn-libmokopanelui2 ?= "3349"
@@ -157,7 +157,7 @@ SRCREV_pn-xserver-xf86-dri-lite ?= "251d0d8090322b2c9dc0c8b7bef001f338d19433"
SRCREV_pn-xserver-xf86-dri2-lite ?= "251d0d8090322b2c9dc0c8b7bef001f338d19433"
SRCREV_pn-gsm0710muxd ?= "6fecc78198dd821bbe29efd096bd8fecd855179d"
SRCREV_pn-gsmd2 ?= "963f34df8fa3ff4b301079dcf86e9acea6b6fe0f"
-SRCREV_pn-metacity-clutter ?= "69a57bc1e7324dc7b129eafa6b396ba34bc2fe9f"
+SRCREV_pn-mutter ?= "69a57bc1e7324dc7b129eafa6b396ba34bc2fe9f"
SRCREV_pn-dri2proto ?= "8cab3f0e6f551220bd11074779f4ccec1e948e00"
SRCREV_pn-inputproto ?= "7203036522ba9d4b224d282d6afc2d0b947711ee"
SRCREV_pn-inputproto-native ?= "7203036522ba9d4b224d282d6afc2d0b947711ee"
@@ -199,7 +199,7 @@ PREFERRED_VERSION_glib-2.0-native ?= "2.18.1"
PREFERRED_VERSION_atk ?= "1.24.0"
PREFERRED_VERSION_cairo ?= "1.8.2"
PREFERRED_VERSION_pango ?= "1.22.2"
-PREFERRED_VERSION_gtk+ ?= "2.12.7"
+PREFERRED_VERSION_gtk+ ?= "2.14.2"
PREFERRED_VERSION_dbus ?= "1.2.4"
PREFERRED_VERSION_dbus-native ?= "1.2.4"
diff --git a/meta/conf/distro/include/poky-floating-revisions.inc b/meta/conf/distro/include/poky-floating-revisions.inc
index 89e5c7a10..99907c539 100644
--- a/meta/conf/distro/include/poky-floating-revisions.inc
+++ b/meta/conf/distro/include/poky-floating-revisions.inc
@@ -56,7 +56,7 @@ SRCREV_pn-table ?= "${AUTOREV}"
SRCREV_pn-libmatchbox ?= "${AUTOREV}"
SRCREV_pn-tasks ?= "${AUTOREV}"
SRCREV_pn-libjana = "${AUTOREV}"
-SRCREV_pn-metacity-clutter = "${AUTOREV}"
+SRCREV_pn-mutter = "${AUTOREV}"
SRCREV_pn-dri2proto = "${AUTOREV}"
#PREFERRED_VERSION_dri2proto ?= "1.99.1+git${SRCREV}"
diff --git a/meta/conf/distro/include/preferred-xorg-versions.inc b/meta/conf/distro/include/preferred-xorg-versions.inc
index f50f2b19a..d076aca26 100644
--- a/meta/conf/distro/include/preferred-xorg-versions.inc
+++ b/meta/conf/distro/include/preferred-xorg-versions.inc
@@ -129,23 +129,23 @@ PREFERRED_VERSION_xproxymanagementprotocol ?= "1.0.2"
PREFERRED_VERSION_xrandr ?= "1.2.3"
PREFERRED_VERSION_xrdb ?= "1.0.5"
PREFERRED_VERSION_xserver-kdrive ?= "1.3.0.0"
-PREFERRED_VERSION_xserver-xf86-dri-lite ?= "1.5.99.1+git${SRCREV}"
+PREFERRED_VERSION_xserver-xf86-dri-lite ?= "1.6.0"
PREFERRED_VERSION_xset ?= "1.0.4"
PREFERRED_VERSION_xtrans ?= "1.2.2"
PREFERRED_VERSION_xtrans-native ?= "1.2.2"
PREFERRED_VERSION_xtrans-sdk ?= "1.2.2"
PREFERRED_VERSION_xvinfo ?= "1.0.2"
PREFERRED_VERSION_xwininfo ?= "1.0.4"
-PREFERRED_VERSION_mesa-dri ?= "7.3.0+git${SRCREV}"
-PREFERRED_VERSION_libdrm ?= "2.4.4"
+PREFERRED_VERSION_mesa-dri ?= "7.4"
+PREFERRED_VERSION_libdrm ?= "2.4.7"
PREFERRED_VERSION_xcb-proto ?= "1.2"
PREFERRED_VERSION_libxcb ?= "1.1.91"
PREFERRED_VERSION_libxcb-sdk ?= "1.1.91"
PREFERRED_VERSION_libpciaccess ?= "0.10.5"
-PREFERRED_VERSION_xf86-input-evdev ?= "2.1.1"
+PREFERRED_VERSION_xf86-input-evdev ?= "2.2.1"
PREFERRED_VERSION_xf86-input-mouse ?= "1.4.0"
PREFERRED_VERSION_xf86-input-keyboard ?= "1.3.2"
-PREFERRED_VERSION_xf86-input-synaptics ?= "0.99.3"
+PREFERRED_VERSION_xf86-input-synaptics ?= "1.1.0"
PREFERRED_VERSION_xf86-video-intel ?= "2.4.97.0+git${SRCREV}"
PREFERRED_VERSION_xf86-video-intel-dri2 ?= "2.4.97.0+git${SRCREV}"
diff --git a/meta/packages/clutter/clutter-mozembed/link-with-g++.patch b/meta/packages/clutter/clutter-mozembed/link-with-g++.patch
index 8f84e6142..5a76e4522 100644
--- a/meta/packages/clutter/clutter-mozembed/link-with-g++.patch
+++ b/meta/packages/clutter/clutter-mozembed/link-with-g++.patch
@@ -1,25 +1,26 @@
Index: git/cluttermozembed/Makefile.am
===================================================================
---- git.orig/cluttermozembed/Makefile.am 2009-01-27 14:17:00.000000000 +0000
-+++ git/cluttermozembed/Makefile.am 2009-01-28 15:36:27.000000000 +0000
-@@ -16,11 +16,11 @@
+--- git.orig/cluttermozembed/Makefile.am 2009-04-16 15:54:16.000000000 +0100
++++ git/cluttermozembed/Makefile.am 2009-04-16 15:57:02.000000000 +0100
+@@ -51,12 +51,13 @@
source_h = \
clutter-mozembed.h
source_c = \
- clutter-mozembed.c
-+ clutter-mozembed.c dummy2.cpp
++ clutter-mozembed.c dummy.cpp
- bin_PROGRAMS = mozheadless
+ bin_PROGRAMS = clutter-mozheadless
--mozheadless_SOURCES = mozheadless.c
-+mozheadless_SOURCES = mozheadless.c dummy.cpp
+ clutter_mozheadless_SOURCES = \
+ clutter-mozheadless.c \
++ dummy2.cpp \
+ clutter-mozheadless.h
lib_LTLIBRARIES = libcluttermozembed.la
- libcluttermozembed_la_SOURCES = $(source_c) $(source_h)
Index: git/configure.ac
===================================================================
---- git.orig/configure.ac 2009-01-27 14:17:01.000000000 +0000
-+++ git/configure.ac 2009-01-28 15:35:38.000000000 +0000
+--- git.orig/configure.ac 2009-04-16 15:54:16.000000000 +0100
++++ git/configure.ac 2009-04-16 15:54:30.000000000 +0100
@@ -7,6 +7,7 @@
AC_ISC_POSIX
diff --git a/meta/packages/clutter/clutter/enable_tests.patch b/meta/packages/clutter/clutter/enable_tests.patch
index 854461479..953edf6bb 100644
--- a/meta/packages/clutter/clutter/enable_tests.patch
+++ b/meta/packages/clutter/clutter/enable_tests.patch
@@ -6,10 +6,10 @@
Index: git/tests/interactive/Makefile.am
===================================================================
---- git.orig/tests/interactive/Makefile.am 2008-12-18 16:50:45.000000000 +0000
-+++ git/tests/interactive/Makefile.am 2008-12-18 16:57:36.000000000 +0000
-@@ -68,7 +68,7 @@
- AM_CFLAGS = $(CLUTTER_CFLAGS)
+--- git.orig/tests/interactive/Makefile.am 2009-03-12 11:01:36.000000000 +0000
++++ git/tests/interactive/Makefile.am 2009-03-12 11:04:12.000000000 +0000
+@@ -77,7 +77,7 @@
+ AM_CFLAGS = $(CLUTTER_CFLAGS) $(MAINTAINER_CFLAGS)
AM_LDFLAGS = $(CLUTTER_LIBS)
-noinst_PROGRAMS = test-interactive
@@ -19,14 +19,14 @@ Index: git/tests/interactive/Makefile.am
test-main.c \
Index: git/tests/interactive/test-actors.c
===================================================================
---- git.orig/tests/interactive/test-actors.c 2008-12-18 16:58:06.000000000 +0000
-+++ git/tests/interactive/test-actors.c 2008-12-18 16:58:13.000000000 +0000
-@@ -189,7 +189,7 @@
- /* Create a texture from file, then clone in to same resources */
- if (i == 0)
- {
-- if ((oh->hand[i] = clutter_texture_new_from_file ("redhand.png",
-+ if ((oh->hand[i] = clutter_texture_new_from_file ("/usr/share/clutter/redhand.png",
- &error)) == NULL)
- {
- g_error ("image load failed: %s", error->message);
+--- git.orig/tests/interactive/test-actors.c 2009-03-12 11:01:36.000000000 +0000
++++ git/tests/interactive/test-actors.c 2009-03-12 11:06:32.000000000 +0000
+@@ -182,7 +182,7 @@
+ oh->scaler_1 = clutter_behaviour_scale_new (alpha, 0.5, 0.5, 1.0, 1.0);
+ oh->scaler_2 = clutter_behaviour_scale_new (alpha, 1.0, 1.0, 0.5, 0.5);
+
+- real_hand = clutter_texture_new_from_file ("redhand.png", &error);
++ real_hand = clutter_texture_new_from_file ("/usr/share/clutter/redhand.png", &error);
+ if (real_hand == NULL)
+ {
+ g_error ("image load failed: %s", error->message);
diff --git a/meta/packages/dbus/dbus-native_1.2.4.bb b/meta/packages/dbus/dbus-native_1.2.4.bb
index 47e3167ed..d799ba2d1 100644
--- a/meta/packages/dbus/dbus-native_1.2.4.bb
+++ b/meta/packages/dbus/dbus-native_1.2.4.bb
@@ -3,7 +3,7 @@ inherit native
DEPENDS = "glib-2.0-native libxml2-native expat-native"
-PR = "r2"
+PR = "r3"
do_stage() {
oe_runmake install
diff --git a/meta/packages/dbus/dbus.inc b/meta/packages/dbus/dbus.inc
index 68c8a842e..2efcf46b2 100644
--- a/meta/packages/dbus/dbus.inc
+++ b/meta/packages/dbus/dbus.inc
@@ -12,7 +12,7 @@ SRC_URI = "\
file://99_dbus \
"
-inherit autotools pkgconfig update-rc.d gettext
+inherit autotools pkgconfig gettext
INITSCRIPT_NAME = "dbus-1"
INITSCRIPT_PARAMS = "start 02 5 2 . stop 20 0 1 6 ."
diff --git a/meta/packages/dbus/dbus_1.2.4.bb b/meta/packages/dbus/dbus_1.2.4.bb
index 25ab98b9f..b170e7b34 100644
--- a/meta/packages/dbus/dbus_1.2.4.bb
+++ b/meta/packages/dbus/dbus_1.2.4.bb
@@ -1,4 +1,7 @@
include dbus.inc
+PR = "r1"
+
+inherit update-rc.d
SRC_URI += "file://fix-install-daemon.patch;patch=1 "
diff --git a/meta/packages/drm/files/poulsbo.patch b/meta/packages/drm/files/poulsbo.patch
deleted file mode 100644
index 91f8975f5..000000000
--- a/meta/packages/drm/files/poulsbo.patch
+++ /dev/null
@@ -1,2516 +0,0 @@
-Index: libdrm-2.4.4/libdrm/xf86drm.c
-===================================================================
---- libdrm-2.4.4.orig/libdrm/xf86drm.c 2009-01-10 01:08:29.000000000 +0000
-+++ libdrm-2.4.4/libdrm/xf86drm.c 2009-02-05 12:23:22.000000000 +0000
-@@ -2402,6 +2402,569 @@
- return 0;
- }
-
-+
-+/*
-+ * Valid flags are
-+ * DRM_FENCE_FLAG_EMIT
-+ * DRM_FENCE_FLAG_SHAREABLE
-+ * DRM_FENCE_MASK_DRIVER
-+ */
-+
-+int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type,
-+ drmFence *fence)
-+{
-+ drm_fence_arg_t arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.flags = flags;
-+ arg.type = type;
-+ arg.fence_class = fence_class;
-+
-+ if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
-+ return -errno;
-+ fence->handle = arg.handle;
-+ fence->fence_class = arg.fence_class;
-+ fence->type = arg.type;
-+ fence->flags = arg.flags;
-+ fence->signaled = 0;
-+ return 0;
-+}
-+
-+/*
-+ * Valid flags are
-+ * DRM_FENCE_FLAG_SHAREABLE
-+ * DRM_FENCE_MASK_DRIVER
-+ */
-+
-+int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence)
-+{
-+ drm_fence_arg_t arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.flags = flags;
-+ arg.fence_class = fence_class;
-+
-+ if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
-+ return -errno;
-+ fence->handle = arg.handle;
-+ fence->fence_class = arg.fence_class;
-+ fence->type = arg.type;
-+ fence->flags = arg.flags;
-+ fence->sequence = arg.sequence;
-+ fence->signaled = 0;
-+ return 0;
-+}
-+
-+int drmFenceReference(int fd, unsigned handle, drmFence *fence)
-+{
-+ drm_fence_arg_t arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.handle = handle;
-+
-+ if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
-+ return -errno;
-+ fence->handle = arg.handle;
-+ fence->fence_class = arg.fence_class;
-+ fence->type = arg.type;
-+ fence->flags = arg.flags;
-+ fence->signaled = arg.signaled;
-+ return 0;
-+}
-+
-+int drmFenceUnreference(int fd, const drmFence *fence)
-+{
-+ drm_fence_arg_t arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.handle = fence->handle;
-+
-+ if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
-+ return -errno;
-+ return 0;
-+}
-+
-+int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
-+{
-+ drm_fence_arg_t arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.handle = fence->handle;
-+ arg.type = flush_type;
-+
-+ if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
-+ return -errno;
-+ fence->fence_class = arg.fence_class;
-+ fence->type = arg.type;
-+ fence->signaled = arg.signaled;
-+ return arg.error;
-+}
-+
-+int drmFenceUpdate(int fd, drmFence *fence)
-+{
-+ drm_fence_arg_t arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.handle = fence->handle;
-+
-+ if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
-+ return -errno;
-+ fence->fence_class = arg.fence_class;
-+ fence->type = arg.type;
-+ fence->signaled = arg.signaled;
-+ return 0;
-+}
-+
-+int drmFenceSignaled(int fd, drmFence *fence, unsigned fenceType,
-+ int *signaled)
-+{
-+ if ((fence->flags & DRM_FENCE_FLAG_SHAREABLE) ||
-+ ((fenceType & fence->signaled) != fenceType)) {
-+ int ret = drmFenceFlush(fd, fence, fenceType);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ *signaled = ((fenceType & fence->signaled) == fenceType);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Valid flags are
-+ * DRM_FENCE_FLAG_SHAREABLE
-+ * DRM_FENCE_MASK_DRIVER
-+ */
-+
-+
-+int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
-+{
-+ drm_fence_arg_t arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.fence_class = fence->fence_class;
-+ arg.flags = flags;
-+ arg.handle = fence->handle;
-+ arg.type = emit_type;
-+
-+ if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
-+ return -errno;
-+ fence->fence_class = arg.fence_class;
-+ fence->type = arg.type;
-+ fence->signaled = arg.signaled;
-+ fence->sequence = arg.sequence;
-+ return 0;
-+}
-+
-+/*
-+ * Valid flags are
-+ * DRM_FENCE_FLAG_WAIT_LAZY
-+ * DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS
-+ */
-+
-+#define DRM_IOCTL_TIMEOUT_USEC 3000000UL
-+
-+static unsigned long
-+drmTimeDiff(struct timeval *now, struct timeval *then)
-+{
-+ uint64_t val;
-+
-+ val = now->tv_sec - then->tv_sec;
-+ val *= 1000000LL;
-+ val += now->tv_usec;
-+ val -= then->tv_usec;
-+
-+ return (unsigned long) val;
-+}
-+
-+static int
-+drmIoctlTimeout(int fd, unsigned long request, void *argp)
-+{
-+ int haveThen = 0;
-+ struct timeval then, now;
-+ int ret;
-+
-+ do {
-+ ret = ioctl(fd, request, argp);
-+ if (ret != 0 && errno == EAGAIN) {
-+ if (!haveThen) {
-+ gettimeofday(&then, NULL);
-+ haveThen = 1;
-+ }
-+ gettimeofday(&now, NULL);
-+ }
-+ } while (ret != 0 && errno == EAGAIN &&
-+ drmTimeDiff(&now, &then) < DRM_IOCTL_TIMEOUT_USEC);
-+
-+ if (ret != 0)
-+ return ((errno == EAGAIN) ? -EBUSY : -errno);
-+
-+ return 0;
-+}
-+
-+
-+
-+
-+int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
-+{
-+ drm_fence_arg_t arg;
-+ int ret;
-+
-+ if (flush_type == 0) {
-+ flush_type = fence->type;
-+ }
-+
-+ if (!(fence->flags & DRM_FENCE_FLAG_SHAREABLE)) {
-+ if ((flush_type & fence->signaled) == flush_type) {
-+ return 0;
-+ }
-+ }
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.handle = fence->handle;
-+ arg.type = flush_type;
-+ arg.flags = flags;
-+
-+
-+ ret = drmIoctlTimeout(fd, DRM_IOCTL_FENCE_WAIT, &arg);
-+ if (ret)
-+ return ret;
-+
-+ fence->fence_class = arg.fence_class;
-+ fence->type = arg.type;
-+ fence->signaled = arg.signaled;
-+ return arg.error;
-+}
-+
-+static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
-+{
-+ buf->handle = rep->handle;
-+ buf->flags = rep->flags;
-+ buf->size = rep->size;
-+ buf->offset = rep->offset;
-+ buf->mapHandle = rep->arg_handle;
-+ buf->mask = rep->proposed_flags;
-+ buf->start = rep->buffer_start;
-+ buf->fenceFlags = rep->fence_flags;
-+ buf->replyFlags = rep->rep_flags;
-+ buf->pageAlignment = rep->page_alignment;
-+ buf->tileInfo = rep->tile_info;
-+ buf->hwTileStride = rep->hw_tile_stride;
-+ buf->desiredTileStride = rep->desired_tile_stride;
-+}
-+
-+
-+
-+int drmBOCreate(int fd, unsigned long size,
-+ unsigned pageAlignment, void *user_buffer,
-+ uint64_t mask,
-+ unsigned hint, drmBO *buf)
-+{
-+ struct drm_bo_create_arg arg;
-+ struct drm_bo_create_req *req = &arg.d.req;
-+ struct drm_bo_info_rep *rep = &arg.d.rep;
-+ int ret;
-+
-+ memset(buf, 0, sizeof(*buf));
-+ memset(&arg, 0, sizeof(arg));
-+ req->flags = mask;
-+ req->hint = hint;
-+ req->size = size;
-+ req->page_alignment = pageAlignment;
-+ req->buffer_start = (unsigned long) user_buffer;
-+
-+ buf->virtual = NULL;
-+
-+ ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_CREATE, &arg);
-+ if (ret)
-+ return ret;
-+
-+ drmBOCopyReply(rep, buf);
-+ buf->virtual = user_buffer;
-+ buf->mapCount = 0;
-+
-+ return 0;
-+}
-+
-+int drmBOReference(int fd, unsigned handle, drmBO *buf)
-+{
-+ struct drm_bo_reference_info_arg arg;
-+ struct drm_bo_handle_arg *req = &arg.d.req;
-+ struct drm_bo_info_rep *rep = &arg.d.rep;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ req->handle = handle;
-+
-+ if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
-+ return -errno;
-+
-+ drmBOCopyReply(rep, buf);
-+ buf->mapVirtual = NULL;
-+ buf->mapCount = 0;
-+ buf->virtual = NULL;
-+
-+ return 0;
-+}
-+
-+int drmBOUnreference(int fd, drmBO *buf)
-+{
-+ struct drm_bo_handle_arg arg;
-+
-+ if (buf->mapVirtual && buf->mapHandle) {
-+ (void) munmap(buf->mapVirtual, buf->start + buf->size);
-+ buf->mapVirtual = NULL;
-+ buf->virtual = NULL;
-+ }
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.handle = buf->handle;
-+
-+ if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
-+ return -errno;
-+
-+ buf->handle = 0;
-+ return 0;
-+}
-+
-+
-+/*
-+ * Flags can be DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE or'ed together
-+ * Hint currently be DRM_BO_HINT_DONT_BLOCK, which makes the
-+ * call return an -EBUSY if it can' immediately honor the mapping request.
-+ */
-+
-+int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
-+ void **address)
-+{
-+ struct drm_bo_map_wait_idle_arg arg;
-+ struct drm_bo_info_req *req = &arg.d.req;
-+ struct drm_bo_info_rep *rep = &arg.d.rep;
-+ int ret = 0;
-+
-+ /*
-+ * Make sure we have a virtual address of the buffer.
-+ */
-+
-+ if (!buf->virtual) {
-+ drmAddress virtual;
-+ virtual = mmap(0, buf->size + buf->start,
-+ PROT_READ | PROT_WRITE, MAP_SHARED,
-+ fd, buf->mapHandle);
-+ if (virtual == MAP_FAILED) {
-+ ret = -errno;
-+ }
-+ if (ret)
-+ return ret;
-+ buf->mapVirtual = virtual;
-+ buf->virtual = ((char *) virtual) + buf->start;
-+ }
-+
-+ memset(&arg, 0, sizeof(arg));
-+ req->handle = buf->handle;
-+ req->mask = mapFlags;
-+ req->hint = mapHint;
-+
-+ /*
-+ * May hang if the buffer object is busy.
-+ * This IOCTL synchronizes the buffer.
-+ */
-+
-+ ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_MAP, &arg);
-+ if (ret)
-+ return ret;
-+
-+ drmBOCopyReply(rep, buf);
-+ buf->mapFlags = mapFlags;
-+ ++buf->mapCount;
-+ *address = buf->virtual;
-+
-+ return 0;
-+}
-+
-+
-+int drmBOUnmap(int fd, drmBO *buf)
-+{
-+ struct drm_bo_handle_arg arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.handle = buf->handle;
-+
-+ if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
-+ return -errno;
-+ }
-+ buf->mapCount--;
-+ return 0;
-+}
-+
-+int drmBOSetStatus(int fd, drmBO *buf,
-+ uint64_t flags, uint64_t mask,
-+ unsigned int hint,
-+ unsigned int desired_tile_stride,
-+ unsigned int tile_info)
-+{
-+
-+ struct drm_bo_map_wait_idle_arg arg;
-+ struct drm_bo_info_req *req = &arg.d.req;
-+ struct drm_bo_info_rep *rep = &arg.d.rep;
-+ int ret = 0;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ req->mask = mask;
-+ req->flags = flags;
-+ req->handle = buf->handle;
-+ req->hint = hint;
-+ req->desired_tile_stride = desired_tile_stride;
-+ req->tile_info = tile_info;
-+
-+ ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_SETSTATUS, &arg);
-+ if (ret)
-+ return ret;
-+
-+ drmBOCopyReply(rep, buf);
-+ return 0;
-+}
-+
-+
-+int drmBOInfo(int fd, drmBO *buf)
-+{
-+ struct drm_bo_reference_info_arg arg;
-+ struct drm_bo_handle_arg *req = &arg.d.req;
-+ struct drm_bo_info_rep *rep = &arg.d.rep;
-+ int ret = 0;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ req->handle = buf->handle;
-+
-+ ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
-+ if (ret)
-+ return -errno;
-+
-+ drmBOCopyReply(rep, buf);
-+ return 0;
-+}
-+
-+int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
-+{
-+ struct drm_bo_map_wait_idle_arg arg;
-+ struct drm_bo_info_req *req = &arg.d.req;
-+ struct drm_bo_info_rep *rep = &arg.d.rep;
-+ int ret = 0;
-+
-+ if ((buf->flags & DRM_BO_FLAG_SHAREABLE) ||
-+ (buf->replyFlags & DRM_BO_REP_BUSY)) {
-+ memset(&arg, 0, sizeof(arg));
-+ req->handle = buf->handle;
-+ req->hint = hint;
-+
-+ ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg);
-+ if (ret)
-+ return ret;
-+
-+ drmBOCopyReply(rep, buf);
-+ }
-+ return 0;
-+}
-+
-+int drmBOBusy(int fd, drmBO *buf, int *busy)
-+{
-+ if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) &&
-+ !(buf->replyFlags & DRM_BO_REP_BUSY)) {
-+ *busy = 0;
-+ return 0;
-+ }
-+ else {
-+ int ret = drmBOInfo(fd, buf);
-+ if (ret)
-+ return ret;
-+ *busy = (buf->replyFlags & DRM_BO_REP_BUSY);
-+ return 0;
-+ }
-+}
-+
-+int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
-+ unsigned memType)
-+{
-+ struct drm_mm_init_arg arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+
-+ arg.magic = DRM_BO_INIT_MAGIC;
-+ arg.major = DRM_BO_INIT_MAJOR;
-+ arg.minor = DRM_BO_INIT_MINOR;
-+ arg.p_offset = pOffset;
-+ arg.p_size = pSize;
-+ arg.mem_type = memType;
-+
-+ if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
-+ return -errno;
-+ return 0;
-+}
-+
-+int drmMMTakedown(int fd, unsigned memType)
-+{
-+ struct drm_mm_type_arg arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.mem_type = memType;
-+
-+ if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
-+ return -errno;
-+ return 0;
-+}
-+
-+/*
-+ * If this function returns an error, and lockBM was set to 1,
-+ * the buffer manager is NOT locked.
-+ */
-+
-+int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict)
-+{
-+ struct drm_mm_type_arg arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ arg.mem_type = memType;
-+ arg.lock_flags |= (lockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
-+ arg.lock_flags |= (ignoreNoEvict) ? DRM_BO_LOCK_IGNORE_NO_EVICT : 0;
-+
-+ return drmIoctlTimeout(fd, DRM_IOCTL_MM_LOCK, &arg);
-+}
-+
-+int drmMMUnlock(int fd, unsigned memType, int unlockBM)
-+{
-+ struct drm_mm_type_arg arg;
-+
-+ memset(&arg, 0, sizeof(arg));
-+
-+ arg.mem_type = memType;
-+ arg.lock_flags |= (unlockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
-+
-+ return drmIoctlTimeout(fd, DRM_IOCTL_MM_UNLOCK, &arg);
-+}
-+
-+int drmBOVersion(int fd, unsigned int *major,
-+ unsigned int *minor,
-+ unsigned int *patchlevel)
-+{
-+ struct drm_bo_version_arg arg;
-+ int ret;
-+
-+ memset(&arg, 0, sizeof(arg));
-+ ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
-+ if (ret)
-+ return -errno;
-+
-+ if (major)
-+ *major = arg.major;
-+ if (minor)
-+ *minor = arg.minor;
-+ if (patchlevel)
-+ *patchlevel = arg.patchlevel;
-+
-+ return 0;
-+}
-+
-+
-+
- #define DRM_MAX_FDS 16
- static struct {
- char *BusID;
-Index: libdrm-2.4.4/libdrm/xf86drm.h
-===================================================================
---- libdrm-2.4.4.orig/libdrm/xf86drm.h 2008-12-17 18:28:24.000000000 +0000
-+++ libdrm-2.4.4/libdrm/xf86drm.h 2009-02-04 16:39:55.000000000 +0000
-@@ -665,4 +665,6 @@
- extern int drmSetMaster(int fd);
- extern int drmDropMaster(int fd);
-
-+#include "xf86mm.h"
-+
- #endif
-Index: libdrm-2.4.4/libdrm/xf86mm.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ libdrm-2.4.4/libdrm/xf86mm.h 2009-02-04 16:39:55.000000000 +0000
-@@ -0,0 +1,140 @@
-+/**************************************************************************
-+ *
-+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
-+ * All Rights Reserved.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the
-+ * "Software"), to deal in the Software without restriction, including
-+ * without limitation the rights to use, copy, modify, merge, publish,
-+ * distribute, sub license, and/or sell copies of the Software, and to
-+ * permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
-+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * The above copyright notice and this permission notice (including the
-+ * next paragraph) shall be included in all copies or substantial portions
-+ * of the Software.
-+ *
-+ *
-+ **************************************************************************/
-+
-+#ifndef _XF86MM_H_
-+#define _XF86MM_H_
-+#include <stddef.h>
-+#include <stdint.h>
-+#include "drm.h"
-+
-+/*
-+ * Note on multithreaded applications using this interface.
-+ * Libdrm is not threadsafe, so common buffer, TTM, and fence objects need to
-+ * be protected using an external mutex.
-+ *
-+ * Note: Don't protect the following functions, as it may lead to deadlocks:
-+ * drmBOUnmap().
-+ * The kernel is synchronizing and refcounting buffer maps.
-+ * User space only needs to refcount object usage within the same application.
-+ */
-+
-+
-+/*
-+ * List macros heavily inspired by the Linux kernel
-+ * list handling. No list looping yet.
-+ */
-+
-+typedef struct _drmFence
-+{
-+ unsigned handle;
-+ int fence_class;
-+ unsigned type;
-+ unsigned flags;
-+ unsigned signaled;
-+ uint32_t sequence;
-+ unsigned pad[4]; /* for future expansion */
-+} drmFence;
-+
-+typedef struct _drmBO
-+{
-+ unsigned handle;
-+ uint64_t mapHandle;
-+ uint64_t flags;
-+ uint64_t mask;
-+ unsigned mapFlags;
-+ unsigned long size;
-+ unsigned long offset;
-+ unsigned long start;
-+ unsigned replyFlags;
-+ unsigned fenceFlags;
-+ unsigned pageAlignment;
-+ unsigned tileInfo;
-+ unsigned hwTileStride;
-+ unsigned desiredTileStride;
-+ void *virtual;
-+ void *mapVirtual;
-+ int mapCount;
-+ unsigned pad[8]; /* for future expansion */
-+} drmBO;
-+
-+/*
-+ * Fence functions.
-+ */
-+
-+extern int drmFenceCreate(int fd, unsigned flags, int fence_class,
-+ unsigned type, drmFence *fence);
-+extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
-+extern int drmFenceUnreference(int fd, const drmFence *fence);
-+extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
-+extern int drmFenceSignaled(int fd, drmFence *fence,
-+ unsigned fenceType, int *signaled);
-+extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
-+ unsigned flush_type);
-+extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
-+ unsigned emit_type);
-+extern int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence);
-+
-+
-+/*
-+ * Buffer object functions.
-+ */
-+
-+extern int drmBOCreate(int fd, unsigned long size,
-+ unsigned pageAlignment, void *user_buffer,
-+ uint64_t mask, unsigned hint, drmBO *buf);
-+extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
-+extern int drmBOUnreference(int fd, drmBO *buf);
-+extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
-+ void **address);
-+extern int drmBOUnmap(int fd, drmBO *buf);
-+extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle);
-+extern int drmBOInfo(int fd, drmBO *buf);
-+extern int drmBOBusy(int fd, drmBO *buf, int *busy);
-+
-+extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
-+
-+/*
-+ * Initialization functions.
-+ */
-+
-+extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
-+ unsigned memType);
-+extern int drmMMTakedown(int fd, unsigned memType);
-+extern int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict);
-+extern int drmMMUnlock(int fd, unsigned memType, int unlockBM);
-+extern int drmBOSetStatus(int fd, drmBO *buf,
-+ uint64_t flags, uint64_t mask,
-+ unsigned int hint,
-+ unsigned int desired_tile_stride,
-+ unsigned int tile_info);
-+extern int drmBOVersion(int fd, unsigned int *major,
-+ unsigned int *minor,
-+ unsigned int *patchlevel);
-+
-+
-+#endif
-Index: libdrm-2.4.4/shared-core/drm.h
-===================================================================
---- libdrm-2.4.4.orig/shared-core/drm.h 2008-12-17 18:28:24.000000000 +0000
-+++ libdrm-2.4.4/shared-core/drm.h 2009-02-05 12:20:53.000000000 +0000
-@@ -632,6 +632,8 @@
- unsigned long handle; /**< Used for mapping / unmapping */
- };
-
-+
-+
- /**
- * DRM_IOCTL_SET_VERSION ioctl argument type.
- */
-@@ -1109,6 +1111,32 @@
- #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, uint32_t)
- #define DRM_IOCTL_MODE_REPLACEFB DRM_IOWR(0xB0, struct drm_mode_fb_cmd)
-
-+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
-+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
-+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
-+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
-+
-+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
-+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
-+
-+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
-+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
-+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
-+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
-+#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
-+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
-+#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
-+
-+#define DRM_IOCTL_MODE_ADDMODE DRM_IOWR(0xA7, struct drm_mode_modeinfo)
-+#define DRM_IOCTL_MODE_RMMODE DRM_IOWR(0xA8, unsigned int)
- /*@}*/
-
- /**
-Index: libdrm-2.4.4/shared-core/Makefile.am
-===================================================================
---- libdrm-2.4.4.orig/shared-core/Makefile.am 2008-12-17 18:28:24.000000000 +0000
-+++ libdrm-2.4.4/shared-core/Makefile.am 2009-02-04 16:39:55.000000000 +0000
-@@ -31,6 +31,8 @@
- mach64_drm.h \
- mga_drm.h \
- nouveau_drm.h \
-+ psb_drm.h \
-+ psb_reg.h \
- r128_drm.h \
- radeon_drm.h \
- savage_drm.h \
-Index: libdrm-2.4.4/shared-core/psb_drm.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ libdrm-2.4.4/shared-core/psb_drm.h 2009-02-04 16:39:55.000000000 +0000
-@@ -0,0 +1,359 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+
-+#ifndef _PSB_DRM_H_
-+#define _PSB_DRM_H_
-+
-+#if defined(__linux__) && !defined(__KERNEL__)
-+#include<stdint.h>
-+#endif
-+
-+/*
-+ * Intel Poulsbo driver package version.
-+ *
-+ */
-+/* #define PSB_PACKAGE_VERSION "ED"__DATE__*/
-+#define PSB_PACKAGE_VERSION "2.0.0.32L.0007"
-+
-+#define DRM_PSB_SAREA_MAJOR 0
-+#define DRM_PSB_SAREA_MINOR 1
-+#define PSB_FIXED_SHIFT 16
-+
-+/*
-+ * Public memory types.
-+ */
-+
-+#define DRM_PSB_MEM_MMU DRM_BO_MEM_PRIV1
-+#define DRM_PSB_FLAG_MEM_MMU DRM_BO_FLAG_MEM_PRIV1
-+#define DRM_PSB_MEM_PDS DRM_BO_MEM_PRIV2
-+#define DRM_PSB_FLAG_MEM_PDS DRM_BO_FLAG_MEM_PRIV2
-+#define DRM_PSB_MEM_APER DRM_BO_MEM_PRIV3
-+#define DRM_PSB_FLAG_MEM_APER DRM_BO_FLAG_MEM_PRIV3
-+#define DRM_PSB_MEM_RASTGEOM DRM_BO_MEM_PRIV4
-+#define DRM_PSB_FLAG_MEM_RASTGEOM DRM_BO_FLAG_MEM_PRIV4
-+#define PSB_MEM_RASTGEOM_START 0x30000000
-+
-+typedef int32_t psb_fixed;
-+typedef uint32_t psb_ufixed;
-+
-+static inline psb_fixed psb_int_to_fixed(int a)
-+{
-+ return a * (1 << PSB_FIXED_SHIFT);
-+}
-+
-+static inline psb_ufixed psb_unsigned_to_ufixed(unsigned int a)
-+{
-+ return a << PSB_FIXED_SHIFT;
-+}
-+
-+/*Status of the command sent to the gfx device.*/
-+typedef enum {
-+ DRM_CMD_SUCCESS,
-+ DRM_CMD_FAILED,
-+ DRM_CMD_HANG
-+} drm_cmd_status_t;
-+
-+struct drm_psb_scanout {
-+ uint32_t buffer_id; /* DRM buffer object ID */
-+ uint32_t rotation; /* Rotation as in RR_rotation definitions */
-+ uint32_t stride; /* Buffer stride in bytes */
-+ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
-+ uint32_t width; /* Buffer width in pixels */
-+ uint32_t height; /* Buffer height in lines */
-+ psb_fixed transform[3][3]; /* Buffer composite transform */
-+ /* (scaling, rot, reflect) */
-+};
-+
-+#define DRM_PSB_SAREA_OWNERS 16
-+#define DRM_PSB_SAREA_OWNER_2D 0
-+#define DRM_PSB_SAREA_OWNER_3D 1
-+
-+#define DRM_PSB_SAREA_SCANOUTS 3
-+
-+struct drm_psb_sarea {
-+ /* Track changes of this data structure */
-+
-+ uint32_t major;
-+ uint32_t minor;
-+
-+ /* Last context to touch part of hw */
-+ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
-+
-+ /* Definition of front- and rotated buffers */
-+ uint32_t num_scanouts;
-+ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
-+
-+ int planeA_x;
-+ int planeA_y;
-+ int planeA_w;
-+ int planeA_h;
-+ int planeB_x;
-+ int planeB_y;
-+ int planeB_w;
-+ int planeB_h;
-+ uint32_t msvdx_state;
-+ uint32_t msvdx_context;
-+};
-+
-+#define PSB_RELOC_MAGIC 0x67676767
-+#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
-+#define PSB_RELOC_SHIFT_SHIFT 0
-+#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
-+#define PSB_RELOC_ALSHIFT_SHIFT 16
-+
-+#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
-+ * buffer
-+ */
-+#define PSB_RELOC_OP_2D_OFFSET 1 /* Offset of the indicated
-+ * buffer, relative to 2D
-+ * base address
-+ */
-+#define PSB_RELOC_OP_PDS_OFFSET 2 /* Offset of the indicated buffer,
-+ * relative to PDS base address
-+ */
-+#define PSB_RELOC_OP_STRIDE 3 /* Stride of the indicated
-+ * buffer (for tiling)
-+ */
-+#define PSB_RELOC_OP_USE_OFFSET 4 /* Offset of USE buffer
-+ * relative to base reg
-+ */
-+#define PSB_RELOC_OP_USE_REG 5 /* Base reg of USE buffer */
-+
-+struct drm_psb_reloc {
-+ uint32_t reloc_op;
-+ uint32_t where; /* offset in destination buffer */
-+ uint32_t buffer; /* Buffer reloc applies to */
-+ uint32_t mask; /* Destination format: */
-+ uint32_t shift; /* Destination format: */
-+ uint32_t pre_add; /* Destination format: */
-+ uint32_t background; /* Destination add */
-+ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
-+ uint32_t arg0; /* Reloc-op dependant */
-+ uint32_t arg1;
-+};
-+
-+#define PSB_BO_FLAG_TA (1ULL << 48)
-+#define PSB_BO_FLAG_SCENE (1ULL << 49)
-+#define PSB_BO_FLAG_FEEDBACK (1ULL << 50)
-+#define PSB_BO_FLAG_USSE (1ULL << 51)
-+
-+#define PSB_ENGINE_2D 0
-+#define PSB_ENGINE_VIDEO 1
-+#define PSB_ENGINE_RASTERIZER 2
-+#define PSB_ENGINE_TA 3
-+#define PSB_ENGINE_HPRAST 4
-+
-+/*
-+ * For this fence class we have a couple of
-+ * fence types.
-+ */
-+
-+#define _PSB_FENCE_EXE_SHIFT 0
-+#define _PSB_FENCE_TA_DONE_SHIFT 1
-+#define _PSB_FENCE_RASTER_DONE_SHIFT 2
-+#define _PSB_FENCE_SCENE_DONE_SHIFT 3
-+#define _PSB_FENCE_FEEDBACK_SHIFT 4
-+
-+#define _PSB_ENGINE_TA_FENCE_TYPES 5
-+#define _PSB_FENCE_TYPE_TA_DONE (1 << _PSB_FENCE_TA_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_RASTER_DONE (1 << _PSB_FENCE_RASTER_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_SCENE_DONE (1 << _PSB_FENCE_SCENE_DONE_SHIFT)
-+#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
-+
-+#define PSB_ENGINE_HPRAST 4
-+#define PSB_NUM_ENGINES 5
-+
-+#define PSB_TA_FLAG_FIRSTPASS (1 << 0)
-+#define PSB_TA_FLAG_LASTPASS (1 << 1)
-+
-+#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
-+
-+struct drm_psb_scene {
-+ int handle_valid;
-+ uint32_t handle;
-+ uint32_t w;
-+ uint32_t h;
-+ uint32_t num_buffers;
-+};
-+
-+typedef struct drm_psb_cmdbuf_arg {
-+ uint64_t buffer_list; /* List of buffers to validate */
-+ uint64_t clip_rects; /* See i915 counterpart */
-+ uint64_t scene_arg;
-+ uint64_t fence_arg;
-+
-+ uint32_t ta_flags;
-+
-+ uint32_t ta_handle; /* TA reg-value pairs */
-+ uint32_t ta_offset;
-+ uint32_t ta_size;
-+
-+ uint32_t oom_handle;
-+ uint32_t oom_offset;
-+ uint32_t oom_size;
-+
-+ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
-+ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
-+ uint32_t cmdbuf_size;
-+
-+ uint32_t reloc_handle; /* Reloc buffer object */
-+ uint32_t reloc_offset;
-+ uint32_t num_relocs;
-+
-+ int32_t damage; /* Damage front buffer with cliprects */
-+ /* Not implemented yet */
-+ uint32_t fence_flags;
-+ uint32_t engine;
-+
-+ /*
-+ * Feedback;
-+ */
-+
-+ uint32_t feedback_ops;
-+ uint32_t feedback_handle;
-+ uint32_t feedback_offset;
-+ uint32_t feedback_breakpoints;
-+ uint32_t feedback_size;
-+} drm_psb_cmdbuf_arg_t;
-+
-+struct drm_psb_xhw_init_arg {
-+ uint32_t operation;
-+ uint32_t buffer_handle;
-+};
-+
-+/*
-+ * Feedback components:
-+ */
-+
-+/*
-+ * Vistest component. The number of these in the feedback buffer
-+ * equals the number of vistest breakpoints + 1.
-+ * This is currently the only feedback component.
-+ */
-+
-+struct drm_psb_vistest {
-+ uint32_t vt[8];
-+};
-+
-+#define PSB_HW_COOKIE_SIZE 16
-+#define PSB_HW_FEEDBACK_SIZE 8
-+#define PSB_HW_OOM_CMD_SIZE 6
-+
-+struct drm_psb_xhw_arg {
-+ uint32_t op;
-+ int ret;
-+ uint32_t irq_op;
-+ uint32_t issue_irq;
-+ uint32_t cookie[PSB_HW_COOKIE_SIZE];
-+ union {
-+ struct {
-+ uint32_t w;
-+ uint32_t h;
-+ uint32_t size;
-+ uint32_t clear_p_start;
-+ uint32_t clear_num_pages;
-+ } si;
-+ struct {
-+ uint32_t fire_flags;
-+ uint32_t hw_context;
-+ uint32_t offset;
-+ uint32_t engine;
-+ uint32_t flags;
-+ uint32_t rca;
-+ uint32_t num_oom_cmds;
-+ uint32_t oom_cmds[PSB_HW_OOM_CMD_SIZE];
-+ } sb;
-+ struct {
-+ uint32_t pages;
-+ uint32_t size;
-+ } bi;
-+ struct {
-+ uint32_t bca;
-+ uint32_t rca;
-+ uint32_t flags;
-+ } oom;
-+ struct {
-+ uint32_t pt_offset;
-+ uint32_t param_offset;
-+ uint32_t flags;
-+ } bl;
-+ uint32_t feedback[PSB_HW_FEEDBACK_SIZE];
-+ } arg;
-+};
-+
-+#define DRM_PSB_CMDBUF 0x00
-+#define DRM_PSB_XHW_INIT 0x01
-+#define DRM_PSB_XHW 0x02
-+#define DRM_PSB_SCENE_UNREF 0x03
-+/* Controlling the kernel modesetting buffers */
-+#define DRM_PSB_KMS_OFF 0x04
-+#define DRM_PSB_KMS_ON 0x05
-+
-+#define PSB_XHW_INIT 0x00
-+#define PSB_XHW_TAKEDOWN 0x01
-+
-+#define PSB_XHW_FIRE_RASTER 0x00
-+#define PSB_XHW_SCENE_INFO 0x01
-+#define PSB_XHW_SCENE_BIND_FIRE 0x02
-+#define PSB_XHW_TA_MEM_INFO 0x03
-+#define PSB_XHW_RESET_DPM 0x04
-+#define PSB_XHW_OOM 0x05
-+#define PSB_XHW_TERMINATE 0x06
-+#define PSB_XHW_VISTEST 0x07
-+#define PSB_XHW_RESUME 0x08
-+#define PSB_XHW_TA_MEM_LOAD 0x09
-+
-+#define PSB_SCENE_FLAG_DIRTY (1 << 0)
-+#define PSB_SCENE_FLAG_COMPLETE (1 << 1)
-+#define PSB_SCENE_FLAG_SETUP (1 << 2)
-+#define PSB_SCENE_FLAG_SETUP_ONLY (1 << 3)
-+#define PSB_SCENE_FLAG_CLEARED (1 << 4)
-+
-+#define PSB_TA_MEM_FLAG_TA (1 << 0)
-+#define PSB_TA_MEM_FLAG_RASTER (1 << 1)
-+#define PSB_TA_MEM_FLAG_HOSTA (1 << 2)
-+#define PSB_TA_MEM_FLAG_HOSTD (1 << 3)
-+#define PSB_TA_MEM_FLAG_INIT (1 << 4)
-+#define PSB_TA_MEM_FLAG_NEW_PT_OFFSET (1 << 5)
-+
-+/*Raster fire will deallocate memory */
-+#define PSB_FIRE_FLAG_RASTER_DEALLOC (1 << 0)
-+/*Isp reset needed due to change in ZLS format */
-+#define PSB_FIRE_FLAG_NEEDS_ISP_RESET (1 << 1)
-+/*These are set by Xpsb. */
-+#define PSB_FIRE_FLAG_XHW_MASK 0xff000000
-+/*The task has had at least one OOM and Xpsb will
-+ send back messages on each fire. */
-+#define PSB_FIRE_FLAG_XHW_OOM (1 << 24)
-+
-+#define PSB_SCENE_ENGINE_TA 0
-+#define PSB_SCENE_ENGINE_RASTER 1
-+#define PSB_SCENE_NUM_ENGINES 2
-+
-+struct drm_psb_dev_info_arg {
-+ uint32_t num_use_attribute_registers;
-+};
-+#define DRM_PSB_DEVINFO 0x01
-+
-+#endif
-Index: libdrm-2.4.4/shared-core/psb_drv.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ libdrm-2.4.4/shared-core/psb_drv.h 2009-02-04 16:39:55.000000000 +0000
-@@ -0,0 +1,786 @@
-+/**************************************************************************
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+#ifndef _PSB_DRV_H_
-+#define _PSB_DRV_H_
-+
-+#include "drmP.h"
-+#include "psb_drm.h"
-+#include "psb_reg.h"
-+#include "psb_schedule.h"
-+#include "intel_drv.h"
-+
-+enum {
-+ CHIP_PSB_8108 = 0,
-+ CHIP_PSB_8109 = 1
-+};
-+
-+#define DRIVER_NAME "psb"
-+#define DRIVER_DESC "drm driver for the Intel GMA500"
-+#define DRIVER_AUTHOR "Tungsten Graphics Inc."
-+
-+#define PSB_DRM_DRIVER_DATE "20080107"
-+#define PSB_DRM_DRIVER_MAJOR 4
-+#define PSB_DRM_DRIVER_MINOR 1
-+#define PSB_DRM_DRIVER_PATCHLEVEL 0
-+
-+#define PSB_VDC_OFFSET 0x00000000
-+#define PSB_VDC_SIZE 0x000080000
-+#define PSB_SGX_SIZE 0x8000
-+#define PSB_SGX_OFFSET 0x00040000
-+#define PSB_MMIO_RESOURCE 0
-+#define PSB_GATT_RESOURCE 2
-+#define PSB_GTT_RESOURCE 3
-+#define PSB_GMCH_CTRL 0x52
-+#define PSB_BSM 0x5C
-+#define _PSB_GMCH_ENABLED 0x4
-+#define PSB_PGETBL_CTL 0x2020
-+#define _PSB_PGETBL_ENABLED 0x00000001
-+#define PSB_SGX_2D_SLAVE_PORT 0x4000
-+#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
-+#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
-+#define PSB_NUM_VALIDATE_BUFFERS 640
-+#define PSB_MEM_KERNEL_START 0x10000000
-+#define PSB_MEM_PDS_START 0x20000000
-+#define PSB_MEM_MMU_START 0x40000000
-+
-+#define DRM_PSB_MEM_KERNEL DRM_BO_MEM_PRIV0
-+#define DRM_PSB_FLAG_MEM_KERNEL DRM_BO_FLAG_MEM_PRIV0
-+
-+/*
-+ * Flags for external memory type field.
-+ */
-+
-+#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
-+#define PSB_MSVDX_SIZE 0x8000 /*MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
-+
-+#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
-+#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
-+#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
-+
-+/*
-+ * PTE's and PDE's
-+ */
-+
-+#define PSB_PDE_MASK 0x003FFFFF
-+#define PSB_PDE_SHIFT 22
-+#define PSB_PTE_SHIFT 12
-+
-+#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
-+#define PSB_PTE_WO 0x0002 /* Write only */
-+#define PSB_PTE_RO 0x0004 /* Read only */
-+#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
-+
-+/*
-+ * VDC registers and bits
-+ */
-+#define PSB_HWSTAM 0x2098
-+#define PSB_INSTPM 0x20C0
-+#define PSB_INT_IDENTITY_R 0x20A4
-+#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
-+#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
-+#define _PSB_IRQ_SGX_FLAG (1<<18)
-+#define _PSB_IRQ_MSVDX_FLAG (1<<19)
-+#define PSB_INT_MASK_R 0x20A8
-+#define PSB_INT_ENABLE_R 0x20A0
-+#define PSB_PIPEASTAT 0x70024
-+#define _PSB_VBLANK_INTERRUPT_ENABLE (1 << 17)
-+#define _PSB_VBLANK_CLEAR (1 << 1)
-+#define PSB_PIPEBSTAT 0x71024
-+
-+#define _PSB_MMU_ER_MASK 0x0001FF00
-+#define _PSB_MMU_ER_HOST (1 << 16)
-+#define GPIOA 0x5010
-+#define GPIOB 0x5014
-+#define GPIOC 0x5018
-+#define GPIOD 0x501c
-+#define GPIOE 0x5020
-+#define GPIOF 0x5024
-+#define GPIOG 0x5028
-+#define GPIOH 0x502c
-+#define GPIO_CLOCK_DIR_MASK (1 << 0)
-+#define GPIO_CLOCK_DIR_IN (0 << 1)
-+#define GPIO_CLOCK_DIR_OUT (1 << 1)
-+#define GPIO_CLOCK_VAL_MASK (1 << 2)
-+#define GPIO_CLOCK_VAL_OUT (1 << 3)
-+#define GPIO_CLOCK_VAL_IN (1 << 4)
-+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
-+#define GPIO_DATA_DIR_MASK (1 << 8)
-+#define GPIO_DATA_DIR_IN (0 << 9)
-+#define GPIO_DATA_DIR_OUT (1 << 9)
-+#define GPIO_DATA_VAL_MASK (1 << 10)
-+#define GPIO_DATA_VAL_OUT (1 << 11)
-+#define GPIO_DATA_VAL_IN (1 << 12)
-+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-+
-+#define VCLK_DIVISOR_VGA0 0x6000
-+#define VCLK_DIVISOR_VGA1 0x6004
-+#define VCLK_POST_DIV 0x6010
-+
-+#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
-+#define I915_WRITE(_offs, _val) \
-+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
-+#define I915_READ(_offs) \
-+ ioread32(dev_priv->vdc_reg + (_offs))
-+
-+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
-+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
-+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
-+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
-+#define PSB_COMM_USER_IRQ (1024 >> 2)
-+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
-+#define PSB_COMM_FW (2048 >> 2)
-+
-+#define PSB_UIRQ_VISTEST 1
-+#define PSB_UIRQ_OOM_REPLY 2
-+#define PSB_UIRQ_FIRE_TA_REPLY 3
-+#define PSB_UIRQ_FIRE_RASTER_REPLY 4
-+
-+#define PSB_2D_SIZE (256*1024*1024)
-+#define PSB_MAX_RELOC_PAGES 1024
-+
-+#define PSB_LOW_REG_OFFS 0x0204
-+#define PSB_HIGH_REG_OFFS 0x0600
-+
-+#define PSB_NUM_VBLANKS 2
-+
-+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
-+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
-+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
-+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
-+#define PSB_COMM_FW (2048 >> 2)
-+
-+#define PSB_2D_SIZE (256*1024*1024)
-+#define PSB_MAX_RELOC_PAGES 1024
-+
-+#define PSB_LOW_REG_OFFS 0x0204
-+#define PSB_HIGH_REG_OFFS 0x0600
-+
-+#define PSB_NUM_VBLANKS 2
-+#define PSB_WATCHDOG_DELAY (DRM_HZ / 10)
-+
-+/*
-+ * User options.
-+ */
-+
-+struct drm_psb_uopt {
-+ int disable_clock_gating;
-+};
-+
-+struct psb_gtt {
-+ struct drm_device *dev;
-+ int initialized;
-+ uint32_t gatt_start;
-+ uint32_t gtt_start;
-+ uint32_t gtt_phys_start;
-+ unsigned gtt_pages;
-+ unsigned gatt_pages;
-+ uint32_t stolen_base;
-+ uint32_t pge_ctl;
-+ u16 gmch_ctrl;
-+ unsigned long stolen_size;
-+ uint32_t *gtt_map;
-+ struct rw_semaphore sem;
-+};
-+
-+struct psb_use_base {
-+ struct list_head head;
-+ struct drm_fence_object *fence;
-+ unsigned int reg;
-+ unsigned long offset;
-+ unsigned int dm;
-+};
-+
-+struct psb_buflist_item {
-+ struct drm_buffer_object *bo;
-+ void __user *data;
-+ struct drm_bo_info_rep rep;
-+ int ret;
-+};
-+
-+struct psb_msvdx_cmd_queue {
-+ struct list_head head;
-+ void *cmd;
-+ unsigned long cmd_size;
-+ uint32_t sequence;
-+};
-+
-+struct drm_psb_private {
-+ unsigned long chipset;
-+
-+ struct psb_xhw_buf resume_buf;
-+ struct drm_psb_dev_info_arg dev_info;
-+ struct drm_psb_uopt uopt;
-+
-+ struct psb_gtt *pg;
-+
-+ struct page *scratch_page;
-+ struct page *comm_page;
-+
-+ volatile uint32_t *comm;
-+ uint32_t comm_mmu_offset;
-+ uint32_t mmu_2d_offset;
-+ uint32_t sequence[PSB_NUM_ENGINES];
-+ uint32_t last_sequence[PSB_NUM_ENGINES];
-+ int idle[PSB_NUM_ENGINES];
-+ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
-+ int engine_lockup_2d;
-+
-+ struct psb_mmu_driver *mmu;
-+ struct psb_mmu_pd *pf_pd;
-+
-+ uint8_t *sgx_reg;
-+ uint8_t *vdc_reg;
-+ uint8_t *msvdx_reg;
-+ /*MSVDX*/ int msvdx_needs_reset;
-+ int has_msvdx;
-+ uint32_t gatt_free_offset;
-+
-+ /*
-+ * Fencing / irq.
-+ */
-+
-+ uint32_t sgx_irq_mask;
-+ uint32_t vdc_irq_mask;
-+
-+ spinlock_t irqmask_lock;
-+ spinlock_t sequence_lock;
-+ int fence0_irq_on;
-+ int irq_enabled;
-+ unsigned int irqen_count_2d;
-+ wait_queue_head_t event_2d_queue;
-+
-+ uint32_t msvdx_current_sequence;
-+ uint32_t msvdx_last_sequence;
-+ int fence2_irq_on;
-+ struct mutex mutex_2d;
-+
-+ /*
-+ * MSVDX Rendec Memory
-+ */
-+ struct drm_buffer_object *ccb0;
-+ uint32_t base_addr0;
-+ struct drm_buffer_object *ccb1;
-+ uint32_t base_addr1;
-+
-+ /*
-+ * Memory managers
-+ */
-+
-+ int have_vram;
-+ int have_tt;
-+ int have_mem_mmu;
-+ int have_mem_aper;
-+ int have_mem_kernel;
-+ int have_mem_pds;
-+ int have_mem_rastgeom;
-+ struct mutex temp_mem;
-+
-+ /*
-+ * Relocation buffer mapping.
-+ */
-+
-+ spinlock_t reloc_lock;
-+ unsigned int rel_mapped_pages;
-+ wait_queue_head_t rel_mapped_queue;
-+
-+ /*
-+ * SAREA
-+ */
-+ struct drm_psb_sarea *sarea_priv;
-+
-+ /*
-+ * LVDS info
-+ */
-+ uint8_t blc_type;
-+ uint8_t blc_pol;
-+ uint8_t blc_freq;
-+ uint8_t blc_minbrightness;
-+ uint8_t blc_i2caddr;
-+ uint8_t blc_brightnesscmd;
-+ int backlight; /* restore backlight to this value */
-+
-+ struct intel_i2c_chan *i2c_bus;
-+ u32 CoreClock;
-+ u32 PWMControlRegFreq;
-+
-+ unsigned char * OpRegion;
-+ unsigned int OpRegionSize;
-+
-+ int backlight_duty_cycle; /* restore backlight to this value */
-+ bool panel_wants_dither;
-+ struct drm_display_mode *panel_fixed_mode;
-+
-+ /*
-+ * Register state
-+ */
-+ uint32_t saveDSPACNTR;
-+ uint32_t saveDSPBCNTR;
-+ uint32_t savePIPEACONF;
-+ uint32_t savePIPEBCONF;
-+ uint32_t savePIPEASRC;
-+ uint32_t savePIPEBSRC;
-+ uint32_t saveFPA0;
-+ uint32_t saveFPA1;
-+ uint32_t saveDPLL_A;
-+ uint32_t saveDPLL_A_MD;
-+ uint32_t saveHTOTAL_A;
-+ uint32_t saveHBLANK_A;
-+ uint32_t saveHSYNC_A;
-+ uint32_t saveVTOTAL_A;
-+ uint32_t saveVBLANK_A;
-+ uint32_t saveVSYNC_A;
-+ uint32_t saveDSPASTRIDE;
-+ uint32_t saveDSPASIZE;
-+ uint32_t saveDSPAPOS;
-+ uint32_t saveDSPABASE;
-+ uint32_t saveDSPASURF;
-+ uint32_t saveFPB0;
-+ uint32_t saveFPB1;
-+ uint32_t saveDPLL_B;
-+ uint32_t saveDPLL_B_MD;
-+ uint32_t saveHTOTAL_B;
-+ uint32_t saveHBLANK_B;
-+ uint32_t saveHSYNC_B;
-+ uint32_t saveVTOTAL_B;
-+ uint32_t saveVBLANK_B;
-+ uint32_t saveVSYNC_B;
-+ uint32_t saveDSPBSTRIDE;
-+ uint32_t saveDSPBSIZE;
-+ uint32_t saveDSPBPOS;
-+ uint32_t saveDSPBBASE;
-+ uint32_t saveDSPBSURF;
-+ uint32_t saveVCLK_DIVISOR_VGA0;
-+ uint32_t saveVCLK_DIVISOR_VGA1;
-+ uint32_t saveVCLK_POST_DIV;
-+ uint32_t saveVGACNTRL;
-+ uint32_t saveADPA;
-+ uint32_t saveLVDS;
-+ uint32_t saveDVOA;
-+ uint32_t saveDVOB;
-+ uint32_t saveDVOC;
-+ uint32_t savePP_ON;
-+ uint32_t savePP_OFF;
-+ uint32_t savePP_CONTROL;
-+ uint32_t savePP_CYCLE;
-+ uint32_t savePFIT_CONTROL;
-+ uint32_t savePaletteA[256];
-+ uint32_t savePaletteB[256];
-+ uint32_t saveBLC_PWM_CTL;
-+
-+ /*
-+ * USE code base register management.
-+ */
-+
-+ struct drm_reg_manager use_manager;
-+
-+ /*
-+ * Xhw
-+ */
-+
-+ uint32_t *xhw;
-+ struct drm_buffer_object *xhw_bo;
-+ struct drm_bo_kmap_obj xhw_kmap;
-+ struct list_head xhw_in;
-+ spinlock_t xhw_lock;
-+ atomic_t xhw_client;
-+ struct drm_file *xhw_file;
-+ wait_queue_head_t xhw_queue;
-+ wait_queue_head_t xhw_caller_queue;
-+ struct mutex xhw_mutex;
-+ struct psb_xhw_buf *xhw_cur_buf;
-+ int xhw_submit_ok;
-+ int xhw_on;
-+
-+ /*
-+ * Scheduling.
-+ */
-+
-+ struct mutex reset_mutex;
-+ struct mutex cmdbuf_mutex;
-+ struct psb_scheduler scheduler;
-+ struct psb_buflist_item buffers[PSB_NUM_VALIDATE_BUFFERS];
-+ uint32_t ta_mem_pages;
-+ struct psb_ta_mem *ta_mem;
-+ int force_ta_mem_load;
-+
-+ /*
-+ * Watchdog
-+ */
-+
-+ spinlock_t watchdog_lock;
-+ struct timer_list watchdog_timer;
-+ struct work_struct watchdog_wq;
-+ struct work_struct msvdx_watchdog_wq;
-+ int timer_available;
-+
-+ /*
-+ * msvdx command queue
-+ */
-+ spinlock_t msvdx_lock;
-+ struct mutex msvdx_mutex;
-+ struct list_head msvdx_queue;
-+ int msvdx_busy;
-+};
-+
-+struct psb_mmu_driver;
-+
-+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
-+ int trap_pagefaults,
-+ int invalid_type);
-+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
-+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver);
-+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
-+ uint32_t gtt_start, uint32_t gtt_pages);
-+extern void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset);
-+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
-+ int trap_pagefaults,
-+ int invalid_type);
-+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
-+extern void psb_mmu_flush(struct psb_mmu_driver *driver);
-+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
-+ unsigned long address,
-+ uint32_t num_pages);
-+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
-+ uint32_t start_pfn,
-+ unsigned long address,
-+ uint32_t num_pages, int type);
-+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
-+ unsigned long *pfn);
-+
-+/*
-+ * Enable / disable MMU for different requestors.
-+ */
-+
-+extern void psb_mmu_enable_requestor(struct psb_mmu_driver *driver,
-+ uint32_t mask);
-+extern void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
-+ uint32_t mask);
-+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
-+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
-+ unsigned long address, uint32_t num_pages,
-+ uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride, int type);
-+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
-+ uint32_t num_pages,
-+ uint32_t desired_tile_stride,
-+ uint32_t hw_tile_stride);
-+/*
-+ * psb_sgx.c
-+ */
-+
-+extern int psb_blit_sequence(struct drm_psb_private *dev_priv,
-+ uint32_t sequence);
-+extern void psb_init_2d(struct drm_psb_private *dev_priv);
-+extern int drm_psb_idle(struct drm_device *dev);
-+extern int psb_emit_2d_copy_blit(struct drm_device *dev,
-+ uint32_t src_offset,
-+ uint32_t dst_offset, uint32_t pages,
-+ int direction);
-+extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_reg_submit(struct drm_psb_private *dev_priv, uint32_t * regs,
-+ unsigned int cmds);
-+extern int psb_submit_copy_cmdbuf(struct drm_device *dev,
-+ struct drm_buffer_object *cmd_buffer,
-+ unsigned long cmd_offset,
-+ unsigned long cmd_size, int engine,
-+ uint32_t * copy_buffer);
-+
-+extern int psb_fence_for_errors(struct drm_file *priv,
-+ struct drm_psb_cmdbuf_arg *arg,
-+ struct drm_fence_arg *fence_arg,
-+ struct drm_fence_object **fence_p);
-+
-+/*
-+ * psb_irq.c
-+ */
-+
-+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
-+extern void psb_irq_preinstall(struct drm_device *dev);
-+extern void psb_irq_postinstall(struct drm_device *dev);
-+extern void psb_irq_uninstall(struct drm_device *dev);
-+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
-+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-+
-+/*
-+ * psb_fence.c
-+ */
-+
-+extern void psb_poke_flush(struct drm_device *dev, uint32_t class);
-+extern int psb_fence_emit_sequence(struct drm_device *dev, uint32_t class,
-+ uint32_t flags, uint32_t * sequence,
-+ uint32_t * native_type);
-+extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
-+extern int psb_fence_has_irq(struct drm_device *dev, uint32_t class,
-+ uint32_t flags);
-+extern void psb_2D_irq_off(struct drm_psb_private *dev_priv);
-+extern void psb_2D_irq_on(struct drm_psb_private *dev_priv);
-+extern uint32_t psb_fence_advance_sequence(struct drm_device *dev,
-+ uint32_t class);
-+extern void psb_fence_error(struct drm_device *dev,
-+ uint32_t class,
-+ uint32_t sequence, uint32_t type, int error);
-+
-+/*MSVDX stuff*/
-+extern void psb_msvdx_irq_off(struct drm_psb_private *dev_priv);
-+extern void psb_msvdx_irq_on(struct drm_psb_private *dev_priv);
-+
-+/*
-+ * psb_buffer.c
-+ */
-+extern struct drm_ttm_backend *drm_psb_tbe_init(struct drm_device *dev);
-+extern int psb_fence_types(struct drm_buffer_object *bo, uint32_t * class,
-+ uint32_t * type);
-+extern uint32_t psb_evict_mask(struct drm_buffer_object *bo);
-+extern int psb_invalidate_caches(struct drm_device *dev, uint64_t flags);
-+extern int psb_init_mem_type(struct drm_device *dev, uint32_t type,
-+ struct drm_mem_type_manager *man);
-+extern int psb_move(struct drm_buffer_object *bo,
-+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
-+
-+/*
-+ * psb_gtt.c
-+ */
-+extern int psb_gtt_init(struct psb_gtt *pg, int resume);
-+extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
-+ unsigned offset_pages, unsigned num_pages,
-+ unsigned desired_tile_stride,
-+ unsigned hw_tile_stride, int type);
-+extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
-+ unsigned num_pages,
-+ unsigned desired_tile_stride,
-+ unsigned hw_tile_stride);
-+
-+extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
-+extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
-+
-+/*
-+ * psb_fb.c
-+ */
-+extern int psbfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
-+extern int psbfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
-+extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+
-+/*
-+ * psb_reset.c
-+ */
-+
-+extern void psb_reset(struct drm_psb_private *dev_priv, int reset_2d);
-+extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
-+extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
-+extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
-+
-+/*
-+ * psb_regman.c
-+ */
-+
-+extern void psb_takedown_use_base(struct drm_psb_private *dev_priv);
-+extern int psb_grab_use_base(struct drm_psb_private *dev_priv,
-+ unsigned long dev_virtual,
-+ unsigned long size,
-+ unsigned int data_master,
-+ uint32_t fence_class,
-+ uint32_t fence_type,
-+ int no_wait,
-+ int ignore_signals,
-+ int *r_reg, uint32_t * r_offset);
-+extern int psb_init_use_base(struct drm_psb_private *dev_priv,
-+ unsigned int reg_start, unsigned int reg_num);
-+
-+/*
-+ * psb_xhw.c
-+ */
-+
-+extern int psb_xhw_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_xhw_init_ioctl(struct drm_device *dev, void *data,
-+ struct drm_file *file_priv);
-+extern int psb_xhw_init(struct drm_device *dev);
-+extern void psb_xhw_takedown(struct drm_psb_private *dev_priv);
-+extern void psb_xhw_init_takedown(struct drm_psb_private *dev_priv,
-+ struct drm_file *file_priv, int closing);
-+extern int psb_xhw_scene_bind_fire(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t fire_flags,
-+ uint32_t hw_context,
-+ uint32_t * cookie,
-+ uint32_t * oom_cmds,
-+ uint32_t num_oom_cmds,
-+ uint32_t offset,
-+ uint32_t engine, uint32_t flags);
-+extern int psb_xhw_fire_raster(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t fire_flags);
-+extern int psb_xhw_scene_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t w,
-+ uint32_t h,
-+ uint32_t * hw_cookie,
-+ uint32_t * bo_size,
-+ uint32_t * clear_p_start,
-+ uint32_t * clear_num_pages);
-+
-+extern int psb_xhw_reset_dpm(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern int psb_xhw_ta_mem_info(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t pages,
-+ uint32_t * hw_cookie, uint32_t * size);
-+extern int psb_xhw_ta_oom(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie);
-+extern void psb_xhw_ta_oom_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t * cookie,
-+ uint32_t * bca,
-+ uint32_t * rca, uint32_t * flags);
-+extern int psb_xhw_vistest(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern int psb_xhw_handler(struct drm_psb_private *dev_priv);
-+extern int psb_xhw_resume(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+extern void psb_xhw_fire_reply(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf, uint32_t * cookie);
-+extern int psb_xhw_ta_mem_load(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf,
-+ uint32_t flags,
-+ uint32_t param_offset,
-+ uint32_t pt_offset,
-+ uint32_t *hw_cookie);
-+extern void psb_xhw_clean_buf(struct drm_psb_private *dev_priv,
-+ struct psb_xhw_buf *buf);
-+
-+/*
-+ * Utilities
-+ */
-+
-+#define PSB_ALIGN_TO(_val, _align) \
-+ (((_val) + ((_align) - 1)) & ~((_align) - 1))
-+#define PSB_WVDC32(_val, _offs) \
-+ iowrite32(_val, dev_priv->vdc_reg + (_offs))
-+#define PSB_RVDC32(_offs) \
-+ ioread32(dev_priv->vdc_reg + (_offs))
-+#define PSB_WSGX32(_val, _offs) \
-+ iowrite32(_val, dev_priv->sgx_reg + (_offs))
-+#define PSB_RSGX32(_offs) \
-+ ioread32(dev_priv->sgx_reg + (_offs))
-+#define PSB_WMSVDX32(_val, _offs) \
-+ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
-+#define PSB_RMSVDX32(_offs) \
-+ ioread32(dev_priv->msvdx_reg + (_offs))
-+
-+#define PSB_ALPL(_val, _base) \
-+ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
-+#define PSB_ALPLM(_val, _base) \
-+ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
-+
-+static inline psb_fixed psb_mul_fixed(psb_fixed a, psb_fixed b)
-+{
-+ s64 tmp;
-+ s64 a64 = (s64) a;
-+ s64 b64 = (s64) b;
-+
-+ tmp = a64 * b64;
-+ return tmp / (1ULL << PSB_FIXED_SHIFT) +
-+ ((tmp & 0x80000000ULL) ? 1 : 0);
-+}
-+
-+static inline psb_fixed psb_mul_ufixed(psb_ufixed a, psb_fixed b)
-+{
-+ u64 tmp;
-+ u64 a64 = (u64) a;
-+ u64 b64 = (u64) b;
-+
-+ tmp = a64 * b64;
-+ return (tmp >> PSB_FIXED_SHIFT) + ((tmp & 0x80000000ULL) ? 1 : 0);
-+}
-+
-+static inline uint32_t psb_ufixed_to_float32(psb_ufixed a)
-+{
-+ uint32_t exp = 0x7f + 7;
-+ uint32_t mantissa = (uint32_t) a;
-+
-+ if (a == 0)
-+ return 0;
-+ while ((mantissa & 0xff800000) == 0) {
-+ exp -= 1;
-+ mantissa <<= 1;
-+ }
-+ while ((mantissa & 0xff800000) > 0x00800000) {
-+ exp += 1;
-+ mantissa >>= 1;
-+ }
-+ return (mantissa & ~0xff800000) | (exp << 23);
-+}
-+
-+static inline uint32_t psb_fixed_to_float32(psb_fixed a)
-+{
-+ if (a < 0)
-+ return psb_ufixed_to_float32(-a) | 0x80000000;
-+ else
-+ return psb_ufixed_to_float32(a);
-+}
-+
-+#define PSB_D_RENDER (1 << 16)
-+
-+#define PSB_D_GENERAL (1 << 0)
-+#define PSB_D_INIT (1 << 1)
-+#define PSB_D_IRQ (1 << 2)
-+#define PSB_D_FW (1 << 3)
-+#define PSB_D_PERF (1 << 4)
-+#define PSB_D_TMP (1 << 5)
-+
-+extern int drm_psb_debug;
-+extern int drm_psb_no_fb;
-+extern int drm_psb_disable_vsync;
-+
-+#define PSB_DEBUG_FW(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
-+#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
-+#define PSB_DEBUG_INIT(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
-+#define PSB_DEBUG_IRQ(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
-+#define PSB_DEBUG_RENDER(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
-+#define PSB_DEBUG_PERF(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
-+#define PSB_DEBUG_TMP(_fmt, _arg...) \
-+ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
-+
-+#if DRM_DEBUG_CODE
-+#define PSB_DEBUG(_flag, _fmt, _arg...) \
-+ do { \
-+ if ((_flag) & drm_psb_debug) \
-+ printk(KERN_DEBUG \
-+ "[psb:0x%02x:%s] " _fmt , _flag, \
-+ __FUNCTION__ , ##_arg); \
-+ } while (0)
-+#else
-+#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
-+#endif
-+
-+#endif
-Index: libdrm-2.4.4/shared-core/psb_reg.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ libdrm-2.4.4/shared-core/psb_reg.h 2009-02-04 16:39:55.000000000 +0000
-@@ -0,0 +1,555 @@
-+/**************************************************************************
-+ *
-+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
-+ * Copyright (c) 2007, Intel Corporation.
-+ * All Rights Reserved.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
-+ * develop this driver.
-+ *
-+ **************************************************************************/
-+/*
-+ */
-+#ifndef _PSB_REG_H_
-+#define _PSB_REG_H_
-+
-+#define PSB_CR_CLKGATECTL 0x0000
-+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
-+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
-+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
-+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
-+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
-+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
-+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
-+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
-+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
-+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
-+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
-+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
-+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
-+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
-+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
-+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
-+
-+#define PSB_CR_CORE_ID 0x0010
-+#define _PSB_CC_ID_ID_SHIFT (16)
-+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
-+#define _PSB_CC_ID_CONFIG_SHIFT (0)
-+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
-+
-+#define PSB_CR_CORE_REVISION 0x0014
-+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
-+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
-+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
-+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
-+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
-+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
-+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
-+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
-+
-+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
-+
-+#define PSB_CR_SOFT_RESET 0x0080
-+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
-+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
-+#define _PSB_CS_RESET_USE_RESET (1 << 4)
-+#define _PSB_CS_RESET_TA_RESET (1 << 3)
-+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
-+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
-+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
-+
-+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
-+
-+#define PSB_CR_EVENT_STATUS 0x012C
-+
-+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
-+
-+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
-+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
-+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
-+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
-+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
-+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
-+#define _PSB_CE_SW_EVENT (1 << 14)
-+#define _PSB_CE_TA_FINISHED (1 << 13)
-+#define _PSB_CE_TA_TERMINATE (1 << 12)
-+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
-+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
-+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
-+
-+
-+#define PSB_USE_OFFSET_MASK 0x0007FFFF
-+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
-+#define PSB_CR_USE_CODE_BASE0 0x0A0C
-+#define PSB_CR_USE_CODE_BASE1 0x0A10
-+#define PSB_CR_USE_CODE_BASE2 0x0A14
-+#define PSB_CR_USE_CODE_BASE3 0x0A18
-+#define PSB_CR_USE_CODE_BASE4 0x0A1C
-+#define PSB_CR_USE_CODE_BASE5 0x0A20
-+#define PSB_CR_USE_CODE_BASE6 0x0A24
-+#define PSB_CR_USE_CODE_BASE7 0x0A28
-+#define PSB_CR_USE_CODE_BASE8 0x0A2C
-+#define PSB_CR_USE_CODE_BASE9 0x0A30
-+#define PSB_CR_USE_CODE_BASE10 0x0A34
-+#define PSB_CR_USE_CODE_BASE11 0x0A38
-+#define PSB_CR_USE_CODE_BASE12 0x0A3C
-+#define PSB_CR_USE_CODE_BASE13 0x0A40
-+#define PSB_CR_USE_CODE_BASE14 0x0A44
-+#define PSB_CR_USE_CODE_BASE15 0x0A48
-+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
-+#define _PSB_CUC_BASE_DM_SHIFT (25)
-+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
-+#define _PSB_CUC_BASE_ADDR_SHIFT (0) // 1024-bit aligned address?
-+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
-+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
-+#define _PSB_CUC_DM_VERTEX (0)
-+#define _PSB_CUC_DM_PIXEL (1)
-+#define _PSB_CUC_DM_RESERVED (2)
-+#define _PSB_CUC_DM_EDM (3)
-+
-+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
-+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) // 1MB aligned address
-+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
-+
-+#define PSB_CR_EVENT_KICKER 0x0AC4
-+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) // 128-bit aligned address
-+
-+#define PSB_CR_EVENT_KICK 0x0AC8
-+#define _PSB_CE_KICK_NOW (1 << 0)
-+
-+
-+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
-+
-+#define PSB_CR_BIF_CTRL 0x0C00
-+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
-+#define _PSB_CB_CTRL_INVALDC (1 << 3)
-+#define _PSB_CB_CTRL_FLUSH (1 << 2)
-+
-+#define PSB_CR_BIF_INT_STAT 0x0C04
-+
-+#define PSB_CR_BIF_FAULT 0x0C08
-+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
-+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
-+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
-+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
-+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
-+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
-+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
-+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
-+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
-+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
-+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
-+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
-+
-+#define PSB_CR_BIF_BANK0 0x0C78
-+
-+#define PSB_CR_BIF_BANK1 0x0C7C
-+
-+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
-+
-+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
-+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
-+
-+#define PSB_CR_2D_SOCIF 0x0E18
-+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
-+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
-+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
-+
-+#define PSB_CR_2D_BLIT_STATUS 0x0E04
-+#define _PSB_C2B_STATUS_BUSY (1 << 24)
-+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
-+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
-+
-+/*
-+ * 2D defs.
-+ */
-+
-+/*
-+ * 2D Slave Port Data : Block Header's Object Type
-+ */
-+
-+#define PSB_2D_CLIP_BH (0x00000000)
-+#define PSB_2D_PAT_BH (0x10000000)
-+#define PSB_2D_CTRL_BH (0x20000000)
-+#define PSB_2D_SRC_OFF_BH (0x30000000)
-+#define PSB_2D_MASK_OFF_BH (0x40000000)
-+#define PSB_2D_RESERVED1_BH (0x50000000)
-+#define PSB_2D_RESERVED2_BH (0x60000000)
-+#define PSB_2D_FENCE_BH (0x70000000)
-+#define PSB_2D_BLIT_BH (0x80000000)
-+#define PSB_2D_SRC_SURF_BH (0x90000000)
-+#define PSB_2D_DST_SURF_BH (0xA0000000)
-+#define PSB_2D_PAT_SURF_BH (0xB0000000)
-+#define PSB_2D_SRC_PAL_BH (0xC0000000)
-+#define PSB_2D_PAT_PAL_BH (0xD0000000)
-+#define PSB_2D_MASK_SURF_BH (0xE0000000)
-+#define PSB_2D_FLUSH_BH (0xF0000000)
-+
-+/*
-+ * Clip Definition block (PSB_2D_CLIP_BH)
-+ */
-+#define PSB_2D_CLIPCOUNT_MAX (1)
-+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
-+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
-+#define PSB_2D_CLIPCOUNT_SHIFT (0)
-+// clip rectangle min & max
-+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
-+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
-+#define PSB_2D_CLIP_XMAX_SHIFT (12)
-+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
-+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
-+#define PSB_2D_CLIP_XMIN_SHIFT (0)
-+// clip rectangle offset
-+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
-+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
-+#define PSB_2D_CLIP_YMAX_SHIFT (12)
-+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
-+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
-+#define PSB_2D_CLIP_YMIN_SHIFT (0)
-+
-+/*
-+ * Pattern Control (PSB_2D_PAT_BH)
-+ */
-+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
-+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
-+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
-+#define PSB_2D_PAT_WIDTH_SHIFT (5)
-+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
-+#define PSB_2D_PAT_YSTART_SHIFT (10)
-+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
-+#define PSB_2D_PAT_XSTART_SHIFT (15)
-+
-+/*
-+ * 2D Control block (PSB_2D_CTRL_BH)
-+ */
-+// Present Flags
-+#define PSB_2D_SRCCK_CTRL (0x00000001)
-+#define PSB_2D_DSTCK_CTRL (0x00000002)
-+#define PSB_2D_ALPHA_CTRL (0x00000004)
-+// Colour Key Colour (SRC/DST)
-+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
-+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
-+#define PSB_2D_CK_COL_SHIFT (0)
-+// Colour Key Mask (SRC/DST)
-+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
-+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
-+#define PSB_2D_CK_MASK_SHIFT (0)
-+// Alpha Control (Alpha/RGB)
-+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
-+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
-+#define PSB_2D_GBLALPHA_SHIFT (12)
-+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
-+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
-+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
-+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
-+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
-+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
-+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
-+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
-+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
-+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
-+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
-+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
-+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
-+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
-+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
-+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
-+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
-+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
-+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
-+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
-+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
-+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
-+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
-+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
-+
-+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
-+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
-+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
-+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
-+
-+/*
-+ *Source Offset (PSB_2D_SRC_OFF_BH)
-+ */
-+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
-+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
-+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
-+
-+/*
-+ * Mask Offset (PSB_2D_MASK_OFF_BH)
-+ */
-+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
-+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
-+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
-+
-+/*
-+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
-+ */
-+
-+/*
-+ *Blit Rectangle (PSB_2D_BLIT_BH)
-+ */
-+
-+#define PSB_2D_ROT_MASK (3<<25)
-+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
-+#define PSB_2D_ROT_NONE (0<<25)
-+#define PSB_2D_ROT_90DEGS (1<<25)
-+#define PSB_2D_ROT_180DEGS (2<<25)
-+#define PSB_2D_ROT_270DEGS (3<<25)
-+
-+#define PSB_2D_COPYORDER_MASK (3<<23)
-+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
-+#define PSB_2D_COPYORDER_TL2BR (0<<23)
-+#define PSB_2D_COPYORDER_BR2TL (1<<23)
-+#define PSB_2D_COPYORDER_TR2BL (2<<23)
-+#define PSB_2D_COPYORDER_BL2TR (3<<23)
-+
-+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
-+#define PSB_2D_DSTCK_DISABLE (0x00000000)
-+#define PSB_2D_DSTCK_PASS (0x00200000)
-+#define PSB_2D_DSTCK_REJECT (0x00400000)
-+
-+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
-+#define PSB_2D_SRCCK_DISABLE (0x00000000)
-+#define PSB_2D_SRCCK_PASS (0x00080000)
-+#define PSB_2D_SRCCK_REJECT (0x00100000)
-+
-+#define PSB_2D_CLIP_ENABLE (0x00040000)
-+
-+#define PSB_2D_ALPHA_ENABLE (0x00020000)
-+
-+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
-+#define PSB_2D_PAT_MASK (0x00010000)
-+#define PSB_2D_USE_PAT (0x00010000)
-+#define PSB_2D_USE_FILL (0x00000000)
-+/*
-+ * Tungsten Graphics note on rop codes: If rop A and rop B are
-+ * identical, the mask surface will not be read and need not be
-+ * set up.
-+ */
-+
-+#define PSB_2D_ROP3B_MASK (0x0000FF00)
-+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
-+#define PSB_2D_ROP3B_SHIFT (8)
-+// rop code A
-+#define PSB_2D_ROP3A_MASK (0x000000FF)
-+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
-+#define PSB_2D_ROP3A_SHIFT (0)
-+
-+#define PSB_2D_ROP4_MASK (0x0000FFFF)
-+/*
-+ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
-+ * Fill Colour RGBA8888
-+ */
-+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
-+#define PSB_2D_FILLCOLOUR_SHIFT (0)
-+/*
-+ * DWORD1: (Always Present)
-+ * X Start (Dest)
-+ * Y Start (Dest)
-+ */
-+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
-+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
-+#define PSB_2D_DST_XSTART_SHIFT (12)
-+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
-+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
-+#define PSB_2D_DST_YSTART_SHIFT (0)
-+/*
-+ * DWORD2: (Always Present)
-+ * X Size (Dest)
-+ * Y Size (Dest)
-+ */
-+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
-+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
-+#define PSB_2D_DST_XSIZE_SHIFT (12)
-+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
-+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
-+#define PSB_2D_DST_YSIZE_SHIFT (0)
-+
-+/*
-+ * Source Surface (PSB_2D_SRC_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
-+#define PSB_2D_SRC_1_PAL (0x00000000)
-+#define PSB_2D_SRC_2_PAL (0x00008000)
-+#define PSB_2D_SRC_4_PAL (0x00010000)
-+#define PSB_2D_SRC_8_PAL (0x00018000)
-+#define PSB_2D_SRC_8_ALPHA (0x00020000)
-+#define PSB_2D_SRC_4_ALPHA (0x00028000)
-+#define PSB_2D_SRC_332RGB (0x00030000)
-+#define PSB_2D_SRC_4444ARGB (0x00038000)
-+#define PSB_2D_SRC_555RGB (0x00040000)
-+#define PSB_2D_SRC_1555ARGB (0x00048000)
-+#define PSB_2D_SRC_565RGB (0x00050000)
-+#define PSB_2D_SRC_0888ARGB (0x00058000)
-+#define PSB_2D_SRC_8888ARGB (0x00060000)
-+#define PSB_2D_SRC_8888UYVY (0x00068000)
-+#define PSB_2D_SRC_RESERVED (0x00070000)
-+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
-+
-+
-+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_SRC_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_SRC_ADDR_SHIFT (2)
-+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
-+#define PSB_2D_PAT_1_PAL (0x00000000)
-+#define PSB_2D_PAT_2_PAL (0x00008000)
-+#define PSB_2D_PAT_4_PAL (0x00010000)
-+#define PSB_2D_PAT_8_PAL (0x00018000)
-+#define PSB_2D_PAT_8_ALPHA (0x00020000)
-+#define PSB_2D_PAT_4_ALPHA (0x00028000)
-+#define PSB_2D_PAT_332RGB (0x00030000)
-+#define PSB_2D_PAT_4444ARGB (0x00038000)
-+#define PSB_2D_PAT_555RGB (0x00040000)
-+#define PSB_2D_PAT_1555ARGB (0x00048000)
-+#define PSB_2D_PAT_565RGB (0x00050000)
-+#define PSB_2D_PAT_0888ARGB (0x00058000)
-+#define PSB_2D_PAT_8888ARGB (0x00060000)
-+
-+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_PAT_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_PAT_ADDR_SHIFT (2)
-+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Destination Surface (PSB_2D_DST_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+
-+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
-+#define PSB_2D_DST_332RGB (0x00030000)
-+#define PSB_2D_DST_4444ARGB (0x00038000)
-+#define PSB_2D_DST_555RGB (0x00040000)
-+#define PSB_2D_DST_1555ARGB (0x00048000)
-+#define PSB_2D_DST_565RGB (0x00050000)
-+#define PSB_2D_DST_0888ARGB (0x00058000)
-+#define PSB_2D_DST_8888ARGB (0x00060000)
-+#define PSB_2D_DST_8888AYUV (0x00070000)
-+
-+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_DST_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_DST_ADDR_SHIFT (2)
-+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Mask Surface (PSB_2D_MASK_SURF_BH)
-+ */
-+/*
-+ * WORD 0
-+ */
-+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
-+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
-+#define PSB_2D_MASK_STRIDE_SHIFT (0)
-+/*
-+ * WORD 1 - Base Address
-+ */
-+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
-+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
-+#define PSB_2D_MASK_ADDR_SHIFT (2)
-+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
-+
-+/*
-+ * Source Palette (PSB_2D_SRC_PAL_BH)
-+ */
-+
-+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
-+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
-+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
-+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
-+
-+/*
-+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
-+ */
-+
-+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
-+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
-+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
-+#define PSB_2D_PATPAL_BYTEALIGN (1024)
-+
-+/*
-+ * Rop3 Codes (2 LS bytes)
-+ */
-+
-+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
-+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
-+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
-+#define PSB_2D_ROP3_BLACKNESS (0x0000)
-+#define PSB_2D_ROP3_SRC (0xCC)
-+#define PSB_2D_ROP3_PAT (0xF0)
-+#define PSB_2D_ROP3_DST (0xAA)
-+
-+
-+/*
-+ * Sizes.
-+ */
-+
-+#define PSB_SCENE_HW_COOKIE_SIZE 16
-+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
-+
-+/*
-+ * Scene stuff.
-+ */
-+
-+#define PSB_NUM_HW_SCENES 2
-+
-+/*
-+ * Scheduler completion actions.
-+ */
-+
-+#define PSB_RASTER_BLOCK 0
-+#define PSB_RASTER 1
-+#define PSB_RETURN 2
-+#define PSB_TA 3
-+
-+
-+#endif
-Index: libdrm-2.4.4/libdrm/Makefile.am
-===================================================================
---- libdrm-2.4.4.orig/libdrm/Makefile.am 2009-02-04 16:42:01.000000000 +0000
-+++ libdrm-2.4.4/libdrm/Makefile.am 2009-02-04 16:45:06.000000000 +0000
-@@ -31,6 +31,6 @@
- libdrm_lists.h
-
- libdrmincludedir = ${includedir}
--libdrminclude_HEADERS = xf86drm.h xf86drmMode.h
-+libdrminclude_HEADERS = xf86drm.h xf86drmMode.h xf86mm.h libdrm_lists.h
-
- EXTRA_DIST = ChangeLog TODO
diff --git a/meta/packages/drm/libdrm-2.4.7/installtests.patch b/meta/packages/drm/libdrm-2.4.7/installtests.patch
new file mode 100644
index 000000000..a16c8a738
--- /dev/null
+++ b/meta/packages/drm/libdrm-2.4.7/installtests.patch
@@ -0,0 +1,39 @@
+Index: libdrm-2.4.7/tests/Makefile.am
+===================================================================
+--- libdrm-2.4.7.orig/tests/Makefile.am 2009-04-09 20:16:35.000000000 +0100
++++ libdrm-2.4.7/tests/Makefile.am 2009-04-17 12:35:14.000000000 +0100
+@@ -4,7 +4,7 @@
+
+ LDADD = $(top_builddir)/libdrm/libdrm.la
+
+-noinst_PROGRAMS = \
++bin_PROGRAMS = \
+ dristat \
+ drmstat
+
+Index: libdrm-2.4.7/tests/modeprint/Makefile.am
+===================================================================
+--- libdrm-2.4.7.orig/tests/modeprint/Makefile.am 2009-02-17 19:52:37.000000000 +0000
++++ libdrm-2.4.7/tests/modeprint/Makefile.am 2009-04-17 12:35:32.000000000 +0100
+@@ -3,7 +3,7 @@
+ -I$(top_srcdir)/libdrm/intel/ \
+ -I$(top_srcdir)/libdrm
+
+-noinst_PROGRAMS = \
++bin_PROGRAMS = \
+ modeprint
+
+ modeprint_SOURCES = \
+Index: libdrm-2.4.7/tests/modetest/Makefile.am
+===================================================================
+--- libdrm-2.4.7.orig/tests/modetest/Makefile.am 2009-02-17 19:52:37.000000000 +0000
++++ libdrm-2.4.7/tests/modetest/Makefile.am 2009-04-17 12:35:42.000000000 +0100
+@@ -4,7 +4,7 @@
+ -I$(top_srcdir)/libdrm \
+ $(CAIRO_CFLAGS)
+
+-noinst_PROGRAMS = \
++bin_PROGRAMS = \
+ modetest
+
+ modetest_SOURCES = \
diff --git a/meta/packages/drm/libdrm_2.4.4.bb b/meta/packages/drm/libdrm_2.4.4.bb
deleted file mode 100644
index 8198592f8..000000000
--- a/meta/packages/drm/libdrm_2.4.4.bb
+++ /dev/null
@@ -1,9 +0,0 @@
-SECTION = "x11/base"
-LICENSE = "MIT"
-SRC_URI = "http://dri.freedesktop.org/libdrm/libdrm-${PV}.tar.bz2 \
- file://poulsbo.patch;patch=1"
-PR = "r3"
-PROVIDES = "drm"
-DEPENDS = "libpthread-stubs"
-
-inherit autotools_stage pkgconfig
diff --git a/meta/packages/drm/libdrm_2.4.7.bb b/meta/packages/drm/libdrm_2.4.7.bb
new file mode 100644
index 000000000..f0d566893
--- /dev/null
+++ b/meta/packages/drm/libdrm_2.4.7.bb
@@ -0,0 +1,12 @@
+SECTION = "x11/base"
+LICENSE = "MIT"
+SRC_URI = "http://dri.freedesktop.org/libdrm/libdrm-${PV}.tar.bz2 \
+ file://installtests.patch;patch=1"
+PR = "r1"
+PROVIDES = "drm"
+DEPENDS = "libpthread-stubs udev cairo"
+
+PACKAGES =+ "libdrm-tests"
+FILES_libdrm-tests = "${bindir}/dr* ${bindir}/mode*"
+
+inherit autotools_stage pkgconfig
diff --git a/meta/packages/e2fsprogs/e2fsprogs-native_1.41.2.bb b/meta/packages/e2fsprogs/e2fsprogs-native_1.41.2.bb
index eaed14d93..90095d9ae 100644
--- a/meta/packages/e2fsprogs/e2fsprogs-native_1.41.2.bb
+++ b/meta/packages/e2fsprogs/e2fsprogs-native_1.41.2.bb
@@ -2,7 +2,30 @@ require e2fsprogs_${PV}.bb
inherit native
DEPENDS = "gettext-native"
+PR = "r1"
do_stage () {
- oe_runmake install
+ oe_libinstall -a -C lib libblkid ${STAGING_LIBDIR}/
+ oe_libinstall -a -C lib libe2p ${STAGING_LIBDIR}/
+ oe_libinstall -a -C lib libext2fs ${STAGING_LIBDIR}/
+ oe_libinstall -a -C lib libuuid ${STAGING_LIBDIR}/
+ install -d ${STAGING_INCDIR}/e2p
+ for h in ${e2pheaders}; do
+ install -m 0644 lib/e2p/$h ${STAGING_INCDIR}/e2p/ || die "failed to install $h"
+ done
+ install -d ${STAGING_INCDIR}/ext2fs
+ for h in ${ext2fsheaders}; do
+ install -m 0644 lib/ext2fs/$h ${STAGING_INCDIR}/ext2fs/ || die "failed to install $h"
+ done
+ install -d ${STAGING_INCDIR}/blkid
+ for h in blkid.h blkid_types.h; do
+ install -m 0644 lib/blkid/$h ${STAGING_INCDIR}/blkid/ || die "failed to install $h"
+ done
+ install -d ${STAGING_INCDIR}/uuid
+ install -m 0644 lib/uuid/uuid.h ${STAGING_INCDIR}/uuid/ || die "failed to install $h"
+
+ install -d ${STAGING_LIBDIR}/pkgconfig
+ for pc in lib/*/*.pc; do
+ install -m 0644 $pc ${STAGING_LIBDIR}/pkgconfig/ || die "failed to install $h"
+ done
}
diff --git a/meta/packages/initrdscripts/files/init-live.sh b/meta/packages/initrdscripts/files/init-live.sh
index d4e53713e..c96b1f47c 100644
--- a/meta/packages/initrdscripts/files/init-live.sh
+++ b/meta/packages/initrdscripts/files/init-live.sh
@@ -58,6 +58,7 @@ do
for i in `ls /media 2>/dev/null`; do
if [ -f /media/$i/$ROOT_IMAGE ] ; then
found="yes"
+ break
fi
done
if [ "$found" = "yes" ]; then
diff --git a/meta/packages/initrdscripts/initramfs-live-boot_1.0.bb b/meta/packages/initrdscripts/initramfs-live-boot_1.0.bb
index 2c92f7377..07d349c8f 100644
--- a/meta/packages/initrdscripts/initramfs-live-boot_1.0.bb
+++ b/meta/packages/initrdscripts/initramfs-live-boot_1.0.bb
@@ -2,7 +2,7 @@ DESCRIPTON = "A live image init script"
SRC_URI = "file://init-live.sh"
-PR = "r1"
+PR = "r2"
do_install() {
install -m 0755 ${WORKDIR}/init-live.sh ${D}/init
diff --git a/meta/packages/libnl/libnl_1.0-pre6.bb b/meta/packages/libnl/libnl_1.0-pre8.bb
index 740bc937d..0b98f896a 100644
--- a/meta/packages/libnl/libnl_1.0-pre6.bb
+++ b/meta/packages/libnl/libnl_1.0-pre8.bb
@@ -3,11 +3,11 @@ SECTION = "libs/network"
LICENSE = "LGPL"
HOMEPAGE = "http://people.suug.ch/~tgr/libnl/"
PRIORITY = "optional"
-PV = "0.99+1.0-pre6"
+PV = "0.99+1.0-pre8"
inherit autotools_stage pkgconfig
-SRC_URI= "http://people.suug.ch/~tgr/libnl/files/${PN}-1.0-pre6.tar.gz \
+SRC_URI= "http://people.suug.ch/~tgr/libnl/files/${PN}-1.0-pre8.tar.gz \
file://local-includes.patch;patch=1"
-S = "${WORKDIR}/${PN}-1.0-pre6"
+S = "${WORKDIR}/${PN}-1.0-pre8"
diff --git a/meta/packages/libproxy/libproxy/asneededfix.patch b/meta/packages/libproxy/libproxy/asneededfix.patch
new file mode 100644
index 000000000..cc22077f0
--- /dev/null
+++ b/meta/packages/libproxy/libproxy/asneededfix.patch
@@ -0,0 +1,13 @@
+Index: libproxy-0.2.3/src/lib/Makefile.am
+===================================================================
+--- libproxy-0.2.3.orig/src/lib/Makefile.am 2009-03-09 16:24:08.000000000 +0000
++++ libproxy-0.2.3/src/lib/Makefile.am 2009-03-09 16:24:13.000000000 +0000
+@@ -2,7 +2,7 @@
+ libproxy_la_SOURCES = misc.c url.c pac.c dhcp.c dns.c slp.c wpad.c proxy_factory.c config_file.c \
+ misc.h url.h pac.h dhcp.h dns.h slp.h wpad.h proxy_factory.h proxy.h config_file.h
+ libproxy_la_CFLAGS = -Wall
+-libproxy_la_LDFLAGS = -lm
++libproxy_la_LDFLAGS = -lm -ldl
+
+ include_HEADERS = proxy.h
+
diff --git a/meta/packages/libproxy/libproxy_0.2.3.bb b/meta/packages/libproxy/libproxy_0.2.3.bb
new file mode 100644
index 000000000..34172f263
--- /dev/null
+++ b/meta/packages/libproxy/libproxy_0.2.3.bb
@@ -0,0 +1,16 @@
+DESCRIPTION = "A library that provides automatic proxy configuration management"
+HOMEPAGE = "http://code.google.com/p/libproxy/"
+LICENSE = "LGPL"
+SECTION = "libs"
+
+DEPENDS = "virtual/libx11 xmu gconf-dbus"
+
+SRC_URI = "http://libproxy.googlecode.com/files/libproxy-${PV}.tar.gz \
+ file://asneededfix.patch;patch=1"
+S = "${WORKDIR}/libproxy-${PV}"
+
+inherit autotools_stage pkgconfig
+
+EXTRA_OECONF = "--without-kde --with-gnome --without-webkit --without-python --without-mozjs --without-networkmanager"
+
+FILES_${PN}-dbg += "${libdir}/libproxy/0.2.3/plugins/" \ No newline at end of file
diff --git a/meta/packages/libsoup/libsoup-2.4_2.25.91.bb b/meta/packages/libsoup/libsoup-2.4_2.25.91.bb
new file mode 100644
index 000000000..e4494cf45
--- /dev/null
+++ b/meta/packages/libsoup/libsoup-2.4_2.25.91.bb
@@ -0,0 +1,14 @@
+DESCRIPTION = "An HTTP library implementation in C"
+LICENSE = "GPL"
+SECTION = "x11/gnome/libs"
+
+DEPENDS = "glib-2.0 gnutls libxml2 libproxy sqlite3"
+
+SRC_URI = "http://ftp.gnome.org/pub/GNOME/sources/libsoup/2.25/libsoup-${PV}.tar.bz2"
+S = "${WORKDIR}/libsoup-${PV}"
+
+inherit autotools_stage pkgconfig
+
+do_stage() {
+ autotools_stage_all
+}
diff --git a/meta/packages/mesa/mesa-7.0.2/fix-host-compile.patch b/meta/packages/mesa/mesa-7.0.2/fix-host-compile.patch
deleted file mode 100644
index d24d03d37..000000000
--- a/meta/packages/mesa/mesa-7.0.2/fix-host-compile.patch
+++ /dev/null
@@ -1,30 +0,0 @@
---- /src/mesa/x86/orig-Makefile 2005-07-01 04:54:38.000000000 +0300
-+++ /src/mesa/x86/Makefile 2007-06-07 21:52:31.000000000 +0300
-@@ -5,6 +5,7 @@
-
-
- INCLUDE_DIRS = \
-+ -I/usr/include \
- -I$(TOP)/include/GL \
- -I$(TOP)/include \
- -I.. \
-@@ -13,6 +14,10 @@
- -I../glapi \
- -I../tnl
-
-+OPT_FLAGS_host = -fexpensive-optimizations -fomit-frame-pointer -frename-registers -Os
-+
-+CFLAGS_host = -Wall -Wmissing-prototypes $(OPT_FLAGS_host) $(PIC_FLAGS) $(ARCH_FLAGS) \
-+ $(DEFINES) $(ASM_FLAGS) $(X11_INCLUDES) -std=c99 -ffast-math
-
- default: gen_matypes matypes.h
-
-@@ -21,7 +26,7 @@
-
-
- gen_matypes: gen_matypes.c
-- $(CC) $(INCLUDE_DIRS) $(CFLAGS) gen_matypes.c -o gen_matypes
-+ $(CC) $(INCLUDE_DIRS) $(CFLAGS_host) gen_matypes.c -o gen_matypes
-
- # need some special rules here, unfortunately
- matypes.h: ../main/mtypes.h ../tnl/t_context.h gen_matypes
diff --git a/meta/packages/mesa/mesa-7.0.2/mklib-rpath-link.patch b/meta/packages/mesa/mesa-7.0.2/mklib-rpath-link.patch
deleted file mode 100644
index 618f5b57e..000000000
--- a/meta/packages/mesa/mesa-7.0.2/mklib-rpath-link.patch
+++ /dev/null
@@ -1,23 +0,0 @@
---- /tmp/mklib 2007-12-08 11:03:23.000000000 +0100
-+++ Mesa-7.0.2/bin/mklib 2007-12-08 11:04:02.509863000 +0100
-@@ -106,6 +106,9 @@
- -L*)
- DEPS="$DEPS $1"
- ;;
-+ -Wl*)
-+ DEPS="$DEPS $1"
-+ ;;
- -pthread)
- # this is a special case (see bugzilla 10876)
- DEPS="$DEPS $1"
---- /tmp/default 2007-12-08 11:04:17.000000000 +0100
-+++ Mesa-7.0.2/configs/default 2007-12-08 11:05:06.279863000 +0100
-@@ -76,7 +76,7 @@
- GLW_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GL_LIB) $(EXTRA_LIB_PATH) -lXt -lX11
-
- # Program dependencies - specific GL/glut libraries added in Makefiles
--APP_LIB_DEPS = -lm
-+APP_LIB_DEPS = $(EXTRA_LIB_PATH) -lm
-
-
-
diff --git a/meta/packages/mesa/mesa-dri_7.2.bb b/meta/packages/mesa/mesa-dri_7.4.bb
index 43e4cb552..43e4cb552 100644
--- a/meta/packages/mesa/mesa-dri_7.2.bb
+++ b/meta/packages/mesa/mesa-dri_7.4.bb
diff --git a/meta/packages/mesa/mesa-dri_git.bb b/meta/packages/mesa/mesa-dri_git.bb
index e565ac536..2608e226b 100644
--- a/meta/packages/mesa/mesa-dri_git.bb
+++ b/meta/packages/mesa/mesa-dri_git.bb
@@ -14,7 +14,7 @@ DEFAULT_PREFERENCE = "-1"
# Netbooks have DRI support so use mesa-dri by default
DEFAULT_PREFERENCE_netbook = "1"
-SRC_URI = "git://anongit.freedesktop.org/git/mesa/mesa;protocol=git;branch=intel-2008-q3 "
+SRC_URI = "git://anongit.freedesktop.org/git/mesa/mesa;protocol=git;branch=master"
S = "${WORKDIR}/git"
PACKAGES =+ "${PN}-xprogs"
diff --git a/meta/packages/mesa/mesa-xlib_7.2.bb b/meta/packages/mesa/mesa-xlib_7.4.bb
index 069446904..069446904 100644
--- a/meta/packages/mesa/mesa-xlib_7.2.bb
+++ b/meta/packages/mesa/mesa-xlib_7.4.bb
diff --git a/meta/packages/mozilla-headless/mozilla-headless/configurefix.patch b/meta/packages/mozilla-headless/mozilla-headless/configurefix.patch
index 8a5fbbb77..e02e4af2b 100644
--- a/meta/packages/mozilla-headless/mozilla-headless/configurefix.patch
+++ b/meta/packages/mozilla-headless/mozilla-headless/configurefix.patch
@@ -1,7 +1,7 @@
-Index: git/configure.in
+Index: offscreen/configure.in
===================================================================
---- git.orig/configure.in 2009-01-27 16:11:22.000000000 +0000
-+++ git/configure.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/configure.in 2009-04-16 22:51:48.000000000 +0100
++++ offscreen/configure.in 2009-04-16 23:07:48.000000000 +0100
@@ -62,7 +62,6 @@
AC_PREREQ(2.13)
@@ -18,7 +18,7 @@ Index: git/configure.in
dnl Set the version number of the libs included with mozilla
dnl ========================================================
-@@ -137,6 +135,9 @@
+@@ -136,6 +134,9 @@
MSMANIFEST_TOOL=
@@ -28,7 +28,7 @@ Index: git/configure.in
dnl Set various checks
dnl ========================================================
MISSING_X=
-@@ -263,7 +264,7 @@
+@@ -281,7 +282,7 @@
;;
esac
@@ -37,7 +37,7 @@ Index: git/configure.in
echo "cross compiling from $host to $target"
cross_compiling=yes
-@@ -301,7 +302,7 @@
+@@ -319,7 +320,7 @@
AC_MSG_CHECKING([whether the host c compiler ($HOST_CC $HOST_CFLAGS $HOST_LDFLAGS) works])
AC_TRY_COMPILE([], [return(0);],
@@ -46,7 +46,7 @@ Index: git/configure.in
AC_MSG_ERROR([installation or configuration problem: host compiler $HOST_CC cannot create executables.]) )
CC="$HOST_CXX"
-@@ -309,7 +310,7 @@
+@@ -327,7 +328,7 @@
AC_MSG_CHECKING([whether the host c++ compiler ($HOST_CXX $HOST_CXXFLAGS $HOST_LDFLAGS) works])
AC_TRY_COMPILE([], [return(0);],
@@ -55,7 +55,7 @@ Index: git/configure.in
AC_MSG_ERROR([installation or configuration problem: host compiler $HOST_CXX cannot create executables.]) )
CC=$_SAVE_CC
-@@ -330,7 +331,7 @@
+@@ -348,7 +349,7 @@
;;
esac
@@ -64,7 +64,7 @@ Index: git/configure.in
unset ac_cv_prog_CC
AC_PROG_CC
AC_CHECK_PROGS(CXX, $CXX "${target_alias}-g++" "${target}-g++", :)
-@@ -354,37 +355,7 @@
+@@ -372,37 +373,6 @@
AC_CHECK_PROGS(STRIP, $STRIP "${target_alias}-strip" "${target}-strip", :)
AC_CHECK_PROGS(WINDRES, $WINDRES "${target_alias}-windres" "${target}-windres", :)
AC_DEFINE(CROSS_COMPILE)
@@ -72,7 +72,7 @@ Index: git/configure.in
- AC_PROG_CC
- AC_PROG_CXX
- AC_PROG_RANLIB
-- AC_PATH_PROGS(AS, $AS as, $CC)
+- MOZ_PATH_PROGS(AS, $AS as, $CC)
- AC_CHECK_PROGS(AR, ar, :)
- AC_CHECK_PROGS(LD, ld, :)
- AC_CHECK_PROGS(STRIP, strip, :)
@@ -99,11 +99,10 @@ Index: git/configure.in
- HOST_AR="$AR"
- fi
-fi
-+
GNU_AS=
GNU_LD=
-@@ -1467,6 +1438,7 @@
+@@ -1569,6 +1539,7 @@
'
dnl test that the macros actually work:
@@ -111,7 +110,7 @@ Index: git/configure.in
AC_MSG_CHECKING(that static assertion macros used in autoconf tests work)
AC_CACHE_VAL(ac_cv_static_assertion_macros_work,
[AC_LANG_SAVE
-@@ -2625,9 +2597,13 @@
+@@ -2730,9 +2701,13 @@
AC_LANG_C
AC_HEADER_STDC
AC_C_CONST
@@ -125,7 +124,7 @@ Index: git/configure.in
AC_TYPE_SIZE_T
AC_STRUCT_ST_BLKSIZE
AC_MSG_CHECKING(for siginfo_t)
-@@ -3009,19 +2985,9 @@
+@@ -3115,19 +3090,9 @@
dnl We don't want to link against libm or libpthread on Darwin since
dnl they both are just symlinks to libSystem and explicitly linking
dnl against libSystem causes issues when debugging (see bug 299601).
@@ -146,7 +145,7 @@ Index: git/configure.in
_SAVE_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS -D_GNU_SOURCE"
-@@ -6763,18 +6729,13 @@
+@@ -6931,18 +6896,13 @@
# Demangle only for debug or trace-malloc builds
MOZ_DEMANGLE_SYMBOLS=
@@ -167,7 +166,7 @@ Index: git/configure.in
dnl ========================================================
dnl =
-@@ -7288,10 +7249,7 @@
+@@ -7454,10 +7414,7 @@
dnl if no gtk/libIDL1 or gtk2/libIDL2 combination was found, fall back
dnl to either libIDL1 or libIDL2.
if test -z "$_LIBIDL_FOUND"; then
@@ -178,19 +177,21 @@ Index: git/configure.in
fi
dnl
dnl If we don't have a libIDL config program & not cross-compiling,
-@@ -7364,11 +7322,7 @@
+@@ -7529,13 +7486,7 @@
+ fi
if test -z "$SKIP_PATH_CHECKS"; then
- if test -z "${GLIB_CFLAGS}" || test -z "${GLIB_LIBS}" ; then
+-if test -z "${GLIB_CFLAGS}" || test -z "${GLIB_LIBS}" ; then
- if test "$MOZ_ENABLE_GTK2" || test "$USE_ELF_DYNSTR_GC" || test "$MOZ_ENABLE_HEADLESS"; then
PKG_CHECK_MODULES(GLIB, glib-2.0 >= 1.3.7 gobject-2.0)
- else
- AM_PATH_GLIB(${GLIB_VERSION})
- fi
- fi
+-fi
fi
-@@ -8214,10 +8168,7 @@
+ if test -z "${GLIB_GMODULE_LIBS}" -a -n "${GLIB_CONFIG}"; then
+@@ -8346,10 +8297,7 @@
HAVE_WCRTOMB
"
@@ -202,7 +203,7 @@ Index: git/configure.in
)
# Save the defines header file before autoconf removes it.
-@@ -8276,28 +8227,9 @@
+@@ -8408,31 +8356,11 @@
dnl To add new Makefiles, edit allmakefiles.sh.
dnl allmakefiles.sh sets the variable, MAKEFILES.
. ${srcdir}/allmakefiles.sh
@@ -225,14 +226,16 @@ Index: git/configure.in
-. ./conftest.sh
-rm conftest.sh
--echo $MAKEFILES > unallmakefiles
+ echo $MAKEFILES > unallmakefiles
--AC_OUTPUT($MAKEFILES)
-+echo $MAKEFILES > unallmakefiles
+ mv -f config/autoconf.mk config/autoconf.mk.orig 2> /dev/null
+-AC_OUTPUT($MAKEFILES)
+-
dnl Prevent the regeneration of cairo-features.h forcing rebuilds of gfx stuff
if test "$CAIRO_FEATURES_H"; then
-@@ -8323,14 +8255,14 @@
+ if cmp -s $CAIRO_FEATURES_H "$CAIRO_FEATURES_H".orig; then
+@@ -8458,14 +8386,14 @@
HOST_LDFLAGS="$_SUBDIR_HOST_LDFLAGS"
RC=
@@ -250,7 +253,7 @@ Index: git/configure.in
if test -z "$MOZ_DEBUG"; then
ac_configure_args="$ac_configure_args --disable-debug"
fi
-@@ -8346,8 +8278,7 @@
+@@ -8481,8 +8409,7 @@
if test -n "$USE_ARM_KUSER"; then
ac_configure_args="$ac_configure_args --with-arm-kuser"
fi
@@ -260,7 +263,7 @@ Index: git/configure.in
fi
if test -z "$MOZ_NATIVE_NSPR"; then
-@@ -8364,7 +8295,6 @@
+@@ -8499,7 +8426,6 @@
# Run the SpiderMonkey 'configure' script.
dist=$MOZ_BUILD_ROOT/dist
@@ -268,7 +271,7 @@ Index: git/configure.in
ac_configure_args="$ac_configure_args --enable-threadsafe"
if test -z "$MOZ_NATIVE_NSPR"; then
ac_configure_args="$ac_configure_args --with-nspr-cflags='$NSPR_CFLAGS'"
-@@ -8378,7 +8308,11 @@
+@@ -8513,11 +8439,14 @@
if test "$MOZ_MEMORY"; then
ac_configure_args="$ac_configure_args --enable-jemalloc"
fi
@@ -277,15 +280,18 @@ Index: git/configure.in
+AC_CONFIG_SUBDIRS(js/src)
fi # COMPILE_ENVIRONMENT && !LIBXUL_SDK_DIR
-+
+
+m4_pattern_allow(AS_BIN)
+
+AC_OUTPUT($MAKEFILES)
+
-Index: git/js/src/configure.in
+ dnl Prevent the regeneration of autoconf.mk forcing rebuilds of the world
+ dnl Needs to be at the end to respect possible changes from NSPR configure
+ if cmp -s config/autoconf.mk config/autoconf.mk.orig; then
+Index: offscreen/js/src/configure.in
===================================================================
---- git.orig/js/src/configure.in 2009-01-27 16:11:53.000000000 +0000
-+++ git/js/src/configure.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/js/src/configure.in 2009-04-16 22:51:49.000000000 +0100
++++ offscreen/js/src/configure.in 2009-04-16 23:06:42.000000000 +0100
@@ -62,7 +62,6 @@
AC_PREREQ(2.13)
@@ -313,7 +319,7 @@ Index: git/js/src/configure.in
dnl Set various checks
dnl ========================================================
MISSING_X=
-@@ -201,7 +205,7 @@
+@@ -200,7 +204,7 @@
if test "$COMPILE_ENVIRONMENT"; then
@@ -322,7 +328,7 @@ Index: git/js/src/configure.in
echo "cross compiling from $host to $target"
_SAVE_CC="$CC"
-@@ -238,7 +242,7 @@
+@@ -237,7 +241,7 @@
AC_MSG_CHECKING([whether the host c compiler ($HOST_CC $HOST_CFLAGS $HOST_LDFLAGS) works])
AC_TRY_COMPILE([], [return(0);],
@@ -331,7 +337,7 @@ Index: git/js/src/configure.in
AC_MSG_ERROR([installation or configuration problem: host compiler $HOST_CC cannot create executables.]) )
CC="$HOST_CXX"
-@@ -246,7 +250,7 @@
+@@ -245,7 +249,7 @@
AC_MSG_CHECKING([whether the host c++ compiler ($HOST_CXX $HOST_CXXFLAGS $HOST_LDFLAGS) works])
AC_TRY_COMPILE([], [return(0);],
@@ -340,7 +346,7 @@ Index: git/js/src/configure.in
AC_MSG_ERROR([installation or configuration problem: host compiler $HOST_CXX cannot create executables.]) )
CC=$_SAVE_CC
-@@ -267,7 +271,7 @@
+@@ -266,7 +270,7 @@
;;
esac
@@ -349,7 +355,7 @@ Index: git/js/src/configure.in
unset ac_cv_prog_CC
AC_PROG_CC
AC_CHECK_PROGS(CXX, $CXX "${target_alias}-g++" "${target}-g++", :)
-@@ -297,37 +301,6 @@
+@@ -296,37 +300,6 @@
dnl able to run ppc code in a translated environment, making a cross
dnl compiler appear native. So we override that here.
cross_compiling=yes
@@ -357,7 +363,7 @@ Index: git/js/src/configure.in
- AC_PROG_CC
- AC_PROG_CXX
- AC_PROG_RANLIB
-- AC_PATH_PROGS(AS, $AS as, $CC)
+- MOZ_PATH_PROGS(AS, $AS as, $CC)
- AC_CHECK_PROGS(AR, ar, :)
- AC_CHECK_PROGS(LD, ld, :)
- AC_CHECK_PROGS(STRIP, strip, :)
@@ -387,7 +393,7 @@ Index: git/js/src/configure.in
GNU_AS=
GNU_LD=
-@@ -1396,6 +1369,8 @@
+@@ -1435,6 +1408,8 @@
fi # GNU_CC
fi # COMPILE_ENVIRONMENT
@@ -396,7 +402,7 @@ Index: git/js/src/configure.in
dnl =================================================================
dnl Set up and test static assertion macros used to avoid AC_TRY_RUN,
dnl which is bad when cross compiling.
-@@ -2524,9 +2499,13 @@
+@@ -2565,9 +2540,13 @@
AC_LANG_C
AC_HEADER_STDC
AC_C_CONST
@@ -410,7 +416,7 @@ Index: git/js/src/configure.in
AC_TYPE_SIZE_T
AC_STRUCT_ST_BLKSIZE
AC_MSG_CHECKING(for siginfo_t)
-@@ -2551,7 +2530,8 @@
+@@ -2592,7 +2571,8 @@
AC_CHECK_HEADER(stdint.h)
if test "$ac_cv_header_stdint_h" = yes; then
@@ -420,7 +426,7 @@ Index: git/js/src/configure.in
else
dnl We'll figure them out for ourselves. List more likely types
dnl earlier. If we ever really encounter a size for which none of
-@@ -2937,10 +2917,7 @@
+@@ -2990,10 +2970,7 @@
;;
*)
AC_CHECK_LIB(m, atan)
@@ -432,7 +438,7 @@ Index: git/js/src/configure.in
;;
esac
-@@ -3839,6 +3816,7 @@
+@@ -3908,6 +3885,7 @@
[ --with-nspr-libs=LIBS Pass LIBS to LD when linking code that uses NSPR.
See --with-nspr-cflags for more details.],
NSPR_LIBS=$withval)
@@ -440,7 +446,7 @@ Index: git/js/src/configure.in
AC_SUBST(NSPR_CFLAGS)
AC_SUBST(NSPR_LIBS)
-@@ -4467,18 +4445,11 @@
+@@ -4542,18 +4520,11 @@
# Demangle only for debug or trace-malloc builds
MOZ_DEMANGLE_SYMBOLS=
@@ -459,7 +465,7 @@ Index: git/js/src/configure.in
dnl ========================================================
dnl =
-@@ -5161,6 +5132,8 @@
+@@ -5256,6 +5227,8 @@
done
AC_SUBST(LIBS_PATH)
@@ -468,15 +474,15 @@ Index: git/js/src/configure.in
dnl ========================================================
dnl JavaScript shell
dnl ========================================================
-@@ -5292,3 +5265,4 @@
+@@ -5396,3 +5369,4 @@
# 'js-config' in Makefile.in.
AC_MSG_RESULT(invoking make to create js-config script)
$MAKE js-config
+
-Index: git/nsprpub/configure.in
+Index: offscreen/nsprpub/configure.in
===================================================================
---- git.orig/nsprpub/configure.in 2009-01-27 16:12:40.000000000 +0000
-+++ git/nsprpub/configure.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/nsprpub/configure.in 2009-04-16 18:09:00.000000000 +0100
++++ offscreen/nsprpub/configure.in 2009-04-16 23:06:42.000000000 +0100
@@ -42,7 +42,6 @@
AC_PREREQ(2.12)
AC_INIT(config/libc_r.h)
@@ -556,7 +562,7 @@ Index: git/nsprpub/configure.in
dnl ========================================================
dnl Check for gcc -pipe support
-@@ -2254,10 +2226,7 @@
+@@ -2249,10 +2221,7 @@
*-darwin*|*-beos*)
;;
*)
@@ -568,7 +574,7 @@ Index: git/nsprpub/configure.in
esac
-@@ -2874,6 +2843,8 @@
+@@ -2869,6 +2838,8 @@
dnl pr/tests/w16gui/Makefile
dnl tools/Makefile
@@ -577,17 +583,17 @@ Index: git/nsprpub/configure.in
if test -z "$USE_PTHREADS" && test -z "$USE_BTHREADS"; then
MAKEFILES="$MAKEFILES pr/src/threads/combined/Makefile"
elif test -n "$USE_PTHREADS"; then
-@@ -2889,3 +2860,5 @@
+@@ -2884,3 +2855,5 @@
echo $MAKEFILES > unallmakefiles
AC_OUTPUT([$MAKEFILES], [chmod +x config/nspr-config])
+
+
-Index: git/toolkit/toolkit-makefiles.sh
+Index: offscreen/toolkit/toolkit-makefiles.sh
===================================================================
---- git.orig/toolkit/toolkit-makefiles.sh 2009-01-27 16:13:11.000000000 +0000
-+++ git/toolkit/toolkit-makefiles.sh 2009-01-27 16:16:45.000000000 +0000
-@@ -632,7 +632,6 @@
+--- offscreen.orig/toolkit/toolkit-makefiles.sh 2009-04-16 22:51:50.000000000 +0100
++++ offscreen/toolkit/toolkit-makefiles.sh 2009-04-16 23:06:42.000000000 +0100
+@@ -628,7 +628,6 @@
toolkit/crashreporter/client/Makefile
toolkit/crashreporter/google-breakpad/src/client/Makefile
toolkit/crashreporter/google-breakpad/src/client/mac/handler/Makefile
@@ -595,15 +601,15 @@ Index: git/toolkit/toolkit-makefiles.sh
toolkit/crashreporter/google-breakpad/src/client/windows/handler/Makefile
toolkit/crashreporter/google-breakpad/src/client/windows/sender/Makefile
toolkit/crashreporter/google-breakpad/src/common/Makefile
-Index: git/js/src/Makefile.in
+Index: offscreen/js/src/Makefile.in
===================================================================
---- git.orig/js/src/Makefile.in 2009-01-27 16:11:52.000000000 +0000
-+++ git/js/src/Makefile.in 2009-01-27 16:16:45.000000000 +0000
-@@ -513,20 +513,8 @@
+--- offscreen.orig/js/src/Makefile.in 2009-04-16 18:07:56.000000000 +0100
++++ offscreen/js/src/Makefile.in 2009-04-16 23:06:42.000000000 +0100
+@@ -507,20 +507,8 @@
export:: jsautocfg.h
--ifeq (,$(CROSS_COMPILE)$(filter-out WINNT,$(OS_ARCH)))
+-ifeq (,$(CROSS_COMPILE)$(GNU_CC)$(filter-out WINNT,$(OS_ARCH)))
jsautocfg.h:
touch $@
-else
@@ -620,7 +626,7 @@ Index: git/js/src/Makefile.in
# jscpucfg is a strange target
# Needs to be built with the host compiler but needs to include
-@@ -556,7 +544,7 @@
+@@ -550,7 +538,7 @@
echo no need to build jscpucfg $<
else
jscpucfg$(HOST_BIN_SUFFIX): jscpucfg.cpp Makefile.in
@@ -629,10 +635,10 @@ Index: git/js/src/Makefile.in
endif
endif
-Index: git/js/src/xpconnect/loader/mozJSComponentLoader.cpp
+Index: offscreen/js/src/xpconnect/loader/mozJSComponentLoader.cpp
===================================================================
---- git.orig/js/src/xpconnect/loader/mozJSComponentLoader.cpp 2009-01-27 16:11:55.000000000 +0000
-+++ git/js/src/xpconnect/loader/mozJSComponentLoader.cpp 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/js/src/xpconnect/loader/mozJSComponentLoader.cpp 2009-04-16 18:08:00.000000000 +0100
++++ offscreen/js/src/xpconnect/loader/mozJSComponentLoader.cpp 2009-04-16 23:06:42.000000000 +0100
@@ -47,6 +47,8 @@
#include <stdarg.h>
@@ -642,10 +648,10 @@ Index: git/js/src/xpconnect/loader/mozJSComponentLoader.cpp
#include "prlog.h"
#include "nsCOMPtr.h"
-Index: git/js/src/xpconnect/loader/mozJSSubScriptLoader.cpp
+Index: offscreen/js/src/xpconnect/loader/mozJSSubScriptLoader.cpp
===================================================================
---- git.orig/js/src/xpconnect/loader/mozJSSubScriptLoader.cpp 2009-01-27 16:11:55.000000000 +0000
-+++ git/js/src/xpconnect/loader/mozJSSubScriptLoader.cpp 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/js/src/xpconnect/loader/mozJSSubScriptLoader.cpp 2009-04-16 18:08:00.000000000 +0100
++++ offscreen/js/src/xpconnect/loader/mozJSSubScriptLoader.cpp 2009-04-16 23:06:42.000000000 +0100
@@ -39,6 +39,8 @@
*
* ***** END LICENSE BLOCK ***** */
@@ -655,10 +661,10 @@ Index: git/js/src/xpconnect/loader/mozJSSubScriptLoader.cpp
#if !defined(XPCONNECT_STANDALONE) && !defined(NO_SUBSCRIPT_LOADER)
#include "mozJSSubScriptLoader.h"
-Index: git/modules/lcms/include/icc34.h
+Index: offscreen/modules/lcms/include/icc34.h
===================================================================
---- git.orig/modules/lcms/include/icc34.h 2009-01-27 16:12:31.000000000 +0000
-+++ git/modules/lcms/include/icc34.h 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/modules/lcms/include/icc34.h 2009-04-16 18:08:47.000000000 +0100
++++ offscreen/modules/lcms/include/icc34.h 2009-04-16 23:06:42.000000000 +0100
@@ -144,7 +144,7 @@
*/
@@ -668,10 +674,10 @@ Index: git/modules/lcms/include/icc34.h
/*
June 9, 2003, Adapted for use with configure by Bob Friesenhahn
Added the stupid check for autoconf by Marti Maria.
-Index: git/toolkit/mozapps/update/src/updater/Makefile.in
+Index: offscreen/toolkit/mozapps/update/src/updater/Makefile.in
===================================================================
---- git.orig/toolkit/mozapps/update/src/updater/Makefile.in 2009-01-27 16:13:09.000000000 +0000
-+++ git/toolkit/mozapps/update/src/updater/Makefile.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/toolkit/mozapps/update/src/updater/Makefile.in 2009-04-16 18:09:35.000000000 +0100
++++ offscreen/toolkit/mozapps/update/src/updater/Makefile.in 2009-04-16 23:06:42.000000000 +0100
@@ -59,7 +59,7 @@
LIBS += \
@@ -681,10 +687,10 @@ Index: git/toolkit/mozapps/update/src/updater/Makefile.in
$(NULL)
ifeq ($(OS_ARCH),WINNT)
-Index: git/xpcom/sample/program/Makefile.in
+Index: offscreen/xpcom/sample/program/Makefile.in
===================================================================
---- git.orig/xpcom/sample/program/Makefile.in 2009-01-27 16:13:18.000000000 +0000
-+++ git/xpcom/sample/program/Makefile.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/xpcom/sample/program/Makefile.in 2009-04-16 18:09:47.000000000 +0100
++++ offscreen/xpcom/sample/program/Makefile.in 2009-04-16 23:06:42.000000000 +0100
@@ -57,7 +57,7 @@
# that the application be linked against the XPCOM dynamic library or the NSPR
# dynamic libraries.
@@ -694,10 +700,10 @@ Index: git/xpcom/sample/program/Makefile.in
$(NULL)
# Need to link with CoreFoundation on Mac
-Index: git/xpcom/tools/registry/Makefile.in
+Index: offscreen/xpcom/tools/registry/Makefile.in
===================================================================
---- git.orig/xpcom/tools/registry/Makefile.in 2009-01-27 16:13:18.000000000 +0000
-+++ git/xpcom/tools/registry/Makefile.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/xpcom/tools/registry/Makefile.in 2009-04-16 18:09:48.000000000 +0100
++++ offscreen/xpcom/tools/registry/Makefile.in 2009-04-16 23:06:42.000000000 +0100
@@ -54,7 +54,7 @@
SIMPLE_PROGRAMS = $(CPPSRCS:.cpp=$(BIN_SUFFIX))
@@ -707,10 +713,10 @@ Index: git/xpcom/tools/registry/Makefile.in
$(NULL)
# Need to link with CoreFoundation on Mac
-Index: git/xulrunner/app/Makefile.in
+Index: offscreen/xulrunner/app/Makefile.in
===================================================================
---- git.orig/xulrunner/app/Makefile.in 2009-01-27 16:13:19.000000000 +0000
-+++ git/xulrunner/app/Makefile.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/xulrunner/app/Makefile.in 2009-04-16 18:09:50.000000000 +0100
++++ offscreen/xulrunner/app/Makefile.in 2009-04-16 23:06:42.000000000 +0100
@@ -180,7 +180,7 @@
RCFLAGS += -DXULRUNNER_ICO=\"$(DIST)/branding/xulrunner.ico\" -DDOCUMENT_ICO=\"$(DIST)/branding/document.ico\"
endif
@@ -720,10 +726,10 @@ Index: git/xulrunner/app/Makefile.in
include $(topsrcdir)/config/rules.mk
-Index: git/xulrunner/stub/Makefile.in
+Index: offscreen/xulrunner/stub/Makefile.in
===================================================================
---- git.orig/xulrunner/stub/Makefile.in 2009-01-27 16:13:20.000000000 +0000
-+++ git/xulrunner/stub/Makefile.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/xulrunner/stub/Makefile.in 2009-04-16 18:09:50.000000000 +0100
++++ offscreen/xulrunner/stub/Makefile.in 2009-04-16 23:06:42.000000000 +0100
@@ -101,7 +101,7 @@
endif
endif
@@ -733,10 +739,10 @@ Index: git/xulrunner/stub/Makefile.in
include $(topsrcdir)/config/rules.mk
-Index: git/modules/plugin/test/testplugin/Makefile.in
+Index: offscreen/modules/plugin/test/testplugin/Makefile.in
===================================================================
---- git.orig/modules/plugin/test/testplugin/Makefile.in 2009-01-27 16:12:37.000000000 +0000
-+++ git/modules/plugin/test/testplugin/Makefile.in 2009-01-27 16:16:45.000000000 +0000
+--- offscreen.orig/modules/plugin/test/testplugin/Makefile.in 2009-04-16 18:08:56.000000000 +0100
++++ offscreen/modules/plugin/test/testplugin/Makefile.in 2009-04-16 23:06:42.000000000 +0100
@@ -63,9 +63,7 @@
CMMSRCS = nptest_macosx.mm
endif
@@ -745,12 +751,12 @@ Index: git/modules/plugin/test/testplugin/Makefile.in
CPPSRCS += nptest_gtk2.cpp
-endif
- ifeq ($(MOZ_WIDGET_TOOLKIT),windows)
- CPPSRCS += nptest_windows.cpp
-Index: git/xulrunner/installer/libxul-unstable.pc.in
+ ifeq ($(MOZ_WIDGET_TOOLKIT),qt)
+ CPPSRCS += nptest_qt.cpp
+Index: offscreen/xulrunner/installer/libxul-unstable.pc.in
===================================================================
---- git.orig/xulrunner/installer/libxul-unstable.pc.in 2009-01-27 18:07:55.000000000 +0000
-+++ git/xulrunner/installer/libxul-unstable.pc.in 2009-01-28 15:19:22.000000000 +0000
+--- offscreen.orig/xulrunner/installer/libxul-unstable.pc.in 2009-04-16 18:09:50.000000000 +0100
++++ offscreen/xulrunner/installer/libxul-unstable.pc.in 2009-04-16 23:06:42.000000000 +0100
@@ -8,5 +8,5 @@
Description: The Mozilla Runtime and Embedding Engine (unstable API)
Version: %MOZILLA_VERSION%
@@ -758,3 +764,12 @@ Index: git/xulrunner/installer/libxul-unstable.pc.in
-Libs: -L${sdkdir}/lib -lxpcomglue_s -lxul -lxpcom
+Libs: -L${sdkdir}/lib -lxpcomglue_s -lxul -lxpcom -lsmime3 -lnss3 -lssl3 -lnssutil3 -L${sdkdir}/../xulrunner-1.9.2a1pre
Cflags: -I${includedir}/${includetype} %WCHAR_CFLAGS%
+Index: offscreen/js/src/aclocal.m4
+===================================================================
+--- offscreen.orig/js/src/aclocal.m4 2009-04-16 18:07:56.000000000 +0100
++++ offscreen/js/src/aclocal.m4 2009-04-16 23:06:42.000000000 +0100
+@@ -9,4 +9,3 @@
+ builtin(include, build/autoconf/moznbytetype.m4)dnl
+ builtin(include, build/autoconf/mozprog.m4)dnl
+
+-MOZ_PROG_CHECKMSYS()
diff --git a/meta/packages/mozilla-headless/mozilla-headless_git.bb b/meta/packages/mozilla-headless/mozilla-headless_git.bb
index 9a016ad69..7e59ed35f 100644
--- a/meta/packages/mozilla-headless/mozilla-headless_git.bb
+++ b/meta/packages/mozilla-headless/mozilla-headless_git.bb
@@ -1,14 +1,14 @@
-SRC_URI = "git://git.o-hand.com/${PN}.git;protocol=git \
+SRC_URI = "hg://hg.mozilla.org/incubator;protocol=http;rev=c6fe23d41598;module=offscreen \
file://configurefix.patch;patch=1 \
file://jsautocfg.h \
file://mozconfig"
-PV = "0.0+git${SRCREV}"
-PR = "r8"
+PV = "0.0+hg-1.0+c6fe23d41598"
+PR = "r0"
-S = "${WORKDIR}/git"
+S = "${WORKDIR}/offscreen"
-DEPENDS = "gconf gnome-vfs pango dbus-glib alsa-lib libidl-native sqlite3"
+DEPENDS = "gconf gnome-vfs pango dbus-glib alsa-lib libidl-native sqlite3 libidl"
FILES_${PN} += "${libdir}/xulrunner-1.9.2a1pre"
FILES_${PN}-dev += "${libdir}/xulrunner-devel-1.9.2a1pre"
diff --git a/meta/packages/gnome/metacity-clutter/crosscompile.patch b/meta/packages/mutter/mutter/crosscompile.patch
index b27383b09..b27383b09 100644
--- a/meta/packages/gnome/metacity-clutter/crosscompile.patch
+++ b/meta/packages/mutter/mutter/crosscompile.patch
diff --git a/meta/packages/gnome/metacity-clutter/fix_pkgconfig.patch b/meta/packages/mutter/mutter/fix_pkgconfig.patch
index 3714383bc..3714383bc 100644
--- a/meta/packages/gnome/metacity-clutter/fix_pkgconfig.patch
+++ b/meta/packages/mutter/mutter/fix_pkgconfig.patch
diff --git a/meta/packages/gnome/metacity-clutter/nodocs.patch b/meta/packages/mutter/mutter/nodocs.patch
index 98f43be5e..98f43be5e 100644
--- a/meta/packages/gnome/metacity-clutter/nodocs.patch
+++ b/meta/packages/mutter/mutter/nodocs.patch
diff --git a/meta/packages/gnome/metacity-clutter_git.bb b/meta/packages/mutter/mutter_git.bb
index 00d9a9427..7d3a3b8c4 100644
--- a/meta/packages/gnome/metacity-clutter_git.bb
+++ b/meta/packages/mutter/mutter_git.bb
@@ -1,12 +1,12 @@
SECTION = "x11/wm"
-DESCRIPTION = "Metacity is the boring window manager for the adult in you."
+DESCRIPTION = "Metacity is the boring window manager for the adult in you. Mutter is metacity + clutter."
LICENSE = "GPL"
-DEPENDS = "startup-notification gtk+ gconf clutter-0.8 gdk-pixbuf-csource-native intltool glib-2.0-native"
+DEPENDS = "startup-notification gtk+ gconf clutter gdk-pixbuf-csource-native intltool glib-2.0-native"
PR = "r8"
PV = "2.25.1+git${SRCREV}"
inherit gnome update-alternatives
-SRC_URI = "git://git.o-hand.com/metacity-clutter.git;protocol=git;branch=clutter \
+SRC_URI = "git://git.gnome.org/mutter.git;protocol=git;branch=master \
file://nodocs.patch;patch=1 \
file://crosscompile.patch;patch=1 \
file://fix_pkgconfig.patch;patch=1"
diff --git a/meta/packages/sqlite/sqlite3_3.6.7.bb b/meta/packages/sqlite/sqlite3_3.6.10.bb
index 5075dd35b..5075dd35b 100644
--- a/meta/packages/sqlite/sqlite3_3.6.7.bb
+++ b/meta/packages/sqlite/sqlite3_3.6.10.bb
diff --git a/meta/packages/tasks/task-poky-x11-netbook.bb b/meta/packages/tasks/task-poky-x11-netbook.bb
index 61bd5833a..a5ebb5407 100644
--- a/meta/packages/tasks/task-poky-x11-netbook.bb
+++ b/meta/packages/tasks/task-poky-x11-netbook.bb
@@ -3,7 +3,7 @@
#
DESCRIPTION = "Netbook Tasks for Poky"
-PR = "r0"
+PR = "r1"
PACKAGES = "\
task-poky-x11-netbook \
@@ -17,7 +17,7 @@ ALLOW_EMPTY = "1"
NETWORK_MANAGER ?= "networkmanager-applet"
RDEPENDS_task-poky-x11-netbook = "\
- metacity-clutter \
+ mutter \
matchbox-desktop \
matchbox-session-netbook \
matchbox-config-gtk \
diff --git a/meta/packages/xorg-driver/xf86-input-evdev_2.1.1.bb b/meta/packages/xorg-driver/xf86-input-evdev_2.2.1.bb
index d1fb0f000..d1fb0f000 100644
--- a/meta/packages/xorg-driver/xf86-input-evdev_2.1.1.bb
+++ b/meta/packages/xorg-driver/xf86-input-evdev_2.2.1.bb
diff --git a/meta/packages/xorg-driver/xf86-input-synaptics_0.99.3.bb b/meta/packages/xorg-driver/xf86-input-synaptics_1.1.0.bb
index f554141c9..f554141c9 100644
--- a/meta/packages/xorg-driver/xf86-input-synaptics_0.99.3.bb
+++ b/meta/packages/xorg-driver/xf86-input-synaptics_1.1.0.bb
diff --git a/meta/packages/xorg-driver/xf86-video-intel-dri2_git.bb b/meta/packages/xorg-driver/xf86-video-intel-dri2_git.bb
deleted file mode 100644
index ca9334625..000000000
--- a/meta/packages/xorg-driver/xf86-video-intel-dri2_git.bb
+++ /dev/null
@@ -1,27 +0,0 @@
-require xf86-video-common.inc
-
-DESCRIPTION = "X.Org X server -- Intel i8xx, i9xx display driver"
-DEPENDS += "virtual/libx11 libxvmc drm dri2proto glproto \
- virtual/libgl xineramaproto libpciaccess"
-PROVIDES = "xf86-video-intel"
-
-DEFAULT_PREFERENCE = "-1"
-
-PE = "1"
-PR = "r2"
-PV = "2.4.97.0+git${SRCREV}"
-
-FILESPATH = "${FILE_DIRNAME}/xf86-video-intel"
-
-SRC_URI = "git://anongit.freedesktop.org/git/xorg/driver/xf86-video-intel;protocol=git;branch=dri2 \
- file://002_avoid_duplicate_SaveHWState.patch;patch=1 \
- file://004_reduce_driver_boottime.patch;patch=1 \
- file://005_disable_sdvo_TV_port_restoreHW.patch;patch=1 \
- file://006_disable_check_lvds_panelpower_status.patch;patch=1"
-
-S = "${WORKDIR}/git"
-
-COMPATIBLE_HOST = '(i.86.*-linux)'
-
-EXTRA_OECONF = "--enable-dri --disable-static"
-
diff --git a/meta/packages/xorg-driver/xf86-video-intel_2.6.0.bb b/meta/packages/xorg-driver/xf86-video-intel_2.7.0.bb
index 30e713538..30e713538 100644
--- a/meta/packages/xorg-driver/xf86-video-intel_2.6.0.bb
+++ b/meta/packages/xorg-driver/xf86-video-intel_2.7.0.bb
diff --git a/meta/packages/xorg-lib/libice_1.0.4.bb b/meta/packages/xorg-lib/libice_1.0.4.bb
index e4a655739..afe405a55 100644
--- a/meta/packages/xorg-lib/libice_1.0.4.bb
+++ b/meta/packages/xorg-lib/libice_1.0.4.bb
@@ -7,3 +7,5 @@ PR = "r1"
PE = "1"
XORG_PN = "libICE"
+
+BBCLASSEXTEND = "native"
diff --git a/meta/packages/xorg-lib/libsm_1.1.0.bb b/meta/packages/xorg-lib/libsm_1.1.0.bb
index 32bee5bd8..72a1c726d 100644
--- a/meta/packages/xorg-lib/libsm_1.1.0.bb
+++ b/meta/packages/xorg-lib/libsm_1.1.0.bb
@@ -6,3 +6,5 @@ PR = "r1"
PE = "1"
XORG_PN = "libSM"
+
+BBCLASSEXTEND = "native"
diff --git a/meta/packages/xorg-lib/libxt_1.0.5.bb b/meta/packages/xorg-lib/libxt_1.0.5.bb
index 504779933..b0b983f67 100644
--- a/meta/packages/xorg-lib/libxt_1.0.5.bb
+++ b/meta/packages/xorg-lib/libxt_1.0.5.bb
@@ -8,6 +8,8 @@ PE = "1"
XORG_PN = "libXt"
+BBCLASSEXTEND = "native"
+
EXTRA_OECONF += "--disable-install-makestrs --disable-xkb"
do_compile() {
diff --git a/meta/packages/xorg-xserver/xserver-xf86-dri-lite.inc b/meta/packages/xorg-xserver/xserver-xf86-dri-lite.inc
index 227be4ec0..3ffd4a4d1 100644
--- a/meta/packages/xorg-xserver/xserver-xf86-dri-lite.inc
+++ b/meta/packages/xorg-xserver/xserver-xf86-dri-lite.inc
@@ -2,7 +2,7 @@ require xserver-xf86-common.inc
PROTO_DEPS = "randrproto renderproto fixesproto damageproto xextproto xproto xf86dgaproto xf86miscproto xf86rushproto xf86vidmodeproto xf86bigfontproto compositeproto recordproto resourceproto videoproto scrnsaverproto evieext trapproto xineramaproto fontsproto kbproto inputproto bigreqsproto xcmiscproto glproto"
-LIB_DEPS = "pixman mesa-dri libxfont xtrans libxau libxext libxdmcp libdrm libxkbfile libpciaccess openssl"
+LIB_DEPS = "pixman virtual/libgl libxfont xtrans libxau libxext libxdmcp libdrm libxkbfile libpciaccess openssl"
DEPENDS = "${PROTO_DEPS} ${LIB_DEPS}"