summaryrefslogtreecommitdiff
path: root/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch')
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch106773
1 files changed, 106773 insertions, 0 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch
new file mode 100644
index 000000000..65048c8ff
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch
@@ -0,0 +1,106773 @@
+From e6fbc1d68e24c1526e9e30d1d2381a77697f3b1d Mon Sep 17 00:00:00 2001
+From: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Date: Thu, 13 May 2010 14:50:27 -0700
+Subject: [PATCH] IMG graphics driver consolidation patch
+
+Signed-off-by: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Patch-mainline: 2.6.35?
+---
+ drivers/gpu/drm/Kconfig | 2 +
+ drivers/gpu/drm/Makefile | 3 +-
+ drivers/gpu/drm/drm_crtc.c | 2 +
+ drivers/gpu/drm/drm_drv.c | 11 +-
+ drivers/gpu/drm/drm_global.c | 107 +
+ drivers/gpu/drm/drm_irq.c | 27 +
+ drivers/gpu/drm/mrst/Kconfig | 220 ++
+ drivers/gpu/drm/mrst/Makefile | 169 +
+ drivers/gpu/drm/mrst/drv/lnc_topaz.c | 714 ++++
+ drivers/gpu/drm/mrst/drv/lnc_topaz.h | 925 ++++++
+ drivers/gpu/drm/mrst/drv/lnc_topazinit.c | 2051 ++++++++++++
+ drivers/gpu/drm/mrst/drv/msvdx_power.c | 164 +
+ drivers/gpu/drm/mrst/drv/msvdx_power.h | 48 +
+ drivers/gpu/drm/mrst/drv/psb_bl.c | 260 ++
+ drivers/gpu/drm/mrst/drv/psb_buffer.c | 379 +++
+ drivers/gpu/drm/mrst/drv/psb_dpst.c | 254 ++
+ drivers/gpu/drm/mrst/drv/psb_dpst.h | 98 +
+ drivers/gpu/drm/mrst/drv/psb_drm.h | 634 ++++
+ drivers/gpu/drm/mrst/drv/psb_drv.c | 2218 +++++++++++++
+ drivers/gpu/drm/mrst/drv/psb_drv.h | 1025 ++++++
+ drivers/gpu/drm/mrst/drv/psb_fb.c | 1817 +++++++++++
+ drivers/gpu/drm/mrst/drv/psb_fb.h | 49 +
+ drivers/gpu/drm/mrst/drv/psb_fence.c | 158 +
+ drivers/gpu/drm/mrst/drv/psb_gtt.c | 1040 ++++++
+ drivers/gpu/drm/mrst/drv/psb_gtt.h | 111 +
+ drivers/gpu/drm/mrst/drv/psb_hotplug.c | 425 +++
+ drivers/gpu/drm/mrst/drv/psb_hotplug.h | 90 +
+ drivers/gpu/drm/mrst/drv/psb_intel_bios.c | 305 ++
+ drivers/gpu/drm/mrst/drv/psb_intel_bios.h | 430 +++
+ drivers/gpu/drm/mrst/drv/psb_intel_display.c | 2538 +++++++++++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_display.h | 25 +
+ drivers/gpu/drm/mrst/drv/psb_intel_drv.h | 283 ++
+ drivers/gpu/drm/mrst/drv/psb_intel_dsi.c | 2450 ++++++++++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c | 996 ++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_i2c.c | 172 +
+ drivers/gpu/drm/mrst/drv/psb_intel_lvds.c | 1385 ++++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_modes.c | 77 +
+ drivers/gpu/drm/mrst/drv/psb_intel_reg.h | 1099 +++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c | 1408 ++++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h | 338 ++
+ drivers/gpu/drm/mrst/drv/psb_mmu.c | 1010 ++++++
+ drivers/gpu/drm/mrst/drv/psb_msvdx.c | 1063 ++++++
+ drivers/gpu/drm/mrst/drv/psb_msvdx.h | 610 ++++
+ drivers/gpu/drm/mrst/drv/psb_msvdxinit.c | 770 +++++
+ drivers/gpu/drm/mrst/drv/psb_pvr_glue.c | 74 +
+ drivers/gpu/drm/mrst/drv/psb_pvr_glue.h | 26 +
+ drivers/gpu/drm/mrst/drv/psb_reg.h | 570 ++++
+ drivers/gpu/drm/mrst/drv/psb_reset.c | 209 ++
+ drivers/gpu/drm/mrst/drv/psb_schedule.c | 70 +
+ drivers/gpu/drm/mrst/drv/psb_schedule.h | 81 +
+ drivers/gpu/drm/mrst/drv/psb_setup.c | 35 +
+ drivers/gpu/drm/mrst/drv/psb_sgx.c | 929 ++++++
+ drivers/gpu/drm/mrst/drv/psb_sgx.h | 32 +
+ drivers/gpu/drm/mrst/drv/psb_socket.c | 376 +++
+ drivers/gpu/drm/mrst/drv/psb_ttm_glue.c | 344 ++
+ drivers/gpu/drm/mrst/drv/psb_umevents.c | 485 +++
+ drivers/gpu/drm/mrst/drv/psb_umevents.h | 154 +
+ drivers/gpu/drm/mrst/drv/topaz_power.c | 173 +
+ drivers/gpu/drm/mrst/drv/topaz_power.h | 53 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c | 144 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c | 1729 ++++++++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h | 573 ++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h | 862 +++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c | 546 ++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c | 429 +++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c | 108 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h | 103 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c | 607 ++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h | 272 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h | 302 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c | 238 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h | 140 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c | 155 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h | 176 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c | 228 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h | 147 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_object.c | 440 +++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_object.h | 262 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c | 164 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h | 34 +
+ .../gpu/drm/mrst/drv/ttm/ttm_placement_common.h | 91 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c | 468 +++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h | 252 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h | 67 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c | 653 ++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h | 72 +
+ drivers/gpu/drm/mrst/pvr/COPYING | 351 ++
+ drivers/gpu/drm/mrst/pvr/INSTALL | 76 +
+ drivers/gpu/drm/mrst/pvr/README | 48 +
+ drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore | 6 +
+ drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h | 298 ++
+ drivers/gpu/drm/mrst/pvr/include4/img_defs.h | 108 +
+ drivers/gpu/drm/mrst/pvr/include4/img_types.h | 128 +
+ drivers/gpu/drm/mrst/pvr/include4/ioctldef.h | 98 +
+ drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h | 99 +
+ drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h | 127 +
+ drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h | 31 +
+ drivers/gpu/drm/mrst/pvr/include4/pvrversion.h | 38 +
+ drivers/gpu/drm/mrst/pvr/include4/regpaths.h | 43 +
+ drivers/gpu/drm/mrst/pvr/include4/services.h | 872 +++++
+ drivers/gpu/drm/mrst/pvr/include4/servicesext.h | 648 ++++
+ drivers/gpu/drm/mrst/pvr/include4/sgx_options.h | 224 ++
+ drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h | 323 ++
+ drivers/gpu/drm/mrst/pvr/include4/sgxscript.h | 81 +
+ .../3rdparty/linux_framebuffer_mrst/.gitignore | 6 +
+ .../linux_framebuffer_mrst/makefile.linux.common | 41 +
+ .../3rdparty/linux_framebuffer_mrst/mrstlfb.h | 295 ++
+ .../linux_framebuffer_mrst/mrstlfb_displayclass.c | 2056 ++++++++++++
+ .../linux_framebuffer_mrst/mrstlfb_linux.c | 206 ++
+ .../services4/include/env/linux/pvr_drm_shared.h | 54 +
+ .../drm/mrst/pvr/services4/include/kernelbuffer.h | 60 +
+ .../drm/mrst/pvr/services4/include/kerneldisplay.h | 153 +
+ .../drm/mrst/pvr/services4/include/pvr_bridge.h | 1383 ++++++++
+ .../drm/mrst/pvr/services4/include/pvr_bridge_km.h | 288 ++
+ .../gpu/drm/mrst/pvr/services4/include/pvrmmap.h | 36 +
+ .../drm/mrst/pvr/services4/include/servicesint.h | 266 ++
+ .../drm/mrst/pvr/services4/include/sgx_bridge.h | 477 +++
+ .../drm/mrst/pvr/services4/include/sgx_mkif_km.h | 334 ++
+ .../gpu/drm/mrst/pvr/services4/include/sgxinfo.h | 288 ++
+ .../mrst/pvr/services4/srvkm/bridged/.gitignore | 5 +
+ .../services4/srvkm/bridged/bridged_pvr_bridge.c | 3426 ++++++++++++++++++++
+ .../services4/srvkm/bridged/bridged_pvr_bridge.h | 231 ++
+ .../pvr/services4/srvkm/bridged/bridged_support.c | 85 +
+ .../pvr/services4/srvkm/bridged/bridged_support.h | 43 +
+ .../srvkm/bridged/sgx/bridged_sgx_bridge.c | 2511 ++++++++++++++
+ .../srvkm/bridged/sgx/bridged_sgx_bridge.h | 42 +
+ .../drm/mrst/pvr/services4/srvkm/common/.gitignore | 5 +
+ .../pvr/services4/srvkm/common/buffer_manager.c | 2036 ++++++++++++
+ .../mrst/pvr/services4/srvkm/common/deviceclass.c | 1937 +++++++++++
+ .../mrst/pvr/services4/srvkm/common/devicemem.c | 1448 +++++++++
+ .../drm/mrst/pvr/services4/srvkm/common/handle.c | 1547 +++++++++
+ .../gpu/drm/mrst/pvr/services4/srvkm/common/hash.c | 463 +++
+ .../drm/mrst/pvr/services4/srvkm/common/lists.c | 99 +
+ .../gpu/drm/mrst/pvr/services4/srvkm/common/mem.c | 151 +
+ .../mrst/pvr/services4/srvkm/common/mem_debug.c | 250 ++
+ .../drm/mrst/pvr/services4/srvkm/common/metrics.c | 160 +
+ .../mrst/pvr/services4/srvkm/common/pdump_common.c | 1723 ++++++++++
+ .../drm/mrst/pvr/services4/srvkm/common/perproc.c | 283 ++
+ .../drm/mrst/pvr/services4/srvkm/common/power.c | 818 +++++
+ .../drm/mrst/pvr/services4/srvkm/common/pvrsrv.c | 1195 +++++++
+ .../drm/mrst/pvr/services4/srvkm/common/queue.c | 1137 +++++++
+ .../gpu/drm/mrst/pvr/services4/srvkm/common/ra.c | 1871 +++++++++++
+ .../drm/mrst/pvr/services4/srvkm/common/resman.c | 717 ++++
+ .../pvr/services4/srvkm/devices/sgx/.gitignore | 5 +
+ .../drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c | 2776 ++++++++++++++++
+ .../drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h | 139 +
+ .../drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c | 458 +++
+ .../services4/srvkm/devices/sgx/sgx_bridge_km.h | 147 +
+ .../pvr/services4/srvkm/devices/sgx/sgxconfig.h | 134 +
+ .../pvr/services4/srvkm/devices/sgx/sgxinfokm.h | 352 ++
+ .../mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c | 2218 +++++++++++++
+ .../mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c | 744 +++++
+ .../pvr/services4/srvkm/devices/sgx/sgxpower.c | 453 +++
+ .../pvr/services4/srvkm/devices/sgx/sgxreset.c | 489 +++
+ .../pvr/services4/srvkm/devices/sgx/sgxtransfer.c | 543 ++++
+ .../pvr/services4/srvkm/devices/sgx/sgxutils.c | 928 ++++++
+ .../pvr/services4/srvkm/devices/sgx/sgxutils.h | 99 +
+ .../mrst/pvr/services4/srvkm/env/linux/.gitignore | 5 +
+ .../mrst/pvr/services4/srvkm/env/linux/env_data.h | 66 +
+ .../pvr/services4/srvkm/env/linux/env_perproc.h | 56 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/event.c | 273 ++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/event.h | 32 +
+ .../mrst/pvr/services4/srvkm/env/linux/linkage.h | 61 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/lock.h | 32 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mm.c | 2360 ++++++++++++++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mm.h | 331 ++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mmap.c | 1148 +++++++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mmap.h | 107 +
+ .../mrst/pvr/services4/srvkm/env/linux/module.c | 765 +++++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mutex.c | 136 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mutex.h | 70 +
+ .../mrst/pvr/services4/srvkm/env/linux/mutils.c | 133 +
+ .../mrst/pvr/services4/srvkm/env/linux/mutils.h | 101 +
+ .../mrst/pvr/services4/srvkm/env/linux/osfunc.c | 2564 +++++++++++++++
+ .../mrst/pvr/services4/srvkm/env/linux/osperproc.c | 113 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/pdump.c | 662 ++++
+ .../pvr/services4/srvkm/env/linux/private_data.h | 67 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/proc.c | 970 ++++++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/proc.h | 115 +
+ .../pvr/services4/srvkm/env/linux/pvr_bridge_k.c | 651 ++++
+ .../mrst/pvr/services4/srvkm/env/linux/pvr_debug.c | 426 +++
+ .../mrst/pvr/services4/srvkm/env/linux/pvr_drm.c | 310 ++
+ .../mrst/pvr/services4/srvkm/env/linux/pvr_drm.h | 80 +
+ .../mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h | 637 ++++
+ .../drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h | 82 +
+ .../mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h | 308 ++
+ .../pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h | 163 +
+ .../drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h | 79 +
+ .../pvr/services4/srvkm/include/buffer_manager.h | 213 ++
+ .../drm/mrst/pvr/services4/srvkm/include/device.h | 278 ++
+ .../drm/mrst/pvr/services4/srvkm/include/handle.h | 382 +++
+ .../drm/mrst/pvr/services4/srvkm/include/hash.h | 73 +
+ .../drm/mrst/pvr/services4/srvkm/include/lists.h | 176 +
+ .../drm/mrst/pvr/services4/srvkm/include/metrics.h | 130 +
+ .../drm/mrst/pvr/services4/srvkm/include/osfunc.h | 487 +++
+ .../mrst/pvr/services4/srvkm/include/osperproc.h | 76 +
+ .../mrst/pvr/services4/srvkm/include/pdump_km.h | 451 +++
+ .../pvr/services4/srvkm/include/pdump_osfunc.h | 137 +
+ .../drm/mrst/pvr/services4/srvkm/include/perproc.h | 110 +
+ .../drm/mrst/pvr/services4/srvkm/include/power.h | 133 +
+ .../drm/mrst/pvr/services4/srvkm/include/queue.h | 119 +
+ .../gpu/drm/mrst/pvr/services4/srvkm/include/ra.h | 155 +
+ .../drm/mrst/pvr/services4/srvkm/include/resman.h | 113 +
+ .../pvr/services4/srvkm/include/services_headers.h | 49 +
+ .../drm/mrst/pvr/services4/srvkm/include/srvkm.h | 69 +
+ .../mrst/pvr/services4/system/include/syscommon.h | 217 ++
+ .../pvr/services4/system/moorestown/.gitignore | 5 +
+ .../pvr/services4/system/moorestown/oemfuncs.h | 72 +
+ .../pvr/services4/system/moorestown/ospm_power.c | 479 +++
+ .../pvr/services4/system/moorestown/ospm_power.h | 79 +
+ .../system/moorestown/sys_pvr_drm_export.c | 135 +
+ .../system/moorestown/sys_pvr_drm_export.h | 87 +
+ .../system/moorestown/sys_pvr_drm_import.h | 45 +
+ .../pvr/services4/system/moorestown/sysconfig.c | 1022 ++++++
+ .../pvr/services4/system/moorestown/sysconfig.h | 139 +
+ .../mrst/pvr/services4/system/moorestown/sysinfo.h | 43 +
+ .../mrst/pvr/services4/system/moorestown/sysirq.c | 565 ++++
+ .../mrst/pvr/services4/system/moorestown/sysirq.h | 49 +
+ .../pvr/services4/system/moorestown/syslocal.h | 82 +
+ .../pvr/services4/system/moorestown/sysutils.c | 30 +
+ .../mrst/pvr/tools/intern/debug/client/linuxsrv.h | 48 +
+ .../tools/intern/debug/dbgdriv/common/dbgdriv.c | 2075 ++++++++++++
+ .../tools/intern/debug/dbgdriv/common/dbgdriv.h | 116 +
+ .../tools/intern/debug/dbgdriv/common/hostfunc.h | 58 +
+ .../pvr/tools/intern/debug/dbgdriv/common/hotkey.c | 135 +
+ .../pvr/tools/intern/debug/dbgdriv/common/hotkey.h | 60 +
+ .../pvr/tools/intern/debug/dbgdriv/common/ioctl.c | 371 +++
+ .../pvr/tools/intern/debug/dbgdriv/common/ioctl.h | 87 +
+ .../tools/intern/debug/dbgdriv/linux/hostfunc.c | 302 ++
+ .../intern/debug/dbgdriv/linux/kbuild/Makefile | 35 +
+ .../pvr/tools/intern/debug/dbgdriv/linux/main.c | 298 ++
+ .../debug/dbgdriv/linux/makefile.linux.common | 40 +
+ include/drm/drmP.h | 22 +
+ include/drm/drm_mode.h | 2 +
+ include/linux/backlight.h | 3 +
+ 235 files changed, 104731 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/gpu/drm/drm_global.c
+ create mode 100644 drivers/gpu/drm/mrst/Kconfig
+ create mode 100644 drivers/gpu/drm/mrst/Makefile
+ create mode 100644 drivers/gpu/drm/mrst/drv/lnc_topaz.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/lnc_topaz.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/lnc_topazinit.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/msvdx_power.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/msvdx_power.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_bl.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_buffer.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_dpst.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_dpst.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_drm.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_drv.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_drv.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_fb.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_fb.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_fence.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_gtt.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_gtt.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_hotplug.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_hotplug.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_bios.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_bios.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_display.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_display.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_drv.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_i2c.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_lvds.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_modes.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_reg.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_mmu.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_msvdx.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_msvdx.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_msvdxinit.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_pvr_glue.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_pvr_glue.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_reg.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_reset.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_schedule.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_schedule.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_setup.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_sgx.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_sgx.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_socket.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_ttm_glue.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_umevents.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_umevents.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/topaz_power.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/topaz_power.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_object.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_object.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_placement_common.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/COPYING
+ create mode 100644 drivers/gpu/drm/mrst/pvr/INSTALL
+ create mode 100644 drivers/gpu/drm/mrst/pvr/README
+ create mode 100644 drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/img_defs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/img_types.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/ioctldef.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/pvrversion.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/regpaths.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/services.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/servicesext.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/sgx_options.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/sgxscript.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/kernelbuffer.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/kerneldisplay.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/pvrmmap.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/servicesint.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/sgx_bridge.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/sgx_mkif_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/sgxinfo.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/buffer_manager.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/deviceclass.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/devicemem.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/handle.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/hash.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/lists.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem_debug.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/metrics.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pdump_common.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/perproc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/power.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pvrsrv.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/queue.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/ra.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/resman.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_data.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_perproc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/linkage.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/lock.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/module.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osfunc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osperproc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pdump.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/private_data.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/buffer_manager.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/device.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/handle.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/hash.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/lists.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/metrics.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osfunc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osperproc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_osfunc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/perproc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/power.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/queue.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/ra.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/resman.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/services_headers.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/srvkm.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/include/syscommon.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/oemfuncs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysinfo.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/syslocal.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysutils.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/client/linuxsrv.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 305c590..8242c7f 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -157,3 +157,5 @@ config DRM_SAVAGE
+ help
+ Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+ chipset. If M is selected the module will be called savage.
++
++source drivers/gpu/drm/mrst/Kconfig
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 39c5aa7..ca0eea7 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -11,7 +11,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
+ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_crtc.o drm_modes.o drm_edid.o \
+- drm_info.o drm_debugfs.o drm_encoder_slave.o
++ drm_info.o drm_debugfs.o drm_encoder_slave.o drm_global.o
+
+ drm-$(CONFIG_COMPAT) += drm_ioc32.o
+
+@@ -33,4 +33,5 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
+ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
+ obj-$(CONFIG_DRM_VIA) +=via/
+ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
++obj-$(CONFIG_DRM_MRST) +=mrst/
+ obj-y += i2c/
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index d91fb8c..9004741 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -159,6 +159,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+ { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
++ { DRM_MODE_CONNECTOR_MIPI, "MIPI", 0 },
+ };
+
+ static struct drm_prop_enum_list drm_encoder_enum_list[] =
+@@ -167,6 +168,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
++ { DRM_MODE_ENCODER_MIPI, "MIPI" },
+ };
+
+ char *drm_get_encoder_name(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 766c468..48d70c2 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -342,6 +342,8 @@ static int __init drm_core_init(void)
+
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
++ drm_global_init();
++
+ return 0;
+ err_p3:
+ drm_sysfs_destroy();
+@@ -355,6 +357,7 @@ err_p1:
+
+ static void __exit drm_core_exit(void)
+ {
++ drm_global_release();
+ remove_proc_entry("dri", NULL);
+ debugfs_remove(drm_debugfs_root);
+ drm_sysfs_destroy();
+@@ -437,6 +440,12 @@ static int drm_version(struct drm_device *dev, void *data,
+ long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+ {
++ return drm_unlocked_ioctl(filp, cmd, arg);
++}
++EXPORT_SYMBOL(drm_ioctl);
++
++long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ struct drm_ioctl_desc *ioctl;
+@@ -526,7 +535,7 @@ long drm_ioctl(struct file *filp,
+ return retcode;
+ }
+
+-EXPORT_SYMBOL(drm_ioctl);
++EXPORT_SYMBOL(drm_unlocked_ioctl);
+
+ struct drm_local_map *drm_getsarea(struct drm_device *dev)
+ {
+diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
+new file mode 100644
+index 0000000..e054c4f
+--- /dev/null
++++ b/drivers/gpu/drm/drm_global.c
+@@ -0,0 +1,107 @@
++/**************************************************************************
++ *
++ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++#include <drmP.h>
++struct drm_global_item {
++ struct mutex mutex;
++ void *object;
++ int refcount;
++};
++
++static struct drm_global_item glob[DRM_GLOBAL_NUM];
++
++void drm_global_init(void)
++{
++ int i;
++
++ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
++ struct drm_global_item *item = &glob[i];
++ mutex_init(&item->mutex);
++ item->object = NULL;
++ item->refcount = 0;
++ }
++}
++
++void drm_global_release(void)
++{
++ int i;
++ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
++ struct drm_global_item *item = &glob[i];
++ BUG_ON(item->object != NULL);
++ BUG_ON(item->refcount != 0);
++ }
++}
++
++int drm_global_item_ref(struct drm_global_reference *ref)
++{
++ int ret;
++ struct drm_global_item *item = &glob[ref->global_type];
++ void *object;
++
++ mutex_lock(&item->mutex);
++ if (item->refcount == 0) {
++ item->object = kmalloc(ref->size, GFP_KERNEL);
++ if (unlikely(item->object == NULL)) {
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ ref->object = item->object;
++ ret = ref->init(ref);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ ++item->refcount;
++ }
++ ref->object = item->object;
++ object = item->object;
++ mutex_unlock(&item->mutex);
++ return 0;
++ out_err:
++ kfree(item->object);
++ mutex_unlock(&item->mutex);
++ item->object = NULL;
++ return ret;
++}
++
++EXPORT_SYMBOL(drm_global_item_ref);
++
++void drm_global_item_unref(struct drm_global_reference *ref)
++{
++ struct drm_global_item *item = &glob[ref->global_type];
++
++ mutex_lock(&item->mutex);
++ BUG_ON(item->refcount == 0);
++ BUG_ON(ref->object != item->object);
++ if (--item->refcount == 0) {
++ ref->release(ref);
++ kfree(item->object);
++ item->object = NULL;
++ }
++ mutex_unlock(&item->mutex);
++}
++
++EXPORT_SYMBOL(drm_global_item_unref);
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index b98384d..7991d00 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -72,6 +72,28 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
+ return 0;
+ }
+
++#if 0
++static void drm_flip_work_func(struct work_struct *work)
++{
++ struct drm_device *dev =
++ container_of(work, struct drm_device, flip_work);
++#if 0
++ struct drm_pending_flip *f, *t;
++#endif
++ u32 frame;
++
++ mutex_lock(&dev->struct_mutex);
++
++ list_for_each_entry_safe(f, t, &dev->flip_list, link) {
++ frame = drm_vblank_count(dev, f->pipe);
++ if (vblank_after(frame, f->frame))
++ drm_finish_pending_flip(dev, f, frame);
++ }
++
++ mutex_unlock(&dev->struct_mutex);
++}
++#endif
++
+ static void vblank_disable_fn(unsigned long arg)
+ {
+ struct drm_device *dev = (struct drm_device *)arg;
+@@ -163,6 +185,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+ atomic_set(&dev->vblank_refcount[i], 0);
+ }
+
++#if 0
++ INIT_LIST_HEAD(&dev->flip_list);
++ INIT_WORK(&dev->flip_work, drm_flip_work_func);
++#endif
++
+ dev->vblank_disable_allowed = 0;
+ return 0;
+
+diff --git a/drivers/gpu/drm/mrst/Kconfig b/drivers/gpu/drm/mrst/Kconfig
+new file mode 100644
+index 0000000..2fc22d1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/Kconfig
+@@ -0,0 +1,220 @@
++#
++# Drm device configuration
++#
++# This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++#
++menuconfig DRM_MRST
++ tristate "Intel Moorestown (load along with IMG driver)"
++ depends on DRM && PCI
++ select FB_CFB_COPYAREA
++ select FB_CFB_FILLRECT
++ select FB_CFB_IMAGEBLIT
++ select PVR_SUPPORT_DRI_DRM
++ select DRM_KMS_HELPER
++ help
++ Choose this option if you have a Moorestown platform.
++ If M is selected the module will be called mrst.
++
++config IMG_DOES_NOT_SUPPORT_MENLOW
++ bool "Disable MRST funtions for Menlow"
++ depends on DRM_MRST
++ default n
++ help
++ Choose Menlow
++
++config PVR_RELEASE
++ string "Build IMG kernel services as release"
++ depends on DRM_MRST
++ default "release"
++ help
++ xxxxxxx
++
++config PVR_SERVICES4
++ bool "Enable PVR services4"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_XOPEN_SOURCE
++ int "Number of xopen source"
++ depends on DRM_MRST
++ default 600
++ help
++ xxxxxxx
++
++config PVR2D_VALIDATE_INPUT_PARAMS
++ bool "PVR2D Validate input params"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_DISPLAY_CONTROLLER
++ string "Name of PVR display controller"
++ depends on DRM_MRST
++ default "mrstlfb"
++ help
++ xxxxxxx
++
++config PVR_SGX_CORE_REV
++ int "SGX core revison"
++ depends on DRM_MRST
++ default 121
++ help
++ xxxxxxx
++
++config PVR_SUPPORT_SVRINIT
++ bool "Support IMG Kernel Service Init"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_SGX
++ bool "Support IMG SGX core"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_PERCONTEXT_PB
++ bool "Support PVR PERCONTEXT_PB"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_SUPPORT_LINUX_X86_WRITECOMBINE
++ bool "Support X86 write combine in IMG service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_TRANSFER_QUEUE
++ bool "Support IMG TRANSFER_QUEUE"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_DRI_DRM
++ bool
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SYS_USING_INTERRUPTS
++ bool "Using interrupts in IMG kernel service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_HW_RECOVERY
++ bool "Support hardware recover in IMG kernel service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_POWER_MANAGEMENT
++ bool "Support POWER_MANAGEMENT in IMG kernel service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SECURE_HANDLES
++ bool "Support PVR_SECURE_HANDLES"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_USE_PTHREADS
++ bool "Use pthreads in IMG service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_SUPPORT_SGX_EVENT_OBJECT
++ bool "Support SGX event object"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_SUPPORT_SGX_HWPERF
++ bool "Support SGX HWPERF"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_SGX_LOW_LATENCY_SCHEDULING
++ bool "Support SGX LOW_LATENCY_SCHEDULING"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_LINUX_X86_PAT
++ bool "Support PAT in IMG kernel service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_PROC_USE_SEQ_FILE
++ bool "Support PVR_PROC_USE_SEQ_FILE"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
++
++config PVR_SUPPORT_SGX535
++ bool "SUPPORT_SGX535"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
++
++config PVR_SUPPORT_CACHEFLUSH_ON_ALLOC
++ bool "SUPPORT_CACHEFLUSH_ON_ALLOC"
++ depends on DRM_MRST
++ default n
++ help
++ xxxxxx
++
++config PVR_SUPPORT_MEMINFO_IDS
++ bool "SUPPORT_MEMINFO_IDS"
++ depends on DRM_MRST
++ default n
++ help
++ xxxxxx
++
++config PVR_SUPPORT_CACHE_LINE_FLUSH
++ bool "SUPPORT_CACHE_LINE_FLUSH"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
++
++config PVR_SUPPORT_CPU_CACHED_BUFFERS
++ bool "SUPPORT_CPU_CACHED_BUFFERS"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
++
++config PVR_DEBUG_MESA_OGL_TRACE
++ bool "DEBUG_MESA_OGL_TRACE"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
+diff --git a/drivers/gpu/drm/mrst/Makefile b/drivers/gpu/drm/mrst/Makefile
+new file mode 100644
+index 0000000..e23d8c3
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/Makefile
+@@ -0,0 +1,169 @@
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++ccflags-y += -Idrivers/gpu/drm/mrst/pvr/include4 \
++ -Idrivers/gpu/drm/mrst/pvr/services4/include \
++ -Idrivers/gpu/drm/mrst/pvr/services4/include/env/linux \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/include \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/bridged \
++ -Idrivers/gpu/drm/mrst/pvr/services4/system/moorestown \
++ -Idrivers/gpu/drm/mrst/pvr/services4/system/include \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx \
++ -Idrivers/gpu/drm/mrst/drv \
++ -Idrivers/gpu/drm/mrst/drv/ttm \
++ -Iinclude/linux \
++ -Werror \
++ -DLINUX \
++ -DPVR_BUILD_DIR="\"pc_i686_moorestown_linux\"" \
++ -DSGX535
++
++#FIXME: whether we need the follow -D
++ccflags-$(CONFIG_PCI_MSI) += -DCONFIG_PCI_MSI
++ccflags-y += -DBUILD=$(CONFIG_PVR_RELEASE)
++ccflags-y += -DPVR_BUILD_TYPE="\"$(CONFIG_PVR_RELEASE)\""
++ifeq ($(CONFIG_PVR_RELEASE),"release")
++ ccflags-y += -DRELEASE
++else
++ ccflags-y += -DDEBUG
++endif
++ccflags-$(CONFIG_PVR_SERVICES4) += -DSERVICES4
++ccflags-y += -D_XOPEN_SOURCE=$(CONFIG_PVR_XOPEN_SOURCE)
++ccflags-$(CONFIG_PVR2D_VALIDATE_INPUT_PARAMS) += -DPVR2D_VALIDATE_INPUT_PARAMS
++ccflags-y += -DDISPLAY_CONTROLLER=$(CONFIG_PVR_DISPLAY_CONTROLLER)
++ccflags-y += -UDEBUG_LOG_PATH_TRUNCATE
++ccflags-$(CONFIG_PVR_SUPPORT_SVRINIT) += -DSUPPORT_SRVINIT
++ccflags-$(CONFIG_PVR_SUPPORT_SGX) += -DSUPPORT_SGX
++ccflags-$(CONFIG_PVR_SUPPORT_PERCONTEXT_PB) += -DSUPPORT_PERCONTEXT_PB
++ccflags-$(CONFIG_PVR_SUPPORT_LINUX_X86_WRITECOMBINE) += -DSUPPORT_LINUX_X86_WRITECOMBINE
++ccflags-$(CONFIG_PVR_TRANSFER_QUEUE) += -DTRANSFER_QUEUE
++ccflags-$(CONFIG_PVR_SUPPORT_DRI_DRM) += -DSUPPORT_DRI_DRM
++ccflags-$(CONFIG_PVR_SUPPORT_DRI_DRM) += -DSUPPORT_DRI_DRM_EXT
++ccflags-$(CONFIG_PVR_SYS_USING_INTERRUPTS) += -DSYS_USING_INTERRUPTS
++ccflags-$(CONFIG_PVR_SUPPORT_HW_RECOVERY) += -DSUPPORT_HW_RECOVERY
++ccflags-$(CONFIG_PVR_SUPPORT_POWER_MANAGEMENT) += -DSUPPORT_ACTIVE_POWER_MANAGEMENT
++ccflags-$(CONFIG_PVR_SECURE_HANDLES) += -DPVR_SECURE_HANDLES
++ccflags-$(CONFIG_PVR_USE_PTHREADS) += -DUSE_PTHREADS
++ccflags-$(CONFIG_PVR_SUPPORT_SGX_EVENT_OBJECT) += -DSUPPORT_SGX_EVENT_OBJECT
++ccflags-$(CONFIG_PVR_SUPPORT_SGX_HWPERF) += -DSUPPORT_SGX_HWPERF
++ccflags-$(CONFIG_PVR_SUPPORT_SGX_LOW_LATENCY_SCHEDULING) += -DSUPPORT_SGX_LOW_LATENCY_SCHEDULING
++ccflags-$(CONFIG_PVR_SUPPORT_LINUX_X86_PAT) += -DSUPPORT_LINUX_X86_PAT
++ccflags-$(CONFIG_PVR_PROC_USE_SEQ_FILE) += -DPVR_PROC_USE_SEQ_FILE
++ccflags-$(CONFIG_PVR_SUPPORT_SGX535) += -DSUPPORT_SGX535
++ccflags-y += -DSGX_CORE_REV=$(CONFIG_PVR_SGX_CORE_REV)
++ccflags-$(CONFIG_PVR_SUPPORT_CACHEFLUSH_ON_ALLOC) += -DSUPPORT_CACHEFLUSH_ON_ALLOC
++ccflags-$(CONFIG_PVR_SUPPORT_MEMINFO_IDS) += -DSUPPORT_MEMINFO_IDS
++ccflags-$(CONFIG_PVR_SUPPORT_CACHE_LINE_FLUSH) += -DSUPPORT_CACHE_LINE_FLUSH
++ccflags-$(CONFIG_PVR_SUPPORT_CPU_CACHED_BUFFERS) += -DSUPPORT_CPU_CACHED_BUFFERS
++ccflags-$(CONFIG_PVR_DEBUG_MESA_OGL_TRACE)+= -DDEBUG_MESA_OGL_TRACE
++
++ENVDIR = pvr/services4/srvkm/env/linux
++COMMONDIR = pvr/services4/srvkm/common
++BRIDGEDDIR = pvr/services4/srvkm/bridged
++SYSCONFIGDIR = pvr/services4/system/moorestown
++SGXDIR = pvr/services4/srvkm/devices/sgx
++FBDEVDIR = pvr/services4/3rdparty/linux_framebuffer_mrst
++DRMDRVDIR = drv
++
++ENV_OBJS = $(ENVDIR)/osfunc.o \
++ $(ENVDIR)/mutils.o \
++ $(ENVDIR)/mmap.o \
++ $(ENVDIR)/module.o \
++ $(ENVDIR)/pdump.o \
++ $(ENVDIR)/proc.o \
++ $(ENVDIR)/pvr_bridge_k.o \
++ $(ENVDIR)/pvr_debug.o \
++ $(ENVDIR)/mm.o \
++ $(ENVDIR)/mutex.o \
++ $(ENVDIR)/event.o \
++ $(ENVDIR)/osperproc.o \
++ $(ENVDIR)/pvr_drm.o
++
++COMMON_OBJS = $(COMMONDIR)/buffer_manager.o \
++ $(COMMONDIR)/devicemem.o \
++ $(COMMONDIR)/deviceclass.o \
++ $(COMMONDIR)/handle.o \
++ $(COMMONDIR)/hash.o \
++ $(COMMONDIR)/metrics.o \
++ $(COMMONDIR)/pvrsrv.o \
++ $(COMMONDIR)/queue.o \
++ $(COMMONDIR)/ra.o \
++ $(COMMONDIR)/resman.o \
++ $(COMMONDIR)/power.o \
++ $(COMMONDIR)/mem.o \
++ $(COMMONDIR)/pdump_common.o \
++ $(COMMONDIR)/perproc.o \
++ $(COMMONDIR)/lists.o \
++ $(COMMONDIR)/mem_debug.o
++
++BRIDGED_OBJS = $(BRIDGEDDIR)/bridged_support.o \
++ $(BRIDGEDDIR)/bridged_pvr_bridge.o \
++ $(BRIDGEDDIR)/sgx/bridged_sgx_bridge.o
++
++SYSCONFIG_OBJS = $(SYSCONFIGDIR)/sysconfig.o \
++ $(SYSCONFIGDIR)/sysutils.o \
++ $(SYSCONFIGDIR)/ospm_power.o \
++ $(SYSCONFIGDIR)/sysirq.o \
++ $(SYSCONFIGDIR)/sys_pvr_drm_export.o
++
++SGX_OBJS = $(SGXDIR)/sgxinit.o \
++ $(SGXDIR)/sgxpower.o \
++ $(SGXDIR)/sgxreset.o \
++ $(SGXDIR)/sgxutils.o \
++ $(SGXDIR)/sgxkick.o \
++ $(SGXDIR)/sgxtransfer.o \
++ $(SGXDIR)/mmu.o \
++ $(SGXDIR)/pb.o
++
++FB_OBJS = $(FBDEVDIR)/mrstlfb_displayclass.o \
++ $(FBDEVDIR)/mrstlfb_linux.o
++
++DRV_OBJS = $(DRMDRVDIR)/lnc_topaz.o \
++ $(DRMDRVDIR)/topaz_power.o \
++ $(DRMDRVDIR)/lnc_topazinit.o \
++ $(DRMDRVDIR)/psb_bl.o \
++ $(DRMDRVDIR)/psb_buffer.o \
++ $(DRMDRVDIR)/psb_dpst.o \
++ $(DRMDRVDIR)/psb_drv.o \
++ $(DRMDRVDIR)/psb_fb.o \
++ $(DRMDRVDIR)/psb_fence.o \
++ $(DRMDRVDIR)/psb_gtt.o \
++ $(DRMDRVDIR)/psb_hotplug.o \
++ $(DRMDRVDIR)/psb_intel_bios.o \
++ $(DRMDRVDIR)/psb_intel_display.o \
++ $(DRMDRVDIR)/psb_intel_i2c.o \
++ $(DRMDRVDIR)/psb_intel_lvds.o \
++ $(DRMDRVDIR)/psb_intel_modes.o \
++ $(DRMDRVDIR)/psb_intel_sdvo.o \
++ $(DRMDRVDIR)/psb_mmu.o \
++ $(DRMDRVDIR)/psb_msvdx.o \
++ $(DRMDRVDIR)/msvdx_power.o \
++ $(DRMDRVDIR)/psb_msvdxinit.o \
++ $(DRMDRVDIR)/psb_reset.o \
++ $(DRMDRVDIR)/psb_schedule.o \
++ $(DRMDRVDIR)/psb_sgx.o \
++ $(DRMDRVDIR)/psb_socket.o \
++ $(DRMDRVDIR)/psb_ttm_glue.o \
++ $(DRMDRVDIR)/psb_pvr_glue.o \
++ $(DRMDRVDIR)/psb_umevents.o \
++ $(DRMDRVDIR)/ttm/ttm_agp_backend.o \
++ $(DRMDRVDIR)/ttm/ttm_bo.o \
++ $(DRMDRVDIR)/ttm/ttm_bo_util.o \
++ $(DRMDRVDIR)/ttm/ttm_bo_vm.o \
++ $(DRMDRVDIR)/ttm/ttm_execbuf_util.o \
++ $(DRMDRVDIR)/ttm/ttm_fence.o \
++ $(DRMDRVDIR)/ttm/ttm_fence_user.o \
++ $(DRMDRVDIR)/ttm/ttm_lock.o \
++ $(DRMDRVDIR)/ttm/ttm_memory.o \
++ $(DRMDRVDIR)/ttm/ttm_object.o \
++ $(DRMDRVDIR)/ttm/ttm_pat_compat.o \
++ $(DRMDRVDIR)/ttm/ttm_placement_user.o \
++ $(DRMDRVDIR)/ttm/ttm_tt.o
++
++mrst-objs += $(ENV_OBJS) $(COMMON_OBJS) $(BRIDGED_OBJS) $(SYSCONFIG_OBJS) $(SGX_OBJS) $(FB_OBJS) $(DRV_OBJS)
++
++obj-$(CONFIG_DRM_MRST) += mrst.o
++obj-$(CONFIG_DRM_MRST_AAVA) += $(DRMDRVDIR)/psb_intel_dsi_aava.o
++obj-$(CONFIG_DRM_MRST_CDK) += $(DRMDRVDIR)/psb_intel_dsi.o
+diff --git a/drivers/gpu/drm/mrst/drv/lnc_topaz.c b/drivers/gpu/drm/mrst/drv/lnc_topaz.c
+new file mode 100644
+index 0000000..04c42f8
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/lnc_topaz.c
+@@ -0,0 +1,714 @@
++/**
++ * file lnc_topaz.c
++ * TOPAZ I/O operations and IRQ handling
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* include headers */
++/* #define DRM_DEBUG_CODE 2 */
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "lnc_topaz.h"
++#include "ospm_power.h"
++
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#define TOPAZ_RM_MULTI_MTX_WRITE
++
++/* static function define */
++static int lnc_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd);
++static int lnc_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq);
++static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
++static int lnc_topaz_dequeue_send(struct drm_device *dev);
++static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence);
++
++IMG_BOOL lnc_topaz_interrupt(IMG_VOID *pvData)
++{
++ struct drm_device *dev;
++ struct drm_psb_private *dev_priv;
++ uint32_t clr_flag;
++ struct topaz_private *topaz_priv;
++ uint32_t topaz_stat;
++ uint32_t cur_seq;
++
++ if (pvData == IMG_NULL) {
++ DRM_ERROR("ERROR: TOPAZ %s, Invalid params\n", __func__);
++ return IMG_FALSE;
++ }
++
++ if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
++ return IMG_FALSE;
++ }
++
++ dev = (struct drm_device *)pvData;
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ topaz_priv = dev_priv->topaz_private;
++
++ topaz_priv->topaz_hw_busy = REG_READ(0x20D0) & (0x1 << 11);
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &topaz_stat);
++ clr_flag = lnc_topaz_queryirq(dev);
++
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ /* ignore non-SYNC interrupts */
++ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
++ return IMG_TRUE;
++
++ cur_seq = *(uint32_t *)topaz_priv->topaz_sync_addr;
++
++ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n",
++ cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
++
++ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
++
++ /* save frame skip flag for query */
++ topaz_priv->frame_skip = CCB_CTRL_FRAMESKIP(dev_priv);
++
++ topaz_priv->topaz_busy = 1;
++ lnc_topaz_dequeue_send(dev);
++
++ if (drm_topaz_pmpolicy != PSB_PMPOLICY_NOPM)
++ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
++
++ return IMG_TRUE;
++}
++
++static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long irq_flags;
++ int ret = 0;
++ void *cmd;
++ uint32_t tmp;
++ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t ui32_reg_value = 0;
++
++ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
++
++ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", topaz_priv->topaz_busy);
++
++ /* FIXME: workaround for HSD 3469585
++ * disable DRAM Self Refresh Mode
++ * by resetting DUNIT.DPMC0
++ */
++ ui32_reg_value = MSG_READ32(0x1, 0x4);
++ MSG_WRITE32(0x1, 0x4, (ui32_reg_value & (~(0x1 << 7))));
++
++ if (topaz_priv->topaz_fw_loaded == 0) {
++ /* #.# load fw to driver */
++ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
++ ret = topaz_init_fw(dev);
++ if (ret != 0) {
++ /* FIXME: find a proper return value */
++ DRM_ERROR("TOPAX:load /lib/firmware/topaz_fw.bin fail,"
++ "ensure udevd is configured correctly!\n");
++
++ return -EFAULT;
++ }
++ topaz_priv->topaz_fw_loaded = 1;
++ }
++
++ tmp = atomic_cmpxchg(&dev_priv->topaz_mmu_invaldc, 1, 0);
++ if (tmp == 1)
++ topaz_mmu_flushcache(dev_priv);
++
++ /* # schedule watchdog */
++ /* psb_schedule_watchdog(dev_priv); */
++
++ /* # spin lock irq save [msvdx_lock] */
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++
++ /* # if topaz need to reset, reset it */
++ if (topaz_priv->topaz_needs_reset) {
++ /* #.# reset it */
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
++
++ if (lnc_topaz_reset(dev_priv)) {
++ ret = -EBUSY;
++ DRM_ERROR("TOPAZ: reset failed.\n");
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
++
++ /* #.# upload firmware */
++ if (topaz_setup_fw(dev, topaz_priv->topaz_cur_codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++ }
++
++ if (!topaz_priv->topaz_busy) {
++ /* # direct map topaz command if topaz is free */
++ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
++ sequence);
++
++ topaz_priv->topaz_busy = 1;
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
++ return ret;
++ }
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
++ sequence);
++ cmd = NULL;
++
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (cmd == NULL || ret) {
++ DRM_ERROR("TOPAZ: map command for save fialed\n");
++ return ret;
++ }
++
++ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
++ if (ret)
++ DRM_ERROR("TOPAZ: save command failed\n");
++ }
++
++ return ret;
++}
++
++static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct lnc_topaz_cmd_queue *topaz_cmd;
++ unsigned long irq_flags;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
++ sequence);
++
++ topaz_cmd = kzalloc(sizeof(struct lnc_topaz_cmd_queue),
++ GFP_KERNEL);
++ if (topaz_cmd == NULL) {
++ mutex_unlock(&topaz_priv->topaz_mutex);
++ DRM_ERROR("TOPAZ: out of memory....\n");
++ return -ENOMEM;
++ }
++
++ topaz_cmd->cmd = cmd;
++ topaz_cmd->cmd_size = cmd_size;
++ topaz_cmd->sequence = sequence;
++
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++ list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
++ if (!topaz_priv->topaz_busy) {
++ /* topaz_priv->topaz_busy = 1; */
++ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
++ lnc_topaz_dequeue_send(dev);
++ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
++ }
++
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ return 0;
++}
++
++
++int lnc_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence = NULL;
++ int ret;
++
++ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, fence);
++ if (ret)
++ return ret;
++
++ /* workaround for interrupt issue */
++ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
++ validate_list, fence_arg, &fence);
++
++ if (fence)
++ ttm_fence_object_unref(&fence);
++
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++
++ return 0;
++}
++
++static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sync_cmd[3];
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++#if 0
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[LNC_ENGINE_ENCODE];
++ unsigned long irq_flags;
++#endif
++#if LNC_TOPAZ_NO_IRQ
++ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
++ int count = 10000;
++ uint32_t cur_seq;
++#endif
++
++ /* insert a SYNC command here */
++ topaz_priv->topaz_sync_cmd_seq = (1 << 15) |
++ topaz_priv->topaz_cmd_seq++;
++ sync_cmd[0] = 1 | (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (topaz_priv->topaz_sync_cmd_seq << 16);
++ sync_cmd[1] = topaz_priv->topaz_sync_offset;
++ sync_cmd[2] = sync_seq;
++
++ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x),"
++ "sync_seq (0x%08x)\n",
++ topaz_priv->topaz_sync_cmd_seq, sync_seq);
++
++ if (drm_topaz_sbuswa)
++ TOPAZ_WAIT_UNTIL_IDLE;
++
++ lnc_mtx_send(dev_priv, sync_cmd);
++
++#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
++ /* # poll topaz register for certain times */
++ while (count && *sync_p != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
++ sync_seq, *sync_p);
++ return -EBUSY;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
++
++ topaz_priv->topaz_busy = 0;
++
++ /* XXX: check psb_fence_handler is suitable for topaz */
++ cur_seq = *sync_p;
++#if 0
++ write_lock_irqsave(&fc->lock, irq_flags);
++ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
++ cur_seq,
++ _PSB_FENCE_TYPE_EXE, 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++#endif
++#endif
++ return 0;
++}
++
++int
++lnc_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd)
++{
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ struct ttm_bo_kmap_obj cmd_kmap;
++ bool is_iomem;
++ int ret;
++ unsigned char *cmd_start, *tmp;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
++ &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
++ return ret;
++ }
++ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
++ &is_iomem) + cmd_page_offset;
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
++ tmp = kzalloc(cmd_size, GFP_KERNEL);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *topaz_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
++ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: commit commands failed.\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
++ cmd_size, sequence, copy_cmd);
++
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++int
++lnc_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++ unsigned char *command = (unsigned char *) cmd;
++ struct topaz_cmd_header *cur_cmd_header;
++ uint32_t cur_cmd_size, cur_cmd_id;
++ uint32_t codec;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
++
++ while (cmd_size > 0) {
++ cur_cmd_header = (struct topaz_cmd_header *) command;
++ cur_cmd_size = cur_cmd_header->size * 4;
++ cur_cmd_id = cur_cmd_header->id;
++
++ switch (cur_cmd_id) {
++ case MTX_CMDID_SW_NEW_CODEC:
++ codec = *((uint32_t *) cmd + 1);
++
++ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
++ codec_to_string(codec), codec);
++ if (topaz_setup_fw(dev, codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++
++ topaz_priv->topaz_cur_codec = codec;
++ break;
++
++ case MTX_CMDID_SW_ENTER_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ break;
++
++ case MTX_CMDID_SW_LEAVE_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ break;
++
++ /* ordinary commmand */
++ case MTX_CMDID_START_PIC:
++ /* XXX: specially handle START_PIC hw command */
++ CCB_CTRL_SET_QP(dev_priv,
++ *(command + cur_cmd_size - 4));
++ /* strip the QP parameter (it's software arg) */
++ cur_cmd_header->size--;
++ default:
++ cur_cmd_header->seq = 0x7fff &
++ topaz_priv->topaz_cmd_seq++;
++
++ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
++ " seq (0x%04x)\n",
++ cmd_to_string(cur_cmd_id),
++ cur_cmd_size, cur_cmd_header->seq);
++
++ if (drm_topaz_sbuswa && cur_cmd_id != \
++ MTX_CMDID_START_PIC)
++ TOPAZ_WAIT_UNTIL_IDLE;
++
++ ret = lnc_mtx_send(dev_priv, command);
++ if (ret) {
++ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
++ goto out;
++ }
++ break;
++ }
++
++ command += cur_cmd_size;
++ cmd_size -= cur_cmd_size;
++ }
++ lnc_topaz_sync(dev, sync_seq);
++out:
++ return ret;
++}
++
++static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
++{
++ struct topaz_cmd_header *cur_cmd_header =
++ (struct topaz_cmd_header *) cmd;
++ uint32_t cmd_size = cur_cmd_header->size;
++ uint32_t read_index, write_index;
++ const uint32_t *cmd_pointer = (uint32_t *) cmd;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ int ret = 0;
++
++ /* <msvdx does> # enable all clock */
++
++ write_index = topaz_priv->topaz_cmd_windex;
++ if (write_index + cmd_size + 1 > topaz_priv->topaz_ccb_size) {
++ int free_space = topaz_priv->topaz_ccb_size - write_index;
++
++ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
++ if (free_space > 0) {
++ struct topaz_cmd_header pad_cmd;
++
++ pad_cmd.id = MTX_CMDID_NULL;
++ pad_cmd.size = free_space;
++ pad_cmd.seq = 0x7fff & topaz_priv->topaz_cmd_seq;
++
++ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
++ " size(%d),seq (0x%04x)\n",
++ pad_cmd.size, pad_cmd.seq);
++
++#ifndef TOPAZ_RM_MULTI_MTX_WRITE
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
++#else
++ topaz_write_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_buffer_addr
++ + topaz_priv->topaz_cmd_windex * 4,
++ pad_cmd.val);
++ topaz_priv->topaz_cmd_windex++;
++#endif
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ POLL_WB_SEQ(dev_priv, pad_cmd.seq);
++ ++topaz_priv->topaz_cmd_seq;
++ }
++ POLL_WB_RINDEX(dev_priv, 0);
++ if (ret == 0)
++ topaz_priv->topaz_cmd_windex = 0;
++ else {
++ DRM_ERROR("TOPAZ: poll rindex timeout\n");
++ return ret; /* HW may hang, need reset */
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
++ }
++
++ read_index = CCB_CTRL_RINDEX(dev_priv);/* temperily use CCB CTRL */
++ write_index = topaz_priv->topaz_cmd_windex;
++
++ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
++ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
++
++#ifndef TOPAZ_RM_MULTI_MTX_WRITE
++ TOPAZ_BEGIN_CCB(dev_priv);
++ while (cmd_size > 0) {
++ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
++ --cmd_size;
++ }
++#else
++ while (cmd_size > 0) {
++ topaz_write_mtx_mem(
++ dev_priv,
++ topaz_priv->topaz_ccb_buffer_addr
++ + topaz_priv->topaz_cmd_windex * 4,
++ *cmd_pointer++);
++ topaz_priv->topaz_cmd_windex++;
++ --cmd_size;
++ }
++#endif
++ TOPAZ_END_CCB(dev_priv, 1);
++
++#if 0
++ DRM_UDELAY(1000);
++ lnc_topaz_clearirq(dev,
++ lnc_topaz_queryirq(dev));
++ LNC_TRACEL("TOPAZ: after clear, query again\n");
++ lnc_topaz_queryirq(dev_priv);
++#endif
++
++ return ret;
++}
++
++int lnc_topaz_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct lnc_topaz_cmd_queue *topaz_cmd = NULL;
++ int ret;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
++
++ if (list_empty(&topaz_priv->topaz_queue)) {
++ topaz_priv->topaz_busy = 0;
++ return 0;
++ }
++
++ topaz_cmd = list_first_entry(&topaz_priv->topaz_queue,
++ struct lnc_topaz_cmd_queue, head);
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
++ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
++ topaz_cmd->sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n");
++ ret = -EINVAL;
++ }
++
++ list_del(&topaz_cmd->head);
++ kfree(topaz_cmd->cmd);
++ kfree(topaz_cmd
++ );
++
++ return ret;
++}
++
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
++{
++ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
++ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
++}
++
++int lnc_check_topaz_idle(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (topaz_priv->topaz_fw_loaded == 0)
++ return 0;
++
++ if (topaz_priv->topaz_busy)
++ return -EBUSY;
++
++ if (topaz_priv->topaz_hw_busy) {
++ PSB_DEBUG_PM("TOPAZ: %s, HW is busy\n", __func__);
++ return -EBUSY;
++ }
++
++ return 0; /* we think it is idle */
++}
++
++int lnc_video_frameskip(struct drm_device *dev, uint64_t user_pointer)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ int ret;
++
++ ret = copy_to_user((void __user *) ((unsigned long)user_pointer),
++ &topaz_priv->frame_skip, sizeof(topaz_priv->frame_skip));
++
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++}
++
++static void lnc_topaz_flush_cmd_queue(struct topaz_private *topaz_priv)
++{
++ struct lnc_topaz_cmd_queue *entry, *next;
++
++ /* remind to reset topaz */
++ topaz_priv->topaz_needs_reset = 1;
++
++ if (list_empty(&topaz_priv->topaz_queue)) {
++ topaz_priv->topaz_busy = 0;
++ return;
++ }
++
++ /* flush all command in queue */
++ list_for_each_entry_safe(entry, next,
++ &topaz_priv->topaz_queue,
++ head) {
++ list_del(&entry->head);
++ kfree(entry->cmd);
++ kfree(entry);
++ }
++
++ return;
++}
++
++void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ lnc_topaz_flush_cmd_queue(topaz_priv);
++}
++
++inline int psb_try_power_down_topaz(struct drm_device *dev)
++{
++ ospm_apm_power_down_topaz(dev);
++ return 0;
++}
++
++void lnc_map_topaz_reg(struct drm_device *dev)
++{
++ unsigned long resource_start;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++ if (IS_MRST(dev) && !dev_priv->topaz_disabled) {
++ dev_priv->topaz_reg =
++ ioremap(resource_start + LNC_TOPAZ_OFFSET,
++ LNC_TOPAZ_SIZE);
++ if (!dev_priv->topaz_reg)
++ DRM_ERROR("failed to map TOPAZ register address\n");
++ }
++
++ return;
++}
++
++void lnc_unmap_topaz_reg(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->topaz_reg) {
++ iounmap(dev_priv->topaz_reg);
++ dev_priv->topaz_reg = NULL;
++ }
++ }
++
++ return;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/lnc_topaz.h b/drivers/gpu/drm/mrst/drv/lnc_topaz.h
+new file mode 100644
+index 0000000..7511c32
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/lnc_topaz.h
+@@ -0,0 +1,925 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _LNC_TOPAZ_H_
++#define _LNC_TOPAZ_H_
++
++#include "psb_drv.h"
++#include "img_types.h"
++
++#define LNC_TOPAZ_NO_IRQ 0
++#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
++
++extern int drm_topaz_pmpolicy;
++
++/*
++ * MACROS to insert values into fields within a word. The basename of the
++ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
++ */
++#define MM_WRITE32(base, offset, value) \
++do { \
++ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
++ + base + offset)) = value; \
++} while (0)
++
++#define MM_READ32(base, offset, pointer) \
++do { \
++ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
++ + base + offset)); \
++} while (0)
++
++#define F_MASK(basename) (MASK_##basename)
++#define F_SHIFT(basename) (SHIFT_##basename)
++
++#define F_ENCODE(val, basename) \
++ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
++
++/* MVEA macro */
++#define MVEA_START 0x03000
++
++#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value)
++#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer);
++
++#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
++#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
++#define F_ENCODE_MVEA(val, basename) \
++ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
++
++/* VLC macro */
++#define TOPAZ_VLC_START 0x05000
++
++/* TOPAZ macro */
++#define TOPAZ_START 0x02000
++
++#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value)
++#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer)
++
++#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
++#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
++#define F_ENCODE_TOPAZ(val, basename) \
++ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
++
++/* MTX macro */
++#define MTX_START 0x0
++
++#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value)
++#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer)
++
++/* DMAC macro */
++#define DMAC_START 0x0f000
++
++#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value)
++#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer)
++
++#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
++#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
++#define F_ENCODE_DMAC(val, basename) \
++ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
++
++
++/* Register CR_IMG_TOPAZ_INTENAB */
++#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
++
++#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
++
++#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
++
++#define MTX_CCBCTRL_ROFF 0
++#define MTX_CCBCTRL_COMPLETE 4
++#define MTX_CCBCTRL_CCBSIZE 8
++#define MTX_CCBCTRL_QP 12
++#define MTX_CCBCTRL_FRAMESKIP 20
++#define MTX_CCBCTRL_INITQP 24
++
++#define TOPAZ_CR_MMU_STATUS 0x001C
++#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
++#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
++#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
++
++#define TOPAZ_CR_MMU_MEM_REQ 0x0020
++#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
++#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
++#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
++
++#define MTX_CR_MTX_KICK 0x0080
++#define MASK_MTX_MTX_KICK 0x0000FFFF
++#define SHIFT_MTX_MTX_KICK 0
++#define REGNUM_MTX_MTX_KICK 0x0080
++
++#define MTX_DATA_MEM_BASE 0x82880000
++
++#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
++#define MASK_MTX_MTX_MCMR 0x00000001
++#define SHIFT_MTX_MTX_MCMR 0
++#define REGNUM_MTX_MTX_MCMR 0x0108
++
++#define MASK_MTX_MTX_MCMID 0x0FF00000
++#define SHIFT_MTX_MTX_MCMID 20
++#define REGNUM_MTX_MTX_MCMID 0x0108
++
++#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
++#define SHIFT_MTX_MTX_MCM_ADDR 2
++#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
++#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
++#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
++#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
++
++#define MASK_MTX_MTX_MCMAI 0x00000002
++#define SHIFT_MTX_MTX_MCMAI 1
++#define REGNUM_MTX_MTX_MCMAI 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
++
++#define MVEA_CR_MVEA_BUSY 0x0018
++#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C
++#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020
++
++#define MVEA_CR_IMG_MVEA_SRST 0x0000
++#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
++#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
++#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
++#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
++#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
++#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
++#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
++#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
++#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
++#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
++#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
++#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
++#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
++
++#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
++#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
++
++#define TOPAZ_MTX_PC (0x00000005)
++#define PC_START_ADDRESS (0x80900000)
++
++#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
++#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
++
++#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
++#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
++#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
++#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
++#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
++
++#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
++
++#define TOPAZ_CR_MMU_CONTROL0 0x0024
++#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800
++#define SHIFT_TOPAZ_CR_MMU_BYPASS 11
++#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
++
++#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
++#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
++#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
++#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
++
++#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
++#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
++#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
++
++#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
++#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
++#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
++
++#define TOPAZ_CR_MMU_BANK_INDEX 0x0038
++#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
++#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
++#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038
++
++#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
++#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
++#define TXRPT_WAITONKICK_VALUE 0x8ade0000
++
++#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
++
++#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
++#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
++
++#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
++#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
++
++#define MTX_CR_MTX_SYSC_CDMAA 0x0344
++#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
++#define SHIFT_MTX_CDMAA_ADDRESS 2
++#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
++
++#define MTX_CR_MTX_SYSC_CDMAC 0x0340
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define MASK_MTX_BURSTSIZE 0x07000000
++#define SHIFT_MTX_BURSTSIZE 24
++#define REGNUM_MTX_BURSTSIZE 0x0340
++
++#define MASK_MTX_RNW 0x00020000
++#define SHIFT_MTX_RNW 17
++#define REGNUM_MTX_RNW 0x0340
++
++#define MASK_MTX_ENABLE 0x00010000
++#define SHIFT_MTX_ENABLE 16
++#define REGNUM_MTX_ENABLE 0x0340
++
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
++
++#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
++#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
++#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
++#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
++#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
++#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
++#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
++#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
++#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
++#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
++
++#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
++#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
++#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
++#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040
++
++#define MTX_CR_MTX_SYSC_CDMAT 0x0350
++#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
++#define SHIFT_MTX_TRANSFERDATA 0
++#define REGNUM_MTX_TRANSFERDATA 0x0350
++
++#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
++#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
++#define SHIFT_IMG_SOC_TRANSFER_FIN 17
++#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
++
++#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
++#define MASK_IMG_SOC_CNT 0x0000FFFF
++#define SHIFT_IMG_SOC_CNT 0
++#define REGNUM_IMG_SOC_CNT 0x0004
++
++#define MASK_IMG_SOC_EN 0x00010000
++#define SHIFT_IMG_SOC_EN 16
++#define REGNUM_IMG_SOC_EN 0x0004
++
++#define MASK_IMG_SOC_LIST_EN 0x00040000
++#define SHIFT_IMG_SOC_LIST_EN 18
++#define REGNUM_IMG_SOC_LIST_EN 0x0004
++
++#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
++#define MASK_IMG_SOC_PER_HOLD 0x0000007F
++#define SHIFT_IMG_SOC_PER_HOLD 0
++#define REGNUM_IMG_SOC_PER_HOLD 0x0018
++
++#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
++#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
++#define SHIFT_IMG_SOC_START_ADDRESS 0
++#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
++
++#define MASK_IMG_SOC_BSWAP 0x40000000
++#define SHIFT_IMG_SOC_BSWAP 30
++#define REGNUM_IMG_SOC_BSWAP 0x0004
++
++#define MASK_IMG_SOC_PW 0x18000000
++#define SHIFT_IMG_SOC_PW 27
++#define REGNUM_IMG_SOC_PW 0x0004
++
++#define MASK_IMG_SOC_DIR 0x04000000
++#define SHIFT_IMG_SOC_DIR 26
++#define REGNUM_IMG_SOC_DIR 0x0004
++
++#define MASK_IMG_SOC_PI 0x03000000
++#define SHIFT_IMG_SOC_PI 24
++#define REGNUM_IMG_SOC_PI 0x0004
++#define IMG_SOC_PI_1 0x00000002
++#define IMG_SOC_PI_2 0x00000001
++#define IMG_SOC_PI_4 0x00000000
++
++#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
++#define SHIFT_IMG_SOC_TRANSFER_IEN 29
++#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
++
++#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
++ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
++ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
++ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
++ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
++ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
++
++#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
++#define MASK_IMG_SOC_EXT_SA 0x0000000F
++#define SHIFT_IMG_SOC_EXT_SA 0
++#define REGNUM_IMG_SOC_EXT_SA 0x0008
++
++#define MASK_IMG_SOC_ACC_DEL 0xE0000000
++#define SHIFT_IMG_SOC_ACC_DEL 29
++#define REGNUM_IMG_SOC_ACC_DEL 0x0008
++
++#define MASK_IMG_SOC_INCR 0x08000000
++#define SHIFT_IMG_SOC_INCR 27
++#define REGNUM_IMG_SOC_INCR 0x0008
++
++#define MASK_IMG_SOC_BURST 0x07000000
++#define SHIFT_IMG_SOC_BURST 24
++#define REGNUM_IMG_SOC_BURST 0x0008
++
++#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
++((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
++(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
++(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
++
++#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
++#define MASK_IMG_SOC_ADDR 0x007FFFFF
++#define SHIFT_IMG_SOC_ADDR 0
++#define REGNUM_IMG_SOC_ADDR 0x0014
++
++#define SHIFT_TOPAZ_VEC_BUSY 11
++#define MASK_TOPAZ_VEC_BUSY (0x1<<SHIFT_TOPAZ_VEC_BUSY)
++
++#define TOPAZ_MTX_TXRPT_OFFSET 0xc
++#define TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET 0x20D0
++
++#define TOPAZ_GUNIT_READ32(offset) ioread32(dev_priv->vdc_reg + offset)
++#define TOPAZ_READ_BITS(val, basename) \
++ (((val)&MASK_TOPAZ_##basename)>>SHIFT_TOPAZ_##basename)
++
++#define TOPAZ_WAIT_UNTIL_IDLE \
++ do { \
++ uint8_t tmp_poll_number = 0;\
++ uint32_t tmp_reg; \
++ if (topaz_priv->topaz_cmd_windex == WB_CCB_CTRL_RINDEX(dev_priv)) { \
++ tmp_reg = TOPAZ_GUNIT_READ32(TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET);\
++ if (0 != TOPAZ_READ_BITS(tmp_reg, VEC_BUSY)) { \
++ MTX_READ32(TOPAZ_MTX_TXRPT_OFFSET, &tmp_reg);\
++ while ((tmp_reg != 0x8ade0000) && \
++ (tmp_poll_number++ < 10)) \
++ MTX_READ32(0xc, &tmp_reg); \
++ PSB_DEBUG_GENERAL( \
++ "TOPAZ: TXRPT reg remain: %x,poll %d times.\n",\
++ tmp_reg, tmp_poll_number);\
++ } \
++ } \
++ } while (0)
++
++/* **************** DMAC define **************** */
++enum DMAC_eBSwap {
++ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
++ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
++};
++
++enum DMAC_ePW {
++ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
++ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
++ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
++};
++
++enum DMAC_eAccDel {
++ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
++ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
++ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
++ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
++ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
++ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
++ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
++ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
++};
++
++enum DMAC_eBurst {
++ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
++ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
++ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
++ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
++ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
++ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
++ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
++ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
++};
++
++/* commands for topaz,shared with user space driver */
++enum drm_lnc_topaz_cmd {
++ MTX_CMDID_NULL = 0,
++ MTX_CMDID_DO_HEADER = 1,
++ MTX_CMDID_ENCODE_SLICE = 2,
++ MTX_CMDID_WRITEREG = 3,
++ MTX_CMDID_START_PIC = 4,
++ MTX_CMDID_END_PIC = 5,
++ MTX_CMDID_SYNC = 6,
++ MTX_CMDID_ENCODE_ONE_ROW = 7,
++ MTX_CMDID_FLUSH = 8,
++ MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c,
++ MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e,
++ MTX_CMDID_SW_NEW_CODEC = 0x7f
++};
++
++/* codecs topaz supports,shared with user space driver */
++enum drm_lnc_topaz_codec {
++ IMG_CODEC_JPEG = 0,
++ IMG_CODEC_H264_NO_RC,
++ IMG_CODEC_H264_VBR,
++ IMG_CODEC_H264_CBR,
++ IMG_CODEC_H263_NO_RC,
++ IMG_CODEC_H263_VBR,
++ IMG_CODEC_H263_CBR,
++ IMG_CODEC_MPEG4_NO_RC,
++ IMG_CODEC_MPEG4_VBR,
++ IMG_CODEC_MPEG4_CBR,
++ IMG_CODEC_NUM
++};
++
++/* XXX: it's a copy of msvdx cmd queue. should have some change? */
++struct lnc_topaz_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++
++struct topaz_cmd_header {
++ union {
++ struct {
++ unsigned long enable_interrupt:1;
++ unsigned long id:7;
++ unsigned long size:8;
++ unsigned long seq:16;
++ };
++ uint32_t val;
++ };
++};
++
++/* define structure */
++/* firmware file's info head */
++struct topaz_fwinfo {
++ unsigned int ver:16;
++ unsigned int codec:16;
++
++ unsigned int text_size;
++ unsigned int data_size;
++ unsigned int data_location;
++};
++
++/* firmware data array define */
++struct topaz_codec_fw {
++ uint32_t ver;
++ uint32_t codec;
++
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++
++ struct ttm_buffer_object *text;
++ struct ttm_buffer_object *data;
++};
++
++struct topaz_private {
++ /* current video task */
++ unsigned int pmstate;
++ struct sysfs_dirent *sysfs_pmstate;
++ int frame_skip;
++
++ void *topaz_mtx_reg_state;
++ struct ttm_buffer_object *topaz_mtx_data_mem;
++ uint32_t topaz_cur_codec;
++ uint32_t cur_mtx_data_size;
++ int topaz_needs_reset;
++
++ /*
++ *topaz command queue
++ */
++ spinlock_t topaz_lock;
++ struct mutex topaz_mutex;
++ struct list_head topaz_queue;
++ int topaz_busy; /* 0 means topaz is free */
++ int topaz_fw_loaded;
++
++ /* topaz ccb data */
++ /* XXX: should the addr stored by 32 bits? more compatible way?? */
++ uint32_t topaz_ccb_buffer_addr;
++ uint32_t topaz_ccb_ctrl_addr;
++ uint32_t topaz_ccb_size;
++ uint32_t topaz_cmd_windex;
++ uint16_t topaz_cmd_seq;
++
++ uint32_t stored_initial_qp;
++ uint32_t topaz_dash_access_ctrl;
++
++ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
++ struct ttm_bo_kmap_obj topaz_bo_kmap;
++ void *topaz_ccb_wb;
++ uint32_t topaz_wb_offset;
++ uint32_t *topaz_sync_addr;
++ uint32_t topaz_sync_offset;
++ uint32_t topaz_sync_cmd_seq;
++ uint32_t topaz_mtx_saved;
++
++ /* firmware */
++ struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM];
++
++ uint32_t topaz_hw_busy;
++};
++
++/* external function declare */
++/* lnc_topazinit.c */
++int lnc_topaz_init(struct drm_device *dev);
++int lnc_topaz_uninit(struct drm_device *dev);
++int lnc_topaz_reset(struct drm_psb_private *dev_priv);
++int topaz_init_fw(struct drm_device *dev);
++int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec);
++int topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value,
++ uint32_t enable);
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val);
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr);
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t addr);
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val);
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
++int lnc_topaz_save_mtx_state(struct drm_device *dev);
++int lnc_topaz_restore_mtx_state(struct drm_device *dev);
++
++/* lnc_topaz.c */
++IMG_BOOL lnc_topaz_interrupt(IMG_VOID *pvData);
++
++int lnc_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_cout);
++void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev);
++
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++int lnc_wait_topaz_idle(struct drm_device *dev);
++int lnc_check_topaz_idle(struct drm_device *dev);
++void lnc_unmap_topaz_reg(struct drm_device *dev);
++void lnc_map_topaz_reg(struct drm_device *dev);
++
++/* macros to get/set CCB control data */
++#define WB_CCB_CTRL_RINDEX(dev_priv) \
++(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb))
++
++#define WB_CCB_CTRL_SEQ(dev_priv) \
++(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb\
++ + 1))
++
++#define POLL_WB_RINDEX(dev_priv, value) \
++do { \
++ int i; \
++ for (i = 0; i < 10000; i++) { \
++ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \
++ break; \
++ else \
++ DRM_UDELAY(100); \
++ } \
++ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \
++ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \
++ ret = -EBUSY; \
++ } \
++} while (0)
++
++#define POLL_WB_SEQ(dev_priv, value) \
++do { \
++ int i; \
++ for (i = 0; i < 10000; i++) { \
++ if (CCB_CTRL_SEQ(dev_priv) == value) \
++ break; \
++ else \
++ DRM_UDELAY(1000); \
++ } \
++ if (CCB_CTRL_SEQ(dev_priv) != value) { \
++ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%08x(mtx) vs 0x%08x\n",\
++ WB_CCB_CTRL_SEQ(dev_priv), value); \
++ ret = -EBUSY; \
++ } \
++} while (0)
++
++#define CCB_CTRL_RINDEX(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_ROFF)
++
++#define CCB_CTRL_RINDEX(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_ROFF)
++
++#define CCB_CTRL_QP(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_QP)
++
++#define CCB_CTRL_SEQ(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_COMPLETE)
++
++#define CCB_CTRL_FRAMESKIP(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_FRAMESKIP)
++
++#define CCB_CTRL_SET_QP(dev_priv, qp) \
++ topaz_write_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_QP, qp)
++
++#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \
++ topaz_write_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_INITQP, qp)
++
++
++#define TOPAZ_BEGIN_CCB(dev_priv) \
++ topaz_write_mtx_mem_multiple_setup(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_buffer_addr + \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex * 4)
++
++#define TOPAZ_OUT_CCB(dev_priv, cmd) \
++do { \
++ topaz_write_mtx_mem_multiple(dev_priv, cmd); \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex++; \
++} while (0)
++
++#define TOPAZ_END_CCB(dev_priv, kick_count) \
++ topaz_mtx_kick(dev_priv, 1);
++
++static inline char *cmd_to_string(int cmd_id)
++{
++ switch (cmd_id) {
++ case MTX_CMDID_START_PIC:
++ return "MTX_CMDID_START_PIC";
++ case MTX_CMDID_END_PIC:
++ return "MTX_CMDID_END_PIC";
++ case MTX_CMDID_DO_HEADER:
++ return "MTX_CMDID_DO_HEADER";
++ case MTX_CMDID_ENCODE_SLICE:
++ return "MTX_CMDID_ENCODE_SLICE";
++ case MTX_CMDID_SYNC:
++ return "MTX_CMDID_SYNC";
++
++ default:
++ return "Undefined command";
++
++ }
++}
++
++static inline char *codec_to_string(int codec)
++{
++ switch (codec) {
++ case IMG_CODEC_H264_NO_RC:
++ return "H264_NO_RC";
++ case IMG_CODEC_H264_VBR:
++ return "H264_VBR";
++ case IMG_CODEC_H264_CBR:
++ return "H264_CBR";
++ case IMG_CODEC_H263_NO_RC:
++ return "H263_NO_RC";
++ case IMG_CODEC_H263_VBR:
++ return "H263_VBR";
++ case IMG_CODEC_H263_CBR:
++ return "H263_CBR";
++ case IMG_CODEC_MPEG4_NO_RC:
++ return "MPEG4_NO_RC";
++ case IMG_CODEC_MPEG4_VBR:
++ return "MPEG4_VBR";
++ case IMG_CODEC_MPEG4_CBR:
++ return "MPEG4_CBR";
++ default:
++ return "Undefined codec";
++ }
++}
++
++
++static inline void lnc_topaz_enableirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG; */
++
++ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
++ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT));
++
++ /* write in sysirq.c */
++ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
++}
++
++static inline void lnc_topaz_disableirq(struct drm_device *dev)
++{
++
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG); */
++
++ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
++
++ /* write in sysirq.c */
++ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
++}
++
++
++static inline void lnc_topaz_clearirq(struct drm_device *dev,
++ uint32_t clear_topaz)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
++ if (clear_topaz != 0)
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz);
++
++ /* PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */
++}
++
++static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t val, /* iir, */ clear = 0;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val);
++ /* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */
++
++ (void) topaz_priv;
++
++ if ((val == 0) /* && (iir == 0) */) {/* no interrupt */
++ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
++ return 0;
++ }
++
++ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x\n", val);
++
++ if (val & (1<<31))
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ CCB_CTRL_SEQ(dev_priv),
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *(uint32_t *)topaz_priv->topaz_sync_addr);
++ else
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ CCB_CTRL_SEQ(dev_priv),
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *(uint32_t *)topaz_priv->topaz_sync_addr);
++
++ if (val & 0x8) {
++ uint32_t mmu_status, mmu_req;
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status);
++ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req);
++
++ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
++ "address=0x%08x,mem req=0x%08x\n",
++ mmu_status, mmu_req);
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
++ }
++
++ if (val & 0x4) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
++ }
++
++ if (val & 0x2) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
++ }
++
++ if (val & 0x1) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
++ }
++
++ return clear;
++}
++
++
++#define TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state) \
++do { \
++ topaz_priv->pmstate = new_state; \
++ sysfs_notify_dirent(topaz_priv->sysfs_pmstate); \
++ PSB_DEBUG_PM("TOPAZ: %s\n", \
++ (new_state == PSB_PMSTATE_POWERUP) ? "powerup": "powerdown"); \
++} while (0)
++
++#endif /* _LNC_TOPAZ_H_ */
+diff --git a/drivers/gpu/drm/mrst/drv/lnc_topazinit.c b/drivers/gpu/drm/mrst/drv/lnc_topazinit.c
+new file mode 100644
+index 0000000..f968d5b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/lnc_topazinit.c
+@@ -0,0 +1,2051 @@
++/**
++ * file lnc_topazinit.c
++ * TOPAZ initialization and mtx-firmware upload
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* NOTE: (READ BEFORE REFINE CODE)
++ * 1. The firmware's size is measured in bytes; we have to pass the size
++ * measured in words to the DMAC.
++ *
++ *
++ *
++ */
++
++/* include headers */
++
++/* #define DRM_DEBUG_CODE 2 */
++
++#include <linux/firmware.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "psb_drv.h"
++#include "lnc_topaz.h"
++#include "ospm_power.h"
++#include "sysirq.h"
++
++/* WARNING: this define is very important */
++#define RAM_SIZE (1024 * 24)
++
++/* register default values
++ * THIS HEADER IS ONLY INCLUDED ONCE */
++static unsigned long topaz_default_regs[183][3] = {
++ {MVEA_START, 0x00000000, 0x00000000},
++ {MVEA_START, 0x00000004, 0x00000400},
++ {MVEA_START, 0x00000008, 0x00000000},
++ {MVEA_START, 0x0000000C, 0x00000000},
++ {MVEA_START, 0x00000010, 0x00000000},
++ {MVEA_START, 0x00000014, 0x00000000},
++ {MVEA_START, 0x00000018, 0x00000000},
++ {MVEA_START, 0x0000001C, 0x00000000},
++ {MVEA_START, 0x00000020, 0x00000120},
++ {MVEA_START, 0x00000024, 0x00000000},
++ {MVEA_START, 0x00000028, 0x00000000},
++ {MVEA_START, 0x00000100, 0x00000000},
++ {MVEA_START, 0x00000104, 0x00000000},
++ {MVEA_START, 0x00000108, 0x00000000},
++ {MVEA_START, 0x0000010C, 0x00000000},
++ {MVEA_START, 0x0000011C, 0x00000001},
++ {MVEA_START, 0x0000012C, 0x00000000},
++ {MVEA_START, 0x00000180, 0x00000000},
++ {MVEA_START, 0x00000184, 0x00000000},
++ {MVEA_START, 0x00000188, 0x00000000},
++ {MVEA_START, 0x0000018C, 0x00000000},
++ {MVEA_START, 0x00000190, 0x00000000},
++ {MVEA_START, 0x00000194, 0x00000000},
++ {MVEA_START, 0x00000198, 0x00000000},
++ {MVEA_START, 0x0000019C, 0x00000000},
++ {MVEA_START, 0x000001A0, 0x00000000},
++ {MVEA_START, 0x000001A4, 0x00000000},
++ {MVEA_START, 0x000001A8, 0x00000000},
++ {MVEA_START, 0x000001AC, 0x00000000},
++ {MVEA_START, 0x000001B0, 0x00000000},
++ {MVEA_START, 0x000001B4, 0x00000000},
++ {MVEA_START, 0x000001B8, 0x00000000},
++ {MVEA_START, 0x000001BC, 0x00000000},
++ {MVEA_START, 0x000001F8, 0x00000000},
++ {MVEA_START, 0x000001FC, 0x00000000},
++ {MVEA_START, 0x00000200, 0x00000000},
++ {MVEA_START, 0x00000204, 0x00000000},
++ {MVEA_START, 0x00000208, 0x00000000},
++ {MVEA_START, 0x0000020C, 0x00000000},
++ {MVEA_START, 0x00000210, 0x00000000},
++ {MVEA_START, 0x00000220, 0x00000001},
++ {MVEA_START, 0x00000224, 0x0000001F},
++ {MVEA_START, 0x00000228, 0x00000100},
++ {MVEA_START, 0x0000022C, 0x00001F00},
++ {MVEA_START, 0x00000230, 0x00000101},
++ {MVEA_START, 0x00000234, 0x00001F1F},
++ {MVEA_START, 0x00000238, 0x00001F01},
++ {MVEA_START, 0x0000023C, 0x0000011F},
++ {MVEA_START, 0x00000240, 0x00000200},
++ {MVEA_START, 0x00000244, 0x00001E00},
++ {MVEA_START, 0x00000248, 0x00000002},
++ {MVEA_START, 0x0000024C, 0x0000001E},
++ {MVEA_START, 0x00000250, 0x00000003},
++ {MVEA_START, 0x00000254, 0x0000001D},
++ {MVEA_START, 0x00000258, 0x00001F02},
++ {MVEA_START, 0x0000025C, 0x00000102},
++ {MVEA_START, 0x00000260, 0x0000011E},
++ {MVEA_START, 0x00000264, 0x00000000},
++ {MVEA_START, 0x00000268, 0x00000000},
++ {MVEA_START, 0x0000026C, 0x00000000},
++ {MVEA_START, 0x00000270, 0x00000000},
++ {MVEA_START, 0x00000274, 0x00000000},
++ {MVEA_START, 0x00000278, 0x00000000},
++ {MVEA_START, 0x00000280, 0x00008000},
++ {MVEA_START, 0x00000284, 0x00000000},
++ {MVEA_START, 0x00000288, 0x00000000},
++ {MVEA_START, 0x0000028C, 0x00000000},
++ {MVEA_START, 0x00000314, 0x00000000},
++ {MVEA_START, 0x00000318, 0x00000000},
++ {MVEA_START, 0x0000031C, 0x00000000},
++ {MVEA_START, 0x00000320, 0x00000000},
++ {MVEA_START, 0x00000324, 0x00000000},
++ {MVEA_START, 0x00000348, 0x00000000},
++ {MVEA_START, 0x00000380, 0x00000000},
++ {MVEA_START, 0x00000384, 0x00000000},
++ {MVEA_START, 0x00000388, 0x00000000},
++ {MVEA_START, 0x0000038C, 0x00000000},
++ {MVEA_START, 0x00000390, 0x00000000},
++ {MVEA_START, 0x00000394, 0x00000000},
++ {MVEA_START, 0x00000398, 0x00000000},
++ {MVEA_START, 0x0000039C, 0x00000000},
++ {MVEA_START, 0x000003A0, 0x00000000},
++ {MVEA_START, 0x000003A4, 0x00000000},
++ {MVEA_START, 0x000003A8, 0x00000000},
++ {MVEA_START, 0x000003B0, 0x00000000},
++ {MVEA_START, 0x000003B4, 0x00000000},
++ {MVEA_START, 0x000003B8, 0x00000000},
++ {MVEA_START, 0x000003BC, 0x00000000},
++ {MVEA_START, 0x000003D4, 0x00000000},
++ {MVEA_START, 0x000003D8, 0x00000000},
++ {MVEA_START, 0x000003DC, 0x00000000},
++ {MVEA_START, 0x000003E0, 0x00000000},
++ {MVEA_START, 0x000003E4, 0x00000000},
++ {MVEA_START, 0x000003EC, 0x00000000},
++ {MVEA_START, 0x000002D0, 0x00000000},
++ {MVEA_START, 0x000002D4, 0x00000000},
++ {MVEA_START, 0x000002D8, 0x00000000},
++ {MVEA_START, 0x000002DC, 0x00000000},
++ {MVEA_START, 0x000002E0, 0x00000000},
++ {MVEA_START, 0x000002E4, 0x00000000},
++ {MVEA_START, 0x000002E8, 0x00000000},
++ {MVEA_START, 0x000002EC, 0x00000000},
++ {MVEA_START, 0x000002F0, 0x00000000},
++ {MVEA_START, 0x000002F4, 0x00000000},
++ {MVEA_START, 0x000002F8, 0x00000000},
++ {MVEA_START, 0x000002FC, 0x00000000},
++ {MVEA_START, 0x00000300, 0x00000000},
++ {MVEA_START, 0x00000304, 0x00000000},
++ {MVEA_START, 0x00000308, 0x00000000},
++ {MVEA_START, 0x0000030C, 0x00000000},
++ {MVEA_START, 0x00000290, 0x00000000},
++ {MVEA_START, 0x00000294, 0x00000000},
++ {MVEA_START, 0x00000298, 0x00000000},
++ {MVEA_START, 0x0000029C, 0x00000000},
++ {MVEA_START, 0x000002A0, 0x00000000},
++ {MVEA_START, 0x000002A4, 0x00000000},
++ {MVEA_START, 0x000002A8, 0x00000000},
++ {MVEA_START, 0x000002AC, 0x00000000},
++ {MVEA_START, 0x000002B0, 0x00000000},
++ {MVEA_START, 0x000002B4, 0x00000000},
++ {MVEA_START, 0x000002B8, 0x00000000},
++ {MVEA_START, 0x000002BC, 0x00000000},
++ {MVEA_START, 0x000002C0, 0x00000000},
++ {MVEA_START, 0x000002C4, 0x00000000},
++ {MVEA_START, 0x000002C8, 0x00000000},
++ {MVEA_START, 0x000002CC, 0x00000000},
++ {MVEA_START, 0x00000080, 0x00000000},
++ {MVEA_START, 0x00000084, 0x80705700},
++ {MVEA_START, 0x00000088, 0x00000000},
++ {MVEA_START, 0x0000008C, 0x00000000},
++ {MVEA_START, 0x00000090, 0x00000000},
++ {MVEA_START, 0x00000094, 0x00000000},
++ {MVEA_START, 0x00000098, 0x00000000},
++ {MVEA_START, 0x0000009C, 0x00000000},
++ {MVEA_START, 0x000000A0, 0x00000000},
++ {MVEA_START, 0x000000A4, 0x00000000},
++ {MVEA_START, 0x000000A8, 0x00000000},
++ {MVEA_START, 0x000000AC, 0x00000000},
++ {MVEA_START, 0x000000B0, 0x00000000},
++ {MVEA_START, 0x000000B4, 0x00000000},
++ {MVEA_START, 0x000000B8, 0x00000000},
++ {MVEA_START, 0x000000BC, 0x00000000},
++ {MVEA_START, 0x000000C0, 0x00000000},
++ {MVEA_START, 0x000000C4, 0x00000000},
++ {MVEA_START, 0x000000C8, 0x00000000},
++ {MVEA_START, 0x000000CC, 0x00000000},
++ {MVEA_START, 0x000000D0, 0x00000000},
++ {MVEA_START, 0x000000D4, 0x00000000},
++ {MVEA_START, 0x000000D8, 0x00000000},
++ {MVEA_START, 0x000000DC, 0x00000000},
++ {MVEA_START, 0x000000E0, 0x00000000},
++ {MVEA_START, 0x000000E4, 0x00000000},
++ {MVEA_START, 0x000000E8, 0x00000000},
++ {MVEA_START, 0x000000EC, 0x00000000},
++ {MVEA_START, 0x000000F0, 0x00000000},
++ {MVEA_START, 0x000000F4, 0x00000000},
++ {MVEA_START, 0x000000F8, 0x00000000},
++ {MVEA_START, 0x000000FC, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000004, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000008, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000020, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000024, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000002C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000034, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000038, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000003C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000040, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000044, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000048, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000068, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
++};
++
++#define FIRMWARE_NAME "topaz_fw.bin"
++
++/* static function define */
++static int topaz_upload_fw(struct drm_device *dev,
++ enum drm_lnc_topaz_codec codec);
++static inline void topaz_set_default_regs(struct drm_psb_private
++ *dev_priv);
++
++#define UPLOAD_FW_BY_DMA 1
++
++#if UPLOAD_FW_BY_DMA
++static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
++ uint32_t channel, uint32_t src_phy_addr,
++ uint32_t offset, uint32_t dst_addr,
++ uint32_t byte_num, uint32_t is_increment,
++ uint32_t is_write);
++#else
++static void topaz_mtx_upload_by_register(struct drm_device *dev,
++ uint32_t mtx_mem, uint32_t addr,
++ uint32_t size,
++ struct ttm_buffer_object *buf);
++#endif
++
++static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t reg, const uint32_t val);
++static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t reg, uint32_t *ret_val);
++static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
++static void release_mtx_control_from_dash(struct drm_psb_private
++ *dev_priv);
++static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
++static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr,
++ uint32_t size);
++static void mtx_dma_write(struct drm_device *dev);
++
++
++#define DEBUG_FUNCTION 0
++
++#if DEBUG_FUNCTION
++static int topaz_test_null(struct drm_device *dev, uint32_t seq);
++static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
++ uint32_t sync_seq);
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
++static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq,
++ uint32_t offset);
++static int topaz_test_sync_tt_test(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq);
++#endif
++
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t read_val;
++ uint32_t reg, bank_size, ram_bank_size, ram_id;
++
++ TOPAZ_READ32(0x3c, &reg);
++ reg = 0x0a0a0606;
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMR));
++
++ /* ?? poll this reg? */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
++
++ return read_val;
++}
++
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ TOPAZ_READ32(0x3c, &reg);
++
++ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
++
++ /* ?? poll this reg? */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ return;
++}
++
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ TOPAZ_READ32(0x3c, &reg);
++
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(1, MTX_MTX_MCMAI) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
++}
++
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val)
++{
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
++}
++
++
++int topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value, uint32_t mask)
++{
++ uint32_t tmp;
++ uint32_t count = 10000;
++
++ /* # poll topaz register for certain times */
++ while (count) {
++ /* #.# read */
++ MM_READ32(addr, 0, &tmp);
++
++ if (value == (tmp & mask))
++ return 0;
++
++ /* #.# delay and loop */
++ DRM_UDELAY(100);
++ --count;
++ }
++
++	/* # waiting timed out; return -EBUSY to indicate failure */
++	/* XXX: testsuite uses a timeout count of 10000 */
++
++ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
++ "actual 0x%08x (0x%08x & 0x%08x)\n",
++ addr, value, tmp & mask, tmp, mask);
++
++ return -EBUSY;
++
++}
++
++static ssize_t psb_topaz_pmstate_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct drm_psb_private *dev_priv;
++ struct topaz_private *topaz_priv;
++ unsigned int pmstate;
++ unsigned long flags;
++ int ret = -EINVAL;
++
++ if (drm_dev == NULL)
++ return 0;
++
++ dev_priv = drm_dev->dev_private;
++ topaz_priv = dev_priv->topaz_private;
++ pmstate = topaz_priv->pmstate;
++
++ pmstate = topaz_priv->pmstate;
++ spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
++ ret = sprintf(buf, "%s\n",
++ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup" : "powerdown");
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags);
++
++ return ret;
++}
++
++static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL);
++
++
++/* this function finish the first part of initialization, the rest
++ * should be done in topaz_setup_fw
++ */
++int lnc_topaz_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ uint32_t core_id, core_rev;
++ int ret = 0, n;
++ bool is_iomem;
++ struct topaz_private *topaz_priv;
++ void *topaz_bo_virt;
++
++ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n");
++ topaz_priv = kmalloc(sizeof(struct topaz_private), GFP_KERNEL);
++ if (topaz_priv == NULL)
++ return -1;
++
++ dev_priv->topaz_private = topaz_priv;
++ memset(topaz_priv, 0, sizeof(struct topaz_private));
++
++ /* get device --> drm_device --> drm_psb_private --> topaz_priv
++ * for psb_topaz_pmstate_show: topaz_pmpolicy
++ * if not pci_set_drvdata, can't get drm_device from device
++ */
++ pci_set_drvdata(dev->pdev, dev);
++ if (device_create_file(&dev->pdev->dev,
++ &dev_attr_topaz_pmstate))
++ DRM_ERROR("TOPAZ: could not create sysfs file\n");
++ topaz_priv->sysfs_pmstate = sysfs_get_dirent(
++ dev->pdev->dev.kobj.sd, "topaz_pmstate");
++
++ topaz_priv = dev_priv->topaz_private;
++
++	/* # initialize command topaz queueing [msvdx_queue] */
++ INIT_LIST_HEAD(&topaz_priv->topaz_queue);
++ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
++ mutex_init(&topaz_priv->topaz_mutex);
++ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
++ spin_lock_init(&topaz_priv->topaz_lock);
++
++ /* # topaz status init. [msvdx_busy] */
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++ topaz_priv->topaz_fw_loaded = 0;
++ /* FIXME: workaround since JPEG firmware is not ready */
++ topaz_priv->topaz_cur_codec = 1;
++ topaz_priv->cur_mtx_data_size = 0;
++ topaz_priv->topaz_hw_busy = 1;
++
++ topaz_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE,
++ GFP_KERNEL);
++ if (topaz_priv->topaz_mtx_reg_state == NULL) {
++ DRM_ERROR("TOPAZ: failed to allocate space "
++ "for mtx register\n");
++ return -1;
++ }
++
++	/* # gain write-back structure; we may only need 32+4=40 DW */
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &(topaz_priv->topaz_bo));
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
++ return ret;
++ }
++
++ ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0,
++ topaz_priv->topaz_bo->num_pages,
++ &topaz_priv->topaz_bo_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
++ ttm_bo_unref(&topaz_priv->topaz_bo);
++ return ret;
++ }
++
++ topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap,
++ &is_iomem);
++ topaz_priv->topaz_ccb_wb = (void *) topaz_bo_virt;
++ topaz_priv->topaz_wb_offset = topaz_priv->topaz_bo->offset;
++ topaz_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt
++ + 2048);
++ topaz_priv->topaz_sync_offset = topaz_priv->topaz_wb_offset
++ + 2048;
++ PSB_DEBUG_GENERAL("TOPAZ: alloc BO for WriteBack and SYNC\n");
++ PSB_DEBUG_GENERAL("TOPAZ: WB offset=0x%08x\n",
++ topaz_priv->topaz_wb_offset);
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC offset=0x%08x\n",
++ topaz_priv->topaz_sync_offset);
++
++ *(topaz_priv->topaz_sync_addr) = ~0; /* reset sync seq */
++
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ /* # set up MMU */
++ topaz_mmu_hwsetup(dev_priv);
++
++ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading to the place"
++ "when receiving user space commands\n");
++
++#if 0 /* can't load FW here */
++ /* #.# load fw to driver */
++ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
++ ret = topaz_init_fw(dev);
++ if (ret != 0)
++ return -1;
++
++ topaz_setup_fw(dev, IMG_CODEC_MPEG4_NO_RC);/* just for test */
++#endif
++ /* <msvdx does> # minimal clock */
++
++ /* <msvdx does> # return 0 */
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
++
++ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
++ core_id, core_rev);
++
++ /* create firmware storage */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].text);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].data);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++ }
++
++ ret = ttm_buffer_object_create(bdev,
++ 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL,
++ &topaz_priv->topaz_mtx_data_mem);
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
++ "mtx data save\n");
++ goto out;
++ }
++ topaz_priv->cur_mtx_data_size = 0;
++
++ PSB_DEBUG_INIT("TOPAZ:old clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
++
++ PSB_DEBUG_INIT("TOPAZ:rest MSDVX to disable clock gating\n");
++
++ PSB_WVDC32(0x00011fff, PSB_TOPAZ_CLOCKGATING);
++
++ PSB_DEBUG_INIT("MSDVX:new clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
++
++ return 0;
++
++out:
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ if (topaz_priv->topaz_fw[n].text != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
++ if (topaz_priv->topaz_fw[n].data != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
++ }
++
++ if (topaz_priv->topaz_mtx_data_mem != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
++
++ return ret;
++}
++
++int lnc_topaz_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ int n;
++
++ /* flush MMU */
++ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
++ /* topaz_mmu_flushcache (dev_priv); */
++
++ /* # reset TOPAZ chip */
++ lnc_topaz_reset(dev_priv);
++
++ /* release resources */
++ /* # release write back memory */
++ topaz_priv->topaz_ccb_wb = NULL;
++
++ /* release mtx register save space */
++ kfree(topaz_priv->topaz_mtx_reg_state);
++
++ /* release mtx data memory save space */
++ if (topaz_priv->topaz_mtx_data_mem)
++ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
++
++ /* # release firmware storage */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ if (topaz_priv->topaz_fw[n].text != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
++ if (topaz_priv->topaz_fw[n].data != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
++ }
++
++ ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
++ ttm_bo_unref(&topaz_priv->topaz_bo);
++
++ if (topaz_priv) {
++ pci_set_drvdata(dev->pdev, NULL);
++ device_remove_file(&dev->pdev->dev, &dev_attr_topaz_pmstate);
++ sysfs_put(topaz_priv->sysfs_pmstate);
++ topaz_priv->sysfs_pmstate = NULL;
++
++ kfree(topaz_priv);
++ dev_priv->topaz_private = NULL;
++ }
++
++ return 0;
++}
++
++int lnc_topaz_reset(struct drm_psb_private *dev_priv)
++{
++ struct topaz_private *topaz_priv;
++
++ topaz_priv = dev_priv->topaz_private;
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++ topaz_priv->cur_mtx_data_size = 0;
++ topaz_priv->topaz_cmd_windex = 0;
++ topaz_priv->topaz_needs_reset = 0;
++
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ /* # set up MMU */
++ topaz_mmu_hwsetup(dev_priv);
++
++ return 0;
++}
++
++/* read firmware bin file and load all data into driver */
++int topaz_init_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct firmware *raw = NULL;
++ unsigned char *ptr;
++ int ret = 0;
++ int n;
++ struct topaz_fwinfo *cur_fw;
++ int cur_size;
++ struct topaz_codec_fw *cur_codec;
++ struct ttm_buffer_object **cur_drm_obj;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ topaz_priv->stored_initial_qp = 0;
++
++ /* # get firmware */
++ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
++
++ if (raw && (raw->size < sizeof(struct topaz_fwinfo))) {
++ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
++ goto out;
++ }
++
++ ptr = (unsigned char *) raw->data;
++
++ if (!ptr) {
++ DRM_ERROR("TOPAZ: failed to load firmware.\n");
++ goto out;
++ }
++
++ /* # load fw from file */
++ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
++ cur_fw = NULL;
++ /* didn't use the first element */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ cur_fw = (struct topaz_fwinfo *) ptr;
++
++ cur_codec = &topaz_priv->topaz_fw[cur_fw->codec];
++ cur_codec->ver = cur_fw->ver;
++ cur_codec->codec = cur_fw->codec;
++ cur_codec->text_size = cur_fw->text_size;
++ cur_codec->data_size = cur_fw->data_size;
++ cur_codec->data_location = cur_fw->data_location;
++
++ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
++ codec_to_string(cur_fw->codec));
++
++ /* #.# handle text section */
++ ptr += sizeof(struct topaz_fwinfo);
++ cur_drm_obj = &cur_codec->text;
++ cur_size = cur_fw->text_size;
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# handle data section */
++ ptr += cur_fw->text_size;
++ cur_drm_obj = &cur_codec->data;
++ cur_size = cur_fw->data_size;
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# validate firmware */
++
++ /* #.# update ptr */
++ ptr += cur_fw->data_size;
++ }
++
++ release_firmware(raw);
++
++ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
++
++ return 0;
++
++out:
++ if (raw) {
++ PSB_DEBUG_GENERAL("release firmware....\n");
++ release_firmware(raw);
++ }
++
++ return -1;
++}
++
++/* setup fw when start a new context */
++int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t mem_size = RAM_SIZE; /* follow DDK */
++ uint32_t verify_pc;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++#if 0
++ if (codec == topaz_priv->topaz_current_codec) {
++ LNC_TRACEL("TOPAZ: reuse previous codec\n");
++ return 0;
++ }
++#endif
++
++	/* XXX: need to reset topaz? */
++ PSB_DEBUG_GENERAL("XXX: should reset topaz when context change?\n");
++
++ /* XXX: interrupt enable shouldn't be enable here,
++	 * this function is called while interrupts are enabled,
++ * but here, we've no choice since we have to call setup_fw by
++ * manual */
++	/* # upload firmware, clear interrupts and start the firmware
++ * -- from hostutils.c in TestSuits*/
++
++ /* # reset MVEA */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++
++ topaz_mmu_hwsetup(dev_priv);
++
++#if !LNC_TOPAZ_NO_IRQ
++ sysirq_uninstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++#endif
++
++ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
++
++ topaz_set_default_regs(dev_priv);
++
++ /* # reset mtx */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
++
++ /* # upload fw by drm */
++ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
++
++ topaz_upload_fw(dev, codec);
++#if 0
++ /* allocate the space for context save & restore if needed */
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ ret = ttm_buffer_object_create(bdev,
++ topaz_priv->cur_mtx_data_size * 4,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL,
++ &topaz_priv->topaz_mtx_data_mem);
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
++ "mtx data save\n");
++ return -1;
++ }
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
++#endif
++
++ /* XXX: In power save mode, need to save the complete data memory
++ * and restore it. MTX_FWIF.c record the data size */
++ PSB_DEBUG_GENERAL("TOPAZ:in power save mode need to save memory?\n");
++
++ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n");
++ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS);
++
++ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n");
++
++ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc);
++
++ /* enable auto clock is essential for this driver */
++ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE));
++ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
++ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE));
++
++ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n",
++ verify_pc, PC_START_ADDRESS);
++
++ /* # turn on MTX */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ /* # poll on the interrupt which the firmware will generate */
++ topaz_wait_for_register(dev_priv,
++ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
++ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
++
++ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
++
++ /* # get ccb buffer addr -- file hostutils.c */
++ topaz_priv->topaz_ccb_buffer_addr =
++ topaz_read_mtx_mem(dev_priv,
++ MTX_DATA_MEM_BASE + mem_size - 4);
++ topaz_priv->topaz_ccb_ctrl_addr =
++ topaz_read_mtx_mem(dev_priv,
++ MTX_DATA_MEM_BASE + mem_size - 8);
++ topaz_priv->topaz_ccb_size =
++ topaz_read_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_ctrl_addr +
++ MTX_CCBCTRL_CCBSIZE);
++
++ topaz_priv->topaz_cmd_windex = 0;
++
++ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n",
++ topaz_priv->topaz_ccb_buffer_addr,
++ topaz_priv->topaz_ccb_ctrl_addr,
++ topaz_priv->topaz_ccb_size);
++
++ /* # write back the initial QP Value */
++ topaz_write_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP,
++ topaz_priv->stored_initial_qp);
++
++ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n",
++ topaz_priv->topaz_wb_offset);
++ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12,
++ topaz_priv->topaz_wb_offset);
++
++ /* this kick is essential for mtx.... */
++ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x01020304;
++ topaz_mtx_kick(dev_priv, 1);
++ DRM_UDELAY(1000);
++ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory,"
++ " and here it is 0x%08x\n",
++ *((uint32_t *) topaz_priv->topaz_ccb_wb));
++
++ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */
++ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
++
++ /* XXX: is there any need to record next cmd num??
++	 * we use the fence sequence number to record it
++ */
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++
++#if !LNC_TOPAZ_NO_IRQ
++ sysirq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++ sysirq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_enableirq(dev);
++#endif
++
++#if 0
++ topaz_mmu_flushcache(dev_priv);
++ topaz_test_null(dev, 0xe1e1);
++ topaz_test_null(dev, 0xe2e2);
++ topaz_test_sync(dev, 0xe2e2, 0x87654321);
++
++ topaz_mmu_test(dev, 0x12345678);
++ topaz_test_null(dev, 0xe3e3);
++ topaz_mmu_test(dev, 0x8764321);
++
++ topaz_test_null(dev, 0xe4e4);
++ topaz_test_null(dev, 0xf3f3);
++#endif
++
++ return 0;
++}
++
++#if UPLOAD_FW_BY_DMA
++int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++ uint32_t cur_mtx_data_size;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* # refer HLD document */
++
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the firmware by DMA */
++ cur_codec_fw = &topaz_priv->topaz_fw[codec];
++
++ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d"
++ " data location(%d)\n", codec_to_string(codec), codec,
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size / 4;
++
++	/* setup the MTX to start receiving data:
++ use a register for the transfer which will point to the source
++ (MTX_CR_MTX_SYSC_CDMAT) */
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(text_size, MTX_LENGTH));
++
++ /* #.# set DMAC access to host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
++
++ /* #.# wait dma finish */
++ topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* # return access to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size / 4;
++ data_location = cur_codec_fw->data_location;
++
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
++ 0x80900000 + (data_location - 0x82880000));
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(data_size, MTX_LENGTH));
++
++ /* #.# set DMAC access to host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
++
++ /* #.# wait dma finish */
++ topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* # return access to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++
++ /* record this codec's mtx data size for
++ * context save & restore */
++ /* FIXME: since non-root sighting fixed by pre allocated,
++ * only need to correct the buffer size
++ */
++ cur_mtx_data_size = data_size;
++ if (topaz_priv->cur_mtx_data_size != cur_mtx_data_size)
++ topaz_priv->cur_mtx_data_size = cur_mtx_data_size;
++
++ return 0;
++}
++
++#else
++
++void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
++ uint32_t addr, uint32_t size,
++ struct ttm_buffer_object *buf)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t *buf_p;
++ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
++ uint32_t cur_ram_id, ram_addr , ram_id;
++ int map_ret, lp;
++ struct ttm_bo_kmap_obj bo_kmap;
++ bool is_iomem;
++ uint32_t cur_addr;
++
++ get_mtx_control_from_dash(dev_priv);
++
++ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
++ if (map_ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
++ return;
++ }
++ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
++
++
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
++ debug_reg = 0x0a0a0606;
++ bank_size = (debug_reg & 0xf0000) >> 16;
++ bank_ram_size = 1 << (bank_size + 2);
++
++ bank_count = (debug_reg & 0xf00) >> 8;
++
++ topaz_wait_for_register(dev_priv,
++ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
++
++ cur_ram_id = -1;
++ cur_addr = addr;
++ for (lp = 0; lp < size / 4; ++lp) {
++ ram_id = mtx_mem + (cur_addr / bank_ram_size);
++
++ if (cur_ram_id != ram_id) {
++ ram_addr = cur_addr >> 2;
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ F_ENCODE(ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMAI));
++
++ cur_ram_id = ram_id;
++ }
++ cur_addr += 4;
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
++ *(buf_p + lp));
++
++ topaz_wait_for_register(dev_priv,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
++ }
++
++ ttm_bo_kunmap(&bo_kmap);
++
++ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
++ return;
++}
++
++int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++
++ /* # refer HLD document */
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the firmware by DMA */
++ cur_codec_fw = &topaz_priv->topaz_fw[codec];
++
++ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
++ " data location(0x%08x)\n", codec_to_string(codec),
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size;
++
++ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
++ PC_START_ADDRESS - MTX_MEMORY_BASE,
++ text_size, cur_codec_fw->text);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size;
++ data_location = cur_codec_fw->data_location;
++
++ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
++ data_location - 0x82880000, data_size,
++ cur_codec_fw->data);
++
++ return 0;
++}
++
++#endif /* UPLOAD_FW_BY_DMA */
++
++void
++topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
++ uint32_t src_phy_addr, uint32_t offset,
++ uint32_t soc_addr, uint32_t byte_num,
++ uint32_t is_increment, uint32_t is_write)
++{
++ uint32_t dmac_count;
++ uint32_t irq_stat;
++ uint32_t count;
++
++ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
++ /* # check that no transfer is currently in progress and no
++ interrupts are outstanding ?? (why care interrupt) */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
++ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
++ DRM_ERROR("TOPAZ: there is tranfer in progress\n");
++
++ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++ /* clear previous interrupts */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ /* check irq status */
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
++ /* assert(0 == irq_stat); */
++ if (0 != irq_stat)
++ DRM_ERROR("TOPAZ: there is hold up\n");
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
++ (src_phy_addr + offset));
++ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
++ is_write, DMAC_PWIDTH_32_BIT, byte_num);
++ /* generate an interrupt at the end of transfer */
++ count |= MASK_IMG_SOC_TRANSFER_IEN;
++ count |= F_ENCODE(is_write, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
++ is_increment, DMAC_BURST_2));
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with
++ * the enable bit set to kick off the transfer
++ */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
++
++ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
++
++ return;
++}
++
++void topaz_set_default_regs(struct drm_psb_private *dev_priv)
++{
++ int n;
++ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++
++ for (n = 0; n < count; n++)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ topaz_default_regs[n][2]);
++
++}
++
++void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
++ const uint32_t val)
++{
++ uint32_t tmp;
++ get_mtx_control_from_dash(dev_priv);
++
++ /* put data into MTX_RW_DATA */
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
++
++ /* request a write */
++ tmp = reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
++
++ /* wait for operation finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ release_mtx_control_from_dash(dev_priv);
++}
++
++void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
++ uint32_t *ret_val)
++{
++ uint32_t tmp;
++
++ get_mtx_control_from_dash(dev_priv);
++
++ /* request a write */
++ tmp = (reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
++
++ /* wait for operation finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ /* read */
++ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
++ ret_val);
++
++ release_mtx_control_from_dash(dev_priv);
++}
++
++void get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ int debug_reg_slave_val;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* GetMTXControlFromDash */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
++ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT));
++ do {
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ &debug_reg_slave_val);
++ } while ((debug_reg_slave_val & 0x18) != 0);
++
++ /* save access control */
++ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ &topaz_priv->topaz_dash_access_ctrl);
++}
++
++void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* restore access control */
++ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ topaz_priv->topaz_dash_access_ctrl);
++
++ /* release bus */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE));
++}
++
++void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
++{
++ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
++
++ /* bypass all request while MMU is being configured */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
++ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS));
++
++ /* set MMU hardware at the page table directory */
++ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
++ "into MMU_DIR_LIST0/1\n", pd_addr);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0);
++
++ /* setup index register, all pointing to directory bank 0 */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0);
++
++ /* now enable MMU access for all requestors */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0);
++}
++
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
++{
++ uint32_t mmu_control;
++
++ if (dev_priv->topaz_disabled)
++ return;
++
++#if 0
++ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
++ " so flush using the master core\n");
++#endif
++ /* XXX: disable interrupt */
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);
++
++#if 0
++ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
++ "still operating afterwards even if not cleared,\n"
++ "but may want to replace with MMU_FLUSH?\n");
++#endif
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
++
++ /* clear it */
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
++}
++
++#if DEBUG_FUNCTION
++
++static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
++ uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t sync_cmd[3];
++ struct topaz_cmd_header *cmd_hdr;
++ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
++ int count = 1000;
++ uint32_t clr_flag;
++
++ cmd_hdr = (struct topaz_cmd_header *)&sync_cmd[0];
++
++ /* reset sync area */
++ *sync_p = 0;
++
++ /* insert a SYNC command here */
++ cmd_hdr->id = MTX_CMDID_SYNC;
++ cmd_hdr->size = 3;
++ cmd_hdr->seq = seq;
++
++ sync_cmd[1] = topaz_priv->topaz_sync_offset;
++ sync_cmd[2] = sync_seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ PSB_DEBUG_GENERAL("Topaz: Sent SYNC with cmd seq=0x%08x,"
++ "sync_seq=0x%08x\n", seq, sync_seq);
++
++ while (count && *sync_p != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout, expect sync seq 0x%08x,"
++ "actual 0x%08x\n", sync_seq, *sync_p);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC succeed, sync seq=0x%08x\n", *sync_p);
++ PSB_DEBUG_GENERAL("Topaz: after SYNC test, query IRQ and clear it\n");
++
++ clr_flag = lnc_topaz_queryirq(dev);
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ return 0;
++}
++static int topaz_test_sync_tt_test(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret;
++ bool is_iomem;
++ struct ttm_buffer_object *test_obj;
++ struct ttm_bo_kmap_obj test_kmap;
++ unsigned int *test_adr;
++ uint32_t sync_cmd[3];
++ int count = 1000;
++ unsigned long pfn;
++
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &test_obj);
++ if (ret) {
++ DRM_ERROR("failed create test object buffer\n");
++ return -1;
++ }
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ test_obj->offset, &pfn);
++ if (ret) {
++ DRM_ERROR("failed to get pfn from virtual\n");
++ return -1;
++ }
++
++ PSB_DEBUG_GENERAL("Topaz:offset %lx, pfn %lx\n", test_obj->offset, pfn);
++
++ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
++ &test_kmap);
++ if (ret) {
++ DRM_ERROR("failed map buffer\n");
++ return -1;
++ }
++ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
++ *test_adr = 0xff55;
++ ttm_bo_kunmap(&test_kmap);
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (seq << 16);
++ sync_cmd[1] = test_obj->offset;
++ sync_cmd[2] = sync_seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
++ &test_kmap);
++ if (ret) {
++ DRM_ERROR("failed map buffer\n");
++ return -1;
++ }
++ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
++
++ while (count && *test_adr != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*test_adr != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ sync_seq, *test_adr);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *test_adr);
++ ttm_bo_kunmap(&test_kmap);
++ ttm_bo_unref(&test_obj);
++
++ return 0;
++}
++
++static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq,
++ uint32_t offset)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret;
++ uint32_t sync_cmd[3];
++ int count = 1000;
++ unsigned long pfn;
++
++ struct page *p;
++ uint32_t *v;
++/* uint32_t offset = 0xd0000000; */
++
++ p = alloc_page(GFP_DMA32);
++ if (!p) {
++ DRM_ERROR("Topaz:Failed allocating page\n");
++ return -1;
++ }
++
++ v = kmap(p);
++ memset(v, 0x67, PAGE_SIZE);
++ pfn = (offset >> PAGE_SHIFT);
++ kunmap(p);
++
++ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
++ &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
++ if (ret) {
++ DRM_ERROR("Topaz:Failed inserting mmu page\n");
++ return -1;
++ }
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (0x5b << 16);
++ sync_cmd[1] = pfn << PAGE_SHIFT;
++ sync_cmd[2] = seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ v = kmap(p);
++ while (count && *v != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*v != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ sync_seq, *v);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *v);
++ kunmap(p);
++
++ return 0;
++}
++
++static int topaz_test_null(struct drm_device *dev, uint32_t seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_cmd_header null_cmd;
++ uint32_t clr_flag;
++
++ /* XXX: here we finished firmware setup....
++ * using a NULL command to verify the
++ * correctness of firmware
++ */
++
++ null_cmd.id = MTX_CMDID_NULL;
++ null_cmd.size = 1;
++ null_cmd.seq = seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, *((uint32_t *)&null_cmd));
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ DRM_UDELAY(1000); /* wait to finish */
++
++ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
++ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
++ seq, CCB_CTRL_SEQ(dev_priv), WB_CCB_CTRL_SEQ(dev_priv),
++ WB_CCB_CTRL_RINDEX(dev_priv));
++
++ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
++
++ clr_flag = lnc_topaz_queryirq(dev);
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ return 0;
++}
++
++
++/*
++ * this function will test whether the mmu is correct:
++ * it get a drm_buffer_object and use CMD_SYNC to write
++ * certain value into this buffer.
++ */
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ unsigned long real_pfn;
++ int ret;
++
++ /* topaz_mmu_flush(dev); */
++ topaz_test_sync(dev, 0x55, sync_value);
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ topaz_priv->topaz_sync_offset, &real_pfn);
++ if (ret != 0) {
++ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n");
++ return;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
++ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
++ topaz_priv->topaz_sync_offset, real_pfn, sync_value);
++}
++
++void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_READ32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ data);
++
++}
++
++void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ *data);
++
++}
++
++#endif
++
++int lnc_topaz_restore_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t reg_val;
++ uint32_t *mtx_reg_state;
++ int i;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (!topaz_priv->topaz_mtx_saved)
++ return -1;
++
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to restore context without "
++ "space allocated, return directly without restore\n");
++ return -1;
++ }
++
++ /* turn on mtx clocks */
++ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
++
++ /* reset mtx */
++ /* FIXME: should use core_write??? */
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++ DRM_UDELAY(6000);
++
++ topaz_mmu_hwsetup(dev_priv);
++ /* upload code, restore mtx data */
++ mtx_dma_write(dev);
++
++ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
++ /* restore register */
++	/* FIXME: consider putting read/write into one function */
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x1 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x2 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, 0x3 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, 0x4 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_write_core_reg(dev_priv, 0x5 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x7 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* turn on MTX */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ topaz_priv->topaz_mtx_saved = 0;
++
++ return 0;
++}
++
++int lnc_topaz_save_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t *mtx_reg_state;
++ int i;
++ struct topaz_codec_fw *cur_codec_fw;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* FIXME: make sure the topaz_mtx_data_mem is allocated */
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to save context without space "
++ "allocated, return directly without save\n");
++ return -1;
++ }
++ if (topaz_priv->topaz_fw_loaded == 0) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to save context without firmware "
++ "uploaded\n");
++ return -1;
++ }
++
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET,
++ TXRPT_WAITONKICK_VALUE,
++ 0xffffffff);
++
++ /* stop mtx */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK);
++
++ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
++
++	/* FIXME: consider putting read/write into one function */
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x1 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x2 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, 0x3 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, 0x4 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_read_core_reg(dev_priv, 0x5 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x7 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* save mtx data memory */
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
++
++ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000,
++ topaz_priv->cur_mtx_data_size);
++
++ /* turn off mtx clocks */
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE);
++
++ topaz_priv->topaz_mtx_saved = 1;
++
++ return 0;
++}
++
++void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct ttm_buffer_object *target;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* setup mtx DMAC registers to do transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(1, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(size, MTX_LENGTH));
++
++ /* give the DMAC access to the host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ target = topaz_priv->topaz_mtx_data_mem;
++ /* transfert the data */
++ /* FIXME: size is meaured by bytes? */
++ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT,
++ size, 0, 1);
++
++ /* wait for it transfer */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++ /* give access back to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++}
++
++void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
++ uint32_t soc_addr, uint32_t bytes_num,
++ int increment, int rnw)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t count_reg;
++ uint32_t irq_state;
++
++ /* check no transfer is in progress */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
++ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
++ DRM_ERROR("TOPAZ: there's transfer in progress when wanna "
++ "save mtx data\n");
++ /* FIXME: how to handle this error */
++ return;
++ }
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++ /* cleare irq state */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
++ if (0 != irq_state) {
++ DRM_ERROR("TOPAZ: there's irq cann't clear\n");
++ return;
++ }
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
++ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
++ DMAC_PWIDTH_32_BIT, rnw,
++ DMAC_PWIDTH_32_BIT, bytes_num);
++ /* generate an interrupt at end of transfer */
++ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
++ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
++ DMAC_BURST_2));
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with the enable
++ * bit set to kick off the transfer */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
++ count_reg | MASK_IMG_SOC_EN);
++}
++
++void mtx_dma_write(struct drm_device *dev)
++{
++ struct topaz_codec_fw *cur_codec_fw;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
++
++ /* upload code */
++ /* setup mtx DMAC registers to recieve transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
++
++ /* give DMAC access to host memory */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* transfer code */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
++ 0, 0);
++ /* wait finished */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* setup mtx start recieving data */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
++ (cur_codec_fw->data_location) - 0x82880000);
++
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(topaz_priv->cur_mtx_data_size, MTX_LENGTH));
++
++ /* give DMAC access to host memory */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* transfer data */
++ topaz_dma_transfer(dev_priv, 0, topaz_priv->topaz_mtx_data_mem->offset,
++ 0, MTX_CR_MTX_SYSC_CDMAT,
++ topaz_priv->cur_mtx_data_size,
++ 0, 0);
++ /* wait finished */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* give access back to Topaz Core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++}
++
+diff --git a/drivers/gpu/drm/mrst/drv/msvdx_power.c b/drivers/gpu/drm/mrst/drv/msvdx_power.c
+new file mode 100644
+index 0000000..803e04d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/msvdx_power.c
+@@ -0,0 +1,164 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: binglin.chen@intel.com>
++ *
++ */
++
++#include "msvdx_power.h"
++#include "psb_msvdx.h"
++#include "psb_drv.h"
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++static PVRSRV_ERROR DevInitMSVDXPart1(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ /* register power operation function */
++ /* FIXME: this should be in part2 init function, but
++ * currently here only OSPM needs IMG device... */
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex,
++ MSVDXPrePowerState,
++ MSVDXPostPowerState,
++ MSVDXPreClockSpeedChange,
++ MSVDXPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_ON,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitMSVDXPart1: failed to "
++ "register device with power manager"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR DevDeInitMSVDX(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++
++ /* should deinit all resource */
++
++ eError = PVRSRVRemovePowerDevice(psDeviceNode->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ /* version check */
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_MSVDX;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_VIDEO;
++
++ psDeviceNode->pfnInitDevice = DevInitMSVDXPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitMSVDX;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = MSVDXDevInitCompatCheck;
++
++ psDeviceNode->pfnDeviceISR = psb_msvdx_interrupt;
++ psDeviceNode->pvISRData = (IMG_VOID *)gpDrmDevice;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* ask for a change not power on*/
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ MSVDX_NEW_PMSTATE(gpDrmDevice, msvdx_priv, PSB_PMSTATE_POWERDOWN);
++
++ /* context save */
++ psb_msvdx_save_context(gpDrmDevice);
++
++ /* internally close the device */
++
++ /* ask for power off */
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will deinitialize the driver if needed */
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* if ask for change & current status is not on */
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ /* internally open device */
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ MSVDX_NEW_PMSTATE(gpDrmDevice, msvdx_priv, PSB_PMSTATE_POWERUP);
++
++ /* context restore */
++ psb_msvdx_restore_context(gpDrmDevice);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will initialize the driver if needed */
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/msvdx_power.h b/drivers/gpu/drm/mrst/drv/msvdx_power.h
+new file mode 100644
+index 0000000..19a3d44
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/msvdx_power.h
+@@ -0,0 +1,48 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: binglin.chen@intel.com>
++ *
++ */
++
++#ifndef MSVDX_POWER_H_
++#define MSVDX_POWER_H_
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++extern struct drm_device *gpDrmDevice;
++
++/* function define */
++PVRSRV_ERROR MSVDXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++PVRSRV_ERROR MSVDXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/* power function define */
++PVRSRV_ERROR MSVDXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXInitOSPM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#endif /* !MSVDX_POWER_H_ */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_bl.c b/drivers/gpu/drm/mrst/drv/psb_bl.c
+new file mode 100644
+index 0000000..0ef6c41
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_bl.c
+@@ -0,0 +1,260 @@
++/*
++ * psb backlight using HAL
++ *
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Eric Knopp
++ *
++ */
++
++#include <linux/backlight.h>
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_bios.h"
++#include "ospm_power.h"
++
++#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
++#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BRIGHTNESS_MIN_LEVEL 1
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK 0xFF
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++#define BLC_ADJUSTMENT_MAX 100
++
++#define PSB_BLC_PWM_PRECISION_FACTOR 10
++#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
++#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
++
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
++
++static int psb_brightness;
++static struct backlight_device *psb_backlight_device;
++static u8 blc_brightnesscmd;
++u8 blc_pol;
++u8 blc_type;
++
++
++int psb_set_brightness(struct backlight_device *bd)
++{
++ u32 blc_pwm_ctl;
++ u32 max_pwm_blc;
++
++ struct drm_device *dev =
++ (struct drm_device *)psb_backlight_device->priv;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ int level = bd->props.brightness;
++
++ DRM_DEBUG("backlight level set to %d\n", level);
++
++ /* Perform value bounds checking */
++ if (level < BRIGHTNESS_MIN_LEVEL)
++ level = BRIGHTNESS_MIN_LEVEL;
++
++ if (IS_POULSBO(dev)) {
++ psb_intel_lvds_set_brightness(dev, level);
++ psb_brightness = level;
++ return 0;
++ }
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ /* Calculate and set the brightness value */
++ max_pwm_blc = REG_READ(BLC_PWM_CTL) >>
++ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
++ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ /* Adjust the backlight level with the percent in
++ * dev_priv->blc_adj1;
++ */
++ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
++ blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX;
++
++ /* Adjust the backlight level with the percent in
++ * dev_priv->blc_adj2;
++ */
++ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
++ blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX;
++
++
++ if (blc_pol == BLC_POLARITY_INVERSE)
++ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
++
++ /* force PWM bit on */
++ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
++ REG_WRITE(BLC_PWM_CTL,
++ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
++ blc_pwm_ctl);
++
++ /* printk("***backlight brightness = %i\n", level); */
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* cache the brightness for later use */
++ psb_brightness = level;
++ return 0;
++}
++
++int psb_get_brightness(struct backlight_device *bd)
++{
++ /* return locally cached var instead of HW read (due to DPST etc.) */
++ return psb_brightness;
++}
++
++struct backlight_ops psb_ops = {
++ .get_brightness = psb_get_brightness,
++ .update_status = psb_set_brightness,
++};
++
++int psb_backlight_init(struct drm_device *dev)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ unsigned long CoreClock;
++ /* u32 bl_max_freq; */
++ /* unsigned long value; */
++ u16 bl_max_freq;
++ uint32_t value;
++ uint32_t clock;
++ uint32_t blc_pwm_precision_factor;
++
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ psb_backlight_device = backlight_device_register("psb-bl",
++ NULL, NULL, &psb_ops);
++ if (IS_ERR(psb_backlight_device))
++ return PTR_ERR(psb_backlight_device);
++
++ psb_backlight_device->priv = dev;
++
++ if (IS_MRST(dev)) {
++ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
++ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
++
++ /* this needs to come from VBT when available */
++ bl_max_freq = 256;
++ /* this needs to be set elsewhere */
++ blc_pol = BLC_POLARITY_NORMAL;
++ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
++
++ if (dev_priv->sku_83)
++ CoreClock = 166;
++ else if (dev_priv->sku_100)
++ CoreClock = 200;
++ else if (dev_priv->sku_100L)
++ CoreClock = 100;
++ else
++ return 1;
++ } else {
++ /* get bl_max_freq and pol from dev_priv*/
++ if (!dev_priv->lvds_bl) {
++ DRM_ERROR("Has no valid LVDS backlight info\n");
++ return 1;
++ }
++ bl_max_freq = dev_priv->lvds_bl->freq;
++ blc_pol = dev_priv->lvds_bl->pol;
++ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
++ blc_brightnesscmd = dev_priv->lvds_bl->brightnesscmd;
++ blc_type = dev_priv->lvds_bl->type;
++
++ /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
++ /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
++
++ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
++ pci_read_config_dword(pci_root, 0xD4, &clock);
++
++ switch (clock & 0x07) {
++ case 0:
++ CoreClock = 100;
++ break;
++ case 1:
++ CoreClock = 133;
++ break;
++ case 2:
++ CoreClock = 150;
++ break;
++ case 3:
++ CoreClock = 178;
++ break;
++ case 4:
++ CoreClock = 200;
++ break;
++ case 5:
++ case 6:
++ case 7:
++ CoreClock = 266;
++ default:
++ return 1;
++ }
++ } /*end if(IS_MRST(dev))*/
++
++ value = (CoreClock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
++ value *= blc_pwm_precision_factor;
++ value /= bl_max_freq;
++ value /= blc_pwm_precision_factor;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ if (IS_MRST(dev)) {
++ if (value >
++ (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
++ return 2;
++ else {
++ REG_WRITE(BLC_PWM_CTL2,
++ (0x80000000 | REG_READ(BLC_PWM_CTL2)));
++ REG_WRITE(BLC_PWM_CTL, value |
++ (value <<
++ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT));
++ }
++ } else {
++ if (
++ value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
++ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
++ return 2;
++ else {
++ value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++ REG_WRITE(BLC_PWM_CTL,
++ (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++ (value));
++ }
++ } /*end if(IS_MRST(dev))*/
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ psb_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
++ psb_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
++ backlight_update_status(psb_backlight_device);
++#endif
++ return 0;
++}
++
++void psb_backlight_exit(void)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ psb_backlight_device->props.brightness = 0;
++ backlight_update_status(psb_backlight_device);
++ backlight_device_unregister(psb_backlight_device);
++#endif
++ return;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_buffer.c b/drivers/gpu/drm/mrst/drv/psb_buffer.c
+new file mode 100644
+index 0000000..d54a429
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_buffer.c
+@@ -0,0 +1,379 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++#include "ttm/ttm_placement_common.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_fence_api.h"
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_schedule.h"
++
++#define DRM_MEM_TTM 26
++
++struct drm_psb_ttm_backend {
++ struct ttm_backend base;
++ struct page **pages;
++ unsigned int desired_tile_stride;
++ unsigned int hw_tile_stride;
++ int mem_type;
++ unsigned long offset;
++ unsigned long num_pages;
++};
++
++/*
++ * MSVDX/TOPAZ GPU virtual space looks like this
++ * (We currently use only one MMU context).
++ * PSB_MEM_MMU_START: from 0x40000000, for generic buffers
++ * TTM_PL_CI: from 0xe0000000+half GTT space, for camear/video buffer sharing
++ * TTM_PL_RAR: from TTM_PL_CI, for RAR/video buffer sharing
++ * TTM_PL_TT: from TTM_PL_RAR, for buffers need to mapping into GTT
++ */
++static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
++ struct ttm_mem_type_manager *man)
++{
++
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct psb_gtt *pg = dev_priv->pg;
++
++ switch (type) {
++ case TTM_PL_SYSTEM:
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_CACHED;
++ break;
++ case DRM_PSB_MEM_MMU:
++ man->io_offset = 0x00000000;
++ man->io_size = 0x00000000;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++ man->gpu_offset = PSB_MEM_MMU_START;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case TTM_PL_CI:
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++ man->io_offset = dev_priv->ci_region_start;
++ man->io_size = pg->ci_stolen_size;
++ man->gpu_offset = pg->mmu_gatt_start;
++ man->available_caching = TTM_PL_FLAG_UNCACHED;
++ man->default_caching = TTM_PL_FLAG_UNCACHED;
++ break;
++ case TTM_PL_RAR: /* Unmappable RAR memory */
++ man->io_offset = dev_priv->rar_region_start;
++ man->io_size = pg->rar_stolen_size;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++ man->available_caching = TTM_PL_FLAG_UNCACHED;
++ man->default_caching = TTM_PL_FLAG_UNCACHED;
++ man->gpu_offset = pg->mmu_gatt_start;
++ break;
++ case TTM_PL_TT: /* Mappable GATT memory */
++ man->io_offset = pg->gatt_start;
++ man->io_size = pg->gatt_pages << PAGE_SHIFT;
++ man->io_addr = NULL;
++#ifdef PSB_WORKING_HOST_MMU_ACCESS
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++#else
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++#endif
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ man->gpu_offset = pg->mmu_gatt_start;
++ break;
++ default:
++ DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
++{
++ uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
++
++ /* all buffers evicted to system memory */
++ return cur_placement | TTM_PL_FLAG_SYSTEM;
++}
++
++static int psb_invalidate_caches(struct ttm_bo_device *bdev,
++ uint32_t placement)
++{
++ return 0;
++}
++
++static int psb_move_blit(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ BUG();
++ return 0;
++}
++
++/*
++ * Flip destination ttm into GATT,
++ * then blit and subsequently move out again.
++ */
++
++static int psb_move_flip(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg tmp_mem;
++ int ret;
++
++ tmp_mem = *new_mem;
++ tmp_mem.mm_node = NULL;
++ tmp_mem.proposed_flags = TTM_PL_FLAG_TT;
++
++ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait);
++ if (ret)
++ return ret;
++ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
++ if (ret)
++ goto out_cleanup;
++ ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
++ if (ret)
++ goto out_cleanup;
++
++ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
++out_cleanup:
++ if (tmp_mem.mm_node) {
++ spin_lock(&bdev->lru_lock);
++ drm_mm_put_block(tmp_mem.mm_node);
++ tmp_mem.mm_node = NULL;
++ spin_unlock(&bdev->lru_lock);
++ }
++ return ret;
++}
++
++static int psb_move(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ if ((old_mem->mem_type == TTM_PL_RAR) ||
++ (new_mem->mem_type == TTM_PL_RAR)) {
++ ttm_bo_free_old_node(bo);
++ *old_mem = *new_mem;
++ } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
++ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++ } else if (new_mem->mem_type == TTM_PL_SYSTEM) {
++ int ret = psb_move_flip(bo, evict, interruptible,
++ no_wait, new_mem);
++ if (unlikely(ret != 0)) {
++ if (ret == -ERESTART)
++ return ret;
++ else
++ return ttm_bo_move_memcpy(bo, evict, no_wait,
++ new_mem);
++ }
++ } else {
++ if (psb_move_blit(bo, evict, no_wait, new_mem))
++ return ttm_bo_move_memcpy(bo, evict, no_wait,
++ new_mem);
++ }
++ return 0;
++}
++
++static int drm_psb_tbe_populate(struct ttm_backend *backend,
++ unsigned long num_pages,
++ struct page **pages,
++ struct page *dummy_read_page)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ psb_be->pages = pages;
++ return 0;
++}
++
++static int drm_psb_tbe_unbind(struct ttm_backend *backend)
++{
++ struct ttm_bo_device *bdev = backend->bdev;
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
++ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type];
++
++ PSB_DEBUG_RENDER("MMU unbind.\n");
++
++ if (psb_be->mem_type == TTM_PL_TT) {
++ uint32_t gatt_p_offset =
++ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
++
++ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride);
++ }
++
++ psb_mmu_remove_pages(pd, psb_be->offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride);
++
++ return 0;
++}
++
++static int drm_psb_tbe_bind(struct ttm_backend *backend,
++ struct ttm_mem_reg *bo_mem)
++{
++ struct ttm_bo_device *bdev = backend->bdev;
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
++ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
++ int type;
++ int ret = 0;
++
++ psb_be->mem_type = bo_mem->mem_type;
++ psb_be->num_pages = bo_mem->num_pages;
++ psb_be->desired_tile_stride = 0;
++ psb_be->hw_tile_stride = 0;
++ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
++ man->gpu_offset;
++
++ type =
++ (bo_mem->
++ flags & TTM_PL_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
++
++ PSB_DEBUG_RENDER("MMU bind.\n");
++ if (psb_be->mem_type == TTM_PL_TT) {
++ uint32_t gatt_p_offset =
++ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
++
++ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
++ gatt_p_offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride, type);
++ }
++
++ ret = psb_mmu_insert_pages(pd, psb_be->pages,
++ psb_be->offset, psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride, type);
++ if (ret)
++ goto out_err;
++
++ return 0;
++out_err:
++ drm_psb_tbe_unbind(backend);
++ return ret;
++
++}
++
++static void drm_psb_tbe_clear(struct ttm_backend *backend)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ psb_be->pages = NULL;
++ return;
++}
++
++static void drm_psb_tbe_destroy(struct ttm_backend *backend)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ if (backend)
++ kfree(psb_be);
++}
++
++static struct ttm_backend_func psb_ttm_backend = {
++ .populate = drm_psb_tbe_populate,
++ .clear = drm_psb_tbe_clear,
++ .bind = drm_psb_tbe_bind,
++ .unbind = drm_psb_tbe_unbind,
++ .destroy = drm_psb_tbe_destroy,
++};
++
++static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
++{
++ struct drm_psb_ttm_backend *psb_be;
++
++ psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
++ if (!psb_be)
++ return NULL;
++ psb_be->pages = NULL;
++ psb_be->base.func = &psb_ttm_backend;
++ psb_be->base.bdev = bdev;
++ return &psb_be->base;
++}
++
++/*
++ * Use this memory type priority if no eviction is needed.
++ */
++static uint32_t psb_mem_prios[] = {
++ TTM_PL_CI,
++ TTM_PL_RAR,
++ TTM_PL_TT,
++ DRM_PSB_MEM_MMU,
++ TTM_PL_SYSTEM
++};
++
++/*
++ * Use this memory type priority if need to evict.
++ */
++static uint32_t psb_busy_prios[] = {
++ TTM_PL_TT,
++ TTM_PL_CI,
++ TTM_PL_RAR,
++ DRM_PSB_MEM_MMU,
++ TTM_PL_SYSTEM
++};
++
++
++struct ttm_bo_driver psb_ttm_bo_driver = {
++ .mem_type_prio = psb_mem_prios,
++ .mem_busy_prio = psb_busy_prios,
++ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
++ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
++ .create_ttm_backend_entry = &drm_psb_tbe_init,
++ .invalidate_caches = &psb_invalidate_caches,
++ .init_mem_type = &psb_init_mem_type,
++ .evict_flags = &psb_evict_mask,
++ .move = &psb_move,
++ .verify_access = &psb_verify_access,
++ .sync_obj_signaled = &ttm_fence_sync_obj_signaled,
++ .sync_obj_wait = &ttm_fence_sync_obj_wait,
++ .sync_obj_flush = &ttm_fence_sync_obj_flush,
++ .sync_obj_unref = &ttm_fence_sync_obj_unref,
++ .sync_obj_ref = &ttm_fence_sync_obj_ref
++};
+diff --git a/drivers/gpu/drm/mrst/drv/psb_dpst.c b/drivers/gpu/drm/mrst/drv/psb_dpst.c
+new file mode 100644
+index 0000000..c16c982
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_dpst.c
+@@ -0,0 +1,254 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#include "psb_umevents.h"
++#include "psb_dpst.h"
++/**
++ * inform the kernel of the work to be performed and related function.
++ *
++ */
++DECLARE_WORK(dpst_dev_change_work, &psb_dpst_dev_change_wq);
++/**
++ * psb_dpst_notify_change_um - notify user mode of hotplug changes
++ *
++ * @name: name of event to notify user mode of change to
++ * @state: dpst state struct to get workqueue from
++ *
++ */
++int psb_dpst_notify_change_um(enum dpst_event_enum event,
++ struct dpst_state *state)
++{
++ if (state == NULL)
++ return IRQ_HANDLED;
++
++ state->dpst_change_wq_data.dev_name_arry_rw_status
++ [state->dpst_change_wq_data.dev_name_write] =
++ DRM_DPST_READY_TO_READ;
++ state->dpst_change_wq_data.dpst_events
++ [state->dpst_change_wq_data.dev_name_write] =
++ event;
++ if (state->dpst_change_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->dpst_change_wq_data.dev_name_write++;
++ if (state->dpst_change_wq_data.dev_name_write ==
++ state->dpst_change_wq_data.dev_name_read) {
++ state->dpst_change_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->dpst_change_wq_data.dev_name_write >
++ DRM_DPST_RING_DEPTH_MAX) {
++ state->dpst_change_wq_data.dev_name_write = 0;
++ state->dpst_change_wq_data.dev_name_write_wrap = 1;
++ }
++ state->dpst_change_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->dpst_wq, &(state->dpst_change_wq_data.work));
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(psb_dpst_notify_change_um);
++/**
++ *
++ * psb_dpst_create_and_notify_um - create and notify user mode of new dev
++ *
++ * @name: name to give for new event / device
++ * @state: dpst state instaces to associate event with
++ *
++ */
++struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
++ struct dpst_state *state)
++{
++ return psb_create_umevent_obj(name, state->list);
++
++}
++EXPORT_SYMBOL(psb_dpst_create_and_notify_um);
++/**
++ * psb_dpst_device_pool_create_and_init - make new hotplug device pool
++ *
++ * @parent_kobj - parent kobject to associate dpst kset with
++ * @state - dpst state instance to associate list with
++ *
++ */
++struct umevent_list *psb_dpst_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct dpst_state *state)
++{
++ struct umevent_list *new_hotplug_dev_list = NULL;
++ new_hotplug_dev_list = psb_umevent_create_list();
++ if (new_hotplug_dev_list)
++ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
++ "psb_dpst");
++
++ state->dpst_wq = create_singlethread_workqueue("dpst-wq");
++
++ if (!state->dpst_wq)
++ return NULL;
++
++ INIT_WORK(&state->dpst_change_wq_data.work, psb_dpst_dev_change_wq);
++
++ state->dpst_change_wq_data.dev_name_read = 0;
++ state->dpst_change_wq_data.dev_name_write = 0;
++ state->dpst_change_wq_data.dev_name_write_wrap = 0;
++ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
++
++ memset(&(state->dpst_change_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_DPST_RING_DEPTH);
++
++ return new_hotplug_dev_list;
++}
++EXPORT_SYMBOL(psb_dpst_device_pool_create_and_init);
++/**
++ * psb_dpst_init - init dpst subsystem
++ * @parent_kobj - parent kobject to associate dpst state with
++ *
++ */
++struct dpst_state *psb_dpst_init(struct kobject *parent_kobj)
++{
++ struct dpst_state *state;
++ struct umevent_obj *working_umevent;
++
++ state = kzalloc(sizeof(struct dpst_state), GFP_KERNEL);
++ printk(KERN_ALERT "after kzalloc\n");
++ state->list = NULL;
++ state->list = psb_dpst_device_pool_create_and_init(
++ parent_kobj,
++ state);
++ working_umevent =
++ psb_dpst_create_and_notify_um("init",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_INIT_COMPLETE] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("hist_int",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_HIST_INTERRUPT] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("term",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_TERMINATE] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("phase_done",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_PHASE_COMPLETE] = &(working_umevent->head);
++
++ return state;
++}
++EXPORT_SYMBOL(psb_dpst_init);
++/**
++ * psb_dpst_device_pool_destroy - destroy all dpst related resources
++ *
++ * @state: dpst state instance to destroy
++ *
++ */
++void psb_dpst_device_pool_destroy(struct dpst_state *state)
++{
++ int i;
++ struct umevent_list *list;
++ struct umevent_obj *umevent_test;
++ list = state->list;
++ flush_workqueue(state->dpst_wq);
++ destroy_workqueue(state->dpst_wq);
++ for (i = 0; i < DRM_DPST_MAX_NUM_EVENTS; i++) {
++ umevent_test = list_entry(
++ (state->dpst_change_wq_data.dev_umevent_arry[i]),
++ struct umevent_obj, head);
++ state->dpst_change_wq_data.dev_umevent_arry[i] = NULL;
++ }
++ psb_umevent_cleanup(list);
++ kfree(state);
++}
++EXPORT_SYMBOL(psb_dpst_device_pool_destroy);
++/**
++ * psb_dpst_dev_change_wq - change workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_dpst_dev_change_wq(struct work_struct *work)
++{
++ struct dpst_disp_workqueue_data *wq_data;
++ int curr_event_index;
++ wq_data = to_dpst_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_DPST_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_DPST_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++EXPORT_SYMBOL(psb_dpst_dev_change_wq);
+diff --git a/drivers/gpu/drm/mrst/drv/psb_dpst.h b/drivers/gpu/drm/mrst/drv/psb_dpst.h
+new file mode 100644
+index 0000000..6f24a05
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_dpst.h
+@@ -0,0 +1,98 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#ifndef _PSB_DPST_H_
++#define _PSB_DPST_H_
++/**
++ * required includes
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * dpst event enumeration
++ *
++ */
++enum dpst_event_enum {
++ DPST_EVENT_INIT_COMPLETE,
++ DPST_EVENT_HIST_INTERRUPT,
++ DPST_EVENT_TERMINATE,
++ DPST_EVENT_PHASE_COMPLETE,
++ DPST_MAX_EVENT
++};
++/**
++ * dpst specific defines
++ *
++ */
++#define DRM_DPST_RING_DEPTH 256
++#define DRM_DPST_RING_DEPTH_MAX (DRM_DPST_RING_DEPTH-1)
++#define DRM_DPST_READY_TO_READ 1
++#define DRM_DPST_READ_COMPLETE 2
++#define DRM_DPST_MAX_NUM_EVENTS (DPST_MAX_EVENT)
++/**
++ * dpst workqueue data struct.
++ */
++struct dpst_disp_workqueue_data {
++ struct work_struct work;
++ const char *dev_name;
++ int dev_name_write;
++ int dev_name_read;
++ int dev_name_write_wrap;
++ int dev_name_read_write_wrap_ack;
++ enum dpst_event_enum dpst_events[DRM_DPST_RING_DEPTH];
++ int dev_name_arry_rw_status[DRM_DPST_RING_DEPTH];
++ struct umevent_list *hotplug_dev_list;
++ struct list_head *dev_umevent_arry[DRM_DPST_MAX_NUM_EVENTS];
++};
++/**
++ * dpst state structure
++ *
++ */
++struct dpst_state {
++ struct workqueue_struct *dpst_wq;
++ struct dpst_disp_workqueue_data dpst_change_wq_data;
++ struct umevent_list *list;
++};
++/**
++ * main interface function prototytpes for dpst support.
++ *
++ */
++extern struct dpst_state *psb_dpst_init(struct kobject *parent_kobj);
++extern int psb_dpst_notify_change_um(enum dpst_event_enum event,
++ struct dpst_state *state);
++extern struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
++ struct dpst_state *state);
++extern struct umevent_list *psb_dpst_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct dpst_state *state);
++extern void psb_dpst_device_pool_destroy(struct dpst_state *state);
++/**
++ * to go back and forth between work struct and workqueue data
++ *
++ */
++#define to_dpst_disp_workqueue_data(x) \
++ container_of(x, struct dpst_disp_workqueue_data, work)
++
++/**
++ * function prototypes for workqueue implementation
++ *
++ */
++extern void psb_dpst_dev_change_wq(struct work_struct *work);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_drm.h b/drivers/gpu/drm/mrst/drv/psb_drm.h
+new file mode 100644
+index 0000000..f23afd0
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_drm.h
+@@ -0,0 +1,634 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_DRM_H_
++#define _PSB_DRM_H_
++
++#if defined(__linux__) && !defined(__KERNEL__)
++#include<stdint.h>
++#include <linux/types.h>
++#include "drm_mode.h"
++#endif
++
++#include "ttm/ttm_fence_user.h"
++#include "ttm/ttm_placement_user.h"
++
++/*
++ * Menlow/MRST graphics driver package version
++ * a.b.c.xxxx
++ * a - Product Family: 5 - Linux
++ * b - Major Release Version: 0 - non-Gallium (Unbuntu);
++ * 1 - Gallium (Moblin2)
++ * c - Hotfix Release
++ * xxxx - Graphics internal build #
++ */
++#define PSB_PACKAGE_VERSION "5.3.0.32L.0007"
++
++#define DRM_PSB_SAREA_MAJOR 0
++#define DRM_PSB_SAREA_MINOR 2
++#define PSB_FIXED_SHIFT 16
++
++
++#define PSB_NUM_PIPE 2
++
++/*
++ * Public memory types.
++ */
++
++#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
++#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
++
++typedef int32_t psb_fixed;
++typedef uint32_t psb_ufixed;
++
++static inline int32_t psb_int_to_fixed(int a)
++{
++ return a * (1 << PSB_FIXED_SHIFT);
++}
++
++static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
++{
++ return a << PSB_FIXED_SHIFT;
++}
++
++/*Status of the command sent to the gfx device.*/
++typedef enum {
++ DRM_CMD_SUCCESS,
++ DRM_CMD_FAILED,
++ DRM_CMD_HANG
++} drm_cmd_status_t;
++
++struct drm_psb_scanout {
++ uint32_t buffer_id; /* DRM buffer object ID */
++ uint32_t rotation; /* Rotation as in RR_rotation definitions */
++ uint32_t stride; /* Buffer stride in bytes */
++ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
++ uint32_t width; /* Buffer width in pixels */
++ uint32_t height; /* Buffer height in lines */
++ int32_t transform[3][3]; /* Buffer composite transform */
++ /* (scaling, rot, reflect) */
++};
++
++#define DRM_PSB_SAREA_OWNERS 16
++#define DRM_PSB_SAREA_OWNER_2D 0
++#define DRM_PSB_SAREA_OWNER_3D 1
++
++#define DRM_PSB_SAREA_SCANOUTS 3
++
++struct drm_psb_sarea {
++ /* Track changes of this data structure */
++
++ uint32_t major;
++ uint32_t minor;
++
++ /* Last context to touch part of hw */
++ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
++
++ /* Definition of front- and rotated buffers */
++ uint32_t num_scanouts;
++ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
++
++ int planeA_x;
++ int planeA_y;
++ int planeA_w;
++ int planeA_h;
++ int planeB_x;
++ int planeB_y;
++ int planeB_w;
++ int planeB_h;
++ /* Number of active scanouts */
++ uint32_t num_active_scanouts;
++};
++
++#define PSB_RELOC_MAGIC 0x67676767
++#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
++#define PSB_RELOC_SHIFT_SHIFT 0
++#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
++#define PSB_RELOC_ALSHIFT_SHIFT 16
++
++#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
++ * buffer
++ */
++
++struct drm_psb_reloc {
++ uint32_t reloc_op;
++ uint32_t where; /* offset in destination buffer */
++ uint32_t buffer; /* Buffer reloc applies to */
++ uint32_t mask; /* Destination format: */
++ uint32_t shift; /* Destination format: */
++ uint32_t pre_add; /* Destination format: */
++ uint32_t background; /* Destination add */
++ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
++ uint32_t arg0; /* Reloc-op dependant */
++ uint32_t arg1;
++};
++
++
++#define PSB_GPU_ACCESS_READ (1ULL << 32)
++#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
++#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
++
++#define PSB_BO_FLAG_COMMAND (1ULL << 52)
++
++#define PSB_ENGINE_2D 0
++#define PSB_ENGINE_VIDEO 1
++#define LNC_ENGINE_ENCODE 5
++
++/*
++ * For this fence class we have a couple of
++ * fence types.
++ */
++
++#define _PSB_FENCE_EXE_SHIFT 0
++#define _PSB_FENCE_FEEDBACK_SHIFT 4
++
++#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
++#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
++
++#define PSB_NUM_ENGINES 6
++
++
++#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
++
++struct drm_psb_extension_rep {
++ int32_t exists;
++ uint32_t driver_ioctl_offset;
++ uint32_t sarea_offset;
++ uint32_t major;
++ uint32_t minor;
++ uint32_t pl;
++};
++
++#define DRM_PSB_EXT_NAME_LEN 128
++
++union drm_psb_extension_arg {
++ char extension[DRM_PSB_EXT_NAME_LEN];
++ struct drm_psb_extension_rep rep;
++};
++
++struct psb_validate_req {
++ uint64_t set_flags;
++ uint64_t clear_flags;
++ uint64_t next;
++ uint64_t presumed_gpu_offset;
++ uint32_t buffer_handle;
++ uint32_t presumed_flags;
++ uint32_t group;
++ uint32_t pad64;
++};
++
++struct psb_validate_rep {
++ uint64_t gpu_offset;
++ uint32_t placement;
++ uint32_t fence_type_mask;
++};
++
++#define PSB_USE_PRESUMED (1 << 0)
++
++struct psb_validate_arg {
++ int handled;
++ int ret;
++ union {
++ struct psb_validate_req req;
++ struct psb_validate_rep rep;
++ } d;
++};
++
++
++#define DRM_PSB_FENCE_NO_USER (1 << 0)
++
++struct psb_ttm_fence_rep {
++ uint32_t handle;
++ uint32_t fence_class;
++ uint32_t fence_type;
++ uint32_t signaled_types;
++ uint32_t error;
++};
++
++typedef struct drm_psb_cmdbuf_arg {
++ uint64_t buffer_list; /* List of buffers to validate */
++ uint64_t clip_rects; /* See i915 counterpart */
++ uint64_t scene_arg;
++ uint64_t fence_arg;
++
++ uint32_t ta_flags;
++
++ uint32_t ta_handle; /* TA reg-value pairs */
++ uint32_t ta_offset;
++ uint32_t ta_size;
++
++ uint32_t oom_handle;
++ uint32_t oom_offset;
++ uint32_t oom_size;
++
++ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
++ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
++ uint32_t cmdbuf_size;
++
++ uint32_t reloc_handle; /* Reloc buffer object */
++ uint32_t reloc_offset;
++ uint32_t num_relocs;
++
++ int32_t damage; /* Damage front buffer with cliprects */
++ /* Not implemented yet */
++ uint32_t fence_flags;
++ uint32_t engine;
++
++ /*
++ * Feedback;
++ */
++
++ uint32_t feedback_ops;
++ uint32_t feedback_handle;
++ uint32_t feedback_offset;
++ uint32_t feedback_breakpoints;
++ uint32_t feedback_size;
++} drm_psb_cmdbuf_arg_t;
++
++typedef struct drm_psb_pageflip_arg {
++ uint32_t flip_offset;
++ uint32_t stride;
++} drm_psb_pageflip_arg_t;
++
++typedef enum {
++ LNC_VIDEO_DEVICE_INFO,
++ LNC_VIDEO_GETPARAM_RAR_INFO,
++ LNC_VIDEO_GETPARAM_CI_INFO,
++ LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET,
++ LNC_VIDEO_FRAME_SKIP
++} lnc_getparam_key_t;
++
++struct drm_lnc_video_getparam_arg {
++ lnc_getparam_key_t key;
++ uint64_t arg; /* argument pointer */
++ uint64_t value; /* feed back pointer */
++};
++
++
++/*
++ * Feedback components:
++ */
++
++/*
++ * Vistest component. The number of these in the feedback buffer
++ * equals the number of vistest breakpoints + 1.
++ * This is currently the only feedback component.
++ */
++
++struct drm_psb_vistest {
++ uint32_t vt[8];
++};
++
++struct drm_psb_sizes_arg {
++ uint32_t ta_mem_size;
++ uint32_t mmu_size;
++ uint32_t pds_size;
++ uint32_t rastgeom_size;
++ uint32_t tt_size;
++ uint32_t vram_size;
++};
++
++struct drm_psb_hist_status_arg {
++ uint32_t buf[32];
++};
++
++struct drm_psb_dpst_lut_arg {
++ uint8_t lut[256];
++ int output_id;
++};
++
++struct mrst_timing_info {
++ uint16_t pixel_clock;
++ uint8_t hactive_lo;
++ uint8_t hblank_lo;
++ uint8_t hblank_hi:4;
++ uint8_t hactive_hi:4;
++ uint8_t vactive_lo;
++ uint8_t vblank_lo;
++ uint8_t vblank_hi:4;
++ uint8_t vactive_hi:4;
++ uint8_t hsync_offset_lo;
++ uint8_t hsync_pulse_width_lo;
++ uint8_t vsync_pulse_width_lo:4;
++ uint8_t vsync_offset_lo:4;
++ uint8_t vsync_pulse_width_hi:2;
++ uint8_t vsync_offset_hi:2;
++ uint8_t hsync_pulse_width_hi:2;
++ uint8_t hsync_offset_hi:2;
++ uint8_t width_mm_lo;
++ uint8_t height_mm_lo;
++ uint8_t height_mm_hi:4;
++ uint8_t width_mm_hi:4;
++ uint8_t hborder;
++ uint8_t vborder;
++ uint8_t unknown0:1;
++ uint8_t hsync_positive:1;
++ uint8_t vsync_positive:1;
++ uint8_t separate_sync:2;
++ uint8_t stereo:1;
++ uint8_t unknown6:1;
++ uint8_t interlaced:1;
++} __attribute__((packed));
++
++struct mrst_panel_descriptor_v1{
++ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
++ /* 0x61190 if MIPI */
++ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint32_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
++ /* Register 0x61210 */
++ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
++ uint16_t Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
++ /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
++ /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
++ uint16_t Panel_MIPI_Display_Descriptor;
++ /*16 bits, Defined as follows: */
++ /* if MIPI, 0x0000 if LVDS */
++ /* Bit 0, Type, 2 bits, */
++ /* 0: Type-1, */
++ /* 1: Type-2, */
++ /* 2: Type-3, */
++ /* 3: Type-4 */
++ /* Bit 2, Pixel Format, 4 bits */
++ /* Bit0: 16bpp (not supported in LNC), */
++ /* Bit1: 18bpp loosely packed, */
++ /* Bit2: 18bpp packed, */
++ /* Bit3: 24bpp */
++ /* Bit 6, Reserved, 2 bits, 00b */
++ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
++ /* Bit 14, Reserved, 2 bits, 00b */
++} __attribute__ ((packed));
++
++struct mrst_panel_descriptor_v2{
++ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
++ /* 0x61190 if MIPI */
++ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint8_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
++ /* Register 0x61210 */
++ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
++ uint16_t Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
++ /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
++ uint8_t Panel_Initial_Brightness;/* [7:0] 0 - 100% */
++ /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
++ uint16_t Panel_MIPI_Display_Descriptor;
++ /*16 bits, Defined as follows: */
++ /* if MIPI, 0x0000 if LVDS */
++ /* Bit 0, Type, 2 bits, */
++ /* 0: Type-1, */
++ /* 1: Type-2, */
++ /* 2: Type-3, */
++ /* 3: Type-4 */
++ /* Bit 2, Pixel Format, 4 bits */
++ /* Bit0: 16bpp (not supported in LNC), */
++ /* Bit1: 18bpp loosely packed, */
++ /* Bit2: 18bpp packed, */
++ /* Bit3: 24bpp */
++ /* Bit 6, Reserved, 2 bits, 00b */
++ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
++ /* Bit 14, Reserved, 2 bits, 00b */
++} __attribute__ ((packed));
++
++union mrst_panel_rx{
++ struct{
++ uint16_t NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
++ /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
++ uint16_t MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
++ /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
++ uint16_t SupportedVideoTransferMode:2; /*0: Non-burst only */
++ /* 1: Burst and non-burst */
++ /* 2/3: Reserved */
++ uint16_t HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
++ uint16_t DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
++ uint16_t ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
++ uint16_t BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
++ uint16_t Rsvd:5;/*5 bits,00000b */
++ } panelrx;
++ uint16_t panel_receiver;
++} __attribute__ ((packed));
++
++struct gct_ioctl_arg{
++ uint8_t bpi; /* boot panel index, number of panel used during boot */
++ uint8_t pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
++ struct mrst_timing_info DTD; /* timing info for the selected panel */
++ uint32_t Panel_Port_Control;
++ uint32_t PP_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint32_t PP_Cycle_Delay;
++ uint16_t Panel_Backlight_Inverter_Descriptor;
++ uint16_t Panel_MIPI_Display_Descriptor;
++} __attribute__ ((packed));
++
++struct mrst_vbt{
++ char Signature[4]; /*4 bytes,"$GCT" */
++ uint8_t Revision; /*1 byte */
++ uint8_t Size; /*1 byte */
++ uint8_t Checksum; /*1 byte,Calculated*/
++ void *mrst_gct;
++} __attribute__ ((packed));
++
++struct mrst_gct_v1{ /* expect this table to change per customer request*/
++ union{ /*8 bits,Defined as follows: */
++ struct{
++ uint8_t PanelType:4; /*4 bits, Bit field for panels*/
++ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
++ /*2 bits,Specifies which of the*/
++ uint8_t BootPanelIndex:2;
++ /* 4 panels to use by default*/
++ uint8_t BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
++ /* the 4 MIPI DSI receivers to use*/
++ } PD;
++ uint8_t PanelDescriptor;
++ };
++ struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
++ union mrst_panel_rx panelrx[4]; /* panel receivers*/
++} __attribute__ ((packed));
++
++struct mrst_gct_v2{ /* expect this table to change per customer request*/
++ union{ /*8 bits,Defined as follows: */
++ struct{
++ uint8_t PanelType:4; /*4 bits, Bit field for panels*/
++ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
++ /*2 bits,Specifies which of the*/
++ uint8_t BootPanelIndex:2;
++ /* 4 panels to use by default*/
++ uint8_t BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
++ /* the 4 MIPI DSI receivers to use*/
++ } PD;
++ uint8_t PanelDescriptor;
++ };
++ struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
++ union mrst_panel_rx panelrx[4]; /* panel receivers*/
++} __attribute__ ((packed));
++
++#define PSB_DC_CRTC_SAVE 0x01
++#define PSB_DC_CRTC_RESTORE 0x02
++#define PSB_DC_OUTPUT_SAVE 0x04
++#define PSB_DC_OUTPUT_RESTORE 0x08
++#define PSB_DC_CRTC_MASK 0x03
++#define PSB_DC_OUTPUT_MASK 0x0C
++
++struct drm_psb_dc_state_arg {
++ uint32_t flags;
++ uint32_t obj_id;
++};
++
++struct drm_psb_mode_operation_arg {
++ uint32_t obj_id;
++ uint16_t operation;
++ struct drm_mode_modeinfo mode;
++ void *data;
++};
++
++struct drm_psb_stolen_memory_arg {
++ uint32_t base;
++ uint32_t size;
++};
++
++/*Display Register Bits*/
++#define REGRWBITS_PFIT_CONTROLS (1 << 0)
++#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1)
++#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS (1 << 2)
++#define REGRWBITS_PIPEASRC (1 << 3)
++#define REGRWBITS_PIPEBSRC (1 << 4)
++#define REGRWBITS_VTOTAL_A (1 << 5)
++#define REGRWBITS_VTOTAL_B (1 << 6)
++
++/*Overlay Register Bits*/
++#define OV_REGRWBITS_OVADD (1 << 0)
++#define OV_REGRWBITS_OGAM_ALL (1 << 1)
++
++struct drm_psb_register_rw_arg {
++ uint32_t b_force_hw_on;
++
++ uint32_t display_read_mask;
++ uint32_t display_write_mask;
++
++ struct {
++ uint32_t pfit_controls;
++ uint32_t pfit_autoscale_ratios;
++ uint32_t pfit_programmed_scale_ratios;
++ uint32_t pipeasrc;
++ uint32_t pipebsrc;
++ uint32_t vtotal_a;
++ uint32_t vtotal_b;
++ } display;
++
++ uint32_t overlay_read_mask;
++ uint32_t overlay_write_mask;
++
++ struct {
++ uint32_t OVADD;
++ uint32_t OGAMC0;
++ uint32_t OGAMC1;
++ uint32_t OGAMC2;
++ uint32_t OGAMC3;
++ uint32_t OGAMC4;
++ uint32_t OGAMC5;
++ } overlay;
++
++ uint32_t sprite_enable_mask;
++ uint32_t sprite_disable_mask;
++
++ struct {
++ uint32_t dspa_control;
++ uint32_t dspa_key_value;
++ uint32_t dspa_key_mask;
++ uint32_t dspc_control;
++ uint32_t dspc_stride;
++ uint32_t dspc_position;
++ uint32_t dspc_linear_offset;
++ uint32_t dspc_size;
++ uint32_t dspc_surface;
++ } sprite;
++};
++
++struct psb_gtt_mapping_arg {
++ void *hKernelMemInfo;
++ uint32_t offset_pages;
++};
++
++struct drm_psb_getpageaddrs_arg {
++ uint32_t handle;
++ unsigned long *page_addrs;
++ unsigned long gtt_offset;
++};
++
++
++/* Controlling the kernel modesetting buffers */
++
++#define DRM_PSB_KMS_OFF 0x00
++#define DRM_PSB_KMS_ON 0x01
++#define DRM_PSB_VT_LEAVE 0x02
++#define DRM_PSB_VT_ENTER 0x03
++#define DRM_PSB_EXTENSION 0x06
++#define DRM_PSB_SIZES 0x07
++#define DRM_PSB_FUSE_REG 0x08
++#define DRM_PSB_VBT 0x09
++#define DRM_PSB_DC_STATE 0x0A
++#define DRM_PSB_ADB 0x0B
++#define DRM_PSB_MODE_OPERATION 0x0C
++#define DRM_PSB_STOLEN_MEMORY 0x0D
++#define DRM_PSB_REGISTER_RW 0x0E
++#define DRM_PSB_GTT_MAP 0x0F
++#define DRM_PSB_GTT_UNMAP 0x10
++#define DRM_PSB_GETPAGEADDRS 0x11
++/**
++ * NOTE: Add new commands here, but increment
++ * the values below and increment their
++ * corresponding defines where they're
++ * defined elsewhere.
++ */
++#define DRM_PVR_RESERVED1 0x12
++#define DRM_PVR_RESERVED2 0x13
++#define DRM_PVR_RESERVED3 0x14
++#define DRM_PVR_RESERVED4 0x15
++#define DRM_PVR_RESERVED5 0x16
++
++#define DRM_PSB_HIST_ENABLE 0x17
++#define DRM_PSB_HIST_STATUS 0x18
++#define DRM_PSB_UPDATE_GUARD 0x19
++#define DRM_PSB_INIT_COMM 0x1A
++#define DRM_PSB_DPST 0x1B
++#define DRM_PSB_GAMMA 0x1C
++#define DRM_PSB_DPST_BL 0x1D
++
++#define DRM_PVR_RESERVED6 0x1E
++
++#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
++
++struct drm_psb_dev_info_arg {
++ uint32_t num_use_attribute_registers;
++};
++#define DRM_PSB_DEVINFO 0x01
++
++#define PSB_MODE_OPERATION_MODE_VALID 0x01
++#define PSB_MODE_OPERATION_SET_DC_BASE 0x02
++
++struct drm_psb_get_pipe_from_crtc_id_arg {
++ /** ID of CRTC being requested **/
++ uint32_t crtc_id;
++
++ /** pipe of requested CRTC **/
++ uint32_t pipe;
++};
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_drv.c b/drivers/gpu/drm/mrst/drv/psb_drv.c
+new file mode 100644
+index 0000000..dbc4327
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_drv.c
+@@ -0,0 +1,2218 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_fb.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_bios.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include <drm/drm_pciids.h>
++#include "ospm_power.h"
++#include "pvr_drm_shared.h"
++#include "img_types.h"
++#include <linux/cpu.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++#include <linux/rar/rar_register.h>
++#include <linux/rar/memrar.h>
++
++/*IMG headers*/
++#include "pvr_drm_shared.h"
++#include "img_types.h"
++#include "pvr_bridge.h"
++#include "linkage.h"
++#include "sysirq.h"
++
++int drm_psb_debug;
++EXPORT_SYMBOL(drm_psb_debug);
++static int drm_psb_trap_pagefaults;
++
++int drm_psb_no_fb;
++int drm_psb_force_pipeb;
++int drm_idle_check_interval = 5;
++int drm_msvdx_pmpolicy = PSB_PMPOLICY_POWERDOWN;
++int drm_topaz_pmpolicy = PSB_PMPOLICY_NOPM;
++int drm_topaz_sbuswa;
++int drm_psb_ospm = 1;
++
++static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++
++MODULE_PARM_DESC(debug, "Enable debug output");
++MODULE_PARM_DESC(no_fb, "Disable FBdev");
++MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
++MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
++MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
++MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
++MODULE_PARM_DESC(ospm, "switch for ospm support");
++MODULE_PARM_DESC(msvdx_pmpolicy, "msvdx power management policy btw frames");
++MODULE_PARM_DESC(topaz_pmpolicy, "topaz power managerment policy btw frames");
++MODULE_PARM_DESC(topaz_sbuswa, "WA for topaz sysbus write");
++module_param_named(debug, drm_psb_debug, int, 0600);
++module_param_named(no_fb, drm_psb_no_fb, int, 0600);
++module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
++module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
++module_param_named(msvdx_pmpolicy, drm_msvdx_pmpolicy, int, 0600);
++module_param_named(topaz_pmpolicy, drm_topaz_pmpolicy, int, 0600);
++module_param_named(topaz_sbuswa, drm_topaz_sbuswa, int, 0600);
++module_param_named(ospm, drm_psb_ospm, int, 0600);
++
++#if 0
++#ifndef CONFIG_X86_PAT
++#warning "Don't build this driver without PAT support!!!"
++#endif
++#endif
++#define psb_PCI_IDS \
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0, 0, 0}
++
++/* PCI match table built from psb_PCI_IDS above: Poulsbo (0x8108/0x8109)
++ * and the Moorestown 0x4100-0x4107 range; zero-terminated. */
++static struct pci_device_id pciidlist[] = {
++	psb_PCI_IDS
++};
++
++/*
++ * Standard IOCTLs.
++ */
++
++#define DRM_IOCTL_PSB_KMS_OFF \
++ DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_KMS_ON \
++ DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_VT_LEAVE \
++ DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_VT_ENTER \
++ DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_EXTENSION \
++ DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
++ union drm_psb_extension_arg)
++#define DRM_IOCTL_PSB_SIZES \
++ DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
++ struct drm_psb_sizes_arg)
++#define DRM_IOCTL_PSB_FUSE_REG \
++ DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t)
++#define DRM_IOCTL_PSB_VBT \
++ DRM_IOWR(DRM_PSB_VBT + DRM_COMMAND_BASE, \
++ struct gct_ioctl_arg)
++#define DRM_IOCTL_PSB_DC_STATE \
++ DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
++ struct drm_psb_dc_state_arg)
++#define DRM_IOCTL_PSB_ADB \
++ DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t)
++#define DRM_IOCTL_PSB_MODE_OPERATION \
++ DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
++ struct drm_psb_mode_operation_arg)
++#define DRM_IOCTL_PSB_STOLEN_MEMORY \
++ DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
++ struct drm_psb_stolen_memory_arg)
++#define DRM_IOCTL_PSB_REGISTER_RW \
++ DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
++ struct drm_psb_register_rw_arg)
++#define DRM_IOCTL_PSB_GTT_MAP \
++ DRM_IOWR(DRM_PSB_GTT_MAP + DRM_COMMAND_BASE, \
++ struct psb_gtt_mapping_arg)
++#define DRM_IOCTL_PSB_GTT_UNMAP \
++ DRM_IOW(DRM_PSB_GTT_UNMAP + DRM_COMMAND_BASE, \
++ struct psb_gtt_mapping_arg)
++#define DRM_IOCTL_PSB_GETPAGEADDRS \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GETPAGEADDRS,\
++ struct drm_psb_getpageaddrs_arg)
++#define DRM_IOCTL_PSB_HIST_ENABLE \
++ DRM_IOWR(DRM_PSB_HIST_ENABLE + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_HIST_STATUS \
++ DRM_IOWR(DRM_PSB_HIST_STATUS + DRM_COMMAND_BASE, \
++ struct drm_psb_hist_status_arg)
++#define DRM_IOCTL_PSB_UPDATE_GUARD \
++ DRM_IOWR(DRM_PSB_UPDATE_GUARD + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_INIT_COMM \
++ DRM_IOWR(DRM_PSB_INIT_COMM + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_DPST \
++ DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_GAMMA \
++ DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \
++ struct drm_psb_dpst_lut_arg)
++#define DRM_IOCTL_PSB_DPST_BL \
++ DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
++ DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
++ struct drm_psb_get_pipe_from_crtc_id_arg)
++
++
++/*pvr ioctls*/
++#define PVR_DRM_SRVKM_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, \
++ PVRSRV_BRIDGE_PACKAGE)
++#define PVR_DRM_DISP_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
++#define PVR_DRM_BC_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_BC_CMD)
++#define PVR_DRM_IS_MASTER_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
++#define PVR_DRM_UNPRIV_IOCTL \
++ DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, \
++ IMG_UINT32)
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD)
++
++/*
++ * TTM execbuf extension.
++ */
++#if defined(PDUMP)
++#define DRM_PSB_CMDBUF (PVR_DRM_DBGDRV_CMD + 1)
++#else
++#define DRM_PSB_CMDBUF (DRM_PSB_DPST_BL + 1)
++#endif
++
++#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1)
++#define DRM_IOCTL_PSB_CMDBUF \
++ DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \
++ struct drm_psb_cmdbuf_arg)
++#define DRM_IOCTL_PSB_SCENE_UNREF \
++ DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
++ struct drm_psb_scene)
++#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_EXTENSION \
++ DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
++ union drm_psb_extension_arg)
++/*
++ * TTM placement user extension.
++ */
++
++#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1)
++
++#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
++
++/*
++ * TTM fence extension.
++ */
++
++#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1)
++#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
++#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
++#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
++
++#define DRM_PSB_FLIP (DRM_PSB_TTM_FENCE_UNREF + 1) /*20*/
++/* PSB video extension */
++#define DRM_LNC_VIDEO_GETPARAM (DRM_PSB_FLIP + 1)
++
++#define DRM_IOCTL_PSB_TTM_PL_CREATE \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
++ union ttm_pl_create_arg)
++#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
++ union ttm_pl_reference_arg)
++#define DRM_IOCTL_PSB_TTM_PL_UNREF \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
++ struct ttm_pl_reference_req)
++#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
++ struct ttm_pl_synccpu_arg)
++#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
++ struct ttm_pl_waitidle_arg)
++#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
++ union ttm_pl_setstatus_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \
++ union ttm_fence_signaled_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \
++ union ttm_fence_finish_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \
++ struct ttm_fence_unref_arg)
++#define DRM_IOCTL_PSB_FLIP \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \
++ struct drm_psb_pageflip_arg)
++#define DRM_IOCTL_LNC_VIDEO_GETPARAM \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_LNC_VIDEO_GETPARAM, \
++ struct drm_lnc_video_getparam_arg)
++
++static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_sizes_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_vbt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
++ struct drm_file *file_priv);
++static int psb_adb_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dpst_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_gamma_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++/* Build one drm_ioctl_desc entry at the slot given by the ioctl number. */
++#define PSB_IOCTL_DEF(ioctl, func, flags) \
++	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
++
++/*
++ * Device-specific ioctl dispatch table: KMS/VT control, DPST/backlight,
++ * GTT mapping, the PVR services bridge, TTM placement/fence calls and
++ * video (MSVDX/Topaz) parameter queries.  The flags field encodes the
++ * required caller privilege (DRM_ROOT_ONLY / DRM_AUTH / DRM_MASTER / 0).
++ */
++static struct drm_ioctl_desc psb_ioctls[] = {
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
++		      DRM_ROOT_ONLY),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON,
++		      psbfb_kms_on_ioctl,
++		      DRM_ROOT_ONLY),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
++		      DRM_ROOT_ONLY),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER,
++		      psb_vt_enter_ioctl,
++		      DRM_ROOT_ONLY),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_FUSE_REG, psb_fuse_reg_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_VBT, psb_vbt_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_MAP,
++		      psb_gtt_map_meminfo_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_UNMAP,
++		      psb_gtt_unmap_meminfo_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GETPAGEADDRS,
++		      psb_getpageaddrs_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, 0),
++	PSB_IOCTL_DEF(PVR_DRM_DISP_IOCTL, PVRDRM_Dummy_ioctl, 0),
++	PSB_IOCTL_DEF(PVR_DRM_BC_IOCTL, PVRDRM_Dummy_ioctl, 0),
++	PSB_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER),
++	PSB_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, 0),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_ENABLE,
++		      psb_hist_enable_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_STATUS,
++		      psb_hist_status_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_UPDATE_GUARD, psb_update_guard_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_INIT_COMM, psb_init_comm_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID, psb_intel_get_pipe_from_crtc_id, 0),
++#if defined(PDUMP)
++	PSB_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, 0),
++#endif
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
++	/*to be removed later*/
++	/*PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
++			DRM_AUTH),*/
++
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
++		      psb_fence_signaled_ioctl, DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
++		      DRM_AUTH),
++	PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
++		      DRM_AUTH),
++	/*to be removed later */
++	/*PSB_IOCTL_DEF(DRM_IOCTL_PSB_FLIP, psb_page_flip, DRM_AUTH),*/
++	PSB_IOCTL_DEF(DRM_IOCTL_LNC_VIDEO_GETPARAM,
++		      lnc_video_getparam, DRM_AUTH)
++};
++
++/* Number of entries in psb_ioctls, handed to the DRM core at load. */
++static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
++
++/**
++ * get_ci_info - locate the camera interface (CI) aperture
++ * @dev_priv: driver private structure to populate
++ *
++ * Looks up the CI PCI device (8086:080b) and records its BAR1 start and
++ * length as the CI shared-buffer region.  When the device is absent,
++ * start/size stay 0 so no CI share buffer will be created.
++ *
++ * Fix over the original: the region size is printed with %u; it is an
++ * unsigned quantity and %d could render large values as negative.
++ */
++static void get_ci_info(struct drm_psb_private *dev_priv)
++{
++	struct pci_dev *pdev;
++
++	pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
++	if (pdev == NULL) {
++		/* IF no pci_device we set size & addr to 0, no ci
++		 * share buffer can be created */
++		dev_priv->ci_region_start = 0;
++		dev_priv->ci_region_size = 0;
++		printk(KERN_ERR "can't find CI device, no ci share buffer\n");
++		return;
++	}
++
++	dev_priv->ci_region_start = pci_resource_start(pdev, 1);
++	dev_priv->ci_region_size = pci_resource_len(pdev, 1);
++
++	printk(KERN_INFO "ci_region_start %x ci_region_size %u\n",
++		dev_priv->ci_region_start, dev_priv->ci_region_size);
++
++	/* Drop the device reference taken by pci_get_subsys(). */
++	pci_dev_put(pdev);
++}
++
++/**
++ * get_rar_info - query the video RAR (restricted access region) bounds
++ * @dev_priv: driver private structure to populate
++ *
++ * Asks the RAR register driver for the video RAR start/end addresses
++ * and records the region in @dev_priv.  Both fields remain 0 when
++ * CONFIG_RAR_REGISTER is disabled or the lookup fails.
++ *
++ * Fix over the original: the dead "if (!ret)" re-check (ret is always 0
++ * after the early return on failure) and the redundant pre-initialisation
++ * of ret/end_addr were removed; behavior is unchanged.
++ */
++static void get_rar_info(struct drm_psb_private *dev_priv)
++{
++#if defined(CONFIG_RAR_REGISTER)
++	int ret;
++	u32 start_addr, end_addr;
++
++	dev_priv->rar_region_start = 0;
++	dev_priv->rar_region_size = 0;
++
++	ret = rar_get_address(RAR_TYPE_VIDEO, &start_addr, &end_addr);
++	if (ret) {
++		printk(KERN_ERR "failed to get rar region info\n");
++		return;
++	}
++
++	dev_priv->rar_region_start = (uint32_t) start_addr;
++	/* Bounds are inclusive, hence the +1. */
++	dev_priv->rar_region_size =
++		end_addr - dev_priv->rar_region_start + 1;
++#endif
++}
++
++/* Apply user-option settings; currently there are none, so this is a
++ * deliberate no-op kept as a hook for future options. */
++static void psb_set_uopt(struct drm_psb_uopt *uopt)
++{
++	(void) uopt;
++}
++
++/*
++ * DRM lastclose hook, invoked when the final handle on the device node
++ * is closed.
++ *
++ * NOTE(review): the unconditional "return" directly below makes all of
++ * the command-buffer cleanup that follows unreachable.  This looks like
++ * a deliberately parked teardown path -- confirm intent before removing
++ * the early return.
++ */
++static void psb_lastclose(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *) dev->dev_private;
++
++	return;
++
++	/* Unreachable: would free the cached validate-list buffer array
++	 * under the command-buffer mutex. */
++	if (!dev->dev_private)
++		return;
++
++	mutex_lock(&dev_priv->cmdbuf_mutex);
++	if (dev_priv->context.buffers) {
++		vfree(dev_priv->context.buffers);
++		dev_priv->context.buffers = NULL;
++	}
++	mutex_unlock(&dev_priv->cmdbuf_mutex);
++}
++
++/*
++ * Tear down what psb_do_init() brought up: release the TTM memory
++ * managers (MMU, TT, and the optional camera/RAR apertures), guarded by
++ * the have_* flags so partial initialisation is handled, then shut down
++ * the MSVDX decoder and -- on Moorestown with the encoder present --
++ * Topaz.
++ */
++static void psb_do_takedown(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *) dev->dev_private;
++	struct ttm_bo_device *bdev = &dev_priv->bdev;
++
++
++	if (dev_priv->have_mem_mmu) {
++		ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
++		dev_priv->have_mem_mmu = 0;
++	}
++
++	if (dev_priv->have_tt) {
++		ttm_bo_clean_mm(bdev, TTM_PL_TT);
++		dev_priv->have_tt = 0;
++	}
++
++	if (dev_priv->have_camera) {
++		ttm_bo_clean_mm(bdev, TTM_PL_CI);
++		dev_priv->have_camera = 0;
++	}
++	if (dev_priv->have_rar) {
++		ttm_bo_clean_mm(bdev, TTM_PL_RAR);
++		dev_priv->have_rar = 0;
++	}
++
++	psb_msvdx_uninit(dev);
++
++	/* Topaz exists only on Moorestown and may be fused off. */
++	if (IS_MRST(dev))
++		if (!dev_priv->topaz_disabled)
++			lnc_topaz_uninit(dev);
++}
++
++#define FB_REG06 0xD0810600
++#define FB_TOPAZ_DISABLE BIT0
++#define PCI_ID_TOPAZ_DISABLED 0x4101
++#define FB_MIPI_DISABLE BIT11
++#define FB_REG09 0xD0810900
++#define FB_SKU_MASK (BIT12|BIT13|BIT14)
++#define FB_SKU_SHIFT 12
++#define FB_SKU_100 0
++#define FB_SKU_100L 1
++#define FB_SKU_83 2
++#if 1 /* FIXME remove it after PO */
++#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
++#define FB_GFX_CLK_DIVIDE_SHIFT 20
++#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
++#define FB_VED_CLK_DIVIDE_SHIFT 23
++#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
++#define FB_VEC_CLK_DIVIDE_SHIFT 25
++#endif /* FIXME remove it after PO */
++
++
++/**
++ * mrst_get_fuse_settings - read Moorestown fuse registers via host bridge
++ * @dev_priv: driver private structure to populate
++ *
++ * Reads fuse registers FB_REG06/FB_REG09 through the B0:D0:F0 message
++ * bus window (write address to config 0xD0, read value from 0xD4) and
++ * derives: internal display type (LVDS vs MIPI), whether the Topaz
++ * encoder is fused off (PCI id 0x4101), the SKU/core-clock variant and,
++ * for PO bring-up, the GFX/VED/VEC clock divider logging.
++ *
++ * Fixes over the original: a NULL host bridge is no longer dereferenced,
++ * and the reference taken by pci_get_bus_and_slot() is released with
++ * pci_dev_put() (the original leaked it).
++ */
++void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
++{
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	uint32_t fuse_value = 0;
++	uint32_t fuse_value_tmp = 0;
++
++	if (pci_root == NULL) {
++		DRM_ERROR("can't find host bridge, fuse settings unread\n");
++		return;
++	}
++
++	/* Indirect fuse access: address to 0xD0, value back from 0xD4. */
++	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
++	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++	dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
++
++	DRM_INFO("internal display is %s\n",
++		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
++
++	/* VEC (Topaz encoder) is fused off on the 0x4101 SKU. */
++	if (dev_priv->dev->pci_device == PCI_ID_TOPAZ_DISABLED)
++		dev_priv->topaz_disabled = 1;
++	else
++		dev_priv->topaz_disabled = 0;
++
++	dev_priv->video_device_fuse = fuse_value;
++
++	DRM_INFO("topaz is %s\n",
++		 dev_priv->topaz_disabled ? "disabled" : "enabled");
++
++	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
++	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++	/* Done with the host bridge; drop the reference taken above. */
++	pci_dev_put(pci_root);
++
++	DRM_INFO("SKU values is 0x%x. \n", fuse_value);
++	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
++
++	dev_priv->fuse_reg_value = fuse_value;
++
++	switch (fuse_value_tmp) {
++	case FB_SKU_100:
++		DRM_INFO("SKU values is SKU_100. LNC core clk is 200MHz.\n");
++		dev_priv->sku_100 = true;
++		break;
++	case FB_SKU_100L:
++		DRM_INFO("SKU values is SKU_100L. LNC core clk is 100MHz.\n");
++		dev_priv->sku_100L = true;
++		break;
++	case FB_SKU_83:
++		DRM_INFO("SKU values is SKU_83. LNC core clk is 166MHz.\n");
++		dev_priv->sku_83 = true;
++		break;
++	default:
++		DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
++			  fuse_value_tmp);
++	}
++
++#if 1 /* FIXME remove it after PO */
++	/* Log the clock-divider fuses; informational only. */
++	fuse_value_tmp =
++		(fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
++
++	switch (fuse_value_tmp) {
++	case 0:
++		DRM_INFO("Gfx clk : core clk = 1:1. \n");
++		break;
++	case 1:
++		DRM_INFO("Gfx clk : core clk = 4:3. \n");
++		break;
++	case 2:
++		DRM_INFO("Gfx clk : core clk = 8:5. \n");
++		break;
++	case 3:
++		DRM_INFO("Gfx clk : core clk = 2:1. \n");
++		break;
++	case 4:
++		DRM_INFO("Gfx clk : core clk = 16:7. \n");
++		break;
++	case 5:
++		DRM_INFO("Gfx clk : core clk = 8:3. \n");
++		break;
++	case 6:
++		DRM_INFO("Gfx clk : core clk = 16:5. \n");
++		break;
++	case 7:
++		DRM_INFO("Gfx clk : core clk = 4:1. \n");
++		break;
++	default:
++		DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
++			  fuse_value_tmp);
++	}
++
++	fuse_value_tmp =
++		(fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
++
++	switch (fuse_value_tmp) {
++	case 0:
++		DRM_INFO("Ved clk : core clk = 1:1. \n");
++		break;
++	case 1:
++		DRM_INFO("Ved clk : core clk = 4:3. \n");
++		break;
++	case 2:
++		DRM_INFO("Ved clk : core clk = 8:5. \n");
++		break;
++	case 3:
++		DRM_INFO("Ved clk : core clk = 2:1. \n");
++		break;
++	default:
++		DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
++			  fuse_value_tmp);
++	}
++
++	fuse_value_tmp =
++		(fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
++
++	switch (fuse_value_tmp) {
++	case 0:
++		DRM_INFO("Vec clk : core clk = 1:1. \n");
++		break;
++	case 1:
++		DRM_INFO("Vec clk : core clk = 4:3. \n");
++		break;
++	case 2:
++		DRM_INFO("Vec clk : core clk = 8:5. \n");
++		break;
++	case 3:
++		DRM_INFO("Vec clk : core clk = 2:1. \n");
++		break;
++	default:
++		DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
++			  fuse_value_tmp);
++	}
++#endif /* FIXME remove it after PO */
++}
++
++/**
++ * mrst_get_vbt_data - fetch the firmware GCT/VBT panel description
++ * @dev_priv: driver private structure to populate
++ *
++ * Reads the platform-config (VBT) physical address from B0:D2:F0 config
++ * offset 0xFC, copies the VBT header, maps the GCT that follows it, and
++ * extracts the boot panel's timing (DTD), port control and MIPI display
++ * descriptor according to the GCT revision.
++ *
++ * On success pVBT->mrst_gct remains ioremap()ed for later use.
++ *
++ * Returns true on success; false (with pVBT->Size = 0) when firmware
++ * publishes no VBT, a mapping fails, or the GCT revision is unknown.
++ *
++ * Fixes over the original: the pci_get_bus_and_slot() reference is now
++ * released with pci_dev_put(), and both ioremap() results are checked
++ * for NULL instead of being dereferenced unconditionally.
++ */
++bool mrst_get_vbt_data(struct drm_psb_private *dev_priv)
++{
++	struct mrst_vbt *pVBT = &dev_priv->vbt_data;
++	u32 platform_config_address;
++	u8 *pVBT_virtual;
++	u8 bpi;
++	void *pGCT;
++	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
++
++	if (pci_gfx_root == NULL) {
++		pVBT->Size = 0;
++		return false;
++	}
++
++	/*get the address of the platform config vbt, B0:D2:F0;0xFC */
++	pci_read_config_dword(pci_gfx_root, 0xFC, &platform_config_address);
++	pci_dev_put(pci_gfx_root);
++	DRM_INFO("drm platform config address is %x\n",
++		 platform_config_address);
++
++	/* check for platform config address == 0. */
++	/* this means fw doesn't support vbt */
++
++	if (platform_config_address == 0) {
++		pVBT->Size = 0;
++		return false;
++	}
++
++	/* get the virtual address of the vbt */
++	pVBT_virtual = ioremap(platform_config_address, sizeof(*pVBT));
++	if (pVBT_virtual == NULL) {
++		pVBT->Size = 0;
++		return false;
++	}
++
++	memcpy(pVBT, pVBT_virtual, sizeof(*pVBT));
++	iounmap(pVBT_virtual); /* Free virtual address space */
++
++	printk(KERN_ALERT "GCT Revision is %x\n", pVBT->Revision);
++	/* Map the GCT proper; it starts 4 bytes before the end of the
++	 * VBT header and stays mapped on success. */
++	pVBT->mrst_gct = ioremap(platform_config_address + sizeof(*pVBT) - 4,
++				 pVBT->Size - sizeof(*pVBT) + 4);
++	if (pVBT->mrst_gct == NULL) {
++		pVBT->Size = 0;
++		return false;
++	}
++	pGCT = pVBT->mrst_gct;
++
++	switch (pVBT->Revision) {
++	case 0:
++		bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
++		dev_priv->gct_data.bpi = bpi;
++		dev_priv->gct_data.pt =
++			((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
++		memcpy(&dev_priv->gct_data.DTD,
++		       &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
++		       sizeof(struct mrst_timing_info));
++		dev_priv->gct_data.Panel_Port_Control =
++			((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
++		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++			((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
++		break;
++	case 1:
++		bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
++		dev_priv->gct_data.bpi = bpi;
++		dev_priv->gct_data.pt =
++			((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
++		memcpy(&dev_priv->gct_data.DTD,
++		       &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
++		       sizeof(struct mrst_timing_info));
++		dev_priv->gct_data.Panel_Port_Control =
++			((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
++		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++			((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
++		break;
++	default:
++		printk(KERN_ALERT "Unknown revision of GCT!\n");
++		pVBT->Size = 0;
++		return false;
++	}
++
++	return true;
++}
++
++/*
++ * Bring up the memory managers and video cores: seed the per-engine
++ * fence sequence numbers, locate the first TTM-managed GTT offset past
++ * the stolen-memory page tables, log the SGX core id/revision, register
++ * the CI/RAR/TT/MMU regions with TTM, then start MSVDX and (on
++ * Moorestown, unless fused off) Topaz.
++ * Returns 0 on success or a negative errno; out_err unwinds via
++ * psb_do_takedown().
++ */
++static int psb_do_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *) dev->dev_private;
++	struct ttm_bo_device *bdev = &dev_priv->bdev;
++	struct psb_gtt *pg = dev_priv->pg;
++
++	uint32_t stolen_gtt;
++	uint32_t tt_start;
++	uint32_t tt_pages;
++
++	int ret = -ENOMEM;
++
++
++	/*
++	 * Initialize sequence numbers for the different command
++	 * submission mechanisms.
++	 */
++
++	dev_priv->sequence[PSB_ENGINE_2D] = 0;
++	dev_priv->sequence[PSB_ENGINE_VIDEO] = 0;
++	dev_priv->sequence[LNC_ENGINE_ENCODE] = 0;
++
++	/* The MMU GATT aperture must start on a 256MB boundary. */
++	if (pg->mmu_gatt_start & 0x0FFFFFFF) {
++		DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
++		ret = -EINVAL;
++		goto out_err;
++	}
++
++	/* Pages of GTT entries (4 bytes per entry) needed to cover the
++	 * stolen area, capped at the total GTT page count. */
++	stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
++	stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	stolen_gtt =
++		(stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
++
++	dev_priv->gatt_free_offset = pg->mmu_gatt_start +
++		(stolen_gtt << PAGE_SHIFT) * 1024;
++
++	/* "1 ||" forces the id/revision dump regardless of drm_debug. */
++	if (1 || drm_debug) {
++		uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
++		uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
++		DRM_INFO("SGX core id = 0x%08x\n", core_id);
++		DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
++			 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
++			 _PSB_CC_REVISION_MAJOR_SHIFT,
++			 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
++			 _PSB_CC_REVISION_MINOR_SHIFT);
++		DRM_INFO
++		    ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
++		     (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
++		     _PSB_CC_REVISION_MAINTENANCE_SHIFT,
++		     (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
++		     _PSB_CC_REVISION_DESIGNER_SHIFT);
++	}
++
++	spin_lock_init(&dev_priv->irqmask_lock);
++
++	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++		pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
++	tt_start = dev_priv->gatt_free_offset - pg->mmu_gatt_start;
++	tt_pages -= tt_start >> PAGE_SHIFT;
++	dev_priv->sizes.ta_mem_size = 0;
++
++
++	/* Camera-interface aperture (Moorestown only, when present);
++	 * failures are tolerated and simply leave have_camera at 0. */
++	if (IS_MRST(dev) &&
++	    (dev_priv->ci_region_size != 0) &&
++	    !ttm_bo_init_mm(bdev, TTM_PL_CI, pg->ci_start >> PAGE_SHIFT,
++			    dev_priv->ci_region_size >> PAGE_SHIFT)) {
++		dev_priv->have_camera = 1;
++	}
++
++	/* since there is always rar region for video, it is ok */
++	if (IS_MRST(dev) &&
++	    (dev_priv->rar_region_size != 0) &&
++	    !ttm_bo_init_mm(bdev, TTM_PL_RAR, pg->rar_start >> PAGE_SHIFT,
++			    dev_priv->rar_region_size >> PAGE_SHIFT)) {
++		dev_priv->have_rar = 1;
++	}
++
++	/* TT region managed by TTM. */
++	if (!ttm_bo_init_mm(bdev, TTM_PL_TT,
++			    (pg->rar_start + dev_priv->rar_region_size) >> PAGE_SHIFT,
++			    pg->gatt_pages -
++			    (pg->ci_start >> PAGE_SHIFT) -
++			    ((dev_priv->ci_region_size + dev_priv->rar_region_size)
++			     >> PAGE_SHIFT))) {
++
++		dev_priv->have_tt = 1;
++		dev_priv->sizes.tt_size =
++			(tt_pages << PAGE_SHIFT) / (1024 * 1024) / 2;
++	}
++
++	/* MMU-only space below the GATT aperture. */
++	if (!ttm_bo_init_mm(bdev,
++			    DRM_PSB_MEM_MMU,
++			    0x00000000,
++			    (pg->gatt_start - PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
++		dev_priv->have_mem_mmu = 1;
++		dev_priv->sizes.mmu_size =
++			(pg->gatt_start - PSB_MEM_MMU_START) /
++			(1024*1024);
++	}
++
++
++	PSB_DEBUG_INIT("Init MSVDX\n");
++	psb_msvdx_init(dev);
++
++	if (IS_MRST(dev)) {
++		PSB_DEBUG_INIT("Init Topaz\n");
++		/* for sku100L and sku100M, VEC is disabled in fuses */
++		if (!dev_priv->topaz_disabled)
++			lnc_topaz_init(dev);
++		else
++			ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++	}
++
++	return 0;
++out_err:
++	psb_do_takedown(dev);
++	return ret;
++}
++
++/*
++ * Locate the ACPI OpRegion (physical address published by firmware at
++ * PCI config offset 0xFC) and cache a pointer to the lid-switch state
++ * word at offset 0x1ac, which the lid timer later polls.
++ *
++ * NOTE(review): the 8KiB ioremap()ed base is never iounmap()ed here;
++ * dev_priv->lid_state points into the mapping for the driver lifetime,
++ * which appears intentional.
++ *
++ * Returns 0 on success, -ENOTSUPP when firmware exposes no OpRegion,
++ * -ENOMEM when the mapping fails.
++ */
++static int psb_intel_opregion_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	/*struct psb_intel_opregion * opregion = &dev_priv->opregion;*/
++	u32 opregion_phy;
++	void *base;
++	u32 *lid_state;
++
++	dev_priv->lid_state = NULL;
++
++	pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
++	if (opregion_phy == 0) {
++		DRM_DEBUG("Opregion not supported, won't support lid-switch\n");
++		return -ENOTSUPP;
++	}
++	DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
++
++	base = ioremap(opregion_phy, 8*1024);
++	if (!base)
++		return -ENOMEM;
++
++	/* Lid-switch state word lives at a fixed OpRegion offset. */
++	lid_state = base + 0x01ac;
++
++	DRM_DEBUG("Lid switch state 0x%08x\n", *lid_state);
++
++	dev_priv->lid_state = lid_state;
++	dev_priv->lid_last_state = *lid_state;
++	return 0;
++}
++
++/*
++ * Driver unload: tear everything down in roughly the reverse order of
++ * psb_driver_load().  The PVR services layer goes first, then backlight
++ * and modeset, TTM managers and video cores, the MMU pfn mappings and
++ * driver, GTT, register mappings, TTM global/object state and finally
++ * OSPM.  Always returns 0.
++ */
++static int psb_driver_unload(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *) dev->dev_private;
++
++	/* First, unload the PVR (SGX services) driver */
++	PVRSRVDrmUnload(dev);
++
++	psb_backlight_exit(); /*writes minimum value to backlight HW reg */
++
++	if (drm_psb_no_fb == 0)
++		psb_modeset_cleanup(dev);
++
++	if (dev_priv) {
++		if (IS_POULSBO(dev))
++			psb_lid_timer_takedown(dev_priv);
++
++		/* psb_watchdog_takedown(dev_priv); */
++		psb_do_takedown(dev);
++
++
++		if (dev_priv->pf_pd) {
++			psb_mmu_free_pagedir(dev_priv->pf_pd);
++			dev_priv->pf_pd = NULL;
++		}
++		if (dev_priv->mmu) {
++			struct psb_gtt *pg = dev_priv->pg;
++
++			/* Remove the stolen/CI/RAR pfn ranges inserted at
++			 * load time before taking the MMU driver down. */
++			down_read(&pg->sem);
++			psb_mmu_remove_pfn_sequence(
++				psb_mmu_get_default_pd
++				(dev_priv->mmu),
++				pg->mmu_gatt_start,
++				pg->vram_stolen_size >> PAGE_SHIFT);
++			if (pg->ci_stolen_size != 0)
++				psb_mmu_remove_pfn_sequence(
++					psb_mmu_get_default_pd
++					(dev_priv->mmu),
++					pg->ci_start,
++					pg->ci_stolen_size >> PAGE_SHIFT);
++			if (pg->rar_stolen_size != 0)
++				psb_mmu_remove_pfn_sequence(
++					psb_mmu_get_default_pd
++					(dev_priv->mmu),
++					pg->rar_start,
++					pg->rar_stolen_size >> PAGE_SHIFT);
++			up_read(&pg->sem);
++			psb_mmu_driver_takedown(dev_priv->mmu);
++			dev_priv->mmu = NULL;
++		}
++		psb_gtt_takedown(dev_priv->pg, 1);
++		if (dev_priv->scratch_page) {
++			__free_page(dev_priv->scratch_page);
++			dev_priv->scratch_page = NULL;
++		}
++		if (dev_priv->has_bo_device) {
++			ttm_bo_device_release(&dev_priv->bdev);
++			dev_priv->has_bo_device = 0;
++		}
++		if (dev_priv->has_fence_device) {
++			ttm_fence_device_release(&dev_priv->fdev);
++			dev_priv->has_fence_device = 0;
++		}
++		if (dev_priv->vdc_reg) {
++			iounmap(dev_priv->vdc_reg);
++			dev_priv->vdc_reg = NULL;
++		}
++		if (dev_priv->sgx_reg) {
++			iounmap(dev_priv->sgx_reg);
++			dev_priv->sgx_reg = NULL;
++		}
++		if (dev_priv->msvdx_reg) {
++			iounmap(dev_priv->msvdx_reg);
++			dev_priv->msvdx_reg = NULL;
++		}
++
++		if (IS_MRST(dev)) {
++			if (dev_priv->topaz_reg) {
++				iounmap(dev_priv->topaz_reg);
++				dev_priv->topaz_reg = NULL;
++			}
++		}
++
++		if (dev_priv->tdev)
++			ttm_object_device_release(&dev_priv->tdev);
++
++		if (dev_priv->has_global)
++			psb_ttm_global_release(dev_priv);
++
++		kfree(dev_priv);
++		dev->dev_private = NULL;
++
++		/* destroy VBT data */
++		if (IS_POULSBO(dev))
++			psb_intel_destory_bios(dev);
++	}
++
++	ospm_power_uninit();
++
++	return 0;
++}
++
++
++static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++ struct drm_psb_private *dev_priv;
++ struct ttm_bo_device *bdev;
++ unsigned long resource_start;
++ struct psb_gtt *pg;
++ int ret = -ENOMEM;
++ uint32_t tt_pages;
++
++ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
++
++ if (IS_MRST(dev))
++ DRM_INFO("Run drivers on Moorestown platform!\n");
++ else
++ DRM_INFO("Run drivers on Poulsbo platform!\n");
++
++ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
++ if (dev_priv == NULL)
++ return -ENOMEM;
++
++ /*init DPST umcomm to NULL*/
++ dev_priv->psb_dpst_state = NULL;
++ dev_priv->psb_hotplug_state = NULL;
++
++ dev_priv->dev = dev;
++ bdev = &dev_priv->bdev;
++
++ ret = psb_ttm_global_init(dev_priv);
++ if (unlikely(ret != 0))
++ goto out_err;
++ dev_priv->has_global = 1;
++
++ dev_priv->tdev = ttm_object_device_init
++ (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
++ if (unlikely(dev_priv->tdev == NULL))
++ goto out_err;
++
++ mutex_init(&dev_priv->temp_mem);
++ mutex_init(&dev_priv->cmdbuf_mutex);
++ mutex_init(&dev_priv->reset_mutex);
++ INIT_LIST_HEAD(&dev_priv->context.validate_list);
++ INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
++
++
++ spin_lock_init(&dev_priv->reloc_lock);
++
++ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
++
++ dev->dev_private = (void *) dev_priv;
++ dev_priv->chipset = chipset;
++ psb_set_uopt(&dev_priv->uopt);
++
++ PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
++ /* psb_watchdog_init(dev_priv); */
++ psb_scheduler_init(dev, &dev_priv->scheduler);
++
++
++ PSB_DEBUG_INIT("Mapping MMIO\n");
++ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++ if (IS_MRST(dev))
++ dev_priv->msvdx_reg =
++ ioremap(resource_start + MRST_MSVDX_OFFSET,
++ PSB_MSVDX_SIZE);
++ else
++ dev_priv->msvdx_reg =
++ ioremap(resource_start + PSB_MSVDX_OFFSET,
++ PSB_MSVDX_SIZE);
++
++ if (!dev_priv->msvdx_reg)
++ goto out_err;
++
++ if (IS_MRST(dev) && !dev_priv->topaz_disabled) {
++ dev_priv->topaz_reg =
++ ioremap(resource_start + LNC_TOPAZ_OFFSET,
++ LNC_TOPAZ_SIZE);
++ if (!dev_priv->topaz_reg)
++ goto out_err;
++ }
++
++ dev_priv->vdc_reg =
++ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
++ if (!dev_priv->vdc_reg)
++ goto out_err;
++
++ if (IS_MRST(dev))
++ dev_priv->sgx_reg =
++ ioremap(resource_start + MRST_SGX_OFFSET,
++ PSB_SGX_SIZE);
++ else
++ dev_priv->sgx_reg =
++ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
++
++ if (!dev_priv->sgx_reg)
++ goto out_err;
++
++ if (IS_MRST(dev)) {
++ mrst_get_fuse_settings(dev_priv);
++ mrst_get_vbt_data(dev_priv);
++ } else {
++ psb_intel_opregion_init(dev);
++ psb_intel_init_bios(dev);
++ }
++
++ PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
++
++ if (IS_MRST(dev)) {
++ get_ci_info(dev_priv);
++ get_rar_info(dev_priv);
++ }
++
++ /* Init OSPM support */
++ ospm_power_init(dev);
++
++ ret = psb_ttm_fence_device_init(&dev_priv->fdev);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ dev_priv->has_fence_device = 1;
++ ret = ttm_bo_device_init(bdev,
++ dev_priv->mem_global_ref.object,
++ &psb_ttm_bo_driver,
++ DRM_PSB_FILE_PAGE_OFFSET);
++ if (unlikely(ret != 0))
++ goto out_err;
++ dev_priv->has_bo_device = 1;
++ ttm_lock_init(&dev_priv->ttm_lock);
++
++ ret = -ENOMEM;
++
++ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
++ if (!dev_priv->scratch_page)
++ goto out_err;
++
++ set_pages_uc(dev_priv->scratch_page, 1);
++
++ dev_priv->pg = psb_gtt_alloc(dev);
++ if (!dev_priv->pg)
++ goto out_err;
++
++ ret = psb_gtt_init(dev_priv->pg, 0);
++ if (ret)
++ goto out_err;
++
++ ret = psb_gtt_mm_init(dev_priv->pg);
++ if (ret)
++ goto out_err;
++
++ dev_priv->mmu = psb_mmu_driver_init((void *)0,
++ drm_psb_trap_pagefaults, 0,
++ dev_priv);
++ if (!dev_priv->mmu)
++ goto out_err;
++
++ pg = dev_priv->pg;
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ /* CI/RAR use the lower half of TT. */
++ pg->ci_start = (tt_pages / 2) << PAGE_SHIFT;
++ pg->rar_start = pg->ci_start + pg->ci_stolen_size;
++
++
++ /*
++ * Make MSVDX/TOPAZ MMU aware of the CI stolen memory area.
++ */
++ if (dev_priv->pg->ci_stolen_size != 0) {
++ down_read(&pg->sem);
++ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ dev_priv->ci_region_start >> PAGE_SHIFT,
++ pg->mmu_gatt_start + pg->ci_start,
++ pg->ci_stolen_size >> PAGE_SHIFT, 0);
++ up_read(&pg->sem);
++ if (ret)
++ goto out_err;
++ }
++
++ /*
++ * Make MSVDX/TOPAZ MMU aware of the rar stolen memory area.
++ */
++ if (dev_priv->pg->rar_stolen_size != 0) {
++ down_read(&pg->sem);
++ ret = psb_mmu_insert_pfn_sequence(
++ psb_mmu_get_default_pd(dev_priv->mmu),
++ dev_priv->rar_region_start >> PAGE_SHIFT,
++ pg->mmu_gatt_start + pg->rar_start,
++ pg->rar_stolen_size >> PAGE_SHIFT, 0);
++ up_read(&pg->sem);
++ if (ret)
++ goto out_err;
++ }
++
++ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
++ if (!dev_priv->pf_pd)
++ goto out_err;
++
++ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
++ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
++
++
++ spin_lock_init(&dev_priv->sequence_lock);
++
++
++ PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
++
++ ret = psb_do_init(dev);
++ if (ret)
++ return ret;
++
++ /**
++ * Init lid switch timer.
++ * NOTE: must do this after psb_intel_opregion_init
++ * and psb_backlight_init
++ */
++ if (IS_POULSBO(dev) && dev_priv->lid_state)
++ psb_lid_timer_init(dev_priv);
++
++ /*initialize the MSI for MRST*/
++ if (IS_MRST(dev)) {
++ if (pci_enable_msi(dev->pdev)) {
++ DRM_ERROR("Enable MSI for MRST failed!\n");
++ } else {
++ PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
++ dev->pdev->irq);
++ /* pci_write_config_word(pdev, 0x04, 0x07); */
++ }
++ }
++
++
++ if (drm_psb_no_fb == 0) {
++ psb_modeset_init(dev);
++ drm_helper_initial_config(dev);
++ }
++
++ /*must be after mrst_get_fuse_settings()*/
++ ret = psb_backlight_init(dev);
++ if (ret)
++ return ret;
++
++
++ /*Intel drm driver load is done, continue doing pvr load*/
++ DRM_DEBUG("Pvr driver load\n");
++
++ return PVRSRVDrmLoad(dev, chipset);
++out_err:
++ psb_driver_unload(dev);
++ return ret;
++}
++
++int psb_driver_device_is_agp(struct drm_device *dev)
++{
++ return 0;
++}
++
++int psb_extension_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ union drm_psb_extension_arg *arg = data;
++ struct drm_psb_extension_rep *rep = &arg->rep;
++
++ if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++ if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++ if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ /*return the page flipping ioctl offset*/
++ if (strcmp(arg->extension, "psb_page_flipping_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_FLIP;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ /* return the video rar offset */
++ if (strcmp(arg->extension, "lnc_video_getparam") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_LNC_VIDEO_GETPARAM;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ rep->exists = 0;
++ return 0;
++}
++
++static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ struct ttm_mem_type_manager *man;
++ int clean;
++ int ret;
++
++ ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
++ psb_fpriv(file_priv)->tfile);
++ if (unlikely(ret != 0))
++ return ret;
++
++ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ man = &bdev->man[TTM_PL_TT];
++ spin_lock(&bdev->lru_lock);
++ clean = drm_mm_clean(&man->manager);
++ spin_unlock(&bdev->lru_lock);
++ if (unlikely(!clean))
++ DRM_INFO("Warning: GATT was not clean after VT switch.\n");
++
++ ttm_bo_swapout_all(&dev_priv->bdev);
++
++ return 0;
++out_unlock:
++ (void) ttm_write_unlock(&dev_priv->ttm_lock,
++ psb_fpriv(file_priv)->tfile);
++ return ret;
++}
++
++static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ return ttm_write_unlock(&dev_priv->ttm_lock,
++ psb_fpriv(file_priv)->tfile);
++}
++
++static int psb_sizes_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_sizes_arg *arg =
++ (struct drm_psb_sizes_arg *) data;
++
++ *arg = dev_priv->sizes;
++ return 0;
++}
++
++static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++
++ *arg = dev_priv->fuse_reg_value;
++ return 0;
++}
++static int psb_vbt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct gct_ioctl_arg *pGCT = data;
++
++ memcpy(pGCT, &dev_priv->gct_data, sizeof(*pGCT));
++
++ return 0;
++}
++
++static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
++ struct drm_file *file_priv)
++{
++ uint32_t flags;
++ uint32_t obj_id;
++ struct drm_mode_object *obj;
++ struct drm_connector *connector;
++ struct drm_crtc *crtc;
++ struct drm_psb_dc_state_arg *arg =
++ (struct drm_psb_dc_state_arg *)data;
++
++ if (IS_MRST(dev))
++ return 0;
++
++ flags = arg->flags;
++ obj_id = arg->obj_id;
++
++ if (flags & PSB_DC_CRTC_MASK) {
++ obj = drm_mode_object_find(dev, obj_id,
++ DRM_MODE_OBJECT_CRTC);
++ if (!obj) {
++ DRM_DEBUG("Invalid CRTC object.\n");
++ return -EINVAL;
++ }
++
++ crtc = obj_to_crtc(obj);
++
++ mutex_lock(&dev->mode_config.mutex);
++ if (drm_helper_crtc_in_use(crtc)) {
++ if (flags & PSB_DC_CRTC_SAVE)
++ crtc->funcs->save(crtc);
++ else
++ crtc->funcs->restore(crtc);
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++ } else if (flags & PSB_DC_OUTPUT_MASK) {
++ obj = drm_mode_object_find(dev, obj_id,
++ DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ DRM_DEBUG("Invalid connector id.\n");
++ return -EINVAL;
++ }
++
++ connector = obj_to_connector(obj);
++ if (flags & PSB_DC_OUTPUT_SAVE)
++ connector->funcs->save(connector);
++ else
++ connector->funcs->restore(connector);
++
++ return 0;
++ }
++
++ DRM_DEBUG("Bad flags 0x%x\n", flags);
++ return -EINVAL;
++}
++
++static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ struct backlight_device bd;
++ dev_priv->blc_adj2 = *arg;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ bd.props.brightness = psb_get_brightness(&bd);
++ psb_set_brightness(&bd);
++#endif
++ return 0;
++}
++
++static int psb_adb_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ struct backlight_device bd;
++ dev_priv->blc_adj1 = *arg;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ bd.props.brightness = psb_get_brightness(&bd);
++ psb_set_brightness(&bd);
++#endif
++ return 0;
++}
++
++static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ u32 irqCtrl = 0;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct dpst_guardband guardband_reg;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++ uint32_t *enable = data;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ if (*enable == 1) {
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_pipe_assignment = 0;
++ ie_hist_cont_reg.histogram_mode_select = DPST_YUV_LUMA_MODE;
++ ie_hist_cont_reg.ie_histogram_enable = 1;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 1;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(irqCtrl | PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ /* Wait for two vblanks */
++ } else {
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 0;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_histogram_enable = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ irqCtrl &= ~PIPE_DPST_EVENT_ENABLE;
++ PSB_WVDC32(irqCtrl, PIPEASTAT);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_hist_status_arg *hist_status = data;
++ uint32_t *arg = hist_status->buf;
++ u32 iedbr_reg_data = 0;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++ u32 i;
++ int dpst3_bin_threshold_count = 0;
++ uint32_t blm_hist_ctl = HISTOGRAM_LOGIC_CONTROL;
++ uint32_t iebdr_reg = HISTOGRAM_BIN_DATA;
++ uint32_t segvalue_max_22_bit = 0x3fffff;
++ uint32_t iedbr_busy_bit = 0x80000000;
++ int dpst3_bin_count = 32;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
++ ie_hist_cont_reg.bin_reg_func_select = dpst3_bin_threshold_count;
++ ie_hist_cont_reg.bin_reg_index = 0;
++
++ PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
++
++ for (i = 0; i < dpst3_bin_count; i++) {
++ iedbr_reg_data = PSB_RVDC32(iebdr_reg);
++
++ if (!(iedbr_reg_data & iedbr_busy_bit)) {
++ arg[i] = iedbr_reg_data & segvalue_max_22_bit;
++ } else {
++ i = 0;
++ ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
++ ie_hist_cont_reg.bin_reg_index = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
++ }
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct pci_dev *pdev = NULL;
++ struct device *ddev = NULL;
++ struct kobject *kobj = NULL;
++ uint32_t *arg = data;
++
++ if (*arg == 1) {
++ /*find handle to drm kboject*/
++ pdev = dev->pdev;
++ ddev = &pdev->dev;
++ kobj = &ddev->kobj;
++
++ if (dev_priv->psb_dpst_state == NULL) {
++ /*init dpst kmum comms*/
++ dev_priv->psb_dpst_state = psb_dpst_init(kobj);
++ } else {
++ printk(KERN_ALERT "DPST already initialized\n");
++ }
++
++ sysirq_enable_dpst(dev);
++ psb_dpst_notify_change_um(DPST_EVENT_INIT_COMPLETE,
++ dev_priv->psb_dpst_state);
++ } else {
++ /*hotplug and dpst destroy examples*/
++ sysirq_disable_dpst(dev);
++ psb_dpst_notify_change_um(DPST_EVENT_TERMINATE,
++ dev_priv->psb_dpst_state);
++ psb_dpst_device_pool_destroy(dev_priv->psb_dpst_state);
++ dev_priv->psb_dpst_state = NULL;
++ }
++ return 0;
++}
++
++/* return the current mode to the dpst module */
++static int psb_dpst_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ uint32_t x;
++ uint32_t y;
++ uint32_t reg;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ reg = PSB_RVDC32(PIPEASRC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ /* horizontal is the left 16 bits */
++ x = reg >> 16;
++ /* vertical is the right 16 bits */
++ y = reg & 0x0000ffff;
++
++ /* the values are the image size minus one */
++ x+=1;
++ y+=1;
++
++ *arg = (x << 16) | y;
++
++ return 0;
++}
++static int psb_gamma_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_dpst_lut_arg *lut_arg = data;
++ struct drm_mode_object *obj;
++ struct drm_crtc *crtc;
++ struct drm_connector *connector;
++ struct psb_intel_crtc *psb_intel_crtc;
++ int i = 0;
++ int32_t obj_id;
++
++ obj_id = lut_arg->output_id;
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ DRM_DEBUG("Invalid Connector object.\n");
++ return -EINVAL;
++ }
++
++ connector = obj_to_connector(obj);
++ crtc = connector->encoder->crtc;
++ psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < 256; i++)
++ psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
++
++ psb_intel_crtc_load_lut(crtc);
++
++ return 0;
++}
++
++static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct dpst_guardband* input = (struct dpst_guardband*) data;
++ struct dpst_guardband reg_data;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ reg_data.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ reg_data.guardband = input->guardband;
++ reg_data.guardband_interrupt_delay = input->guardband_interrupt_delay;
++ /* printk(KERN_ALERT "guardband = %u\ninterrupt delay = %u\n",
++ reg_data.guardband, reg_data.guardband_interrupt_delay); */
++ PSB_WVDC32(reg_data.data, HISTOGRAM_INT_CONTROL);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ uint32_t obj_id;
++ uint16_t op;
++ struct drm_mode_modeinfo *umode;
++ struct drm_display_mode *mode = NULL;
++ struct drm_psb_mode_operation_arg *arg;
++ struct drm_mode_object *obj;
++ struct drm_connector *connector;
++ struct drm_framebuffer * drm_fb;
++ struct psb_framebuffer * psb_fb;
++ struct drm_connector_helper_funcs *connector_funcs;
++ int ret = 0;
++ int resp = MODE_OK;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++
++ arg = (struct drm_psb_mode_operation_arg *)data;
++ obj_id = arg->obj_id;
++ op = arg->operation;
++
++ switch(op) {
++ case PSB_MODE_OPERATION_SET_DC_BASE:
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
++ if(!obj) {
++ DRM_ERROR("Invalid FB id %d\n", obj_id);
++ return -EINVAL;
++ }
++
++ drm_fb = obj_to_fb(obj);
++ psb_fb = to_psb_fb(drm_fb);
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(DSPASURF, psb_fb->offset);
++ REG_READ(DSPASURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ dev_priv->saveDSPASURF = psb_fb->offset;
++ }
++
++ return 0;
++ case PSB_MODE_OPERATION_MODE_VALID:
++ umode = &arg->mode;
++
++ mutex_lock(&dev->mode_config.mutex);
++
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ ret = -EINVAL;
++ goto mode_op_out;
++ }
++
++ connector = obj_to_connector(obj);
++
++ mode = drm_mode_create(dev);
++ if (!mode) {
++ ret = -ENOMEM;
++ goto mode_op_out;
++ }
++
++ /* drm_crtc_convert_umode(mode, umode); */
++ {
++ mode->clock = umode->clock;
++ mode->hdisplay = umode->hdisplay;
++ mode->hsync_start = umode->hsync_start;
++ mode->hsync_end = umode->hsync_end;
++ mode->htotal = umode->htotal;
++ mode->hskew = umode->hskew;
++ mode->vdisplay = umode->vdisplay;
++ mode->vsync_start = umode->vsync_start;
++ mode->vsync_end = umode->vsync_end;
++ mode->vtotal = umode->vtotal;
++ mode->vscan = umode->vscan;
++ mode->vrefresh = umode->vrefresh;
++ mode->flags = umode->flags;
++ mode->type = umode->type;
++ strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
++ mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
++ }
++
++ connector_funcs = (struct drm_connector_helper_funcs *)
++ connector->helper_private;
++
++ if (connector_funcs->mode_valid) {
++ resp = connector_funcs->mode_valid(connector, mode);
++ arg->data = (void *)resp;
++ }
++
++ /*do some clean up work*/
++ if(mode) {
++ drm_mode_destroy(dev, mode);
++ }
++mode_op_out:
++ mutex_unlock(&dev->mode_config.mutex);
++ return ret;
++
++ default:
++ DRM_DEBUG("Unsupported psb mode operation");
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_stolen_memory_arg *arg = data;
++
++ arg->base = dev_priv->pg->stolen_base;
++ arg->size = dev_priv->pg->vram_stolen_size;
++
++ return 0;
++}
++
++static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_register_rw_arg *arg = data;
++ UHBUsage usage =
++ arg->b_force_hw_on ? OSPM_UHB_FORCE_POWER_ON : OSPM_UHB_ONLY_IF_ON;
++
++ if (arg->display_write_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
++ PSB_WVDC32(arg->display.pfit_controls,
++ PFIT_CONTROL);
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ PSB_WVDC32(arg->display.pfit_autoscale_ratios,
++ PFIT_AUTO_RATIOS);
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ PSB_WVDC32(
++ arg->display.pfit_programmed_scale_ratios,
++ PFIT_PGM_RATIOS);
++ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
++ PSB_WVDC32(arg->display.pipeasrc,
++ PIPEASRC);
++ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
++ PSB_WVDC32(arg->display.pipebsrc,
++ PIPEBSRC);
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
++ PSB_WVDC32(arg->display.vtotal_a,
++ VTOTAL_A);
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
++ PSB_WVDC32(arg->display.vtotal_b,
++ VTOTAL_B);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
++ dev_priv->savePFIT_CONTROL =
++ arg->display.pfit_controls;
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ dev_priv->savePFIT_AUTO_RATIOS =
++ arg->display.pfit_autoscale_ratios;
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ dev_priv->savePFIT_PGM_RATIOS =
++ arg->display.pfit_programmed_scale_ratios;
++ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
++ dev_priv->savePIPEASRC = arg->display.pipeasrc;
++ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
++ dev_priv->savePIPEBSRC = arg->display.pipebsrc;
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
++ dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
++ dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
++ }
++ }
++
++ if (arg->display_read_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_CONTROLS)
++ arg->display.pfit_controls =
++ PSB_RVDC32(PFIT_CONTROL);
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ arg->display.pfit_autoscale_ratios =
++ PSB_RVDC32(PFIT_AUTO_RATIOS);
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ arg->display.pfit_programmed_scale_ratios =
++ PSB_RVDC32(PFIT_PGM_RATIOS);
++ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
++ arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
++ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
++ arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
++ arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
++ arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_CONTROLS)
++ arg->display.pfit_controls =
++ dev_priv->savePFIT_CONTROL;
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ arg->display.pfit_autoscale_ratios =
++ dev_priv->savePFIT_AUTO_RATIOS;
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ arg->display.pfit_programmed_scale_ratios =
++ dev_priv->savePFIT_PGM_RATIOS;
++ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
++ arg->display.pipeasrc = dev_priv->savePIPEASRC;
++ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
++ arg->display.pipebsrc = dev_priv->savePIPEBSRC;
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
++ arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
++ arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
++ }
++ }
++
++ if (arg->overlay_write_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
++ PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
++ PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
++ PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
++ PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
++ PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
++ PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
++ }
++
++ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
++ PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
++ dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
++ dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
++ dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
++ dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
++ dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
++ dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
++ }
++ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
++ dev_priv->saveOV_OVADD = arg->overlay.OVADD;
++ }
++ }
++
++ if (arg->overlay_read_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
++ arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
++ arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
++ arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
++ arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
++ arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
++ arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
++ }
++ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
++ arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
++ arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
++ arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
++ arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
++ arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
++ arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
++ arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
++ }
++ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
++ arg->overlay.OVADD = dev_priv->saveOV_OVADD;
++ }
++ }
++
++ if (arg->sprite_enable_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ PSB_WVDC32(0x1F3E, DSPARB);
++ PSB_WVDC32(arg->sprite.dspa_control | PSB_RVDC32(DSPACNTR), DSPACNTR);
++ PSB_WVDC32(arg->sprite.dspa_key_value, DSPAKEYVAL);
++ PSB_WVDC32(arg->sprite.dspa_key_mask, DSPAKEYMASK);
++ PSB_WVDC32(PSB_RVDC32(DSPASURF), DSPASURF);
++ PSB_RVDC32(DSPASURF);
++ PSB_WVDC32(arg->sprite.dspc_control, DSPCCNTR);
++ PSB_WVDC32(arg->sprite.dspc_stride, DSPCSTRIDE);
++ PSB_WVDC32(arg->sprite.dspc_position, DSPCPOS);
++ PSB_WVDC32(arg->sprite.dspc_linear_offset, DSPCLINOFF);
++ PSB_WVDC32(arg->sprite.dspc_size, DSPCSIZE);
++ PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
++ PSB_RVDC32(DSPCSURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++
++ if (arg->sprite_disable_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ PSB_WVDC32(0x3F3E, DSPARB);
++ PSB_WVDC32(0x0, DSPCCNTR);
++ PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
++ PSB_RVDC32(DSPCSURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++
++
++ return 0;
++}
++
++/* always available as we are SIGIO'd */
++static unsigned int psb_poll(struct file *filp,
++ struct poll_table_struct *wait)
++{
++ return POLLIN | POLLRDNORM;
++}
++
++int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
++{
++ DRM_DEBUG("\n");
++ return PVRSRVOpen(dev, priv);
++}
++
++static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct drm_file *file_priv = filp->private_data;
++ struct drm_device *dev = file_priv->minor->dev;
++ unsigned int nr = DRM_IOCTL_NR(cmd);
++ long ret;
++
++ DRM_DEBUG("cmd = %x, nr = %x\n", cmd, nr);
++
++ /*
++ * The driver private ioctls and TTM ioctls should be
++ * thread-safe.
++ */
++
++ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
++ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
++ struct drm_ioctl_desc *ioctl =
++ &psb_ioctls[nr - DRM_COMMAND_BASE];
++
++ if (unlikely(ioctl->cmd != cmd)) {
++ DRM_ERROR(
++ "Invalid drm cmnd %d ioctl->cmd %x, cmd %x\n",
++ nr - DRM_COMMAND_BASE, ioctl->cmd, cmd);
++ return -EINVAL;
++ }
++
++ return drm_unlocked_ioctl(filp, cmd, arg);
++ }
++ /*
++ * Not all old drm ioctls are thread-safe.
++ */
++
++ lock_kernel();
++ ret = drm_unlocked_ioctl(filp, cmd, arg);
++ unlock_kernel();
++ return ret;
++}
++
++static int psb_blc_read(char *buf, char **start, off_t offset, int request,
++ int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct backlight_device bd;
++ int user_brightness = 0;
++ int final_brightness = 0;
++ int len = 0;
++
++ *start = &buf[offset];
++ *eof = 0;
++
++ user_brightness = psb_get_brightness(&bd);
++ final_brightness = (user_brightness * dev_priv->blc_adj1) / 100;
++ final_brightness = (final_brightness * dev_priv->blc_adj2) / 100;
++
++ DRM_INFO("%i\n", final_brightness);
++
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
++ int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ int len = 0;
++#ifdef OSPM_STAT
++ unsigned long on_time = 0;
++ unsigned long off_time = 0;
++#endif
++
++ *start = &buf[offset];
++ *eof = 0;
++
++#ifdef SUPPORT_ACTIVE_POWER_MANAGEMENT
++ DRM_INFO("GFX D0i3: enabled ");
++#else
++ DRM_INFO("GFX D0i3: disabled ");
++#endif
++
++#ifdef OSPM_STAT
++ switch (dev_priv->graphics_state) {
++ case PSB_PWR_STATE_ON:
++ DRM_INFO("GFX state:%s\n", "on");
++ break;
++ case PSB_PWR_STATE_OFF:
++ DRM_INFO("GFX state:%s\n", "off");
++ break;
++ default:
++ DRM_INFO("GFX state:%s\n", "unknown");
++ }
++
++ on_time = dev_priv->gfx_on_time * 1000 / HZ;
++ off_time = dev_priv->gfx_off_time * 1000 / HZ;
++ switch (dev_priv->graphics_state) {
++ case PSB_PWR_STATE_ON:
++ on_time += (jiffies - dev_priv->gfx_last_mode_change) * \
++ 1000 / HZ;
++ break;
++ case PSB_PWR_STATE_OFF:
++ off_time += (jiffies - dev_priv->gfx_last_mode_change) * \
++ 1000 / HZ;
++ break;
++ }
++ DRM_INFO("GFX(count/ms):\n");
++ DRM_INFO("on:%lu/%lu, off:%lu/%lu \n",
++ dev_priv->gfx_on_cnt, on_time, dev_priv->gfx_off_cnt, off_time);
++#endif
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++/* When a client dies:
++ * - Check for and clean up flipped page state
++ */
++void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
++{
++}
++
++static void psb_remove(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ drm_put_dev(dev);
++}
++
++static int psb_proc_init(struct drm_minor *minor)
++{
++ struct proc_dir_entry *ent;
++ struct proc_dir_entry *ent1;
++ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->proc_root,
++ psb_ospm_read, minor);
++ ent1 = create_proc_read_entry(BLC_PROC_ENTRY, 0, minor->proc_root,
++ psb_blc_read, minor);
++
++ if (!ent || !ent1)
++ return -1;
++
++ return 0;
++}
++
++static void psb_proc_cleanup(struct drm_minor *minor)
++{
++ remove_proc_entry(OSPM_PROC_ENTRY, minor->proc_root);
++ remove_proc_entry(BLC_PROC_ENTRY, minor->proc_root);
++ return;
++}
++
++static struct drm_driver driver = {
++ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
++ DRIVER_IRQ_VBL | DRIVER_MODESET,
++ .load = psb_driver_load,
++ .unload = psb_driver_unload,
++
++ .ioctls = psb_ioctls,
++ .device_is_agp = psb_driver_device_is_agp,
++ .irq_preinstall = sysirq_preinstall,
++ .irq_postinstall = sysirq_postinstall,
++ .irq_uninstall = sysirq_uninstall,
++ .irq_handler = sysirq_handler,
++ .enable_vblank = sysirq_enable_vblank,
++ .disable_vblank = sysirq_disable_vblank,
++ .get_vblank_counter = sysirq_get_vblank_counter,
++ .firstopen = NULL,
++ .lastclose = psb_lastclose,
++ .open = psb_driver_open,
++ .postclose = PVRSRVDrmPostClose,
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .get_map_ofs = drm_core_get_map_ofs,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .proc_init = psb_proc_init,
++ .proc_cleanup = psb_proc_cleanup,
++ .preclose = psb_driver_preclose,
++ .fops = {
++ .owner = THIS_MODULE,
++ .open = psb_open,
++ .release = psb_release,
++ .unlocked_ioctl = psb_unlocked_ioctl,
++ .mmap = psb_mmap,
++ .poll = psb_poll,
++ .fasync = drm_fasync,
++ .read = drm_read,
++ },
++ .pci_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pciidlist,
++ .resume = ospm_power_resume,
++ .suspend = ospm_power_suspend,
++ .probe = psb_probe,
++ .remove = psb_remove,
++ },
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = PSB_DRM_DRIVER_DATE,
++ .major = PSB_DRM_DRIVER_MAJOR,
++ .minor = PSB_DRM_DRIVER_MINOR,
++ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
++};
++
++static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++ return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init psb_init(void)
++{
++ driver.num_ioctls = psb_max_ioctl;
++
++ PVRDPFInit();
++
++ return drm_init(&driver);
++}
++
++static void __exit psb_exit(void)
++{
++ drm_exit(&driver);
++}
++
++late_initcall(psb_init);
++module_exit(psb_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/mrst/drv/psb_drv.h b/drivers/gpu/drm/mrst/drv/psb_drv.h
+new file mode 100644
+index 0000000..2ac7934
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_drv.h
+@@ -0,0 +1,1025 @@
++/**************************************************************************
++ * Copyright (c) 2007-2008, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_DRV_H_
++#define _PSB_DRV_H_
++
++#include <drm/drmP.h>
++#include "psb_drm.h"
++#include "psb_reg.h"
++#include "psb_schedule.h"
++#include "psb_intel_drv.h"
++#include "psb_hotplug.h"
++#include "psb_dpst.h"
++#include "psb_gtt.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_fence_driver.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_lock.h"
++
++/*IMG headers*/
++#include "private_data.h"
++#include "pvr_drm.h"
++
++extern struct ttm_bo_driver psb_ttm_bo_driver;
++
++enum {
++ CHIP_PSB_8108 = 0,
++ CHIP_PSB_8109 = 1,
++ CHIP_MRST_4100 = 2
++};
++
++/*
++ *Hardware bugfixes
++ */
++
++#define FIX_TG_16
++#define FIX_TG_2D_CLOCKGATE
++#define OSPM_STAT
++
++#define DRIVER_NAME "pvrsrvkm"
++#define DRIVER_DESC "drm driver for the Intel GMA500"
++#define DRIVER_AUTHOR "Tungsten Graphics Inc."
++#define OSPM_PROC_ENTRY "ospm"
++#define BLC_PROC_ENTRY "mrst_blc"
++
++#define PSB_DRM_DRIVER_DATE "2009-03-10"
++#define PSB_DRM_DRIVER_MAJOR 8
++#define PSB_DRM_DRIVER_MINOR 1
++#define PSB_DRM_DRIVER_PATCHLEVEL 0
++
++/*
++ *TTM driver private offsets.
++ */
++
++#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
++
++#define PSB_OBJECT_HASH_ORDER 13
++#define PSB_FILE_OBJECT_HASH_ORDER 12
++#define PSB_BO_HASH_ORDER 12
++
++#define PSB_VDC_OFFSET 0x00000000
++#define PSB_VDC_SIZE 0x000080000
++#define MRST_MMIO_SIZE 0x0000C0000
++#define PSB_SGX_SIZE 0x8000
++#define PSB_SGX_OFFSET 0x00040000
++#define MRST_SGX_OFFSET 0x00080000
++#define PSB_MMIO_RESOURCE 0
++#define PSB_GATT_RESOURCE 2
++#define PSB_GTT_RESOURCE 3
++#define PSB_GMCH_CTRL 0x52
++#define PSB_BSM 0x5C
++#define _PSB_GMCH_ENABLED 0x4
++#define PSB_PGETBL_CTL 0x2020
++#define _PSB_PGETBL_ENABLED 0x00000001
++#define PSB_SGX_2D_SLAVE_PORT 0x4000
++#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
++#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
++#define PSB_NUM_VALIDATE_BUFFERS 2048
++
++#define PSB_MEM_MMU_START 0x40000000
++
++/*
++ *Flags for external memory type field.
++ */
++
++#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */
++#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
++/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
++#define PSB_MSVDX_SIZE 0x10000
++
++#define LNC_TOPAZ_OFFSET 0xA0000
++#define LNC_TOPAZ_SIZE 0x10000
++
++#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
++#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
++#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
++
++/*
++ *PTE's and PDE's
++ */
++
++#define PSB_PDE_MASK 0x003FFFFF
++#define PSB_PDE_SHIFT 22
++#define PSB_PTE_SHIFT 12
++
++#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
++#define PSB_PTE_WO 0x0002 /* Write only */
++#define PSB_PTE_RO 0x0004 /* Read only */
++#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
++
++/*
++ *VDC registers and bits
++ */
++#define PSB_MSVDX_CLOCKGATING 0x2064
++#define PSB_TOPAZ_CLOCKGATING 0x2068
++#define PSB_HWSTAM 0x2098
++#define PSB_INSTPM 0x20C0
++#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
++#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
++#define _PSB_DPST_PIPEA_FLAG (1<<6)
++#define _PSB_DPST_PIPEB_FLAG (1<<4)
++#define _PSB_IRQ_SGX_FLAG (1<<18)
++#define _PSB_IRQ_MSVDX_FLAG (1<<19)
++#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
++#define PSB_INT_IDENTITY_R 0x20A4
++#define PSB_INT_MASK_R 0x20A8
++#define PSB_INT_ENABLE_R 0x20A0
++
++#define _PSB_MMU_ER_MASK 0x0001FF00
++#define _PSB_MMU_ER_HOST (1 << 16)
++#define GPIOA 0x5010
++#define GPIOB 0x5014
++#define GPIOC 0x5018
++#define GPIOD 0x501c
++#define GPIOE 0x5020
++#define GPIOF 0x5024
++#define GPIOG 0x5028
++#define GPIOH 0x502c
++#define GPIO_CLOCK_DIR_MASK (1 << 0)
++#define GPIO_CLOCK_DIR_IN (0 << 1)
++#define GPIO_CLOCK_DIR_OUT (1 << 1)
++#define GPIO_CLOCK_VAL_MASK (1 << 2)
++#define GPIO_CLOCK_VAL_OUT (1 << 3)
++#define GPIO_CLOCK_VAL_IN (1 << 4)
++#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
++#define GPIO_DATA_DIR_MASK (1 << 8)
++#define GPIO_DATA_DIR_IN (0 << 9)
++#define GPIO_DATA_DIR_OUT (1 << 9)
++#define GPIO_DATA_VAL_MASK (1 << 10)
++#define GPIO_DATA_VAL_OUT (1 << 11)
++#define GPIO_DATA_VAL_IN (1 << 12)
++#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
++
++#define VCLK_DIVISOR_VGA0 0x6000
++#define VCLK_DIVISOR_VGA1 0x6004
++#define VCLK_POST_DIV 0x6010
++
++#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
++#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
++#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
++#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
++#define PSB_COMM_USER_IRQ (1024 >> 2)
++#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
++#define PSB_COMM_FW (2048 >> 2)
++
++#define PSB_UIRQ_VISTEST 1
++#define PSB_UIRQ_OOM_REPLY 2
++#define PSB_UIRQ_FIRE_TA_REPLY 3
++#define PSB_UIRQ_FIRE_RASTER_REPLY 4
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
++#define PSB_LID_DELAY (DRM_HZ / 10)
++
++#define PSB_PWR_STATE_ON 1
++#define PSB_PWR_STATE_OFF 2
++
++#define PSB_PMPOLICY_NOPM 0
++#define PSB_PMPOLICY_POWERDOWN 2
++
++#define PSB_PMSTATE_POWERUP 0
++#define PSB_PMSTATE_POWERDOWN 2
++
++/*
++ *User options.
++ */
++
++struct drm_psb_uopt {
++ int pad; /*keep it here in case we use it in future*/
++};
++
++/**
++ *struct psb_context
++ *
++ *@buffers: array of pre-allocated validate buffers.
++ *@used_buffers: number of buffers in @buffers array currently in use.
++ *@validate_buffer: buffers validated from user-space.
++ *@kern_validate_buffers : buffers validated from kernel-space.
++ *@fence_flags : Fence flags to be used for fence creation.
++ *
++ *This structure is used during execbuf validation.
++ */
++
++struct psb_context {
++ struct psb_validate_buffer *buffers;
++ uint32_t used_buffers;
++ struct list_head validate_list;
++ struct list_head kern_validate_list;
++ uint32_t fence_types;
++ uint32_t val_seq;
++};
++
++struct psb_validate_buffer;
++
++struct psb_msvdx_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++struct drm_psb_private {
++
++ /*
++ *TTM Glue.
++ */
++
++ struct drm_global_reference mem_global_ref;
++ int has_global;
++
++ struct drm_device *dev;
++ struct ttm_object_device *tdev;
++ struct ttm_fence_device fdev;
++ struct ttm_bo_device bdev;
++ struct ttm_lock ttm_lock;
++ struct vm_operations_struct *ttm_vm_ops;
++ int has_fence_device;
++ int has_bo_device;
++
++ unsigned long chipset;
++
++ struct drm_psb_dev_info_arg dev_info;
++ struct drm_psb_uopt uopt;
++
++ struct psb_gtt *pg;
++
++ /*GTT Memory manager*/
++ struct psb_gtt_mm *gtt_mm;
++
++ struct page *scratch_page;
++ uint32_t sequence[PSB_NUM_ENGINES];
++ uint32_t last_sequence[PSB_NUM_ENGINES];
++ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
++
++ struct psb_mmu_driver *mmu;
++ struct psb_mmu_pd *pf_pd;
++
++ uint8_t *sgx_reg;
++ uint8_t *vdc_reg;
++ uint32_t gatt_free_offset;
++
++ /*
++ *MSVDX
++ */
++ uint8_t *msvdx_reg;
++ atomic_t msvdx_mmu_invaldc;
++ void *msvdx_private;
++
++ /*
++ *TOPAZ
++ */
++ uint8_t *topaz_reg;
++ void *topaz_private;
++ uint8_t topaz_disabled;
++ uint32_t video_device_fuse;
++ atomic_t topaz_mmu_invaldc;
++
++ /*
++ *Fencing / irq.
++ */
++
++ uint32_t vdc_irq_mask;
++ u32 pipestat[2];
++ bool vblanksEnabledForFlips;
++
++ spinlock_t irqmask_lock;
++ spinlock_t sequence_lock;
++
++ /*
++ *Modesetting
++ */
++ struct psb_intel_mode_device mode_dev;
++
++ struct drm_crtc *plane_to_crtc_mapping[2];
++ struct drm_crtc *pipe_to_crtc_mapping[2];
++
++ /*
++ * CI share buffer
++ */
++ unsigned int ci_region_start;
++ unsigned int ci_region_size;
++
++ /*
++ * RAR share buffer;
++ */
++ unsigned int rar_region_start;
++ unsigned int rar_region_size;
++
++ /*
++ *Memory managers
++ */
++
++ int have_camera;
++ int have_rar;
++ int have_tt;
++ int have_mem_mmu;
++ struct mutex temp_mem;
++
++ /*
++ *Relocation buffer mapping.
++ */
++
++ spinlock_t reloc_lock;
++ unsigned int rel_mapped_pages;
++ wait_queue_head_t rel_mapped_queue;
++
++ /*
++ *SAREA
++ */
++ struct drm_psb_sarea *sarea_priv;
++
++ /*
++ *OSPM info
++ */
++ uint32_t ospm_base;
++
++ /*
++ * Sizes info
++ */
++
++ struct drm_psb_sizes_arg sizes;
++
++ uint32_t fuse_reg_value;
++
++ /* vbt (gct) header information*/
++ struct mrst_vbt vbt_data;
++ /* info that is stored from the gct */
++ struct gct_ioctl_arg gct_data;
++
++ /*
++ *LVDS info
++ */
++ int backlight_duty_cycle; /* restore backlight to this value */
++ bool panel_wants_dither;
++ struct drm_display_mode *panel_fixed_mode;
++ struct drm_display_mode *lfp_lvds_vbt_mode;
++ struct drm_display_mode *sdvo_lvds_vbt_mode;
++
++ struct bdb_lvds_backlight *lvds_bl; /*LVDS backlight info from VBT*/
++ struct psb_intel_i2c_chan *lvds_i2c_bus;
++
++ /* Feature bits from the VBIOS*/
++ unsigned int int_tv_support:1;
++ unsigned int lvds_dither:1;
++ unsigned int lvds_vbt:1;
++ unsigned int int_crt_support:1;
++ unsigned int lvds_use_ssc:1;
++ int lvds_ssc_freq;
++
++/* MRST private date start */
++/*FIXME JLIU7 need to revisit */
++ bool sku_83;
++ bool sku_100;
++ bool sku_100L;
++ bool sku_bypass;
++ uint32_t iLVDS_enable;
++
++ /* pipe config register value */
++ uint32_t pipeconf;
++
++ /* plane control register value */
++ uint32_t dspcntr;
++
++/* MRST_DSI private date start */
++ /*
++ *MRST DSI info
++ */
++ /* The DSI device ready */
++ bool dsi_device_ready;
++
++ /* The DPI panel power on */
++ bool dpi_panel_on;
++
++ /* The DBI panel power on */
++ bool dbi_panel_on;
++
++ /* The DPI display */
++ bool dpi;
++
++ enum mipi_panel_type panel_make;
++
++ /* status */
++ uint32_t videoModeFormat:2;
++ uint32_t laneCount:3;
++ uint32_t status_reserved:27;
++
++ /* dual display - DPI & DBI */
++ bool dual_display;
++
++ /* HS or LP transmission */
++ bool lp_transmission;
++
++ /* configuration phase */
++ bool config_phase;
++
++ /* DSI clock */
++ uint32_t RRate;
++ uint32_t DDR_Clock;
++ uint32_t DDR_Clock_Calculated;
++ uint32_t ClockBits;
++
++ /* DBI Buffer pointer */
++ u8 *p_DBI_commandBuffer_orig;
++ u8 *p_DBI_commandBuffer;
++ uint32_t DBI_CB_pointer;
++ u8 *p_DBI_dataBuffer_orig;
++ u8 *p_DBI_dataBuffer;
++ uint32_t DBI_DB_pointer;
++
++ /* DPI panel spec */
++ uint32_t pixelClock;
++ uint32_t HsyncWidth;
++ uint32_t HbackPorch;
++ uint32_t HfrontPorch;
++ uint32_t HactiveArea;
++ uint32_t VsyncWidth;
++ uint32_t VbackPorch;
++ uint32_t VfrontPorch;
++ uint32_t VactiveArea;
++ uint32_t bpp:5;
++ uint32_t Reserved:27;
++
++ /* DBI panel spec */
++ uint32_t dbi_pixelClock;
++ uint32_t dbi_HsyncWidth;
++ uint32_t dbi_HbackPorch;
++ uint32_t dbi_HfrontPorch;
++ uint32_t dbi_HactiveArea;
++ uint32_t dbi_VsyncWidth;
++ uint32_t dbi_VbackPorch;
++ uint32_t dbi_VfrontPorch;
++ uint32_t dbi_VactiveArea;
++ uint32_t dbi_bpp:5;
++ uint32_t dbi_Reserved:27;
++
++/* MRST_DSI private date end */
++
++ /*
++ *Register state
++ */
++ uint32_t saveDSPACNTR;
++ uint32_t saveDSPBCNTR;
++ uint32_t savePIPEACONF;
++ uint32_t savePIPEBCONF;
++ uint32_t savePIPEASRC;
++ uint32_t savePIPEBSRC;
++ uint32_t saveFPA0;
++ uint32_t saveFPA1;
++ uint32_t saveDPLL_A;
++ uint32_t saveDPLL_A_MD;
++ uint32_t saveHTOTAL_A;
++ uint32_t saveHBLANK_A;
++ uint32_t saveHSYNC_A;
++ uint32_t saveVTOTAL_A;
++ uint32_t saveVBLANK_A;
++ uint32_t saveVSYNC_A;
++ uint32_t saveDSPASTRIDE;
++ uint32_t saveDSPASIZE;
++ uint32_t saveDSPAPOS;
++ uint32_t saveDSPABASE;
++ uint32_t saveDSPASURF;
++ uint32_t saveFPB0;
++ uint32_t saveFPB1;
++ uint32_t saveDPLL_B;
++ uint32_t saveDPLL_B_MD;
++ uint32_t saveHTOTAL_B;
++ uint32_t saveHBLANK_B;
++ uint32_t saveHSYNC_B;
++ uint32_t saveVTOTAL_B;
++ uint32_t saveVBLANK_B;
++ uint32_t saveVSYNC_B;
++ uint32_t saveDSPBSTRIDE;
++ uint32_t saveDSPBSIZE;
++ uint32_t saveDSPBPOS;
++ uint32_t saveDSPBBASE;
++ uint32_t saveDSPBSURF;
++ uint32_t saveVCLK_DIVISOR_VGA0;
++ uint32_t saveVCLK_DIVISOR_VGA1;
++ uint32_t saveVCLK_POST_DIV;
++ uint32_t saveVGACNTRL;
++ uint32_t saveADPA;
++ uint32_t saveLVDS;
++ uint32_t saveDVOA;
++ uint32_t saveDVOB;
++ uint32_t saveDVOC;
++ uint32_t savePP_ON;
++ uint32_t savePP_OFF;
++ uint32_t savePP_CONTROL;
++ uint32_t savePP_CYCLE;
++ uint32_t savePFIT_CONTROL;
++ uint32_t savePaletteA[256];
++ uint32_t savePaletteB[256];
++ uint32_t saveBLC_PWM_CTL2;
++ uint32_t saveBLC_PWM_CTL;
++ uint32_t saveCLOCKGATING;
++ uint32_t saveDSPARB;
++ uint32_t saveDSPATILEOFF;
++ uint32_t saveDSPBTILEOFF;
++ uint32_t saveDSPAADDR;
++ uint32_t saveDSPBADDR;
++ uint32_t savePFIT_AUTO_RATIOS;
++ uint32_t savePFIT_PGM_RATIOS;
++ uint32_t savePP_ON_DELAYS;
++ uint32_t savePP_OFF_DELAYS;
++ uint32_t savePP_DIVISOR;
++ uint32_t saveBCLRPAT_A;
++ uint32_t saveBCLRPAT_B;
++ uint32_t saveDSPALINOFF;
++ uint32_t saveDSPBLINOFF;
++ uint32_t savePERF_MODE;
++ uint32_t saveDSPFW1;
++ uint32_t saveDSPFW2;
++ uint32_t saveDSPFW3;
++ uint32_t saveDSPFW4;
++ uint32_t saveDSPFW5;
++ uint32_t saveDSPFW6;
++ uint32_t saveCHICKENBIT;
++ uint32_t saveDSPACURSOR_CTRL;
++ uint32_t saveDSPBCURSOR_CTRL;
++ uint32_t saveDSPACURSOR_BASE;
++ uint32_t saveDSPBCURSOR_BASE;
++ uint32_t saveDSPACURSOR_POS;
++ uint32_t saveDSPBCURSOR_POS;
++ uint32_t save_palette_a[256];
++ uint32_t save_palette_b[256];
++ uint32_t saveOV_OVADD;
++ uint32_t saveOV_OGAMC0;
++ uint32_t saveOV_OGAMC1;
++ uint32_t saveOV_OGAMC2;
++ uint32_t saveOV_OGAMC3;
++ uint32_t saveOV_OGAMC4;
++ uint32_t saveOV_OGAMC5;
++
++ /* DSI reg save */
++ uint32_t saveDEVICE_READY_REG;
++ uint32_t saveINTR_EN_REG;
++ uint32_t saveDSI_FUNC_PRG_REG;
++ uint32_t saveHS_TX_TIMEOUT_REG;
++ uint32_t saveLP_RX_TIMEOUT_REG;
++ uint32_t saveTURN_AROUND_TIMEOUT_REG;
++ uint32_t saveDEVICE_RESET_REG;
++ uint32_t saveDPI_RESOLUTION_REG;
++ uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
++ uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
++ uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
++ uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
++ uint32_t saveVERT_SYNC_PAD_COUNT_REG;
++ uint32_t saveVERT_BACK_PORCH_COUNT_REG;
++ uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
++ uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
++ uint32_t saveINIT_COUNT_REG;
++ uint32_t saveMAX_RET_PAK_REG;
++ uint32_t saveVIDEO_FMT_REG;
++ uint32_t saveEOT_DISABLE_REG;
++ uint32_t saveLP_BYTECLK_REG;
++ uint32_t saveHS_LS_DBI_ENABLE_REG;
++ uint32_t saveTXCLKESC_REG;
++ uint32_t saveDPHY_PARAM_REG;
++ uint32_t saveMIPI_CONTROL_REG;
++ uint32_t saveMIPI;
++ void (*init_drvIC)(struct drm_device *dev);
++
++ /* DPST Register Save */
++ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
++ uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
++
++ /*
++ *Scheduling.
++ */
++
++ struct mutex reset_mutex;
++ struct psb_scheduler scheduler;
++ struct mutex cmdbuf_mutex;
++ /*uint32_t ta_mem_pages;
++ struct psb_ta_mem *ta_mem;
++ int force_ta_mem_load;*/
++ atomic_t val_seq;
++
++ /*
++ *TODO: change this to be per drm-context.
++ */
++
++ struct psb_context context;
++
++ /*
++ * LID-Switch
++ */
++ spinlock_t lid_lock;
++ struct timer_list lid_timer;
++ struct psb_intel_opregion opregion;
++ u32 *lid_state;
++ u32 lid_last_state;
++
++ /*
++ *Watchdog
++ */
++
++ spinlock_t watchdog_lock;
++ struct timer_list watchdog_timer;
++ struct work_struct watchdog_wq;
++ struct work_struct msvdx_watchdog_wq;
++ struct work_struct topaz_watchdog_wq;
++ int timer_available;
++
++ uint32_t apm_reg;
++ uint16_t apm_base;
++#ifdef OSPM_STAT
++ unsigned char graphics_state;
++ unsigned long gfx_on_time;
++ unsigned long gfx_off_time;
++ unsigned long gfx_last_mode_change;
++ unsigned long gfx_on_cnt;
++ unsigned long gfx_off_cnt;
++#endif
++
++ /*to be removed later*/
++ /*int dri_page_flipping;
++ int current_page;
++ int pipe_active[2];
++ int saved_start[2];
++ int saved_offset[2];
++ int saved_stride[2];
++
++ int flip_start[2];
++ int flip_offset[2];
++ int flip_stride[2];*/
++
++
++ /*
++ * Used for modifying backlight from
++ * xrandr -- consider removing and using HAL instead
++ */
++ struct drm_property *backlight_property;
++ uint32_t blc_adj1;
++ uint32_t blc_adj2;
++
++ /*
++ * DPST and Hotplug state
++ */
++
++ struct dpst_state *psb_dpst_state;
++ struct hotplug_state *psb_hotplug_state;
++
++};
++
++struct psb_fpriv {
++ struct ttm_object_file *tfile;
++};
++
++struct psb_mmu_driver;
++
++extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
++extern int drm_pick_crtcs(struct drm_device *dev);
++
++
++static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
++{
++ PVRSRV_FILE_PRIVATE_DATA *pvr_file_priv
++ = (PVRSRV_FILE_PRIVATE_DATA *)file_priv->driver_priv;
++ return (struct psb_fpriv *) pvr_file_priv->pPriv;
++}
++
++static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
++{
++ return (struct drm_psb_private *) dev->dev_private;
++}
++
++/*
++ *TTM glue. psb_ttm_glue.c
++ */
++
++extern int psb_open(struct inode *inode, struct file *filp);
++extern int psb_release(struct inode *inode, struct file *filp);
++extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);
++
++extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_verify_access(struct ttm_buffer_object *bo,
++ struct file *filp);
++extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos);
++extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos);
++extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_extension_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
++extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
++extern int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++/*
++ *MMU stuff.
++ */
++
++extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++ int trap_pagefaults,
++ int invalid_type,
++ struct drm_psb_private *dev_priv);
++extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
++extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
++ *driver);
++extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
++ uint32_t gtt_start, uint32_t gtt_pages);
++extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++ int trap_pagefaults,
++ int invalid_type);
++extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
++extern void psb_mmu_flush(struct psb_mmu_driver *driver);
++extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++ unsigned long address,
++ uint32_t num_pages);
++extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
++ uint32_t start_pfn,
++ unsigned long address,
++ uint32_t num_pages, int type);
++extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++ unsigned long *pfn);
++
++/*
++ *Enable / disable MMU for different requestors.
++ */
++
++
++extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
++extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride, int type);
++extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride);
++/*
++ *psb_sgx.c
++ */
++
++
++
++extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_reg_submit(struct drm_psb_private *dev_priv,
++ uint32_t *regs, unsigned int cmds);
++
++
++extern void psb_fence_or_sync(struct drm_file *file_priv,
++ uint32_t engine,
++ uint32_t fence_types,
++ uint32_t fence_flags,
++ struct list_head *list,
++ struct psb_ttm_fence_rep *fence_arg,
++ struct ttm_fence_object **fence_p);
++extern int psb_validate_kernel_buffer(struct psb_context *context,
++ struct ttm_buffer_object *bo,
++ uint32_t fence_class,
++ uint64_t set_flags,
++ uint64_t clr_flags);
++
++
++/*
++ *psb_fence.c
++ */
++
++extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
++
++extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags, uint32_t *sequence,
++ unsigned long *timeout_jiffies);
++extern void psb_fence_error(struct drm_device *dev,
++ uint32_t class,
++ uint32_t sequence, uint32_t type, int error);
++extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
++
++/* MSVDX/Topaz stuff */
++extern int lnc_video_frameskip(struct drm_device *dev,
++ uint64_t user_pointer);
++extern int lnc_video_getparam(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_try_power_down_topaz(struct drm_device *dev);
++extern int psb_try_power_down_msvdx(struct drm_device *dev);
++
++
++/*
++ *psb_fb.c
++ */
++extern int psbfb_probed(struct drm_device *dev);
++extern int psbfb_remove(struct drm_device *dev,
++ struct drm_framebuffer *fb);
++extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++/*
++ *psb_reset.c
++ */
++
++extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
++extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
++extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
++extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
++extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
++extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
++
++
++
++/* modesetting */
++extern void psb_modeset_init(struct drm_device *dev);
++extern void psb_modeset_cleanup(struct drm_device *dev);
++
++/* psb_bl.c */
++int psb_backlight_init(struct drm_device *dev);
++void psb_backlight_exit(void);
++int psb_set_brightness(struct backlight_device *bd);
++int psb_get_brightness(struct backlight_device *bd);
++
++/*
++ *Utilities
++ */
++#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
++
++static inline u32 MSG_READ32(uint port, uint offset)
++{
++ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
++ outl(0x800000D0, 0xCF8);
++ outl(mcr, 0xCFC);
++ outl(0x800000D4, 0xCF8);
++ return inl(0xcfc);
++}
++static inline void MSG_WRITE32(uint port, uint offset, u32 value)
++{
++ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
++ outl(0x800000D4, 0xCF8);
++ outl(value, 0xcfc);
++ outl(0x800000D0, 0xCF8);
++ outl(mcr, 0xCFC);
++}
++
++static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ return ioread32(dev_priv->vdc_reg + (reg));
++}
++
++#define REG_READ(reg) REGISTER_READ(dev, (reg))
++static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
++ uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite32((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
++
++static inline void REGISTER_WRITE16(struct drm_device *dev,
++ uint32_t reg, uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite16((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
++
++static inline void REGISTER_WRITE8(struct drm_device *dev,
++ uint32_t reg, uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite8((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
++
++#define PSB_ALIGN_TO(_val, _align) \
++ (((_val) + ((_align) - 1)) & ~((_align) - 1))
++#define PSB_WVDC32(_val, _offs) \
++ iowrite32(_val, dev_priv->vdc_reg + (_offs))
++#define PSB_RVDC32(_offs) \
++ ioread32(dev_priv->vdc_reg + (_offs))
++
++/* #define TRAP_SGX_PM_FAULT 1 */
++#ifdef TRAP_SGX_PM_FAULT
++#define PSB_RSGX32(_offs) \
++({ \
++ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
++ printk(KERN_ERR "access sgx when it's off!! (READ) %s, %d\n", \
++ __FILE__, __LINE__); \
++ mdelay(1000); \
++ } \
++ ioread32(dev_priv->sgx_reg + (_offs)); \
++})
++#else
++#define PSB_RSGX32(_offs) \
++ ioread32(dev_priv->sgx_reg + (_offs))
++#endif
++
++#define PSB_WMSVDX32(_val, _offs) \
++ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
++#define PSB_RMSVDX32(_offs) \
++ ioread32(dev_priv->msvdx_reg + (_offs))
++
++#define PSB_ALPL(_val, _base) \
++ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
++#define PSB_ALPLM(_val, _base) \
++ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
++
++#define PSB_D_RENDER (1 << 16)
++
++#define PSB_D_GENERAL (1 << 0)
++#define PSB_D_INIT (1 << 1)
++#define PSB_D_IRQ (1 << 2)
++#define PSB_D_FW (1 << 3)
++#define PSB_D_PERF (1 << 4)
++#define PSB_D_TMP (1 << 5)
++#define PSB_D_PM (1 << 6)
++
++extern int drm_psb_debug;
++extern int drm_psb_no_fb;
++extern int drm_idle_check_interval;
++extern int drm_topaz_sbuswa;
++
++#define PSB_DEBUG_FW(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
++#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
++#define PSB_DEBUG_INIT(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
++#define PSB_DEBUG_IRQ(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
++#define PSB_DEBUG_RENDER(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
++#define PSB_DEBUG_PERF(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
++#define PSB_DEBUG_TMP(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
++#define PSB_DEBUG_PM(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
++
++#if DRM_DEBUG_CODE
++#define PSB_DEBUG(_flag, _fmt, _arg...) \
++ do { \
++ if (unlikely((_flag) & drm_psb_debug)) \
++ printk(KERN_DEBUG \
++ "[psb:0x%02x:%s] " _fmt , _flag, \
++ __func__ , ##_arg); \
++ } while (0)
++#else
++#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
++#endif
++
++#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
++ ((dev)->pci_device == 0x8109))
++
++#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_fb.c b/drivers/gpu/drm/mrst/drv/psb_fb.c
+new file mode 100644
+index 0000000..addec23
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_fb.c
+@@ -0,0 +1,1817 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/console.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "ttm/ttm_userobj_api.h"
++#include "psb_fb.h"
++#include "psb_sgx.h"
++#include "psb_pvr_glue.h"
++
++static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth)
++{
++ switch (depth) {
++ case 8:
++ var->red.offset = 0;
++ var->green.offset = 0;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 15:
++ var->red.offset = 10;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 5;
++ var->blue.length = 5;
++ var->transp.length = 1;
++ var->transp.offset = 15;
++ break;
++ case 16:
++ var->red.offset = 11;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 6;
++ var->blue.length = 5;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 24:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 32:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 8;
++ var->transp.offset = 24;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle);
++
++static const struct drm_framebuffer_funcs psb_fb_funcs = {
++ .destroy = psb_user_framebuffer_destroy,
++ .create_handle = psb_user_framebuffer_create_handle,
++};
++
++struct psbfb_par {
++ struct drm_device *dev;
++ struct psb_framebuffer *psbfb;
++
++ int dpms_state;
++
++ int crtc_count;
++ /* crtc currently bound to this */
++ uint32_t crtc_ids[2];
++};
++
++#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
++
++void *psbfb_vdc_reg(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv;
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ return dev_priv->vdc_reg;
++}
++EXPORT_SYMBOL(psbfb_vdc_reg);
++
++static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++ unsigned blue, unsigned transp,
++ struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_framebuffer *fb = &par->psbfb->base;
++ uint32_t v;
++
++ if (!fb)
++ return -ENOMEM;
++
++ if (regno > 255)
++ return 1;
++
++#if 0 /* JB: not drop, check that this works */
++ if (fb->bits_per_pixel == 8) {
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (crtc->funcs->gamma_set)
++ crtc->funcs->gamma_set(crtc, red, green,
++ blue, regno);
++ }
++ return 0;
++ }
++#endif
++
++ red = CMAP_TOHW(red, info->var.red.length);
++ blue = CMAP_TOHW(blue, info->var.blue.length);
++ green = CMAP_TOHW(green, info->var.green.length);
++ transp = CMAP_TOHW(transp, info->var.transp.length);
++
++ v = (red << info->var.red.offset) |
++ (green << info->var.green.offset) |
++ (blue << info->var.blue.offset) |
++ (transp << info->var.transp.offset);
++
++ if (regno < 16) {
++ switch (fb->bits_per_pixel) {
++ case 16:
++ ((uint32_t *) info->pseudo_palette)[regno] = v;
++ break;
++ case 24:
++ case 32:
++ ((uint32_t *) info->pseudo_palette)[regno] = v;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static struct drm_display_mode *psbfb_find_first_mode(struct
++ fb_var_screeninfo
++ *var,
++ struct fb_info *info,
++ struct drm_crtc
++ *crtc)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_display_mode *drm_mode;
++ struct drm_display_mode *preferred_mode = NULL;
++ struct drm_display_mode *last_mode = NULL;
++ struct drm_connector *connector;
++ int found;
++
++ found = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder && connector->encoder->crtc == crtc) {
++ found = 1;
++ break;
++ }
++ }
++
++ /* found no connector, bail */
++ if (!found)
++ return NULL;
++
++ found = 0;
++ list_for_each_entry(drm_mode, &connector->modes, head) {
++ if (drm_mode->hdisplay == var->xres &&
++ drm_mode->vdisplay == var->yres
++ && drm_mode->clock != 0) {
++ found = 1;
++ last_mode = drm_mode;
++ if (IS_POULSBO(dev)) {
++ if (last_mode->type & DRM_MODE_TYPE_PREFERRED)
++ preferred_mode = last_mode;
++ }
++ }
++ }
++
++ /* No mode matching mode found */
++ if (!found)
++ return NULL;
++
++ if (IS_POULSBO(dev)) {
++ if (preferred_mode)
++ return preferred_mode;
++ else
++ return last_mode;
++ } else {
++ return last_mode;
++ }
++}
++
++static int psbfb_check_var(struct fb_var_screeninfo *var,
++ struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_device *dev = par->dev;
++ int ret;
++ int depth;
++ int pitch;
++ int bpp = var->bits_per_pixel;
++
++ if (!psbfb)
++ return -ENOMEM;
++
++ if (!var->pixclock)
++ return -EINVAL;
++
++ /* don't support virtuals for now */
++ if (var->xres_virtual > var->xres)
++ return -EINVAL;
++
++ if (var->yres_virtual > var->yres)
++ return -EINVAL;
++
++ switch (bpp) {
++#if 0 /* JB: for now only support true color */
++ case 8:
++ depth = 8;
++ break;
++#endif
++ case 16:
++ depth = (var->green.length == 6) ? 16 : 15;
++ break;
++ case 24: /* assume this is 32bpp / depth 24 */
++ bpp = 32;
++ /* fallthrough */
++ case 32:
++ depth = (var->transp.length > 0) ? 32 : 24;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
++
++ /* Check that we can resize */
++ if ((pitch * var->yres) > psbfb->size) {
++#if 1
++ /* Need to resize the fb object.
++ * But the generic fbdev code doesn't really understand
++ * that we can do this. So disable for now.
++ */
++ DRM_INFO("Can't support requested size, too big!\n");
++ return -EINVAL;
++#endif
++ }
++
++ ret = fill_fb_bitfield(var, depth);
++ if (ret)
++ return ret;
++
++#if 1
++ /* Here we walk the output mode list and look for modes. If we haven't
++ * got it, then bail. Not very nice, so this is disabled.
++ * In the set_par code, we create our mode based on the incoming
++ * parameters. Nicer, but may not be desired by some.
++ */
++ {
++ struct drm_crtc *crtc;
++ int i;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (psb_intel_crtc->mode_set.num_connectors == 0)
++ continue;
++
++ if (!psbfb_find_first_mode(&info->var, info, crtc))
++ return -EINVAL;
++ }
++ }
++#else
++ (void) i;
++ (void) dev; /* silence warnings */
++ (void) crtc;
++ (void) drm_mode;
++ (void) connector;
++#endif
++
++ return 0;
++}
++
++/* this will let fbcon do the mode init */
++static int psbfb_set_par(struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_device *dev = par->dev;
++ struct fb_var_screeninfo *var = &info->var;
++ /* struct drm_psb_private *dev_priv = dev->dev_private; */
++ struct drm_display_mode *drm_mode;
++ int pitch;
++ int depth;
++ int bpp = var->bits_per_pixel;
++
++ if (!fb)
++ return -ENOMEM;
++
++ switch (bpp) {
++ case 8:
++ depth = 8;
++ break;
++ case 16:
++ depth = (var->green.length == 6) ? 16 : 15;
++ break;
++ case 24: /* assume this is 32bpp / depth 24 */
++ bpp = 32;
++ /* fallthrough */
++ case 32:
++ depth = (var->transp.length > 0) ? 32 : 24;
++ break;
++ default:
++ DRM_ERROR("Illegal BPP\n");
++ return -EINVAL;
++ }
++
++ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
++
++ if ((pitch * var->yres) > (psbfb->size)) {
++#if 1
++ /* Need to resize the fb object.
++ * But the generic fbdev code doesn't really understand
++ * that we can do this. So disable for now.
++ */
++ DRM_INFO("Can't support requested size, too big!\n");
++ return -EINVAL;
++#endif
++ }
++
++ psbfb->offset = 0;
++ fb->width = var->xres;
++ fb->height = var->yres;
++ fb->bits_per_pixel = bpp;
++ fb->pitch = pitch;
++ fb->depth = depth;
++
++ info->fix.line_length = psbfb->base.pitch;
++ info->fix.visual =
++ (psbfb->base.depth ==
++ 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
++
++ /* some fbdev's apps don't want these to change */
++ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset;
++
++#if 0
++ /* relates to resize - disable */
++ info->fix.smem_len = info->fix.line_length * var->yres;
++ info->screen_size = info->fix.smem_len; /* ??? */
++#endif
++
++ /* Should we walk the output's modelist or just create our own ???
++ * For now, we create and destroy a mode based on the incoming
++ * parameters. But there's commented out code below which scans
++ * the output list too.
++ */
++#if 1
++ /* This code is now in the for loop futher down. */
++#endif
++
++ {
++ struct drm_crtc *crtc;
++ int ret;
++ int i;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (psb_intel_crtc->mode_set.num_connectors == 0)
++ continue;
++
++#if 1
++ drm_mode =
++ psbfb_find_first_mode(&info->var, info, crtc);
++ if (!drm_mode)
++ DRM_ERROR("No matching mode found\n");
++ psb_intel_crtc->mode_set.mode = drm_mode;
++#endif
++
++#if 0 /* FIXME: TH */
++ if (crtc->fb == psb_intel_crtc->mode_set.fb) {
++#endif
++ DRM_DEBUG
++ ("setting mode on crtc %p with id %u\n",
++ crtc, crtc->base.id);
++ ret =
++ crtc->funcs->
++ set_config(&psb_intel_crtc->mode_set);
++ if (ret) {
++ DRM_ERROR("Failed setting mode\n");
++ return ret;
++ }
++#if 0
++ }
++#endif
++ }
++ DRM_DEBUG("Set par returned OK.\n");
++ return 0;
++ }
++
++ return 0;
++}
++#if 0
++static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
++ unsigned size)
++{
++ int ret = 0;
++ int i;
++ unsigned submit_size;
++
++ while (size > 0) {
++ submit_size = (size < 0x60) ? size : 0x60;
++ size -= submit_size;
++ ret = psb_2d_wait_available(dev_priv, submit_size);
++ if (ret)
++ return ret;
++
++ submit_size <<= 2;
++ for (i = 0; i < submit_size; i += 4)
++ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
++
++ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
++ }
++ return 0;
++}
++
++static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
++ uint32_t dst_offset, uint32_t dst_stride,
++ uint32_t dst_format, uint16_t dst_x,
++ uint16_t dst_y, uint16_t size_x,
++ uint16_t size_y, uint32_t fill)
++{
++ uint32_t buffer[10];
++ uint32_t *buf;
++
++ buf = buffer;
++
++ *buf++ = PSB_2D_FENCE_BH;
++
++ *buf++ =
++ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
++ PSB_2D_DST_STRIDE_SHIFT);
++ *buf++ = dst_offset;
++
++ *buf++ =
++ PSB_2D_BLIT_BH |
++ PSB_2D_ROT_NONE |
++ PSB_2D_COPYORDER_TL2BR |
++ PSB_2D_DSTCK_DISABLE |
++ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
++
++ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
++ *buf++ =
++ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
++ PSB_2D_DST_YSTART_SHIFT);
++ *buf++ =
++ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
++ PSB_2D_DST_YSIZE_SHIFT);
++ *buf++ = PSB_2D_FLUSH_BH;
++
++ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
++}
++
++static void psbfb_fillrect_accel(struct fb_info *info,
++ const struct fb_fillrect *r)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_psb_private *dev_priv = par->dev->dev_private;
++ uint32_t offset;
++ uint32_t stride;
++ uint32_t format;
++
++ if (!fb)
++ return;
++
++ offset = psbfb->offset;
++ stride = fb->pitch;
++
++ switch (fb->depth) {
++ case 8:
++ format = PSB_2D_DST_332RGB;
++ break;
++ case 15:
++ format = PSB_2D_DST_555RGB;
++ break;
++ case 16:
++ format = PSB_2D_DST_565RGB;
++ break;
++ case 24:
++ case 32:
++ /* this is wrong but since we don't do blending its okay */
++ format = PSB_2D_DST_8888ARGB;
++ break;
++ default:
++ /* software fallback */
++ cfb_fillrect(info, r);
++ return;
++ }
++
++ psb_accel_2d_fillrect(dev_priv,
++ offset, stride, format,
++ r->dx, r->dy, r->width, r->height, r->color);
++}
++
++static void psbfb_fillrect(struct fb_info *info,
++ const struct fb_fillrect *rect)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ if (info->flags & FBINFO_HWACCEL_DISABLED)
++ return cfb_fillrect(info, rect);
++ /*
++ * psbfb_fillrect is atomic so need to do instantaneous check of
++ * power on
++ */
++ if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) ||
++ powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) ||
++ !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND))
++ return cfb_fillrect(info, rect);
++
++ if (psb_2d_trylock(dev_priv)) {
++ psbfb_fillrect_accel(info, rect);
++ psb_2d_unlock(dev_priv);
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ } else
++ cfb_fillrect(info, rect);
++}
++
++uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
++{
++ if (xdir < 0)
++ return (ydir <
++ 0) ? PSB_2D_COPYORDER_BR2TL :
++ PSB_2D_COPYORDER_TR2BL;
++ else
++ return (ydir <
++ 0) ? PSB_2D_COPYORDER_BL2TR :
++ PSB_2D_COPYORDER_TL2BR;
++}
++
++/*
++ * @srcOffset in bytes
++ * @srcStride in bytes
++ * @srcFormat psb 2D format defines
++ * @dstOffset in bytes
++ * @dstStride in bytes
++ * @dstFormat psb 2D format defines
++ * @srcX offset in pixels
++ * @srcY offset in pixels
++ * @dstX offset in pixels
++ * @dstY offset in pixels
++ * @sizeX of the copied area
++ * @sizeY of the copied area
++ */
++static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
++ uint32_t src_offset, uint32_t src_stride,
++ uint32_t src_format, uint32_t dst_offset,
++ uint32_t dst_stride, uint32_t dst_format,
++ uint16_t src_x, uint16_t src_y,
++ uint16_t dst_x, uint16_t dst_y,
++ uint16_t size_x, uint16_t size_y)
++{
++ uint32_t blit_cmd;
++ uint32_t buffer[10];
++ uint32_t *buf;
++ uint32_t direction;
++
++ buf = buffer;
++
++ direction =
++ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
++
++ if (direction == PSB_2D_COPYORDER_BR2TL ||
++ direction == PSB_2D_COPYORDER_TR2BL) {
++ src_x += size_x - 1;
++ dst_x += size_x - 1;
++ }
++ if (direction == PSB_2D_COPYORDER_BR2TL ||
++ direction == PSB_2D_COPYORDER_BL2TR) {
++ src_y += size_y - 1;
++ dst_y += size_y - 1;
++ }
++
++ blit_cmd =
++ PSB_2D_BLIT_BH |
++ PSB_2D_ROT_NONE |
++ PSB_2D_DSTCK_DISABLE |
++ PSB_2D_SRCCK_DISABLE |
++ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
++
++ *buf++ = PSB_2D_FENCE_BH;
++ *buf++ =
++ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
++ PSB_2D_DST_STRIDE_SHIFT);
++ *buf++ = dst_offset;
++ *buf++ =
++ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
++ PSB_2D_SRC_STRIDE_SHIFT);
++ *buf++ = src_offset;
++ *buf++ =
++ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
++ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
++ *buf++ = blit_cmd;
++ *buf++ =
++ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
++ PSB_2D_DST_YSTART_SHIFT);
++ *buf++ =
++ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
++ PSB_2D_DST_YSIZE_SHIFT);
++ *buf++ = PSB_2D_FLUSH_BH;
++
++ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
++}
++
++static void psbfb_copyarea_accel(struct fb_info *info,
++ const struct fb_copyarea *a)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_psb_private *dev_priv = par->dev->dev_private;
++ uint32_t offset;
++ uint32_t stride;
++ uint32_t src_format;
++ uint32_t dst_format;
++
++ if (!fb)
++ return;
++
++ offset = psbfb->offset;
++ stride = fb->pitch;
++
++ switch (fb->depth) {
++ case 8:
++ src_format = PSB_2D_SRC_332RGB;
++ dst_format = PSB_2D_DST_332RGB;
++ break;
++ case 15:
++ src_format = PSB_2D_SRC_555RGB;
++ dst_format = PSB_2D_DST_555RGB;
++ break;
++ case 16:
++ src_format = PSB_2D_SRC_565RGB;
++ dst_format = PSB_2D_DST_565RGB;
++ break;
++ case 24:
++ case 32:
++ /* this is wrong but since we don't do blending its okay */
++ src_format = PSB_2D_SRC_8888ARGB;
++ dst_format = PSB_2D_DST_8888ARGB;
++ break;
++ default:
++ /* software fallback */
++ cfb_copyarea(info, a);
++ return;
++ }
++
++ psb_accel_2d_copy(dev_priv,
++ offset, stride, src_format,
++ offset, stride, dst_format,
++ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
++}
++
++static void psbfb_copyarea(struct fb_info *info,
++ const struct fb_copyarea *region)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ if (info->flags & FBINFO_HWACCEL_DISABLED)
++ return cfb_copyarea(info, region);
++ /*
++ * psbfb_copyarea is atomic so need to do instantaneous check of
++ * power on
++ */
++ if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) ||
++ powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) ||
++ !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND))
++ return cfb_copyarea(info, region);
++
++ if (psb_2d_trylock(dev_priv)) {
++ psbfb_copyarea_accel(info, region);
++ psb_2d_unlock(dev_priv);
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ } else
++ cfb_copyarea(info, region);
++}
++#endif
++void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ cfb_imageblit(info, image);
++}
++
++static void psbfb_onoff(struct fb_info *info, int dpms_mode)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_crtc *crtc;
++ struct drm_encoder *encoder;
++ int i;
++
++ /*
++ * For each CRTC in this fb, find all associated encoders
++ * and turn them off, then turn off the CRTC.
++ */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (dpms_mode == DRM_MODE_DPMS_ON)
++ crtc_funcs->dpms(crtc, dpms_mode);
++
++ /* Found a CRTC on this fb, now find encoders */
++ list_for_each_entry(encoder,
++ &dev->mode_config.encoder_list, head) {
++ if (encoder->crtc == crtc) {
++ struct drm_encoder_helper_funcs
++ *encoder_funcs;
++ encoder_funcs = encoder->helper_private;
++ encoder_funcs->dpms(encoder, dpms_mode);
++ }
++ }
++
++ if (dpms_mode == DRM_MODE_DPMS_OFF)
++ crtc_funcs->dpms(crtc, dpms_mode);
++ }
++}
++
++static int psbfb_blank(int blank_mode, struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++
++ par->dpms_state = blank_mode;
++ PSB_DEBUG_PM("psbfb_blank \n");
++ switch (blank_mode) {
++ case FB_BLANK_UNBLANK:
++ psbfb_onoff(info, DRM_MODE_DPMS_ON);
++ break;
++ case FB_BLANK_NORMAL:
++ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
++ break;
++ case FB_BLANK_HSYNC_SUSPEND:
++ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
++ break;
++ case FB_BLANK_VSYNC_SUSPEND:
++ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND);
++ break;
++ case FB_BLANK_POWERDOWN:
++ psbfb_onoff(info, DRM_MODE_DPMS_OFF);
++ break;
++ }
++
++ return 0;
++}
++
++
++static int psbfb_kms_off(struct drm_device *dev, int suspend)
++{
++ struct drm_framebuffer *fb = 0;
++ DRM_DEBUG("psbfb_kms_off_ioctl\n");
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
++ struct fb_info *info = fb->fbdev;
++
++ if (suspend) {
++ fb_set_suspend(info, 1);
++ psbfb_blank(FB_BLANK_POWERDOWN, info);
++ }
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++ return 0;
++}
++
++int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ if (drm_psb_no_fb)
++ return 0;
++ acquire_console_sem();
++ ret = psbfb_kms_off(dev, 0);
++ release_console_sem();
++
++ return ret;
++}
++
++static int psbfb_kms_on(struct drm_device *dev, int resume)
++{
++ struct drm_framebuffer *fb = 0;
++
++ DRM_DEBUG("psbfb_kms_on_ioctl\n");
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
++ struct fb_info *info = fb->fbdev;
++
++ if (resume) {
++ fb_set_suspend(info, 0);
++ psbfb_blank(FB_BLANK_UNBLANK, info);
++ }
++
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++}
++
++int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ if (drm_psb_no_fb)
++ return 0;
++ acquire_console_sem();
++ ret = psbfb_kms_on(dev, 0);
++ release_console_sem();
++ drm_helper_disable_unused_functions(dev);
++ return ret;
++}
++
++void psbfb_suspend(struct drm_device *dev)
++{
++ acquire_console_sem();
++ psbfb_kms_off(dev, 1);
++ release_console_sem();
++}
++
++void psbfb_resume(struct drm_device *dev)
++{
++ acquire_console_sem();
++ psbfb_kms_on(dev, 1);
++ release_console_sem();
++ drm_helper_disable_unused_functions(dev);
++}
++
++static int psbfb_vm_fault(struct vm_area_struct * vma, struct vm_fault * vmf)
++{
++ int page_num = 0;
++ int i;
++ unsigned long address = 0;
++ int ret;
++ unsigned long pfn;
++ struct psb_framebuffer *psbfb = (struct psb_framebuffer *)vma->vm_private_data;
++ struct drm_device * dev = psbfb->base.dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ unsigned long phys_addr = (unsigned long)pg->stolen_base;;
++
++ page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++
++ address = (unsigned long)vmf->virtual_address;
++
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ for(i=0; i<page_num; i++) {
++ pfn = (phys_addr >> PAGE_SHIFT); //phys_to_pfn(phys_addr);
++
++ ret = vm_insert_mixed(vma, address, pfn);
++ if(unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++ break;
++ else if(unlikely(ret != 0)) {
++ ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
++ return ret;
++ }
++
++ address += PAGE_SIZE;
++ phys_addr += PAGE_SIZE;
++ }
++
++ return VM_FAULT_NOPAGE;
++}
++
++static void psbfb_vm_open(struct vm_area_struct * vma)
++{
++ DRM_DEBUG("vm_open\n");
++}
++
++static void psbfb_vm_close(struct vm_area_struct * vma)
++{
++ DRM_DEBUG("vm_close\n");
++}
++
++static struct vm_operations_struct psbfb_vm_ops = {
++ .fault = psbfb_vm_fault,
++ .open = psbfb_vm_open,
++ .close = psbfb_vm_close
++};
++
++static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ char * fb_screen_base = NULL;
++ struct drm_device * dev = psbfb->base.dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++
++ if (vma->vm_pgoff != 0)
++ return -EINVAL;
++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++ return -EINVAL;
++
++ if (!psbfb->addr_space)
++ psbfb->addr_space = vma->vm_file->f_mapping;
++
++ fb_screen_base = (char *)info->screen_base;
++
++ DRM_DEBUG("vm_pgoff 0x%lx, screen base %p vram_addr %p\n", vma->vm_pgoff, fb_screen_base, pg->vram_addr);
++
++ /*if using stolen memory, */
++ if(fb_screen_base == pg->vram_addr) {
++ vma->vm_ops = &psbfb_vm_ops;
++ vma->vm_private_data = (void *)psbfb;
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ } else {
++ /*using IMG meminfo, can I use pvrmmap to map it?*/
++
++
++ }
++
++ return 0;
++}
++
++
++static struct fb_ops psbfb_ops = {
++ .owner = THIS_MODULE,
++ .fb_check_var = psbfb_check_var,
++ .fb_set_par = psbfb_set_par,
++ .fb_setcolreg = psbfb_setcolreg,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_mmap = psbfb_mmap,
++ .fb_blank = psbfb_blank,
++};
++
++static struct drm_mode_set panic_mode;
++
++int psbfb_panic(struct notifier_block *n, unsigned long ununsed,
++ void *panic_str)
++{
++ DRM_ERROR("panic occurred, switching back to text console\n");
++ drm_crtc_helper_set_config(&panic_mode);
++
++ return 0;
++}
++EXPORT_SYMBOL(psbfb_panic);
++
++static struct notifier_block paniced = {
++ .notifier_call = psbfb_panic,
++};
++
++
++static struct drm_framebuffer *psb_framebuffer_create
++ (struct drm_device *dev, struct drm_mode_fb_cmd *r,
++ void *mm_private)
++{
++ struct psb_framebuffer *fb;
++ int ret;
++
++ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
++ if (!fb)
++ return NULL;
++
++ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
++
++ if (ret)
++ goto err;
++
++ drm_helper_mode_fill_fb_struct(&fb->base, r);
++
++ fb->pvrBO = mm_private;
++
++ return &fb->base;
++
++err:
++ kfree(fb);
++ return NULL;
++}
++
++static struct drm_framebuffer *psb_user_framebuffer_create
++ (struct drm_device *dev, struct drm_file *filp,
++ struct drm_mode_fb_cmd *r)
++{
++ struct psb_framebuffer *psbfb;
++ struct drm_framebuffer *fb;
++ struct fb_info *info;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hKernelMemInfo = (IMG_HANDLE)r->handle;
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *) dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int ret;
++ uint32_t offset;
++ uint64_t size;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++ DRM_ERROR("Cannot get meminfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++
++ return NULL;
++ }
++
++ DRM_DEBUG("Got Kernel MemInfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++
++ /* JB: TODO not drop, make smarter */
++ size = psKernelMemInfo->ui32AllocSize;
++ if (size < r->height * r->pitch)
++ return NULL;
++
++ /* JB: TODO not drop, refcount buffer */
++ /* return psb_framebuffer_create(dev, r, bo); */
++
++ fb = psb_framebuffer_create(dev, r, (void *)psKernelMemInfo);
++ if (!fb) {
++ DRM_ERROR("failed to allocate fb.\n");
++ return NULL;
++ }
++
++ psbfb = to_psb_fb(fb);
++ psbfb->size = size;
++ psbfb->hKernelMemInfo = hKernelMemInfo;
++
++ DRM_DEBUG("Mapping to gtt..., KernelMemInfo %p\n", psKernelMemInfo);
++
++ /*if not VRAM, map it into tt aperture*/
++ if (psKernelMemInfo->pvLinAddrKM != pg->vram_addr) {
++ ret = psb_gtt_map_meminfo(dev, hKernelMemInfo, &offset);
++ if (ret) {
++ DRM_ERROR("map meminfo for %lx failed\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return NULL;
++ }
++ psbfb->offset = (offset << PAGE_SHIFT);
++ } else {
++ psbfb->offset = 0;
++ }
++
++ info = framebuffer_alloc(sizeof(struct psbfb_par), &dev->pdev->dev);
++ if (!info)
++ return NULL;
++
++ strcpy(info->fix.id, "psbfb");
++ info->fix.type = FB_TYPE_PACKED_PIXELS;
++ info->fix.visual = FB_VISUAL_TRUECOLOR;
++ info->fix.type_aux = 0;
++ info->fix.xpanstep = 1; /* doing it in hw */
++ info->fix.ypanstep = 1; /* doing it in hw */
++ info->fix.ywrapstep = 0;
++ info->fix.accel = FB_ACCEL_I830;
++ info->fix.type_aux = 0;
++
++ info->flags = FBINFO_DEFAULT;
++
++ info->fbops = &psbfb_ops;
++
++ info->fix.line_length = fb->pitch;
++ info->fix.smem_start = dev->mode_config.fb_base;
++ info->fix.smem_len = size;
++
++ info->flags = FBINFO_DEFAULT;
++
++ info->screen_base = psKernelMemInfo->pvLinAddrKM;
++ info->screen_size = size;
++
++ /* it is called for kms flip, the back buffer has been rendered,
++ * then we should not clear it*/
++#if 0
++ if (is_iomem)
++ memset_io(info->screen_base, 0, size);
++ else
++ memset(info->screen_base, 0, size);
++#endif
++ info->pseudo_palette = fb->pseudo_palette;
++ info->var.xres_virtual = fb->width;
++ info->var.yres_virtual = fb->height;
++ info->var.bits_per_pixel = fb->bits_per_pixel;
++ info->var.xoffset = 0;
++ info->var.yoffset = 0;
++ info->var.activate = FB_ACTIVATE_NOW;
++ info->var.height = -1;
++ info->var.width = -1;
++
++ info->var.xres = r->width;
++ info->var.yres = r->height;
++
++ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++ info->pixmap.size = 64 * 1024;
++ info->pixmap.buf_align = 8;
++ info->pixmap.access_align = 32;
++ info->pixmap.flags = FB_PIXMAP_SYSTEM;
++ info->pixmap.scan_align = 1;
++
++ fill_fb_bitfield(&info->var, fb->depth);
++
++ register_framebuffer(info);
++
++ fb->fbdev = info;
++
++ return fb;
++}
++
++int psbfb_create(struct drm_device *dev, uint32_t fb_width,
++ uint32_t fb_height, uint32_t surface_width,
++ uint32_t surface_height, struct psb_framebuffer **psbfb_p)
++{
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_framebuffer *fb;
++ struct psb_framebuffer *psbfb;
++ struct drm_mode_fb_cmd mode_cmd;
++ struct device *device = &dev->pdev->dev;
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int size, aligned_size, ret;
++
++ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */
++ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */
++
++ mode_cmd.bpp = 32;
++ //HW requires pitch to be 64 byte aligned
++ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
++ mode_cmd.depth = 24;
++
++ size = mode_cmd.pitch * mode_cmd.height;
++ aligned_size = ALIGN(size, PAGE_SIZE);
++
++ mutex_lock(&dev->struct_mutex);
++ fb = psb_framebuffer_create(dev, &mode_cmd, NULL);
++ if (!fb) {
++
++ DRM_ERROR("failed to allocate fb.\n");
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++ psbfb = to_psb_fb(fb);
++ psbfb->size = size;
++
++ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
++ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
++ if (!info) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++
++ par = info->par;
++ par->psbfb = psbfb;
++
++ strcpy(info->fix.id, "psbfb");
++ info->fix.type = FB_TYPE_PACKED_PIXELS;
++ info->fix.visual = FB_VISUAL_TRUECOLOR;
++ info->fix.type_aux = 0;
++ info->fix.xpanstep = 1; /* doing it in hw */
++ info->fix.ypanstep = 1; /* doing it in hw */
++ info->fix.ywrapstep = 0;
++ info->fix.accel = FB_ACCEL_I830;
++ info->fix.type_aux = 0;
++
++ info->flags = FBINFO_DEFAULT;
++
++ info->fbops = &psbfb_ops;
++
++ info->fix.line_length = fb->pitch;
++ info->fix.smem_start = dev->mode_config.fb_base;
++ info->fix.smem_len = size;
++ info->flags = FBINFO_DEFAULT;
++ info->screen_base = (char *)pg->vram_addr;
++ info->screen_size = size;
++ memset(info->screen_base, 0, size);
++
++ info->pseudo_palette = fb->pseudo_palette;
++ info->var.xres_virtual = fb->width;
++ info->var.yres_virtual = fb->height;
++ info->var.bits_per_pixel = fb->bits_per_pixel;
++ info->var.xoffset = 0;
++ info->var.yoffset = 0;
++ info->var.activate = FB_ACTIVATE_NOW;
++ info->var.height = -1;
++ info->var.width = -1;
++
++ info->var.xres = fb_width;
++ info->var.yres = fb_height;
++
++ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++ info->pixmap.size = 64 * 1024;
++ info->pixmap.buf_align = 8;
++ info->pixmap.access_align = 32;
++ info->pixmap.flags = FB_PIXMAP_SYSTEM;
++ info->pixmap.scan_align = 1;
++
++ DRM_DEBUG("fb depth is %d\n", fb->depth);
++ DRM_DEBUG(" pitch is %d\n", fb->pitch);
++ fill_fb_bitfield(&info->var, fb->depth);
++
++ fb->fbdev = info;
++
++ par->dev = dev;
++
++ /* To allow resizing without swapping buffers */
++ printk(KERN_INFO"allocated %dx%d fb\n",
++ psbfb->base.width,
++ psbfb->base.height);
++
++ if (psbfb_p)
++ *psbfb_p = psbfb;
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++out_err1:
++ fb->funcs->destroy(fb);
++out_err0:
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++
++static int psbfb_multi_fb_probe_crtc(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct drm_framebuffer *fb = crtc->fb;
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct drm_connector *connector;
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_mode_set *modeset;
++ unsigned int width, height;
++ int new_fb = 0;
++ int ret, i, conn_count;
++
++ if (!drm_helper_crtc_in_use(crtc))
++ return 0;
++
++ if (!crtc->desired_mode)
++ return 0;
++
++ width = crtc->desired_mode->hdisplay;
++ height = crtc->desired_mode->vdisplay;
++
++ /* is there an fb bound to this crtc already */
++ if (!psb_intel_crtc->mode_set.fb) {
++ ret =
++ psbfb_create(dev, width, height, width, height,
++ &psbfb);
++ if (ret)
++ return -EINVAL;
++ new_fb = 1;
++ } else {
++ fb = psb_intel_crtc->mode_set.fb;
++ if ((fb->width < width) || (fb->height < height))
++ return -EINVAL;
++ }
++
++ info = fb->fbdev;
++ par = info->par;
++
++ modeset = &psb_intel_crtc->mode_set;
++ modeset->fb = fb;
++ conn_count = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder)
++ if (connector->encoder->crtc == modeset->crtc) {
++ modeset->connectors[conn_count] =
++ connector;
++ conn_count++;
++ if (conn_count > INTELFB_CONN_LIMIT)
++ BUG();
++ }
++ }
++
++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
++ modeset->connectors[i] = NULL;
++
++ par->crtc_ids[0] = crtc->base.id;
++
++ modeset->num_connectors = conn_count;
++ if (modeset->mode != modeset->crtc->desired_mode)
++ modeset->mode = modeset->crtc->desired_mode;
++
++ par->crtc_count = 1;
++
++ if (new_fb) {
++ info->var.pixclock = -1;
++ if (register_framebuffer(info) < 0)
++ return -EINVAL;
++ } else
++ psbfb_set_par(info);
++
++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
++ info->fix.id);
++
++ /* Switch back to kernel console on panic */
++ panic_mode = *modeset;
++ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
++ printk(KERN_INFO "registered panic notifier\n");
++
++ return 0;
++}
++
++static int psbfb_multi_fb_probe(struct drm_device *dev)
++{
++
++ struct drm_crtc *crtc;
++ int ret = 0;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ ret = psbfb_multi_fb_probe_crtc(dev, crtc);
++ if (ret)
++ return ret;
++ }
++ return ret;
++}
++
++static int psbfb_single_fb_probe(struct drm_device *dev)
++{
++ struct drm_crtc *crtc;
++ struct drm_connector *connector;
++ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1;
++ unsigned int surface_width = 0, surface_height = 0;
++ int new_fb = 0;
++ int crtc_count = 0;
++ int ret, i, conn_count = 0;
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_mode_set *modeset = NULL;
++ struct drm_framebuffer *fb = NULL;
++ struct psb_framebuffer *psbfb = NULL;
++
++ /* first up get a count of crtcs now in use and
++ * new min/maxes width/heights */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if (drm_helper_crtc_in_use(crtc)) {
++ if (crtc->desired_mode) {
++ fb = crtc->fb;
++ if (crtc->desired_mode->hdisplay <
++ fb_width)
++ fb_width =
++ crtc->desired_mode->hdisplay;
++
++ if (crtc->desired_mode->vdisplay <
++ fb_height)
++ fb_height =
++ crtc->desired_mode->vdisplay;
++
++ if (crtc->desired_mode->hdisplay >
++ surface_width)
++ surface_width =
++ crtc->desired_mode->hdisplay;
++
++ if (crtc->desired_mode->vdisplay >
++ surface_height)
++ surface_height =
++ crtc->desired_mode->vdisplay;
++
++ }
++ crtc_count++;
++ }
++ }
++
++ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
++ /* hmm everyone went away - assume VGA cable just fell out
++ and will come back later. */
++ return 0;
++ }
++
++ /* do we have an fb already? */
++ if (list_empty(&dev->mode_config.fb_kernel_list)) {
++ /* create an fb if we don't have one */
++ ret =
++ psbfb_create(dev, fb_width, fb_height, surface_width,
++ surface_height, &psbfb);
++ if (ret)
++ return -EINVAL;
++ new_fb = 1;
++ fb = &psbfb->base;
++ } else {
++ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
++ struct drm_framebuffer, filp_head);
++
++ /* if someone hotplugs something bigger than we have already
++ * allocated, we are pwned. As really we can't resize an
++ * fbdev that is in the wild currently due to fbdev not really
++ * being designed for the lower layers moving stuff around
++ * under it. - so in the grand style of things - punt. */
++ if ((fb->width < surface_width)
++ || (fb->height < surface_height)) {
++ DRM_ERROR
++ ("Framebuffer not large enough to scale"
++ " console onto.\n");
++ return -EINVAL;
++ }
++ }
++
++ info = fb->fbdev;
++ par = info->par;
++
++ crtc_count = 0;
++ /* okay we need to setup new connector sets in the crtcs */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ modeset = &psb_intel_crtc->mode_set;
++ modeset->fb = fb;
++ conn_count = 0;
++ list_for_each_entry(connector,
++ &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder)
++ if (connector->encoder->crtc ==
++ modeset->crtc) {
++ modeset->connectors[conn_count] =
++ connector;
++ conn_count++;
++ if (conn_count >
++ INTELFB_CONN_LIMIT)
++ BUG();
++ }
++ }
++
++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
++ modeset->connectors[i] = NULL;
++
++ par->crtc_ids[crtc_count++] = crtc->base.id;
++
++ modeset->num_connectors = conn_count;
++ if (modeset->mode != modeset->crtc->desired_mode)
++ modeset->mode = modeset->crtc->desired_mode;
++ }
++ par->crtc_count = crtc_count;
++
++ if (new_fb) {
++ info->var.pixclock = -1;
++ if (register_framebuffer(info) < 0)
++ return -EINVAL;
++ } else
++ psbfb_set_par(info);
++
++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
++ info->fix.id);
++
++ /* Switch back to kernel console on panic */
++ panic_mode = *modeset;
++ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
++ printk(KERN_INFO "registered panic notifier\n");
++
++ return 0;
++}
++
++int psbfb_probe(struct drm_device *dev)
++{
++ int ret = 0;
++
++ DRM_DEBUG("\n");
++
++ /* something has changed in the lower levels of hell - deal with it
++ here */
++
++ /* two modes : a) 1 fb to rule all crtcs.
++ b) one fb per crtc.
++ two actions 1) new connected device
++ 2) device removed.
++ case a/1 : if the fb surface isn't big enough -
++ resize the surface fb.
++ if the fb size isn't big enough - resize fb into surface.
++ if everything big enough configure the new crtc/etc.
++ case a/2 : undo the configuration
++ possibly resize down the fb to fit the new configuration.
++ case b/1 : see if it is on a new crtc - setup a new fb and add it.
++ case b/2 : teardown the new fb.
++ */
++
++ /* mode a first */
++ /* search for an fb */
++ if (0 /*i915_fbpercrtc == 1 */)
++ ret = psbfb_multi_fb_probe(dev);
++ else
++ ret = psbfb_single_fb_probe(dev);
++
++ return ret;
++}
++EXPORT_SYMBOL(psbfb_probe);
++
++int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
++{
++ struct fb_info *info;
++
++ if (drm_psb_no_fb)
++ return 0;
++
++ info = fb->fbdev;
++
++ if (info) {
++ unregister_framebuffer(info);
++ framebuffer_release(info);
++ }
++
++ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
++ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
++ return 0;
++}
++EXPORT_SYMBOL(psbfb_remove);
++
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ /* JB: TODO currently we can't go from a bo to a handle with ttm */
++ (void) file_priv;
++ *handle = 0;
++ return 0;
++}
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct drm_device *dev = fb->dev;
++ struct psb_framebuffer *psbfb = to_psb_fb(fb);
++
++ /*ummap gtt pages*/
++ psb_gtt_unmap_meminfo(dev, psbfb->hKernelMemInfo);
++
++ if (fb->fbdev)
++ psbfb_remove(dev, fb);
++
++ /* JB: TODO not drop, refcount buffer */
++ drm_framebuffer_cleanup(fb);
++
++ kfree(fb);
++}
++
++static const struct drm_mode_config_funcs psb_mode_funcs = {
++ .fb_create = psb_user_framebuffer_create,
++ .fb_changed = psbfb_probe,
++};
++
++static int psb_create_backlight_property(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *) dev->dev_private;
++ struct drm_property *backlight;
++
++ if (dev_priv->backlight_property)
++ return 0;
++
++ backlight = drm_property_create(dev,
++ DRM_MODE_PROP_RANGE,
++ "backlight",
++ 2);
++ backlight->values[0] = 0;
++ backlight->values[1] = 100;
++
++ dev_priv->backlight_property = backlight;
++
++ return 0;
++}
++
++static void psb_setup_outputs(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct drm_connector *connector;
++
++ drm_mode_create_scaling_mode_property(dev);
++
++ psb_create_backlight_property(dev);
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->iLVDS_enable)
++ /* Set up integrated LVDS for MRST */
++ mrst_lvds_init(dev, &dev_priv->mode_dev);
++ else {
++ /* Set up integrated MIPI for MRST */
++ mrst_dsi_init(dev, &dev_priv->mode_dev);
++ }
++ } else {
++ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
++ psb_intel_sdvo_init(dev, SDVOB);
++ }
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ int crtc_mask = 0, clone_mask = 0;
++
++ /* valid crtcs */
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_SDVO:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = (1 << INTEL_OUTPUT_SDVO);
++ break;
++ case INTEL_OUTPUT_LVDS:
++ if (IS_MRST(dev))
++ crtc_mask = (1 << 0);
++ else
++ crtc_mask = (1 << 1);
++
++ clone_mask = (1 << INTEL_OUTPUT_LVDS);
++ break;
++ case INTEL_OUTPUT_MIPI:
++ crtc_mask = (1 << 0);
++ clone_mask = (1 << INTEL_OUTPUT_MIPI);
++ break;
++ }
++ encoder->possible_crtcs = crtc_mask;
++ encoder->possible_clones =
++ psb_intel_connector_clones(dev, clone_mask);
++ }
++}
++
++static void *psb_bo_from_handle(struct drm_device *dev,
++ struct drm_file *file_priv,
++ unsigned int handle)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hKernelMemInfo = (IMG_HANDLE)handle;
++ int ret;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++		DRM_ERROR("Cannot get meminfo for handle %p\n",
++			  hKernelMemInfo);
++ return NULL;
++ }
++
++ return (void *)psKernelMemInfo;
++}
++
++static size_t psb_bo_size(struct drm_device *dev, void *bof)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO *)bof;
++ return (size_t)psKernelMemInfo->ui32AllocSize;
++}
++
++static size_t psb_bo_offset(struct drm_device *dev, void *bof)
++{
++ struct psb_framebuffer *psbfb
++ = (struct psb_framebuffer *)bof;
++
++ return (size_t)psbfb->offset;
++}
++
++static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
++{
++#if 0 /* JB: Not used for the drop */
++ struct ttm_buffer_object *bo = bof;
++ We should do things like check if
++ the buffer is in a scanout : able
++ place.And make sure that its pinned.
++#endif
++ return 0;
++}
++
++static int psb_bo_unpin_for_scanout(struct drm_device *dev,
++				    void *bo) {
++#if 0 /* JB: Not used for the drop */
++ struct ttm_buffer_object *bo = bof;
++#endif
++ return 0;
++}
++
++void psb_modeset_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++ int i;
++ int num_pipe;
++
++ /* Init mm functions */
++ mode_dev->bo_from_handle = psb_bo_from_handle;
++ mode_dev->bo_size = psb_bo_size;
++ mode_dev->bo_offset = psb_bo_offset;
++ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
++ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
++
++ drm_mode_config_init(dev);
++
++ dev->mode_config.min_width = 0;
++ dev->mode_config.min_height = 0;
++
++ dev->mode_config.funcs = (void *) &psb_mode_funcs;
++
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++
++ /* set memory base */
++ /* MRST and PSB should use BAR 2*/
++ dev->mode_config.fb_base =
++ pci_resource_start(dev->pdev, 2);
++
++ if (IS_MRST(dev))
++ num_pipe = 1;
++ else
++ num_pipe = 2;
++
++
++ for (i = 0; i < num_pipe; i++)
++ psb_intel_crtc_init(dev, i, mode_dev);
++
++ psb_setup_outputs(dev);
++
++ /* setup fbs */
++ /* drm_initial_config(dev); */
++}
++
++void psb_modeset_cleanup(struct drm_device *dev)
++{
++	drm_mode_config_cleanup(dev);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_fb.h b/drivers/gpu/drm/mrst/drv/psb_fb.h
+new file mode 100644
+index 0000000..1986eca
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_fb.h
+@@ -0,0 +1,49 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#ifndef _PSB_FB_H_
++#define _PSB_FB_H_
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++
++/*IMG Headers*/
++#include "servicesint.h"
++
++struct psb_framebuffer {
++ struct drm_framebuffer base;
++ struct address_space *addr_space;
++ struct ttm_buffer_object *bo;
++ /* struct ttm_bo_kmap_obj kmap; */
++ PVRSRV_KERNEL_MEM_INFO *pvrBO;
++ IMG_HANDLE hKernelMemInfo;
++ uint32_t size;
++ uint32_t offset;
++};
++
++#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
++
++
++extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_fence.c b/drivers/gpu/drm/mrst/drv/psb_fence.c
+new file mode 100644
+index 0000000..b630fc2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_fence.c
+@@ -0,0 +1,158 @@
++/*
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++
++static void psb_fence_poll(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t waiting_types)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t sequence = 0;
++
++ if (unlikely(!dev_priv))
++ return;
++
++ if (waiting_types) {
++ switch (fence_class) {
++ case PSB_ENGINE_VIDEO:
++ sequence = msvdx_priv->msvdx_current_sequence;
++ break;
++ case LNC_ENGINE_ENCODE:
++ sequence = *((uint32_t *)topaz_priv->topaz_sync_addr);
++ break;
++ default:
++ break;
++ }
++
++ ttm_fence_handler(fdev, fence_class, sequence,
++ _PSB_FENCE_TYPE_EXE, 0);
++
++ }
++}
++
++void psb_fence_error(struct drm_device *dev,
++ uint32_t fence_class,
++ uint32_t sequence, uint32_t type, int error)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ unsigned long irq_flags;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++
++ BUG_ON(fence_class >= PSB_NUM_ENGINES);
++ write_lock_irqsave(&fc->lock, irq_flags);
++ ttm_fence_handler(fdev, fence_class, sequence, type, error);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags, uint32_t *sequence,
++ unsigned long *timeout_jiffies)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ uint32_t seq = 0;
++
++ if (!dev_priv)
++ return -EINVAL;
++
++ if (fence_class >= PSB_NUM_ENGINES)
++ return -EINVAL;
++
++ spin_lock(&dev_priv->sequence_lock);
++ seq = dev_priv->sequence[fence_class]++;
++ spin_unlock(&dev_priv->sequence_lock);
++
++ *sequence = seq;
++ *timeout_jiffies = jiffies + DRM_HZ * 3;
++
++ return 0;
++}
++
++static void psb_fence_lockup(struct ttm_fence_object *fence,
++ uint32_t fence_types)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ if (fence->fence_class == LNC_ENGINE_ENCODE) {
++ DRM_ERROR("TOPAZ timeout (probable lockup)\n");
++
++ write_lock(&fc->lock);
++ lnc_topaz_handle_timeout(fence->fdev);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, fence_types, -EBUSY);
++ write_unlock(&fc->lock);
++ } else {
++ DRM_ERROR("MSVDX timeout (probable lockup)\n");
++ write_lock(&fc->lock);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, fence_types, -EBUSY);
++ write_unlock(&fc->lock);
++ }
++}
++
++void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ psb_fence_poll(fdev, fence_class, fc->waiting_types);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++
++static struct ttm_fence_driver psb_ttm_fence_driver = {
++ .has_irq = NULL,
++ .emit = psb_fence_emit_sequence,
++ .flush = NULL,
++ .poll = psb_fence_poll,
++ .needed_flush = NULL,
++ .wait = NULL,
++ .signaled = NULL,
++ .lockup = psb_fence_lockup,
++};
++
++int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
++ .flush_diff = (1 << 29),
++ .sequence_mask = 0xFFFFFFFF
++ };
++
++ return ttm_fence_device_init(PSB_NUM_ENGINES,
++ dev_priv->mem_global_ref.object,
++ fdev, &fci, 1,
++ &psb_ttm_fence_driver);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_gtt.c b/drivers/gpu/drm/mrst/drv/psb_gtt.c
+new file mode 100644
+index 0000000..5f66e75
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_gtt.c
+@@ -0,0 +1,1040 @@
++/*
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_pvr_glue.h"
++
++static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
++{
++ uint32_t mask = PSB_PTE_VALID;
++
++ if (type & PSB_MMU_CACHED_MEMORY)
++ mask |= PSB_PTE_CACHED;
++ if (type & PSB_MMU_RO_MEMORY)
++ mask |= PSB_PTE_RO;
++ if (type & PSB_MMU_WO_MEMORY)
++ mask |= PSB_PTE_WO;
++
++ return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
++{
++ struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
++
++ if (!tmp)
++ return NULL;
++
++ init_rwsem(&tmp->sem);
++ tmp->dev = dev;
++
++ return tmp;
++}
++
++void psb_gtt_takedown(struct psb_gtt *pg, int free)
++{
++	struct drm_psb_private *dev_priv;
++
++	if (!pg)
++		return;
++	dev_priv = pg->dev->dev_private;
++ if (pg->gtt_map) {
++ iounmap(pg->gtt_map);
++ pg->gtt_map = NULL;
++ }
++ if (pg->initialized) {
++ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl);
++ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
++ (void) PSB_RVDC32(PSB_PGETBL_CTL);
++ }
++ if (free)
++ kfree(pg);
++}
++
++int psb_gtt_init(struct psb_gtt *pg, int resume)
++{
++ struct drm_device *dev = pg->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned gtt_pages;
++ unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
++ unsigned long rar_stolen_size;
++ unsigned i, num_pages;
++ unsigned pfn_base;
++ uint32_t ci_pages, vram_pages;
++ uint32_t tt_pages;
++ uint32_t *ttm_gtt_map;
++ uint32_t dvmt_mode = 0;
++
++ int ret = 0;
++ uint32_t pte;
++
++ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
++ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ (void) PSB_RVDC32(PSB_PGETBL_CTL);
++
++ pg->initialized = 1;
++
++ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
++
++ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
++ /* fix me: video mmu has hw bug to access 0x0D0000000,
++ * then make gatt start at 0x0e000,0000 */
++ pg->mmu_gatt_start = 0xE0000000;
++ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
++ gtt_pages =
++ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
++ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
++ >> PAGE_SHIFT;
++
++ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
++ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
++
++ /* CI is not included in the stolen size since the TOPAZ MMU bug */
++ ci_stolen_size = dev_priv->ci_region_size;
++ /* Don't add CI & RAR share buffer space
++ * managed by TTM to stolen_size */
++ stolen_size = vram_stolen_size;
++
++ rar_stolen_size = dev_priv->rar_region_size;
++
++ printk(KERN_INFO"GMMADR(region 0) start: 0x%08x (%dM).\n",
++ pg->gatt_start, pg->gatt_pages/256);
++ printk(KERN_INFO"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
++ pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
++ printk(KERN_INFO"Stole memory information \n");
++ printk(KERN_INFO" base in RAM: 0x%x \n", pg->stolen_base);
++ printk(KERN_INFO" size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
++ vram_stolen_size/1024);
++ dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7;
++ printk(KERN_INFO" the correct size should be: %dM(dvmt mode=%d) \n",
++ (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
++
++ if (ci_stolen_size > 0)
++ printk(KERN_INFO"CI Stole memory: RAM base = 0x%08x, size = %lu M \n",
++ dev_priv->ci_region_start,
++ ci_stolen_size / 1024 / 1024);
++ if (rar_stolen_size > 0)
++ printk(KERN_INFO"RAR Stole memory: RAM base = 0x%08x, size = %lu M \n",
++ dev_priv->rar_region_start,
++ rar_stolen_size / 1024 / 1024);
++
++ if (resume && (gtt_pages != pg->gtt_pages) &&
++ (stolen_size != pg->stolen_size)) {
++ DRM_ERROR("GTT resume error.\n");
++ ret = -EINVAL;
++ goto out_err;
++ }
++
++ pg->gtt_pages = gtt_pages;
++ pg->stolen_size = stolen_size;
++ pg->vram_stolen_size = vram_stolen_size;
++ pg->ci_stolen_size = ci_stolen_size;
++ pg->rar_stolen_size = rar_stolen_size;
++ pg->gtt_map =
++ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
++ if (!pg->gtt_map) {
++ DRM_ERROR("Failure to map gtt.\n");
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
++ if (!pg->vram_addr) {
++ DRM_ERROR("Failure to map stolen base.\n");
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++	DRM_DEBUG("vram kernel virtual address %p\n", pg->vram_addr);
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ ttm_gtt_map = pg->gtt_map + tt_pages / 2;
++
++ /*
++ * insert vram stolen pages.
++ */
++
++ pfn_base = pg->stolen_base >> PAGE_SHIFT;
++ vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base, 0);
++ for (i = 0; i < num_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i, 0);
++ iowrite32(pte, pg->gtt_map + i);
++ }
++
++ /*
++ * Init rest of gtt managed by IMG.
++ */
++ pfn_base = page_to_pfn(dev_priv->scratch_page);
++ pte = psb_gtt_mask_pte(pfn_base, 0);
++ for (; i < tt_pages / 2 - 1; ++i)
++ iowrite32(pte, pg->gtt_map + i);
++
++ /*
++ * insert CI stolen pages
++ */
++
++ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
++ ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4);
++ for (i = 0; i < num_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i, 0);
++ iowrite32(pte, ttm_gtt_map + i);
++ }
++
++ /*
++ * insert RAR stolen pages
++ */
++ if (rar_stolen_size != 0) {
++ pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT;
++ num_pages = rar_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d RAR stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base,
++ (ttm_gtt_map - pg->gtt_map + i) * 4);
++ for (; i < num_pages + ci_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i - ci_pages, 0);
++ iowrite32(pte, ttm_gtt_map + i);
++ }
++ }
++ /*
++ * Init rest of gtt managed by TTM.
++ */
++
++ pfn_base = page_to_pfn(dev_priv->scratch_page);
++ pte = psb_gtt_mask_pte(pfn_base, 0);
++ PSB_DEBUG_INIT("Initializing the rest of a total "
++ "of %d gtt pages.\n", pg->gatt_pages);
++
++ for (; i < pg->gatt_pages - tt_pages / 2; ++i)
++ iowrite32(pte, ttm_gtt_map + i);
++ (void) ioread32(pg->gtt_map + i - 1);
++
++ return 0;
++
++out_err:
++ psb_gtt_takedown(pg, 0);
++ return ret;
++}
++
++int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
++ unsigned offset_pages, unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride, int type)
++{
++ unsigned rows = 1;
++ unsigned add;
++ unsigned row_add;
++ unsigned i;
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ uint32_t pte;
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride;
++ row_add = hw_tile_stride;
++
++ down_read(&pg->sem);
++ for (i = 0; i < rows; ++i) {
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < desired_tile_stride; ++j) {
++ pte =
++ psb_gtt_mask_pte(page_to_pfn(*pages++), type);
++ iowrite32(pte, cur_page++);
++ }
++ offset_pages += add;
++ }
++ (void) ioread32(cur_page - 1);
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_insert_phys_addresses(struct psb_gtt *pg, IMG_CPU_PHYADDR *pPhysFrames,
++ unsigned offset_pages, unsigned num_pages, int type)
++{
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ uint32_t pte;
++
++ //printk("Allocatng IMG GTT mem at %x (pages %d)\n",offset_pages,num_pages);
++ down_read(&pg->sem);
++
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < num_pages; ++j)
++ {
++ pte = psb_gtt_mask_pte( (pPhysFrames++)->uiAddr >> PAGE_SHIFT, type);
++ iowrite32(pte, cur_page++);
++ //printk("PTE %d: %x/%x\n",j,(pPhysFrames-1)->uiAddr,pte);
++ }
++ (void) ioread32(cur_page - 1);
++
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
++ unsigned num_pages, unsigned desired_tile_stride,
++ unsigned hw_tile_stride)
++{
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++ unsigned rows = 1;
++ unsigned add;
++ unsigned row_add;
++ unsigned i;
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
++ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride;
++ row_add = hw_tile_stride;
++
++ down_read(&pg->sem);
++ for (i = 0; i < rows; ++i) {
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < desired_tile_stride; ++j)
++ iowrite32(pte, cur_page++);
++
++ offset_pages += add;
++ }
++ (void) ioread32(cur_page - 1);
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_mm_init(struct psb_gtt *pg)
++{
++ struct psb_gtt_mm *gtt_mm;
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++ struct drm_open_hash *ht;
++ struct drm_mm *mm;
++ int ret;
++ uint32_t tt_start;
++ uint32_t tt_size;
++
++ if (!pg || !pg->initialized) {
++ DRM_DEBUG("Invalid gtt struct\n");
++ return -EINVAL;
++ }
++
++ gtt_mm = kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL);
++ if (!gtt_mm)
++ return -ENOMEM;
++
++ spin_lock_init(&gtt_mm->lock);
++
++ ht = &gtt_mm->hash;
++ ret = drm_ht_create(ht, 20);
++ if (ret) {
++ DRM_DEBUG("Create hash table failed(%d)\n", ret);
++ goto err_free;
++ }
++
++ tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages;
++ tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ mm = &gtt_mm->base;
++
++ /*will use tt_start ~ 128M for IMG TT buffers*/
++ ret = drm_mm_init(mm, tt_start, ((tt_size / 2) - tt_start));
++ if (ret) {
++ DRM_DEBUG("drm_mm_int error(%d)\n", ret);
++ goto err_mm_init;
++ }
++
++ gtt_mm->count = 0;
++
++ dev_priv->gtt_mm = gtt_mm;
++
++ DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
++ (unsigned long)tt_start,
++ (unsigned long)((tt_size / 2) - tt_start));
++ return 0;
++err_mm_init:
++ drm_ht_remove(ht);
++
++err_free:
++ kfree(gtt_mm);
++ return ret;
++}
++
++/**
++ * Delete all hash entries;
++ */
++void psb_gtt_mm_takedown(void)
++{
++ return;
++}
++
++static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry **hentry)
++{
++ struct drm_hash_item *entry;
++ struct psb_gtt_hash_entry *psb_entry;
++ int ret;
++
++ ret = drm_ht_find_item(&mm->hash, tgid, &entry);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry pid=%ld\n", tgid);
++ return ret;
++ }
++
++ psb_entry = container_of(entry, struct psb_gtt_hash_entry, item);
++ if (!psb_entry) {
++ DRM_DEBUG("Invalid entry");
++ return -EINVAL;
++ }
++
++ *hentry = psb_entry;
++ return 0;
++}
++
++
++static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry *hentry)
++{
++ struct drm_hash_item *item;
++ int ret;
++
++ if (!hentry) {
++ DRM_DEBUG("Invalid parameters\n");
++ return -EINVAL;
++ }
++
++ item = &hentry->item;
++ item->key = tgid;
++
++ /**
++ * NOTE: drm_ht_insert_item will perform such a check
++ ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
++ if (!ret) {
++ DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
++ return -EAGAIN;
++ }
++ */
++
++ /*Insert the given entry*/
++ ret = drm_ht_insert_item(&mm->hash, item);
++ if (ret) {
++ DRM_DEBUG("Insert failure\n");
++ return ret;
++ }
++
++ mm->count++;
++
++ return 0;
++}
++
++static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry **entry)
++{
++ struct psb_gtt_hash_entry *hentry;
++ int ret;
++
++ /*if the hentry for this tgid exists, just get it and return*/
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
++ if (!ret) {
++ DRM_DEBUG("Entry for tgid %ld exist, hentry %p\n",
++ tgid, hentry);
++ *entry = hentry;
++ spin_unlock(&mm->lock);
++ return 0;
++ }
++ spin_unlock(&mm->lock);
++
++ DRM_DEBUG("Entry for tgid %ld doesn't exist, will create it\n", tgid);
++
++ hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL);
++ if (!hentry) {
++ DRM_DEBUG("Kmalloc failled\n");
++ return -ENOMEM;
++ }
++
++	ret = drm_ht_create(&hentry->ht, 20);
++	if (ret) {
++		DRM_DEBUG("Create hash table failed\n");
++		kfree(hentry); return ret;
++	}
++
++	spin_lock(&mm->lock);
++	ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry);
++	spin_unlock(&mm->lock);
++
++	if (!ret)
++		*entry = hentry;
++	else { drm_ht_remove(&hentry->ht); kfree(hentry); }
++	return ret;
++}
++
++static struct psb_gtt_hash_entry *
++psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
++{
++ struct psb_gtt_hash_entry *tmp;
++ int ret;
++
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry pid %ld\n", tgid);
++ return NULL;
++ }
++
++ /*remove it from ht*/
++ drm_ht_remove_item(&mm->hash, &tmp->item);
++
++ mm->count--;
++
++ return tmp;
++}
++
++static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
++{
++ struct psb_gtt_hash_entry *entry;
++
++ entry = psb_gtt_mm_remove_ht_locked(mm, tgid);
++
++ if (!entry) {
++ DRM_DEBUG("Invalid entry");
++ return -EINVAL;
++ }
++
++ /*delete ht*/
++ drm_ht_remove(&entry->ht);
++
++ /*free this entry*/
++ kfree(entry);
++ return 0;
++}
++
++static int
++psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct psb_gtt_mem_mapping **hentry)
++{
++ struct drm_hash_item *entry;
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ ret = drm_ht_find_item(ht, key, &entry);
++ if (ret) {
++ DRM_DEBUG("Cannot find key %ld\n", key);
++ return ret;
++ }
++
++ mapping = container_of(entry, struct psb_gtt_mem_mapping, item);
++ if (!mapping) {
++ DRM_DEBUG("Invalid entry\n");
++ return -EINVAL;
++ }
++
++ *hentry = mapping;
++ return 0;
++}
++
++static int
++psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct psb_gtt_mem_mapping *hentry)
++{
++ struct drm_hash_item *item;
++ struct psb_gtt_hash_entry *entry;
++ int ret;
++
++ if (!hentry) {
++ DRM_DEBUG("hentry is NULL\n");
++ return -EINVAL;
++ }
++
++ item = &hentry->item;
++ item->key = key;
++
++ ret = drm_ht_insert_item(ht, item);
++ if (ret) {
++ DRM_DEBUG("insert_item failed\n");
++ return ret;
++ }
++
++ entry = container_of(ht, struct psb_gtt_hash_entry, ht);
++ if (entry)
++ entry->count++;
++
++ return 0;
++}
++
++static int
++psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm,
++ struct drm_open_hash *ht,
++ u32 key,
++ struct drm_mm_node *node,
++ struct psb_gtt_mem_mapping **entry)
++{
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ if (!node || !ht) {
++ DRM_DEBUG("parameter error\n");
++ return -EINVAL;
++ }
++
++ /*try to get this mem_map */
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping);
++ if (!ret) {
++ DRM_DEBUG("mapping entry for key %ld exists, entry %p\n",
++ key, mapping);
++ *entry = mapping;
++ spin_unlock(&mm->lock);
++ return 0;
++ }
++ spin_unlock(&mm->lock);
++
++ DRM_DEBUG("Mapping entry for key %ld doesn't exist, will create it\n",
++ key);
++
++ mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL);
++ if (!mapping) {
++ DRM_DEBUG("kmalloc failed\n");
++ return -ENOMEM;
++ }
++
++ mapping->node = node;
++
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping);
++ spin_unlock(&mm->lock);
++
++ if (!ret)
++ *entry = mapping;
++
++ return ret;
++}
++
++static struct psb_gtt_mem_mapping *
++psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash *ht, u32 key)
++{
++ struct psb_gtt_mem_mapping *tmp;
++ struct psb_gtt_hash_entry *entry;
++ int ret;
++
++ ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp);
++ if (ret) {
++ DRM_DEBUG("Cannot find key %ld\n", key);
++ return NULL;
++ }
++
++ drm_ht_remove_item(ht, &tmp->item);
++
++ entry = container_of(ht, struct psb_gtt_hash_entry, ht);
++ if (entry)
++ entry->count--;
++
++ return tmp;
++}
++
++static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct drm_mm_node **node)
++{
++ struct psb_gtt_mem_mapping *entry;
++
++ entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key);
++ if (!entry) {
++ DRM_DEBUG("entry is NULL\n");
++ return -EINVAL;
++ }
++
++ *node = entry->node;
++
++ kfree(entry);
++ return 0;
++}
++
++static int psb_gtt_add_node(struct psb_gtt_mm *mm,
++ u32 tgid,
++ u32 key,
++ struct drm_mm_node *node,
++ struct psb_gtt_mem_mapping **entry)
++{
++ struct psb_gtt_hash_entry *hentry;
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry);
++ if (ret) {
++ DRM_DEBUG("alloc_insert failed\n");
++ return ret;
++ }
++
++ ret = psb_gtt_mm_alloc_insert_mem_mapping(mm,
++ &hentry->ht,
++ key,
++ node,
++ &mapping);
++ if (ret) {
++ DRM_DEBUG("mapping alloc_insert failed\n");
++ return ret;
++ }
++
++ *entry = mapping;
++
++ return 0;
++}
++
++static int psb_gtt_remove_node(struct psb_gtt_mm *mm,
++ u32 tgid,
++ u32 key,
++ struct drm_mm_node **node)
++{
++ struct psb_gtt_hash_entry *hentry;
++ struct drm_mm_node *tmp;
++ int ret;
++
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry for pid %ld\n", tgid);
++ spin_unlock(&mm->lock);
++ return ret;
++ }
++ spin_unlock(&mm->lock);
++
++ /*remove mapping entry*/
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht,
++ key,
++ &tmp);
++ if (ret) {
++ DRM_DEBUG("remove_free failed\n");
++ spin_unlock(&mm->lock);
++ return ret;
++ }
++
++ *node = tmp;
++
++ /*check the count of mapping entry*/
++ if (!hentry->count) {
++ DRM_DEBUG("count of mapping entry is zero, tgid=%ld\n", tgid);
++ psb_gtt_mm_remove_free_ht_locked(mm, tgid);
++ }
++
++ spin_unlock(&mm->lock);
++
++ return 0;
++}
++
++static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm,
++ uint32_t pages,
++ uint32_t align,
++ struct drm_mm_node **node)
++{
++ struct drm_mm_node *tmp_node;
++ int ret;
++
++ do {
++ ret = drm_mm_pre_get(&mm->base);
++ if (unlikely(ret)) {
++ DRM_DEBUG("drm_mm_pre_get error\n");
++ return ret;
++ }
++
++ spin_lock(&mm->lock);
++ tmp_node = drm_mm_search_free(&mm->base, pages, align, 1);
++ if (unlikely(!tmp_node)) {
++ DRM_DEBUG("No free node found\n");
++ spin_unlock(&mm->lock);
++ break;
++ }
++
++ tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align);
++ spin_unlock(&mm->lock);
++ } while (!tmp_node);
++
++ if (!tmp_node) {
++ DRM_DEBUG("Node allocation failed\n");
++ return -ENOMEM;
++ }
++
++ *node = tmp_node;
++ return 0;
++}
++
++static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node)
++{
++ spin_lock(&mm->lock);
++ drm_mm_put_block(node);
++ spin_unlock(&mm->lock);
++}
++
++int psb_gtt_map_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo,
++ uint32_t *offset)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct psb_gtt_mm *mm = dev_priv->gtt_mm;
++ struct psb_gtt *pg = dev_priv->pg;
++ uint32_t size, pages, offset_pages;
++ void *kmem;
++ struct drm_mm_node *node;
++ struct page **page_list;
++ struct psb_gtt_mem_mapping *mapping = NULL;
++ int ret;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++ DRM_DEBUG("Cannot find kernelMemInfo handle %ld\n",
++ hKernelMemInfo);
++ return -EINVAL;
++ }
++
++ DRM_DEBUG("Got psKernelMemInfo %p for handle %lx\n",
++ psKernelMemInfo, (u32)hKernelMemInfo);
++
++ size = psKernelMemInfo->ui32AllocSize;
++ kmem = psKernelMemInfo->pvLinAddrKM;
++ pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ DRM_DEBUG("KerMemInfo size %ld, cpuVadr %lx, pages %ld, osMemHdl %lx\n",
++ size, kmem, pages, psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++ if (!kmem)
++ DRM_DEBUG("kmem is NULL");
++
++ /*get pages*/
++ ret = psb_get_pages_by_mem_handle(psKernelMemInfo->sMemBlk.hOSMemHandle,
++ &page_list);
++ if (ret) {
++ DRM_DEBUG("get pages error\n");
++ return ret;
++ }
++
++ DRM_DEBUG("get %ld pages\n", pages);
++
++ /*alloc memory in TT aperture*/
++ ret = psb_gtt_mm_alloc_mem(mm, pages, 0, &node);
++ if (ret) {
++ DRM_DEBUG("alloc TT memory error\n");
++ goto failed_pages_alloc;
++ }
++
++ /*update psb_gtt_mm*/
++ ret = psb_gtt_add_node(mm,
++ (u32)psb_get_tgid(),
++ (u32)hKernelMemInfo,
++ node,
++ &mapping);
++ if (ret) {
++ DRM_DEBUG("add_node failed");
++ goto failed_add_node;
++ }
++
++ node = mapping->node;
++ offset_pages = node->start;
++
++ DRM_DEBUG("get free node for %ld pages, offset %ld pages",
++ pages, offset_pages);
++
++ /*update gtt*/
++ psb_gtt_insert_pages(pg, page_list,
++ (unsigned)offset_pages,
++ (unsigned)pages,
++ 0,
++ 0,
++ 0);
++
++ *offset = offset_pages;
++ return 0;
++
++failed_add_node:
++ psb_gtt_mm_free_mem(mm, node);
++failed_pages_alloc:
++ kfree(page_list);
++ return ret;
++}
++
++int psb_gtt_unmap_meminfo(struct drm_device *dev, IMG_HANDLE hKernelMemInfo)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm *mm = dev_priv->gtt_mm;
++ struct psb_gtt *pg = dev_priv->pg;
++ uint32_t pages, offset_pages;
++ struct drm_mm_node *node;
++ int ret;
++
++ ret = psb_gtt_remove_node(mm,
++ (u32)psb_get_tgid(),
++ (u32)hKernelMemInfo,
++ &node);
++ if (ret) {
++ DRM_DEBUG("remove node failed\n");
++ return ret;
++ }
++
++ /*remove gtt entries*/
++ offset_pages = node->start;
++ pages = node->size;
++
++ psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0);
++
++
++ /*free tt node*/
++
++ psb_gtt_mm_free_mem(mm, node);
++ return 0;
++}
++
++int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct psb_gtt_mapping_arg *arg
++ = (struct psb_gtt_mapping_arg *)data;
++ uint32_t *offset_pages = &arg->offset_pages;
++
++ DRM_DEBUG("\n");
++
++ return psb_gtt_map_meminfo(dev, arg->hKernelMemInfo, offset_pages);
++}
++
++int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++
++ struct psb_gtt_mapping_arg *arg
++ = (struct psb_gtt_mapping_arg *)data;
++
++ DRM_DEBUG("\n");
++
++ return psb_gtt_unmap_meminfo(dev, arg->hKernelMemInfo);
++}
++
++int psb_gtt_map_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId,
++ IMG_CPU_PHYADDR *pPages,
++ unsigned int ui32PagesNum,
++ unsigned int *ui32Offset)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm * mm = dev_priv->gtt_mm;
++ struct psb_gtt * pg = dev_priv->pg;
++
++ uint32_t size, pages, offset_pages;
++ struct drm_mm_node * node = NULL;
++ struct psb_gtt_mem_mapping * mapping = NULL;
++ int ret;
++
++ size = ui32PagesNum * PAGE_SIZE;
++ pages = 0;
++
++ /*alloc memory in TT aperture*/
++ ret = psb_gtt_mm_alloc_mem(mm, ui32PagesNum, 0, &node);
++ if(ret)
++ {
++ DRM_DEBUG("alloc TT memory error\n");
++ goto failed_pages_alloc;
++ }
++
++ /*update psb_gtt_mm*/
++ ret = psb_gtt_add_node(mm,
++ (u32)ui32TaskId,
++ (u32)hHandle,
++ node,
++ &mapping);
++ if(ret)
++ {
++ DRM_DEBUG("add_node failed");
++ goto failed_add_node;
++ }
++
++ node = mapping->node;
++ offset_pages = node->start;
++
++ DRM_DEBUG("get free node for %ld pages, offset %ld pages", pages, offset_pages);
++
++ /*update gtt*/
++ psb_gtt_insert_phys_addresses( pg, pPages, (unsigned)offset_pages, (unsigned)ui32PagesNum, 0 );
++
++ *ui32Offset = offset_pages;
++ return 0;
++
++failed_add_node:
++ psb_gtt_mm_free_mem(mm, node);
++failed_pages_alloc:
++ return ret;
++}
++
++
++int psb_gtt_unmap_pvr_memory(struct drm_device *dev, unsigned int hHandle, unsigned int ui32TaskId)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm * mm = dev_priv->gtt_mm;
++ struct psb_gtt * pg = dev_priv->pg;
++ uint32_t pages, offset_pages;
++ struct drm_mm_node * node;
++ int ret;
++
++ ret = psb_gtt_remove_node(mm,
++ (u32)ui32TaskId,
++ (u32)hHandle,
++ &node);
++ if(ret)
++ {
++ printk("remove node failed\n");
++ return ret;
++ }
++
++ /*remove gtt entries*/
++ offset_pages = node->start;
++ pages = node->size;
++
++ psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0);
++
++ /*free tt node*/
++ psb_gtt_mm_free_mem(mm, node);
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_gtt.h b/drivers/gpu/drm/mrst/drv/psb_gtt.h
+new file mode 100644
+index 0000000..ab19989
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_gtt.h
+@@ -0,0 +1,111 @@
++/**************************************************************************
++ * Copyright (c) 2007-2008, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_GTT_H_
++#define _PSB_GTT_H_
++
++#include <drm/drmP.h>
++
++#include "img_types.h"
++
++struct psb_gtt {
++ struct drm_device *dev;
++ int initialized;
++ uint32_t gatt_start;
++ uint32_t mmu_gatt_start;
++ uint32_t ci_start;
++ uint32_t rar_start;
++ uint32_t gtt_start;
++ uint32_t gtt_phys_start;
++ unsigned gtt_pages;
++ unsigned gatt_pages;
++ uint32_t stolen_base;
++ void *vram_addr;
++ uint32_t pge_ctl;
++ u16 gmch_ctrl;
++ unsigned long stolen_size;
++ unsigned long vram_stolen_size;
++ unsigned long ci_stolen_size;
++ unsigned long rar_stolen_size;
++ uint32_t *gtt_map;
++ struct rw_semaphore sem;
++};
++
++struct psb_gtt_mm {
++ struct drm_mm base;
++ struct drm_open_hash hash;
++ uint32_t count;
++ spinlock_t lock;
++};
++
++struct psb_gtt_hash_entry {
++ struct drm_open_hash ht;
++ uint32_t count;
++ struct drm_hash_item item;
++};
++
++struct psb_gtt_mem_mapping {
++ struct drm_mm_node *node;
++ struct drm_hash_item item;
++};
++
++#if 0
++/*Ioctl args*/
++struct psb_gtt_mapping_arg {
++ IMG_HANDLE hKernelMemInfo;
++};
++#endif
++
++/*Exported functions*/
++extern int psb_gtt_init(struct psb_gtt *pg, int resume);
++extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
++ unsigned offset_pages, unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride, int type);
++extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
++ unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride);
++
++extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
++extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
++extern int psb_gtt_map_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo,
++ uint32_t *offset);
++extern int psb_gtt_unmap_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo);
++extern int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_gtt_mm_init(struct psb_gtt *pg);
++extern void psb_gtt_mm_takedown(void);
++
++extern int psb_gtt_map_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId,
++ IMG_CPU_PHYADDR *pPages,
++ unsigned int ui32PagesNum,
++ unsigned int *ui32Offset);
++
++extern int psb_gtt_unmap_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_hotplug.c b/drivers/gpu/drm/mrst/drv/psb_hotplug.c
+new file mode 100644
+index 0000000..d50fd83
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_hotplug.c
+@@ -0,0 +1,425 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#include "psb_umevents.h"
++#include "psb_hotplug.h"
++/**
++ * inform the kernel of the work to be performed and related function.
++ *
++ */
++DECLARE_WORK(hotplug_dev_create_work, &psb_hotplug_dev_create_wq);
++DECLARE_WORK(hotplug_dev_remove_work, &psb_hotplug_dev_remove_wq);
++DECLARE_WORK(hotplug_dev_change_work, &psb_hotplug_dev_change_wq);
++/**
++ * psb_hotplug_notify_change_um - notify user mode of hotplug changes
++ *
++ * @name: name of event to notify user mode of change to
++ * @state: hotplug state to search for event object in
++ *
++ */
++int psb_hotplug_notify_change_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_change_wq_data.dev_name_arry
++ [state->hotplug_change_wq_data.dev_name_write][0]), name);
++ state->hotplug_change_wq_data.dev_name_arry_rw_status
++ [state->hotplug_change_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_change_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_change_wq_data.dev_name_write++;
++ if (state->hotplug_change_wq_data.dev_name_write ==
++ state->hotplug_change_wq_data.dev_name_read) {
++ state->hotplug_change_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_change_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_change_wq_data.dev_name_write = 0;
++ state->hotplug_change_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_change_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_change_wq_data.work));
++ return IRQ_HANDLED;
++}
++/**
++ *
++ * psb_hotplug_create_and_notify_um - create and notify user mode of new dev
++ *
++ * @name: name to give for new event / device
++ * @state: hotplug state to track new event /device in
++ *
++ */
++int psb_hotplug_create_and_notify_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_create_wq_data.dev_name_arry
++ [state->hotplug_create_wq_data.dev_name_write][0]), name);
++ state->hotplug_create_wq_data.dev_name_arry_rw_status
++ [state->hotplug_create_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_create_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_create_wq_data.dev_name_write++;
++ if (state->hotplug_create_wq_data.dev_name_write ==
++ state->hotplug_create_wq_data.dev_name_read) {
++ state->hotplug_create_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_create_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_create_wq_data.dev_name_write = 0;
++ state->hotplug_create_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_create_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_create_wq_data.work));
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(psb_hotplug_create_and_notify_um);
++/**
++ * psb_hotplug_remove_and_notify_um - remove device and notify user mode
++ *
++ * @name: name of event / device to remove
++ * @state: hotplug state to remove event / device from
++ *
++ */
++int psb_hotplug_remove_and_notify_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_remove_wq_data.dev_name_arry
++ [state->hotplug_remove_wq_data.dev_name_write][0]), name);
++ state->hotplug_remove_wq_data.dev_name_arry_rw_status
++ [state->hotplug_remove_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_remove_wq_data.dev_name_write++;
++ if (state->hotplug_remove_wq_data.dev_name_write ==
++ state->hotplug_remove_wq_data.dev_name_read) {
++ state->hotplug_remove_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_remove_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_remove_wq_data.dev_name_write = 0;
++ state->hotplug_remove_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_remove_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_remove_wq_data.work));
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(psb_hotplug_remove_and_notify_um);
++/**
++ * psb_hotplug_device_pool_create_and_init - make new hotplug device pool
++ *
++ * @parent_kobj: parent kobject to associate hotplug kset with
++ * @state: hotplug state to associate workqueues with
++ *
++ */
++struct umevent_list *psb_hotplug_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct hotplug_state *state)
++{
++ struct umevent_list *new_hotplug_dev_list = NULL;
++
++ new_hotplug_dev_list = psb_umevent_create_list();
++ if (new_hotplug_dev_list)
++ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
++ "psb_hotplug");
++
++ state->hotplug_wq = create_singlethread_workqueue("hotplug-wq");
++ if (!state->hotplug_wq)
++ return NULL;
++
++ INIT_WORK(&state->hotplug_create_wq_data.work,
++ psb_hotplug_dev_create_wq);
++ INIT_WORK(&state->hotplug_remove_wq_data.work,
++ psb_hotplug_dev_remove_wq);
++ INIT_WORK(&state->hotplug_change_wq_data.work,
++ psb_hotplug_dev_change_wq);
++
++ state->hotplug_create_wq_data.dev_name_read = 0;
++ state->hotplug_create_wq_data.dev_name_write = 0;
++ state->hotplug_create_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_create_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ state->hotplug_remove_wq_data.dev_name_read = 0;
++ state->hotplug_remove_wq_data.dev_name_write = 0;
++ state->hotplug_remove_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_remove_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ state->hotplug_change_wq_data.dev_name_read = 0;
++ state->hotplug_change_wq_data.dev_name_write = 0;
++ state->hotplug_change_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_change_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ return new_hotplug_dev_list;
++}
++EXPORT_SYMBOL(psb_hotplug_device_pool_create_and_init);
++/**
++ *
++ * psb_hotplug_init - init hotplug subsystem
++ *
++ * @parent_kobj: parent kobject to associate hotplug state with
++ *
++ */
++struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj)
++{
++ struct hotplug_state *state;
++ state = kzalloc(sizeof(struct hotplug_state), GFP_KERNEL);
++ state->list = NULL;
++ state->list = psb_hotplug_device_pool_create_and_init(
++ parent_kobj,
++ state);
++ return state;
++}
++/**
++ * psb_hotplug_device_pool_destroy - destroy all hotplug related resources
++ *
++ * @state: hotplug state to destroy
++ *
++ */
++void psb_hotplug_device_pool_destroy(struct hotplug_state *state)
++{
++ flush_workqueue(state->hotplug_wq);
++ destroy_workqueue(state->hotplug_wq);
++ psb_umevent_cleanup(state->list);
++ kfree(state);
++}
++EXPORT_SYMBOL(psb_hotplug_device_pool_destroy);
++/**
++ * psb_hotplug_dev_create_wq - create workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_create_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ struct umevent_obj *wq_working_hotplug_disp_obj;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++EXPORT_SYMBOL(psb_hotplug_dev_create_wq);
++/**
++ * psb_hotplug_dev_remove_wq - remove workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_remove_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++EXPORT_SYMBOL(psb_hotplug_dev_remove_wq);
++/**
++ * psb_hotplug_dev_change_wq - change workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_change_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ struct umevent_obj *wq_working_hotplug_disp_obj;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++EXPORT_SYMBOL(psb_hotplug_dev_change_wq);
+diff --git a/drivers/gpu/drm/mrst/drv/psb_hotplug.h b/drivers/gpu/drm/mrst/drv/psb_hotplug.h
+new file mode 100644
+index 0000000..b6e42a4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_hotplug.h
+@@ -0,0 +1,90 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#ifndef _PSB_HOTPLUG_H_
++#define _PSB_HOTPLUG_H_
++/**
++ * required includes
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * hotplug specific defines
++ *
++ */
++#define DRM_HOTPLUG_RING_DEPTH 256
++#define DRM_HOTPLUG_RING_DEPTH_MAX (DRM_HOTPLUG_RING_DEPTH-1)
++#define DRM_HOTPLUG_READY_TO_READ 1
++#define DRM_HOTPLUG_READ_COMPLETE 2
++/**
++ * hotplug workqueue data struct.
++ */
++struct hotplug_disp_workqueue_data {
++ struct work_struct work;
++ const char *dev_name;
++ int dev_name_write;
++ int dev_name_read;
++ int dev_name_write_wrap;
++ int dev_name_read_write_wrap_ack;
++ char dev_name_arry[DRM_HOTPLUG_RING_DEPTH][24];
++ int dev_name_arry_rw_status[DRM_HOTPLUG_RING_DEPTH];
++ struct umevent_list *hotplug_dev_list;
++};
++/**
++ * hotplug state structure
++ *
++ */
++struct hotplug_state {
++ struct workqueue_struct *hotplug_wq;
++ struct hotplug_disp_workqueue_data hotplug_remove_wq_data;
++ struct hotplug_disp_workqueue_data hotplug_create_wq_data;
++ struct hotplug_disp_workqueue_data hotplug_change_wq_data;
++ struct umevent_list *list;
++};
++/**
++ * main interface function prototypes for hotplug support.
++ *
++ */
++struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj);
++extern int psb_hotplug_notify_change_um(const char *name,
++ struct hotplug_state *state);
++extern int psb_hotplug_create_and_notify_um(const char *name,
++ struct hotplug_state *state);
++extern int psb_hotplug_remove_and_notify_um(const char *name,
++ struct hotplug_state *state);
++extern struct umevent_list *psb_hotplug_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct hotplug_state *state);
++extern void psb_hotplug_device_pool_destroy(struct hotplug_state *state);
++/**
++ * to go back and forth between work struct and workqueue data
++ *
++ */
++#define to_hotplug_disp_workqueue_data(x) \
++ container_of(x, struct hotplug_disp_workqueue_data, work)
++
++/**
++ * function prototypes for workqueue implementation
++ *
++ */
++extern void psb_hotplug_dev_create_wq(struct work_struct *work);
++extern void psb_hotplug_dev_remove_wq(struct work_struct *work);
++extern void psb_hotplug_dev_change_wq(struct work_struct *work);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_bios.c b/drivers/gpu/drm/mrst/drv/psb_intel_bios.c
+new file mode 100644
+index 0000000..e752bde
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_bios.c
+@@ -0,0 +1,305 @@
++/*
++ * Copyright (c) 2006 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_bios.h"
++
++
++static void *find_section(struct bdb_header *bdb, int section_id)
++{
++ u8 *base = (u8 *)bdb;
++ int index = 0;
++ u16 total, current_size;
++ u8 current_id;
++
++ /* skip to first section */
++ index += bdb->header_size;
++ total = bdb->bdb_size;
++
++ /* walk the sections looking for section_id */
++ while (index < total) {
++ current_id = *(base + index);
++ index++;
++ current_size = *((u16 *)(base + index));
++ index += 2;
++ if (current_id == section_id)
++ return base + index;
++ index += current_size;
++ }
++
++ return NULL;
++}
++
++static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
++ struct lvds_dvo_timing *dvo_timing)
++{
++ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
++ dvo_timing->hactive_lo;
++ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
++ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
++ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
++ dvo_timing->hsync_pulse_width;
++ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
++ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
++
++ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
++ dvo_timing->vactive_lo;
++ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
++ dvo_timing->vsync_off;
++ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
++ dvo_timing->vsync_pulse_width;
++ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
++ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
++ panel_fixed_mode->clock = dvo_timing->clock * 10;
++ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
++
++ /* Some VBTs have bogus h/vtotal values */
++ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
++ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
++ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
++ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
++
++ drm_mode_set_name(panel_fixed_mode);
++}
++
++static void parse_backlight_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
++ struct bdb_lvds_backlight *lvds_bl;
++ u8 p_type = 0;
++ void *bl_start = NULL;
++ struct bdb_lvds_options *lvds_opts
++ = find_section(bdb, BDB_LVDS_OPTIONS);
++
++ dev_priv->lvds_bl = NULL;
++
++ if (lvds_opts) {
++ DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
++ p_type = lvds_opts->panel_type;
++ } else {
++ DRM_DEBUG("no lvds_options\n");
++ return;
++ }
++
++ bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
++ vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
++
++ lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
++ if (!lvds_bl) {
++ DRM_DEBUG("No memory\n");
++ return;
++ }
++
++ memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
++
++ dev_priv->lvds_bl = lvds_bl;
++}
++
++/* Try to find integrated panel data */
++static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_lvds_options *lvds_options;
++ struct bdb_lvds_lfp_data *lvds_lfp_data;
++ struct bdb_lvds_lfp_data_entry *entry;
++ struct lvds_dvo_timing *dvo_timing;
++ struct drm_display_mode *panel_fixed_mode;
++
++ /* Defaults if we can't find VBT info */
++ dev_priv->lvds_dither = 0;
++ dev_priv->lvds_vbt = 0;
++
++ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
++ if (!lvds_options)
++ return;
++
++ dev_priv->lvds_dither = lvds_options->pixel_dither;
++ if (lvds_options->panel_type == 0xff)
++ return;
++
++ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
++ if (!lvds_lfp_data)
++ return;
++
++ dev_priv->lvds_vbt = 1;
++
++ entry = &lvds_lfp_data->data[lvds_options->panel_type];
++ dvo_timing = &entry->dvo_timing;
++
++ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
++ GFP_KERNEL);
++
++ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
++
++ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
++
++ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
++ drm_mode_debug_printmodeline(panel_fixed_mode);
++
++ return;
++}
++
++/* Try to find sdvo panel data */
++static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
++ struct lvds_dvo_timing *dvo_timing;
++ struct drm_display_mode *panel_fixed_mode;
++
++ dev_priv->sdvo_lvds_vbt_mode = NULL;
++
++ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
++ if (!sdvo_lvds_options)
++ return;
++
++ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
++ if (!dvo_timing)
++ return;
++
++ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
++
++ if (!panel_fixed_mode)
++ return;
++
++ fill_detail_timing_data(panel_fixed_mode,
++ dvo_timing + sdvo_lvds_options->panel_type);
++
++ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
++
++ return;
++}
++
++static void parse_general_features(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_general_features *general;
++
++ /* Set sensible defaults in case we can't find the general block */
++ dev_priv->int_tv_support = 1;
++ dev_priv->int_crt_support = 1;
++
++ general = find_section(bdb, BDB_GENERAL_FEATURES);
++ if (general) {
++ dev_priv->int_tv_support = general->int_tv_support;
++ dev_priv->int_crt_support = general->int_crt_support;
++ dev_priv->lvds_use_ssc = general->enable_ssc;
++
++ if (dev_priv->lvds_use_ssc) {
++ if (IS_I855(dev_priv->dev))
++ dev_priv->lvds_ssc_freq
++ = general->ssc_freq ? 66 : 48;
++ else
++ dev_priv->lvds_ssc_freq
++ = general->ssc_freq ? 100 : 96;
++ }
++ }
++}
++
++/**
++ * psb_intel_init_bios - initialize VBIOS settings & find VBT
++ * @dev: DRM device
++ *
++ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
++ * to appropriate values.
++ *
++ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
++ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
++ * feed an updated VBT back through that, compared to what we'll fetch using
++ * this method of groping around in the BIOS data.
++ *
++ * Returns 0 on success, nonzero on failure.
++ */
++bool psb_intel_init_bios(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pci_dev *pdev = dev->pdev;
++ struct vbt_header *vbt = NULL;
++ struct bdb_header *bdb;
++ u8 __iomem *bios;
++ size_t size;
++ int i;
++
++ bios = pci_map_rom(pdev, &size);
++ if (!bios)
++ return -1;
++
++ /* Scour memory looking for the VBT signature */
++ for (i = 0; i + 4 < size; i++) {
++ if (!memcmp(bios + i, "$VBT", 4)) {
++ vbt = (struct vbt_header *)(bios + i);
++ break;
++ }
++ }
++
++ if (!vbt) {
++ DRM_ERROR("VBT signature missing\n");
++ pci_unmap_rom(pdev, bios);
++ return -1;
++ }
++
++ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
++
++ /* Grab useful general definitions */
++ parse_general_features(dev_priv, bdb);
++ parse_lfp_panel_data(dev_priv, bdb);
++ parse_sdvo_panel_data(dev_priv, bdb);
++ parse_backlight_data(dev_priv, bdb);
++
++ pci_unmap_rom(pdev, bios);
++
++ return 0;
++}
++
++/**
++ * Destory and free VBT data
++ */
++void psb_intel_destory_bios(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_display_mode *sdvo_lvds_vbt_mode =
++ dev_priv->sdvo_lvds_vbt_mode;
++ struct drm_display_mode *lfp_lvds_vbt_mode =
++ dev_priv->lfp_lvds_vbt_mode;
++ struct bdb_lvds_backlight *lvds_bl =
++ dev_priv->lvds_bl;
++
++ /*free sdvo panel mode*/
++ if (sdvo_lvds_vbt_mode) {
++ dev_priv->sdvo_lvds_vbt_mode = NULL;
++ kfree(sdvo_lvds_vbt_mode);
++ }
++
++ if (lfp_lvds_vbt_mode) {
++ dev_priv->lfp_lvds_vbt_mode = NULL;
++ kfree(lfp_lvds_vbt_mode);
++ }
++
++ if (lvds_bl) {
++ dev_priv->lvds_bl = NULL;
++ kfree(lvds_bl);
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_bios.h b/drivers/gpu/drm/mrst/drv/psb_intel_bios.h
+new file mode 100644
+index 0000000..dfcae62
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_bios.h
+@@ -0,0 +1,430 @@
++/*
++ * Copyright (c) 2006 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#ifndef _I830_BIOS_H_
++#define _I830_BIOS_H_
++
++#include <drm/drmP.h>
++
++struct vbt_header {
++ u8 signature[20]; /**< Always starts with 'VBT$' */
++ u16 version; /**< decimal */
++ u16 header_size; /**< in bytes */
++ u16 vbt_size; /**< in bytes */
++ u8 vbt_checksum;
++ u8 reserved0;
++ u32 bdb_offset; /**< from beginning of VBT */
++ u32 aim_offset[4]; /**< from beginning of VBT */
++} __attribute__((packed));
++
++
++struct bdb_header {
++ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
++ u16 version; /**< decimal */
++ u16 header_size; /**< in bytes */
++ u16 bdb_size; /**< in bytes */
++};
++
++/* strictly speaking, this is a "skip" block, but it has interesting info */
++struct vbios_data {
++ u8 type; /* 0 == desktop, 1 == mobile */
++ u8 relstage;
++ u8 chipset;
++ u8 lvds_present:1;
++ u8 tv_present:1;
++ u8 rsvd2:6; /* finish byte */
++ u8 rsvd3[4];
++ u8 signon[155];
++ u8 copyright[61];
++ u16 code_segment;
++ u8 dos_boot_mode;
++ u8 bandwidth_percent;
++ u8 rsvd4; /* popup memory size */
++ u8 resize_pci_bios;
++ u8 rsvd5; /* is crt already on ddc2 */
++} __attribute__((packed));
++
++/*
++ * There are several types of BIOS data blocks (BDBs), each block has
++ * an ID and size in the first 3 bytes (ID in first, size in next 2).
++ * Known types are listed below.
++ */
++#define BDB_GENERAL_FEATURES 1
++#define BDB_GENERAL_DEFINITIONS 2
++#define BDB_OLD_TOGGLE_LIST 3
++#define BDB_MODE_SUPPORT_LIST 4
++#define BDB_GENERIC_MODE_TABLE 5
++#define BDB_EXT_MMIO_REGS 6
++#define BDB_SWF_IO 7
++#define BDB_SWF_MMIO 8
++#define BDB_DOT_CLOCK_TABLE 9
++#define BDB_MODE_REMOVAL_TABLE 10
++#define BDB_CHILD_DEVICE_TABLE 11
++#define BDB_DRIVER_FEATURES 12
++#define BDB_DRIVER_PERSISTENCE 13
++#define BDB_EXT_TABLE_PTRS 14
++#define BDB_DOT_CLOCK_OVERRIDE 15
++#define BDB_DISPLAY_SELECT 16
++/* 17 rsvd */
++#define BDB_DRIVER_ROTATION 18
++#define BDB_DISPLAY_REMOVE 19
++#define BDB_OEM_CUSTOM 20
++#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
++#define BDB_SDVO_LVDS_OPTIONS 22
++#define BDB_SDVO_PANEL_DTDS 23
++#define BDB_SDVO_LVDS_PNP_IDS 24
++#define BDB_SDVO_LVDS_POWER_SEQ 25
++#define BDB_TV_OPTIONS 26
++#define BDB_LVDS_OPTIONS 40
++#define BDB_LVDS_LFP_DATA_PTRS 41
++#define BDB_LVDS_LFP_DATA 42
++#define BDB_LVDS_BACKLIGHT 43
++#define BDB_LVDS_POWER 44
++#define BDB_SKIP 254 /* VBIOS private block, ignore */
++
++struct bdb_general_features {
++ /* bits 1 */
++ u8 panel_fitting:2;
++ u8 flexaim:1;
++ u8 msg_enable:1;
++ u8 clear_screen:3;
++ u8 color_flip:1;
++
++ /* bits 2 */
++ u8 download_ext_vbt:1;
++ u8 enable_ssc:1;
++ u8 ssc_freq:1;
++ u8 enable_lfp_on_override:1;
++ u8 disable_ssc_ddt:1;
++ u8 rsvd8:3; /* finish byte */
++
++ /* bits 3 */
++ u8 disable_smooth_vision:1;
++ u8 single_dvi:1;
++ u8 rsvd9:6; /* finish byte */
++
++ /* bits 4 */
++ u8 legacy_monitor_detect;
++
++ /* bits 5 */
++ u8 int_crt_support:1;
++ u8 int_tv_support:1;
++ u8 rsvd11:6; /* finish byte */
++} __attribute__((packed));
++
++struct bdb_general_definitions {
++ /* DDC GPIO */
++ u8 crt_ddc_gmbus_pin;
++
++ /* DPMS bits */
++ u8 dpms_acpi:1;
++ u8 skip_boot_crt_detect:1;
++ u8 dpms_aim:1;
++ u8 rsvd1:5; /* finish byte */
++
++ /* boot device bits */
++ u8 boot_display[2];
++ u8 child_dev_size;
++
++ /* device info */
++ u8 tv_or_lvds_info[33];
++ u8 dev1[33];
++ u8 dev2[33];
++ u8 dev3[33];
++ u8 dev4[33];
++ /* may be another device block here on some platforms */
++};
++
++struct bdb_lvds_options {
++ u8 panel_type;
++ u8 rsvd1;
++ /* LVDS capabilities, stored in a dword */
++ u8 pfit_mode:2;
++ u8 pfit_text_mode_enhanced:1;
++ u8 pfit_gfx_mode_enhanced:1;
++ u8 pfit_ratio_auto:1;
++ u8 pixel_dither:1;
++ u8 lvds_edid:1;
++ u8 rsvd2:1;
++ u8 rsvd4;
++} __attribute__((packed));
++
++struct bdb_lvds_backlight {
++ u8 type:2;
++ u8 pol:1;
++ u8 gpio:3;
++ u8 gmbus:2;
++ u16 freq;
++ u8 minbrightness;
++ u8 i2caddr;
++ u8 brightnesscmd;
++ /*FIXME: more...*/
++} __attribute__((packed));
++
++/* LFP pointer table contains entries to the struct below */
++struct bdb_lvds_lfp_data_ptr {
++ u16 fp_timing_offset; /* offsets are from start of bdb */
++ u8 fp_table_size;
++ u16 dvo_timing_offset;
++ u8 dvo_table_size;
++ u16 panel_pnp_id_offset;
++ u8 pnp_table_size;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data_ptrs {
++ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
++ struct bdb_lvds_lfp_data_ptr ptr[16];
++} __attribute__((packed));
++
++/* LFP data has 3 blocks per entry */
++struct lvds_fp_timing {
++ u16 x_res;
++ u16 y_res;
++ u32 lvds_reg;
++ u32 lvds_reg_val;
++ u32 pp_on_reg;
++ u32 pp_on_reg_val;
++ u32 pp_off_reg;
++ u32 pp_off_reg_val;
++ u32 pp_cycle_reg;
++ u32 pp_cycle_reg_val;
++ u32 pfit_reg;
++ u32 pfit_reg_val;
++ u16 terminator;
++} __attribute__((packed));
++
++struct lvds_dvo_timing {
++ u16 clock; /**< In 10khz */
++ u8 hactive_lo;
++ u8 hblank_lo;
++ u8 hblank_hi:4;
++ u8 hactive_hi:4;
++ u8 vactive_lo;
++ u8 vblank_lo;
++ u8 vblank_hi:4;
++ u8 vactive_hi:4;
++ u8 hsync_off_lo;
++ u8 hsync_pulse_width;
++ u8 vsync_pulse_width:4;
++ u8 vsync_off:4;
++ u8 rsvd0:6;
++ u8 hsync_off_hi:2;
++ u8 h_image;
++ u8 v_image;
++ u8 max_hv;
++ u8 h_border;
++ u8 v_border;
++ u8 rsvd1:3;
++ u8 digital:2;
++ u8 vsync_positive:1;
++ u8 hsync_positive:1;
++ u8 rsvd2:1;
++} __attribute__((packed));
++
++struct lvds_pnp_id {
++ u16 mfg_name;
++ u16 product_code;
++ u32 serial;
++ u8 mfg_week;
++ u8 mfg_year;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data_entry {
++ struct lvds_fp_timing fp_timing;
++ struct lvds_dvo_timing dvo_timing;
++ struct lvds_pnp_id pnp_id;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data {
++ struct bdb_lvds_lfp_data_entry data[16];
++} __attribute__((packed));
++
++struct aimdb_header {
++ char signature[16];
++ char oem_device[20];
++ u16 aimdb_version;
++ u16 aimdb_header_size;
++ u16 aimdb_size;
++} __attribute__((packed));
++
++struct aimdb_block {
++ u8 aimdb_id;
++ u16 aimdb_size;
++} __attribute__((packed));
++
++struct vch_panel_data {
++ u16 fp_timing_offset;
++ u8 fp_timing_size;
++ u16 dvo_timing_offset;
++ u8 dvo_timing_size;
++ u16 text_fitting_offset;
++ u8 text_fitting_size;
++ u16 graphics_fitting_offset;
++ u8 graphics_fitting_size;
++} __attribute__((packed));
++
++struct vch_bdb_22 {
++ struct aimdb_block aimdb_block;
++ struct vch_panel_data panels[16];
++} __attribute__((packed));
++
++struct bdb_sdvo_lvds_options {
++ u8 panel_backlight;
++ u8 h40_set_panel_type;
++ u8 panel_type;
++ u8 ssc_clk_freq;
++ u16 als_low_trip;
++ u16 als_high_trip;
++ u8 sclalarcoeff_tab_row_num;
++ u8 sclalarcoeff_tab_row_size;
++ u8 coefficient[8];
++ u8 panel_misc_bits_1;
++ u8 panel_misc_bits_2;
++ u8 panel_misc_bits_3;
++ u8 panel_misc_bits_4;
++} __attribute__((packed));
++
++
++extern bool psb_intel_init_bios(struct drm_device *dev);
++extern void psb_intel_destory_bios(struct drm_device *dev);
++
++/*
++ * Driver<->VBIOS interaction occurs through scratch bits in
++ * GR18 & SWF*.
++ */
++
++/* GR18 bits are set on display switch and hotkey events */
++#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
++#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
++#define GR18_HK_NONE (0x0<<3)
++#define GR18_HK_LFP_STRETCH (0x1<<3)
++#define GR18_HK_TOGGLE_DISP (0x2<<3)
++#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
++#define GR18_HK_POPUP_DISABLED (0x6<<3)
++#define GR18_HK_POPUP_ENABLED (0x7<<3)
++#define GR18_HK_PFIT (0x8<<3)
++#define GR18_HK_APM_CHANGE (0xa<<3)
++#define GR18_HK_MULTIPLE (0xc<<3)
++#define GR18_USER_INT_EN (1<<2)
++#define GR18_A0000_FLUSH_EN (1<<1)
++#define GR18_SMM_EN (1<<0)
++
++/* Set by driver, cleared by VBIOS */
++#define SWF00_YRES_SHIFT 16
++#define SWF00_XRES_SHIFT 0
++#define SWF00_RES_MASK 0xffff
++
++/* Set by VBIOS at boot time and driver at runtime */
++#define SWF01_TV2_FORMAT_SHIFT 8
++#define SWF01_TV1_FORMAT_SHIFT 0
++#define SWF01_TV_FORMAT_MASK 0xffff
++
++#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
++#define SWF10_GTT_OVERRIDE_EN (1<<28)
++#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
++#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
++#define SWF10_OLD_TOGGLE 0x0
++#define SWF10_TOGGLE_LIST_1 0x1
++#define SWF10_TOGGLE_LIST_2 0x2
++#define SWF10_TOGGLE_LIST_3 0x3
++#define SWF10_TOGGLE_LIST_4 0x4
++#define SWF10_PANNING_EN (1<<23)
++#define SWF10_DRIVER_LOADED (1<<22)
++#define SWF10_EXTENDED_DESKTOP (1<<21)
++#define SWF10_EXCLUSIVE_MODE (1<<20)
++#define SWF10_OVERLAY_EN (1<<19)
++#define SWF10_PLANEB_HOLDOFF (1<<18)
++#define SWF10_PLANEA_HOLDOFF (1<<17)
++#define SWF10_VGA_HOLDOFF (1<<16)
++#define SWF10_ACTIVE_DISP_MASK 0xffff
++#define SWF10_PIPEB_LFP2 (1<<15)
++#define SWF10_PIPEB_EFP2 (1<<14)
++#define SWF10_PIPEB_TV2 (1<<13)
++#define SWF10_PIPEB_CRT2 (1<<12)
++#define SWF10_PIPEB_LFP (1<<11)
++#define SWF10_PIPEB_EFP (1<<10)
++#define SWF10_PIPEB_TV (1<<9)
++#define SWF10_PIPEB_CRT (1<<8)
++#define SWF10_PIPEA_LFP2 (1<<7)
++#define SWF10_PIPEA_EFP2 (1<<6)
++#define SWF10_PIPEA_TV2 (1<<5)
++#define SWF10_PIPEA_CRT2 (1<<4)
++#define SWF10_PIPEA_LFP (1<<3)
++#define SWF10_PIPEA_EFP (1<<2)
++#define SWF10_PIPEA_TV (1<<1)
++#define SWF10_PIPEA_CRT (1<<0)
++
++#define SWF11_MEMORY_SIZE_SHIFT 16
++#define SWF11_SV_TEST_EN (1<<15)
++#define SWF11_IS_AGP (1<<14)
++#define SWF11_DISPLAY_HOLDOFF (1<<13)
++#define SWF11_DPMS_REDUCED (1<<12)
++#define SWF11_IS_VBE_MODE (1<<11)
++#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
++#define SWF11_DPMS_MASK 0x07
++#define SWF11_DPMS_OFF (1<<2)
++#define SWF11_DPMS_SUSPEND (1<<1)
++#define SWF11_DPMS_STANDBY (1<<0)
++#define SWF11_DPMS_ON 0
++
++#define SWF14_GFX_PFIT_EN (1<<31)
++#define SWF14_TEXT_PFIT_EN (1<<30)
++#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
++#define SWF14_POPUP_EN (1<<28)
++#define SWF14_DISPLAY_HOLDOFF (1<<27)
++#define SWF14_DISP_DETECT_EN (1<<26)
++#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
++#define SWF14_DRIVER_STATUS (1<<24)
++#define SWF14_OS_TYPE_WIN9X (1<<23)
++#define SWF14_OS_TYPE_WINNT (1<<22)
++/* 21:19 rsvd */
++#define SWF14_PM_TYPE_MASK 0x00070000
++#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
++#define SWF14_PM_ACPI (0x3 << 16)
++#define SWF14_PM_APM_12 (0x2 << 16)
++#define SWF14_PM_APM_11 (0x1 << 16)
++#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
++ /* if GR18 indicates a display switch */
++#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
++#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
++#define SWF14_DS_PIPEB_TV2_EN (1<<13)
++#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
++#define SWF14_DS_PIPEB_LFP_EN (1<<11)
++#define SWF14_DS_PIPEB_EFP_EN (1<<10)
++#define SWF14_DS_PIPEB_TV_EN (1<<9)
++#define SWF14_DS_PIPEB_CRT_EN (1<<8)
++#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
++#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
++#define SWF14_DS_PIPEA_TV2_EN (1<<5)
++#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
++#define SWF14_DS_PIPEA_LFP_EN (1<<3)
++#define SWF14_DS_PIPEA_EFP_EN (1<<2)
++#define SWF14_DS_PIPEA_TV_EN (1<<1)
++#define SWF14_DS_PIPEA_CRT_EN (1<<0)
++ /* if GR18 indicates a panel fitting request */
++#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
++ /* if GR18 indicates an APM change request */
++#define SWF14_APM_HIBERNATE 0x4
++#define SWF14_APM_SUSPEND 0x3
++#define SWF14_APM_STANDBY 0x1
++#define SWF14_APM_RESTORE 0x0
++
++#endif /* _I830_BIOS_H_ */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_display.c b/drivers/gpu/drm/mrst/drv/psb_intel_display.c
+new file mode 100644
+index 0000000..10c6dec
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_display.c
+@@ -0,0 +1,2538 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++
++#include <drm/drmP.h>
++#include "psb_fb.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_display.h"
++#include "ospm_power.h"
++
++struct psb_intel_clock_t {
++ /* given values */
++ int n;
++ int m1, m2;
++ int p1, p2;
++ /* derived values */
++ int dot;
++ int vco;
++ int m;
++ int p;
++};
++
++struct psb_intel_range_t {
++ int min, max;
++};
++
++struct psb_intel_p2_t {
++ int dot_limit;
++ int p2_slow, p2_fast;
++};
++
++#define INTEL_P2_NUM 2
++
++struct psb_intel_limit_t {
++ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
++ struct psb_intel_p2_t p2;
++};
++
++#define I8XX_DOT_MIN 25000
++#define I8XX_DOT_MAX 350000
++#define I8XX_VCO_MIN 930000
++#define I8XX_VCO_MAX 1400000
++#define I8XX_N_MIN 3
++#define I8XX_N_MAX 16
++#define I8XX_M_MIN 96
++#define I8XX_M_MAX 140
++#define I8XX_M1_MIN 18
++#define I8XX_M1_MAX 26
++#define I8XX_M2_MIN 6
++#define I8XX_M2_MAX 16
++#define I8XX_P_MIN 4
++#define I8XX_P_MAX 128
++#define I8XX_P1_MIN 2
++#define I8XX_P1_MAX 33
++#define I8XX_P1_LVDS_MIN 1
++#define I8XX_P1_LVDS_MAX 6
++#define I8XX_P2_SLOW 4
++#define I8XX_P2_FAST 2
++#define I8XX_P2_LVDS_SLOW 14
++#define I8XX_P2_LVDS_FAST 14 /* No fast option */
++#define I8XX_P2_SLOW_LIMIT 165000
++
++#define I9XX_DOT_MIN 20000
++#define I9XX_DOT_MAX 400000
++#define I9XX_VCO_MIN 1400000
++#define I9XX_VCO_MAX 2800000
++#define I9XX_N_MIN 3
++#define I9XX_N_MAX 8
++#define I9XX_M_MIN 70
++#define I9XX_M_MAX 120
++#define I9XX_M1_MIN 10
++#define I9XX_M1_MAX 20
++#define I9XX_M2_MIN 5
++#define I9XX_M2_MAX 9
++#define I9XX_P_SDVO_DAC_MIN 5
++#define I9XX_P_SDVO_DAC_MAX 80
++#define I9XX_P_LVDS_MIN 7
++#define I9XX_P_LVDS_MAX 98
++#define I9XX_P1_MIN 1
++#define I9XX_P1_MAX 8
++#define I9XX_P2_SDVO_DAC_SLOW 10
++#define I9XX_P2_SDVO_DAC_FAST 5
++#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
++#define I9XX_P2_LVDS_SLOW 14
++#define I9XX_P2_LVDS_FAST 7
++#define I9XX_P2_LVDS_SLOW_LIMIT 112000
++
++#define INTEL_LIMIT_I8XX_DVO_DAC 0
++#define INTEL_LIMIT_I8XX_LVDS 1
++#define INTEL_LIMIT_I9XX_SDVO_DAC 2
++#define INTEL_LIMIT_I9XX_LVDS 3
++
++static const struct psb_intel_limit_t psb_intel_limits[] = {
++ { /* INTEL_LIMIT_I8XX_DVO_DAC */
++ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
++ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
++ },
++ { /* INTEL_LIMIT_I8XX_LVDS */
++ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
++ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
++ },
++ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
++ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
++ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
++ I9XX_P2_SDVO_DAC_FAST},
++ },
++ { /* INTEL_LIMIT_I9XX_LVDS */
++ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
++ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++ /* The single-channel range is 25-112Mhz, and dual-channel
++ * is 80-224Mhz. Prefer single channel as much as possible.
++ */
++ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
++ },
++};
++
++static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ const struct psb_intel_limit_t *limit;
++
++ if (IS_I9XX(dev)) {
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
++ else
++ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
++ } else {
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS];
++ else
++ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
++ }
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++
++static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / (clock->n + 2);
++ clock->dot = clock->vco / clock->p;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
++
++static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / (clock->n + 2);
++ clock->dot = clock->vco / clock->p;
++}
++
++static void psb_intel_clock(struct drm_device *dev, int refclk,
++ struct psb_intel_clock_t *clock)
++{
++ if (IS_I9XX(dev))
++ return i9xx_clock(refclk, clock);
++ else
++ return i8xx_clock(refclk, clock);
++}
++
++/**
++ * Returns whether any output on the specified pipe is of the specified type
++ */
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *l_entry;
++
++ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
++ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(l_entry);
++ if (psb_intel_output->type == type)
++ return true;
++ }
++ }
++ return false;
++}
++
++#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
++/**
++ * Returns whether the given set of divisors are valid for a given refclk with
++ * the given connectors.
++ */
++
++static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
++ struct psb_intel_clock_t *clock)
++{
++ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++
++ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
++ INTELPllInvalid("p1 out of range\n");
++ if (clock->p < limit->p.min || limit->p.max < clock->p)
++ INTELPllInvalid("p out of range\n");
++ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
++ INTELPllInvalid("m2 out of range\n");
++ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
++ INTELPllInvalid("m1 out of range\n");
++ if (clock->m1 <= clock->m2)
++ INTELPllInvalid("m1 <= m2\n");
++ if (clock->m < limit->m.min || limit->m.max < clock->m)
++ INTELPllInvalid("m out of range\n");
++ if (clock->n < limit->n.min || limit->n.max < clock->n)
++ INTELPllInvalid("n out of range\n");
++ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
++ INTELPllInvalid("vco out of range\n");
++ /* XXX: We may need to be checking "Dot clock"
++ * depending on the multiplier, connector, etc.,
++ * rather than just a single range.
++ */
++ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
++ INTELPllInvalid("dot out of range\n");
++
++ return true;
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given
++ * refclk, or FALSE. The returned values represent the clock equation:
++ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
++ */
++static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
++ int refclk,
++ struct psb_intel_clock_t *best_clock)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_clock_t clock;
++ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++ int err = target;
++
++ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
++ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
++ /*
++ * For LVDS, if the panel is on, just rely on its current
++ * settings for dual-channel. We haven't figured out how to
++ * reliably set up different single/dual channel state, if we
++ * even can.
++ */
++ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
++ LVDS_CLKB_POWER_UP)
++ clock.p2 = limit->p2.p2_fast;
++ else
++ clock.p2 = limit->p2.p2_slow;
++ } else {
++ if (target < limit->p2.dot_limit)
++ clock.p2 = limit->p2.p2_slow;
++ else
++ clock.p2 = limit->p2.p2_fast;
++ }
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
++ clock.m1++) {
++ for (clock.m2 = limit->m2.min;
++ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
++ clock.m2++) {
++ for (clock.n = limit->n.min;
++ clock.n <= limit->n.max; clock.n++) {
++ for (clock.p1 = limit->p1.min;
++ clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ psb_intel_clock(dev, refclk, &clock);
++
++ if (!psb_intel_PLL_is_valid
++ (crtc, &clock))
++ continue;
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ }
++ }
++
++ return err != target;
++}
++
++void psb_intel_wait_for_vblank(struct drm_device *dev)
++{
++ /* Wait for 20ms, i.e. one cycle at 50hz. */
++ udelay(20000);
++}
++
++int psb_intel_pipe_set_base(struct drm_crtc *crtc,
++ int x, int y, struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ unsigned long Start, Offset;
++ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
++ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
++ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ u32 dspcntr;
++ int ret = 0;
++
++ /* no fb bound */
++ if (!crtc->fb) {
++ DRM_DEBUG("No FB bound\n");
++ return 0;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return 0;
++
++ if (IS_MRST(dev) && (pipe == 0))
++ dspbase = MRST_DSPABASE;
++
++ Start = mode_dev->bo_offset(dev, psbfb);
++ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
++
++ REG_WRITE(dspstride, crtc->fb->pitch);
++
++ dspcntr = REG_READ(dspcntr_reg);
++ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
++
++ switch (crtc->fb->bits_per_pixel) {
++ case 8:
++ dspcntr |= DISPPLANE_8BPP;
++ break;
++ case 16:
++ if (crtc->fb->depth == 15)
++ dspcntr |= DISPPLANE_15_16BPP;
++ else
++ dspcntr |= DISPPLANE_16BPP;
++ break;
++ case 24:
++ case 32:
++ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++ break;
++ default:
++ DRM_ERROR("Unknown color depth\n");
++ ret = -EINVAL;
++ goto psb_intel_pipe_set_base_exit;
++ }
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
++ if (IS_I965G(dev) || IS_MRST(dev)) {
++ REG_WRITE(dspbase, Offset);
++ REG_READ(dspbase);
++ REG_WRITE(dspsurf, Start);
++ REG_READ(dspsurf);
++ } else {
++ REG_WRITE(dspbase, Start + Offset);
++ REG_READ(dspbase);
++ }
++
++psb_intel_pipe_set_base_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return ret;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ /* struct drm_i915_private *dev_priv = dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 temp;
++ bool enabled;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
++
++ /* Wait for vblank for the disable to take effect. */
++ psb_intel_wait_for_vblank(dev);
++
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) != 0) {
++ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ }
++
++ /* Wait for the clocks to turn off. */
++ udelay(150);
++ break;
++ }
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return 0;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return 0;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++
++ /*Set FIFO Watermarks*/
++ REG_WRITE(DSPARB, 0x3F3E);
++}
++
++static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
++{
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++}
++
++static void psb_intel_crtc_commit(struct drm_crtc *crtc)
++{
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++void psb_intel_encoder_prepare(struct drm_encoder *encoder)
++{
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ /* lvds has its own version of prepare see psb_intel_lvds_prepare */
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
++}
++
++void psb_intel_encoder_commit(struct drm_encoder *encoder)
++{
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ /* lvds has its own version of commit see psb_intel_lvds_commit */
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
++}
++
++static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ return true;
++}
++
++
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
++{
++ u32 pfit_control;
++
++ /* i830 doesn't have a panel fitter */
++ if (IS_I830(dev))
++ return -1;
++
++ pfit_control = REG_READ(PFIT_CONTROL);
++
++ /* See if the panel fitter is in use */
++ if ((pfit_control & PFIT_ENABLE) == 0)
++ return -1;
++
++ /* 965 can place panel fitter on either pipe */
++ if (IS_I965G(dev) || IS_MRST(dev))
++ return (pfit_control >> 29) & 0x3;
++
++ /* older chips can only use pipe 1 */
++ return 1;
++}
++
++static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
++ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ int refclk;
++ struct psb_intel_clock_t clock;
++ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
++ bool ok, is_sdvo = false, is_dvo = false;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (!connector->encoder
++ || connector->encoder->crtc != crtc)
++ continue;
++
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_SDVO:
++ is_sdvo = true;
++ break;
++ case INTEL_OUTPUT_DVO:
++ is_dvo = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ }
++ }
++
++ if (IS_I9XX(dev))
++ refclk = 96000;
++ else
++ refclk = 48000;
++
++ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
++ &clock);
++ if (!ok) {
++ DRM_ERROR("Couldn't find PLL settings for mode!\n");
++ return 0;
++ }
++
++ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
++
++ dpll = DPLL_VGA_MODE_DIS;
++ if (IS_I9XX(dev)) {
++ if (is_lvds) {
++ dpll |= DPLLB_MODE_LVDS;
++ if (IS_POULSBO(dev))
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ } else
++ dpll |= DPLLB_MODE_DAC_SERIAL;
++ if (is_sdvo) {
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ if (IS_I945G(dev) ||
++ IS_I945GM(dev) ||
++ IS_POULSBO(dev)) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++ dpll |=
++ (sdvo_pixel_multiply -
++ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ }
++ }
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 1)) << 16;
++ switch (clock.p2) {
++ case 5:
++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
++ break;
++ case 7:
++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
++ break;
++ case 10:
++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
++ break;
++ case 14:
++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
++ break;
++ }
++ if (IS_I965G(dev))
++ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
++ } else {
++ if (is_lvds) {
++ dpll |=
++ (1 << (clock.p1 - 1)) <<
++ DPLL_FPA01_P1_POST_DIV_SHIFT;
++ } else {
++ if (clock.p1 == 2)
++ dpll |= PLL_P1_DIVIDE_BY_TWO;
++ else
++ dpll |=
++ (clock.p1 -
++ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
++ if (clock.p2 == 4)
++ dpll |= PLL_P2_DIVIDE_BY_4;
++ }
++ }
++
++ if (is_tv) {
++ /* XXX: just matching BIOS for now */
++/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
++ dpll |= 3;
++ }
++#if 0
++ else if (is_lvds)
++ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
++#endif
++ else
++ dpll |= PLL_REF_INPUT_DREFCLK;
++
++ /* setup pipeconf */
++ pipeconf = REG_READ(pipeconf_reg);
++
++ /* Set up the display plane register */
++ dspcntr = DISPPLANE_GAMMA_ENABLE;
++
++ if (pipe == 0)
++ dspcntr |= DISPPLANE_SEL_PIPE_A;
++ else
++ dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++ dspcntr |= DISPLAY_PLANE_ENABLE;
++ pipeconf |= PIPEACONF_ENABLE;
++ dpll |= DPLL_VCO_ENABLE;
++
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
++ drm_mode_debug_printmodeline(mode);
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ udelay(150);
++ }
++
++ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
++ * This is an exception to the general rule that mode_set doesn't turn
++ * things on.
++ */
++ if (is_lvds) {
++ u32 lvds = REG_READ(LVDS);
++
++ lvds |=
++ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
++ LVDS_PIPEB_SELECT;
++ /* Set the B0-B3 data pairs corresponding to
++ * whether we're going to
++ * set the DPLLs for dual-channel mode or not.
++ */
++ if (clock.p2 == 7)
++ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
++ else
++ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
++
++ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
++ * appropriately here, but we need to look more
++ * thoroughly into how panels behave in the two modes.
++ */
++
++ REG_WRITE(LVDS, lvds);
++ REG_READ(LVDS);
++ }
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ if (IS_I965G(dev)) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++ REG_WRITE(dpll_md_reg,
++ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
++ ((sdvo_pixel_multiply -
++ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
++ } else {
++ /* write it again -- the BIOS does, after all */
++ REG_WRITE(dpll_reg, dpll);
++ }
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ /* pipesrc and dspsize control the size that is scaled from,
++ * which should always be the user's requested size.
++ */
++ REG_WRITE(dspsize_reg,
++ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
++ REG_WRITE(dsppos_reg, 0);
++ REG_WRITE(pipesrc_reg,
++ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ psb_intel_wait_for_vblank(dev);
++
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ psb_intel_wait_for_vblank(dev);
++
++ return 0;
++}
++
++/** Loads the palette/gamma unit for the CRTC with the prepared values */
++void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
++ int i;
++
++ /* The clocks have to be on to load the palette. */
++ if (!crtc->enabled)
++ return;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ for (i = 0; i < 256; i++) {
++ REG_WRITE(palreg + 4 * i,
++ ((psb_intel_crtc->lut_r[i] +
++ psb_intel_crtc->lut_adj[i]) << 16) |
++ ((psb_intel_crtc->lut_g[i] +
++ psb_intel_crtc->lut_adj[i]) << 8) |
++ (psb_intel_crtc->lut_b[i] +
++ psb_intel_crtc->lut_adj[i]));
++ }
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ for (i = 0; i < 256; i++) {
++ dev_priv->save_palette_a[i] =
++ ((psb_intel_crtc->lut_r[i] +
++ psb_intel_crtc->lut_adj[i]) << 16) |
++ ((psb_intel_crtc->lut_g[i] +
++ psb_intel_crtc->lut_adj[i]) << 8) |
++ (psb_intel_crtc->lut_b[i] +
++ psb_intel_crtc->lut_adj[i]);
++ }
++
++ }
++}
++
++#ifndef CONFIG_X86_MRST
++/**
++ * Save HW states of giving crtc
++ */
++static void psb_intel_crtc_save(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++ int pipeA = (psb_intel_crtc->pipe == 0);
++ uint32_t paletteReg;
++ int i;
++
++ DRM_DEBUG("\n");
++
++ if (!crtc_state) {
++ DRM_DEBUG("No CRTC state found\n");
++ return;
++ }
++
++ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
++ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
++ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
++ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
++ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
++ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
++ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
++ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
++ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
++ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
++ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
++ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
++ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
++
++ /*NOTE: DSPSIZE DSPPOS only for psb*/
++ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
++ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
++
++ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
++
++ DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ crtc_state->saveDSPCNTR,
++ crtc_state->savePIPECONF,
++ crtc_state->savePIPESRC,
++ crtc_state->saveFP0,
++ crtc_state->saveFP1,
++ crtc_state->saveDPLL,
++ crtc_state->saveHTOTAL,
++ crtc_state->saveHBLANK,
++ crtc_state->saveHSYNC,
++ crtc_state->saveVTOTAL,
++ crtc_state->saveVBLANK,
++ crtc_state->saveVSYNC,
++ crtc_state->saveDSPSTRIDE,
++ crtc_state->saveDSPSIZE,
++ crtc_state->saveDSPPOS,
++ crtc_state->saveDSPBASE
++ );
++
++ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++ for (i = 0; i < 256; ++i)
++ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
++}
++
++/**
++ * Restore HW states of giving crtc
++ */
++static void psb_intel_crtc_restore(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_psb_private * dev_priv =
++ (struct drm_psb_private *)dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++ /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
++ int pipeA = (psb_intel_crtc->pipe == 0);
++ uint32_t paletteReg;
++ int i;
++
++ DRM_DEBUG("\n");
++
++ if (!crtc_state) {
++ DRM_DEBUG("No crtc state\n");
++ return;
++ }
++
++ DRM_DEBUG(
++ "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
++ REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
++ REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
++ REG_READ(pipeA ? FPA0 : FPB0),
++ REG_READ(pipeA ? FPA1 : FPB1),
++ REG_READ(pipeA ? DPLL_A : DPLL_B),
++ REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
++ REG_READ(pipeA ? HBLANK_A : HBLANK_B),
++ REG_READ(pipeA ? HSYNC_A : HSYNC_B),
++ REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
++ REG_READ(pipeA ? VBLANK_A : VBLANK_B),
++ REG_READ(pipeA ? VSYNC_A : VSYNC_B),
++ REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
++ REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
++ REG_READ(pipeA ? DSPAPOS : DSPBPOS),
++ REG_READ(pipeA ? DSPABASE : DSPBBASE)
++ );
++
++ DRM_DEBUG(
++ "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ crtc_state->saveDSPCNTR,
++ crtc_state->savePIPECONF,
++ crtc_state->savePIPESRC,
++ crtc_state->saveFP0,
++ crtc_state->saveFP1,
++ crtc_state->saveDPLL,
++ crtc_state->saveHTOTAL,
++ crtc_state->saveHBLANK,
++ crtc_state->saveHSYNC,
++ crtc_state->saveVTOTAL,
++ crtc_state->saveVBLANK,
++ crtc_state->saveVSYNC,
++ crtc_state->saveDSPSTRIDE,
++ crtc_state->saveDSPSIZE,
++ crtc_state->saveDSPPOS,
++ crtc_state->saveDSPBASE
++ );
++
++
++#if 0
++ if (drm_helper_crtc_in_use(crtc))
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++
++ if (psb_intel_panel_fitter_pipe(dev) == psb_intel_crtc->pipe) {
++ REG_WRITE(PFIT_CONTROL, crtc_state->savePFITCTRL);
++ DRM_DEBUG("write pfit_controle: %x\n", REG_READ(PFIT_CONTROL));
++ }
++#endif
++
++ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
++ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
++ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
++ REG_READ(pipeA ? DPLL_A : DPLL_B);
++ DRM_DEBUG("write dpll: %x\n",
++ REG_READ(pipeA ? DPLL_A : DPLL_B));
++ udelay(150);
++ }
++
++ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
++ REG_READ(pipeA ? FPA0 : FPB0);
++
++ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
++ REG_READ(pipeA ? FPA1 : FPB1);
++
++ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
++ REG_READ(pipeA ? DPLL_A : DPLL_B);
++ udelay(150);
++
++ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
++ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
++ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
++ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
++ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
++ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
++ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
++
++ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
++ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
++
++ REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
++ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
++
++ psb_intel_wait_for_vblank(dev);
++
++ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
++ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++
++ psb_intel_wait_for_vblank(dev);
++
++ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++ for (i = 0; i < 256; ++i)
++ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
++}
++#endif
++
++static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
++ struct drm_file *file_priv,
++ uint32_t handle,
++ uint32_t width, uint32_t height)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
++ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
++ uint32_t temp;
++ size_t addr = 0;
++ uint32_t page_offset;
++ size_t size;
++ void *bo;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ /* if we want to turn of the cursor ignore width and height */
++ if (!handle) {
++ DRM_DEBUG("cursor off\n");
++ /* turn of the cursor */
++ temp = 0;
++ temp |= CURSOR_MODE_DISABLE;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(control, temp);
++ REG_WRITE(base, 0);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo) {
++ mode_dev->bo_unpin_for_scanout(dev,
++ psb_intel_crtc->
++ cursor_bo);
++ psb_intel_crtc->cursor_bo = NULL;
++ }
++
++ return 0;
++ }
++
++ /* Currently we only support 64x64 cursors */
++ if (width != 64 || height != 64) {
++ DRM_ERROR("we currently only support 64x64 cursors\n");
++ return -EINVAL;
++ }
++
++ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
++ if (!bo)
++ return -ENOENT;
++
++ ret = mode_dev->bo_pin_for_scanout(dev, bo);
++ if (ret)
++ return ret;
++ size = mode_dev->bo_size(dev, bo);
++ if (size < width * height * 4) {
++ DRM_ERROR("buffer is to small\n");
++ return -ENOMEM;
++ }
++
++ /*insert this bo into gtt*/
++ DRM_DEBUG("%s: map meminfo for hw cursor. handle %x\n",
++ __func__, handle);
++
++ ret = psb_gtt_map_meminfo(dev, (IMG_HANDLE)handle, &page_offset);
++ if (ret) {
++ DRM_ERROR("Can not map meminfo to GTT. handle 0x%x\n", handle);
++ return ret;
++ }
++
++ addr = page_offset << PAGE_SHIFT;
++
++ if (IS_POULSBO(dev))
++ addr += pg->stolen_base;
++
++ psb_intel_crtc->cursor_addr = addr;
++
++ temp = 0;
++ /* set the pipe for the cursor */
++ temp |= (pipe << 28);
++ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(control, temp);
++ REG_WRITE(base, addr);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
++ mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
++ psb_intel_crtc->cursor_bo = bo;
++ }
++
++ return 0;
++}
++
++static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t temp = 0;
++ uint32_t adder;
++
++ if (x < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++ x = -x;
++ }
++ if (y < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++ y = -y;
++ }
++
++ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++ adder = psb_intel_crtc->cursor_addr;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
++ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ return 0;
++}
++
++static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
++ u16 *green, u16 *blue, uint32_t size)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int i;
++
++ if (size != 256)
++ return;
++
++ for (i = 0; i < 256; i++) {
++ psb_intel_crtc->lut_r[i] = red[i] >> 8;
++ psb_intel_crtc->lut_g[i] = green[i] >> 8;
++ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++}
++
++/* Returns the clock of the currently programmed mode of the given pipe. */
++static int psb_intel_crtc_clock_get(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ u32 dpll;
++ u32 fp;
++ struct psb_intel_clock_t clock;
++ bool is_lvds;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
++ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
++ else
++ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
++ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ dpll = (pipe == 0) ?
++ dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
++
++ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++ fp = (pipe == 0) ?
++ dev_priv->saveFPA0 :
++ dev_priv->saveFPB0;
++ else
++ fp = (pipe == 0) ?
++ dev_priv->saveFPA1 :
++ dev_priv->saveFPB1;
++
++ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
++ }
++
++ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
++ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
++ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
++
++ if (is_lvds) {
++ clock.p1 =
++ ffs((dpll &
++ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT);
++ clock.p2 = 14;
++
++ if ((dpll & PLL_REF_INPUT_MASK) ==
++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++ /* XXX: might not be 66MHz */
++ i8xx_clock(66000, &clock);
++ } else
++ i8xx_clock(48000, &clock);
++ } else {
++ if (dpll & PLL_P1_DIVIDE_BY_TWO)
++ clock.p1 = 2;
++ else {
++ clock.p1 =
++ ((dpll &
++ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
++ }
++ if (dpll & PLL_P2_DIVIDE_BY_4)
++ clock.p2 = 4;
++ else
++ clock.p2 = 2;
++
++ i8xx_clock(48000, &clock);
++ }
++
++ /* XXX: It would be nice to validate the clocks, but we can't reuse
++ * i830PllIsValid() because it relies on the xf86_config connector
++ * configuration being accurate, which it isn't necessarily.
++ */
++
++ return clock.dot;
++}
++
++/** Returns the currently programmed mode of the given pipe. */
++struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ struct drm_display_mode *mode;
++ int htot;
++ int hsync;
++ int vtot;
++ int vsync;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
++ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
++ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
++ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ htot = (pipe == 0) ?
++ dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
++ hsync = (pipe == 0) ?
++ dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
++ vtot = (pipe == 0) ?
++ dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
++ vsync = (pipe == 0) ?
++ dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
++ }
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
++ mode->hdisplay = (htot & 0xffff) + 1;
++ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
++ mode->hsync_start = (hsync & 0xffff) + 1;
++ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
++ mode->vdisplay = (vtot & 0xffff) + 1;
++ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
++ mode->vsync_start = (vsync & 0xffff) + 1;
++ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++#ifndef CONFIG_X86_MRST
++ kfree(psb_intel_crtc->crtc_state);
++#endif
++ drm_crtc_cleanup(crtc);
++ kfree(psb_intel_crtc);
++}
++
++static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
++ .dpms = psb_intel_crtc_dpms,
++ .mode_fixup = psb_intel_crtc_mode_fixup,
++ .mode_set = psb_intel_crtc_mode_set,
++ .mode_set_base = psb_intel_pipe_set_base,
++ .prepare = psb_intel_crtc_prepare,
++ .commit = psb_intel_crtc_commit,
++};
++
++static const struct drm_crtc_helper_funcs mrst_helper_funcs;
++
++const struct drm_crtc_funcs psb_intel_crtc_funcs = {
++#ifndef CONFIG_X86_MRST
++ .save = psb_intel_crtc_save,
++ .restore = psb_intel_crtc_restore,
++#endif
++ .cursor_set = psb_intel_crtc_cursor_set,
++ .cursor_move = psb_intel_crtc_cursor_move,
++ .gamma_set = psb_intel_crtc_gamma_set,
++ .set_config = drm_crtc_helper_set_config,
++ .destroy = psb_intel_crtc_destroy,
++};
++
++
++void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_intel_crtc *psb_intel_crtc;
++ int i;
++ uint16_t *r_base, *g_base, *b_base;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_crtc_init \n");
++#endif /* PRINT_JLIU7 */
++
++ /* We allocate a extra array of drm_connector pointers
++ * for fbdev after the crtc */
++ psb_intel_crtc =
++ kzalloc(sizeof(struct psb_intel_crtc) +
++ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
++ GFP_KERNEL);
++ if (psb_intel_crtc == NULL)
++ return;
++
++#ifndef CONFIG_X86_MRST
++ psb_intel_crtc->crtc_state =
++ kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
++ if (!psb_intel_crtc->crtc_state) {
++ DRM_INFO("Crtc state error: No memory\n");
++ kfree(psb_intel_crtc);
++ return;
++ }
++#endif
++
++ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
++
++ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
++ psb_intel_crtc->pipe = pipe;
++ psb_intel_crtc->plane = pipe;
++
++ r_base = psb_intel_crtc->base.gamma_store;
++ g_base = r_base + 256;
++ b_base = g_base + 256;
++ for (i = 0; i < 256; i++) {
++ psb_intel_crtc->lut_r[i] = i;
++ psb_intel_crtc->lut_g[i] = i;
++ psb_intel_crtc->lut_b[i] = i;
++ r_base[i] = i << 8;
++ g_base[i] = i << 8;
++ b_base[i] = i << 8;
++
++ psb_intel_crtc->lut_adj[i] = 0;
++ }
++
++ psb_intel_crtc->mode_dev = mode_dev;
++ psb_intel_crtc->cursor_addr = 0;
++
++ if (IS_MRST(dev)) {
++ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs);
++ } else {
++ drm_crtc_helper_add(&psb_intel_crtc->base,
++ &psb_intel_helper_funcs);
++ }
++
++ /* Setup the array of drm_connector pointer array */
++ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
++ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
++ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
++ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = &psb_intel_crtc->base;
++ dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = &psb_intel_crtc->base;
++ psb_intel_crtc->mode_set.connectors =
++ (struct drm_connector **) (psb_intel_crtc + 1);
++ psb_intel_crtc->mode_set.num_connectors = 0;
++}
++
++int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
++ struct drm_mode_object *drmmode_obj;
++ struct psb_intel_crtc *crtc;
++
++ if (!dev_priv) {
++ DRM_ERROR("called with no initialization\n");
++ return -EINVAL;
++ }
++
++ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
++ DRM_MODE_OBJECT_CRTC);
++
++ if (!drmmode_obj) {
++ DRM_ERROR("no such CRTC id\n");
++ return -EINVAL;
++ }
++
++ crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
++ pipe_from_crtc_id->pipe = crtc->pipe;
++
++ return 0;
++}
++
++struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
++{
++ struct drm_crtc *crtc = NULL;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ if (psb_intel_crtc->pipe == pipe)
++ break;
++ }
++ return crtc;
++}
++
++int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
++{
++ int index_mask = 0;
++ struct drm_connector *connector;
++ int entry = 0;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ if (type_mask & (1 << psb_intel_output->type))
++ index_mask |= (1 << entry);
++ entry++;
++ }
++ return index_mask;
++}
++
++#if 0 /* JB: Should be per device */
++static void psb_intel_setup_outputs(struct drm_device *dev)
++{
++ struct drm_connector *connector;
++
++ psb_intel_crt_init(dev);
++
++ /* Set up integrated LVDS */
++ if (IS_MOBILE(dev) && !IS_I830(dev))
++ psb_intel_lvds_init(dev);
++
++ if (IS_I9XX(dev)) {
++ psb_intel_sdvo_init(dev, SDVOB);
++ psb_intel_sdvo_init(dev, SDVOC);
++ } else
++ psb_intel_dvo_init(dev);
++
++ if (IS_I9XX(dev) && !IS_I915G(dev))
++ psb_intel_tv_init(dev);
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ int crtc_mask = 0, clone_mask = 0;
++
++ /* valid crtcs */
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_DVO:
++ case INTEL_OUTPUT_SDVO:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
++ (1 << INTEL_OUTPUT_DVO) |
++ (1 << INTEL_OUTPUT_SDVO));
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
++ (1 << INTEL_OUTPUT_DVO) |
++ (1 << INTEL_OUTPUT_SDVO));
++ break;
++ case INTEL_OUTPUT_LVDS:
++ crtc_mask = (1 << 1);
++ clone_mask = (1 << INTEL_OUTPUT_LVDS);
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
++ break;
++ }
++ encoder->possible_crtcs = crtc_mask;
++ encoder->possible_clones =
++ psb_intel_connector_clones(dev, clone_mask);
++ }
++}
++#endif
++
++#if 0 /* JB: Rework framebuffer code into something none device specific */
++static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct psb_intel_framebuffer *psb_intel_fb =
++ to_psb_intel_framebuffer(fb);
++ struct drm_device *dev = fb->dev;
++
++ if (fb->fbdev)
++ intelfb_remove(dev, fb);
++
++ drm_framebuffer_cleanup(fb);
++ drm_gem_object_unreference(fb->mm_private);
++
++ kfree(psb_intel_fb);
++}
++
++static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ struct drm_gem_object *object = fb->mm_private;
++
++ return drm_gem_handle_create(file_priv, object, handle);
++}
++
++static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
++ .destroy = psb_intel_user_framebuffer_destroy,
++ .create_handle = psb_intel_user_framebuffer_create_handle,
++};
++
++struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev,
++ struct drm_mode_fb_cmd
++ *mode_cmd,
++ void *mm_private)
++{
++ struct psb_intel_framebuffer *psb_intel_fb;
++
++ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
++ if (!psb_intel_fb)
++ return NULL;
++
++ if (!drm_framebuffer_init(dev,
++ &psb_intel_fb->base,
++ &psb_intel_fb_funcs))
++ return NULL;
++
++ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
++
++ return &psb_intel_fb->base;
++}
++
++
++static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
++ drm_device
++ *dev,
++ struct
++ drm_file
++ *filp,
++ struct
++ drm_mode_fb_cmd
++ *mode_cmd)
++{
++ struct drm_gem_object *obj;
++
++ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
++ if (!obj)
++ return NULL;
++
++ return psb_intel_framebuffer_create(dev, mode_cmd, obj);
++}
++
++static int psb_intel_insert_new_fb(struct drm_device *dev,
++ struct drm_file *file_priv,
++ struct drm_framebuffer *fb,
++ struct drm_mode_fb_cmd *mode_cmd)
++{
++ struct psb_intel_framebuffer *psb_intel_fb;
++ struct drm_gem_object *obj;
++ struct drm_crtc *crtc;
++
++ psb_intel_fb = to_psb_intel_framebuffer(fb);
++
++ mutex_lock(&dev->struct_mutex);
++ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
++
++ if (!obj) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
++ }
++ drm_gem_object_unreference(psb_intel_fb->base.mm_private);
++ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc->fb == fb) {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
++ }
++ }
++ return 0;
++}
++
++static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
++ .resize_fb = psb_intel_insert_new_fb,
++ .fb_create = psb_intel_user_framebuffer_create,
++ .fb_changed = intelfb_probe,
++};
++#endif
++
++#if 0 /* Should be per device */
++void psb_intel_modeset_init(struct drm_device *dev)
++{
++ int num_pipe;
++ int i;
++
++ drm_mode_config_init(dev);
++
++ dev->mode_config.min_width = 0;
++ dev->mode_config.min_height = 0;
++
++ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs;
++
++ if (IS_I965G(dev)) {
++ dev->mode_config.max_width = 8192;
++ dev->mode_config.max_height = 8192;
++ } else {
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++ }
++
++ /* set memory base */
++ /* MRST and PSB should use BAR 2*/
++ dev->mode_config.fb_base =
++ pci_resource_start(dev->pdev, 2);
++
++ if (IS_MOBILE(dev) || IS_I9XX(dev))
++ num_pipe = 2;
++ else
++ num_pipe = 1;
++ DRM_DEBUG("%d display pipe%s available.\n",
++ num_pipe, num_pipe > 1 ? "s" : "");
++
++ for (i = 0; i < num_pipe; i++)
++ psb_intel_crtc_init(dev, i);
++
++ psb_intel_setup_outputs(dev);
++
++ /* setup fbs */
++ /* drm_initial_config(dev); */
++}
++#endif
++
++void psb_intel_modeset_cleanup(struct drm_device *dev)
++{
++ drm_mode_config_cleanup(dev);
++}
++
++
++/* current intel driver doesn't take advantage of encoders
++ always give back the encoder for the connector
++*/
++struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ return &psb_intel_output->enc;
++}
++
++/* MRST_PLATFORM start */
++
++#if DUMP_REGISTER
++void dump_dc_registers(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ unsigned int i = 0;
++
++ DRM_INFO("jliu7 dump_dc_registers\n");
++
++
++ if (0x80000000 & REG_READ(0x70008)) {
++ for (i = 0x20a0; i < 0x20af; i += 4) {
++ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0xf014; i < 0xf047; i += 4) {
++ DRM_INFO("jliu7 pipe A dpll register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x60000; i < 0x6005f; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe A timing register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61140; i < 0x61143; i += 4) {
++ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61180; i < 0x6123F; i += 4) {
++ DRM_INFO
++ ("jliu7 LVDS PORT register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61254; i < 0x612AB; i += 4) {
++ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x70000; i < 0x70047; i += 4) {
++ DRM_INFO
++ ("jliu7 PIPE A control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x70180; i < 0x7020b; i += 4) {
++ DRM_INFO("jliu7 display A control register=0x%x,"
++ "value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71400; i < 0x71403; i += 4) {
++ DRM_INFO
++ ("jliu7 VGA Display Plane Control register=0x%x,"
++ "value=%x\n", i, (unsigned int) REG_READ(i));
++ }
++ }
++
++ if (0x80000000 & REG_READ(0x71008)) {
++ for (i = 0x61000; i < 0x6105f; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe B timing register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71000; i < 0x71047; i += 4) {
++ DRM_INFO
++ ("jliu7 PIPE B control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71180; i < 0x7120b; i += 4) {
++ DRM_INFO("jliu7 display B control register=0x%x,"
++ "value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++ }
++#if 0
++ for (i = 0x70080; i < 0x700df; i += 4) {
++ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++#endif
++
++}
++
++void dump_dsi_registers(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ unsigned int i = 0;
++
++ DRM_INFO("jliu7 dump_dsi_registers\n");
++
++ for (i = 0xb000; i < 0xb064; i += 4) {
++ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++
++ i = 0xb104;
++ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++}
++#endif /* DUMP_REGISTER */
++
++
++struct mrst_limit_t {
++ struct psb_intel_range_t dot, m, p1;
++};
++
++struct mrst_clock_t {
++ /* derived values */
++ int dot;
++ int m;
++ int p1;
++};
++
++#define MRST_LIMIT_LVDS_100L 0
++#define MRST_LIMIT_LVDS_83 1
++#define MRST_LIMIT_LVDS_100 2
++
++#define MRST_DOT_MIN 19750
++#define MRST_DOT_MAX 120000
++#define MRST_M_MIN_100L 20
++#define MRST_M_MIN_100 10
++#define MRST_M_MIN_83 12
++#define MRST_M_MAX_100L 34
++#define MRST_M_MAX_100 17
++#define MRST_M_MAX_83 20
++#define MRST_P1_MIN 2
++#define MRST_P1_MAX_0 7
++#define MRST_P1_MAX_1 8
++
++static const struct mrst_limit_t mrst_limits[] = {
++ { /* MRST_LIMIT_LVDS_100L */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++ },
++ { /* MRST_LIMIT_LVDS_83L */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
++ },
++ { /* MRST_LIMIT_LVDS_100 */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++ },
++};
++
++#define MRST_M_MIN 10
++static const u32 mrst_m_converts[] = {
++ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
++ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
++ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
++};
++
++#define COUNT_MAX 0x10000000
++void mrstWaitForPipeDisable(struct drm_device *dev)
++{
++ int count, temp;
++
++ /* FIXME JLIU7_PO */
++ psb_intel_wait_for_vblank(dev);
++ return;
++
++ /* Wait for for the pipe disable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(PIPEACONF);
++ if ((temp & PIPEACONF_PIPE_STATE) == 0)
++ break;
++ }
++
++ if (count == COUNT_MAX) {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeDisable time out. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeDisable cout = %d. \n",
++ count);
++#endif /* PRINT_JLIU7 */
++ }
++}
++
++void mrstWaitForPipeEnable(struct drm_device *dev)
++{
++ int count, temp;
++
++ /* FIXME JLIU7_PO */
++ psb_intel_wait_for_vblank(dev);
++ return;
++
++ /* Wait for for the pipe disable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(PIPEACONF);
++ if ((temp & PIPEACONF_PIPE_STATE) == 1)
++ break;
++ }
++
++ if (count == COUNT_MAX) {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeEnable time out. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeEnable cout = %d. \n",
++ count);
++#endif /* PRINT_JLIU7 */
++ }
++}
++
++static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
++{
++ const struct mrst_limit_t *limit;
++ struct drm_device *dev = crtc->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
++ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
++ if (dev_priv->sku_100L)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
++ if (dev_priv->sku_83)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
++ if (dev_priv->sku_100)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
++ } else {
++ limit = NULL;
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 jliu7 mrst_limit Wrong display type. \n");
++#endif /* PRINT_JLIU7 */
++ }
++
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++static void mrst_clock(int refclk, struct mrst_clock_t *clock)
++{
++ clock->dot = (refclk * clock->m) / (14 * clock->p1);
++}
++
++void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
++{
++#if PRINT_JLIU7
++ DRM_INFO
++ ("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d. \n",
++ prefix, clock->dot, clock->m, clock->p1);
++#endif /* PRINT_JLIU7 */
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given refclk,
++ * or FALSE. Divisor values are the actual divisors for
++ */
++static bool
++mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
++ struct mrst_clock_t *best_clock)
++{
++ struct mrst_clock_t clock;
++ const struct mrst_limit_t *limit = mrst_limit(crtc);
++ int err = target;
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
++ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ mrst_clock(refclk, &clock);
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
++
++ return err != target;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 temp;
++ bool enabled;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d \n",
++ mode, pipe);
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
++
++ /* Wait for for the pipe disable to take effect. */
++ mrstWaitForPipeDisable(dev);
++
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) != 0) {
++ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ }
++
++ /* Wait for the clocks to turn off. */
++ udelay(150);
++ break;
++ }
++
++#if DUMP_REGISTER
++ dump_dc_registers(dev);
++#endif /* DUMP_REGISTER */
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++
++ /*Set FIFO Watermarks*/
++ REG_WRITE(DSPARB, 0x3FFF);
++ REG_WRITE(DSPFW1, 0x3F88080A);
++ REG_WRITE(DSPFW2, 0x0b060808);
++ REG_WRITE(DSPFW3, 0x0);
++ REG_WRITE(DSPFW4, 0x08030404);
++ REG_WRITE(DSPFW5, 0x04040404);
++ REG_WRITE(DSPFW6, 0x78);
++ REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
++ /* Must write Bit 14 of the Chicken Bit Register */
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static int mrst_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
++ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ int refclk = 0;
++ struct mrst_clock_t clock;
++ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport;
++ bool ok, is_sdvo = false;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ bool is_mipi = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct psb_intel_output *psb_intel_output = NULL;
++ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
++ struct drm_encoder *encoder;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return 0;
++
++ memcpy(&psb_intel_crtc->saved_mode,
++ mode,
++ sizeof(struct drm_display_mode));
++ memcpy(&psb_intel_crtc->saved_adjusted_mode,
++ adjusted_mode,
++ sizeof(struct drm_display_mode));
++
++ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
++
++ if (encoder->crtc != crtc)
++ continue;
++
++ psb_intel_output = enc_to_psb_intel_output(encoder);
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_SDVO:
++ is_sdvo = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ case INTEL_OUTPUT_MIPI:
++ is_mipi = true;
++ break;
++ }
++ }
++
++ if (is_lvds | is_mipi) {
++ /*FIXME JLIU7 Get panel power delay parameters from
++ config data */
++ REG_WRITE(0x61208, 0x25807d0);
++ REG_WRITE(0x6120c, 0x1f407d0);
++ REG_WRITE(0x61210, 0x270f04);
++ }
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ REG_WRITE(pipesrc_reg,
++ ((mode->crtc_hdisplay - 1) << 16) |
++ (mode->crtc_vdisplay - 1));
++
++ if (psb_intel_output)
++ drm_connector_property_get_value(&psb_intel_output->base,
++ dev->mode_config.scaling_mode_property, &scalingType);
++
++ if (scalingType == DRM_MODE_SCALE_CENTER) {
++ /* Moorestown doesn't have register support for centering so
++ * we need to mess with the h/vblank and h/vsync start and
++ * ends to get centering */
++ int offsetX = 0, offsetY = 0;
++
++ offsetX = (adjusted_mode->crtc_hdisplay -
++ mode->crtc_hdisplay) / 2;
++ offsetY = (adjusted_mode->crtc_vdisplay -
++ mode->crtc_vdisplay) / 2;
++
++ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(hblank_reg,
++ (adjusted_mode->crtc_hblank_start - offsetX - 1) |
++ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
++ REG_WRITE(hsync_reg,
++ (adjusted_mode->crtc_hsync_start - offsetX - 1) |
++ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
++ REG_WRITE(vblank_reg,
++ (adjusted_mode->crtc_vblank_start - offsetY - 1) |
++ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
++ REG_WRITE(vsync_reg,
++ (adjusted_mode->crtc_vsync_start - offsetY - 1) |
++ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
++ } else {
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ }
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ /* setup pipeconf */
++ pipeconf = REG_READ(pipeconf_reg);
++
++ /* Set up the display plane register */
++ dspcntr = REG_READ(dspcntr_reg);
++ dspcntr |= DISPPLANE_GAMMA_ENABLE;
++
++ if (pipe == 0)
++ dspcntr |= DISPPLANE_SEL_PIPE_A;
++ else
++ dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
++ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
++
++ if (is_mipi)
++ goto mrst_crtc_mode_set_exit;
++
++ if (dev_priv->sku_100L)
++ refclk = 100000;
++ else if (dev_priv->sku_83)
++ refclk = 166000;
++ else if (dev_priv->sku_100)
++ refclk = 200000;
++
++ dpll = 0; /*BIT16 = 0 for 100MHz reference */
++
++ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
++
++ if (!ok) {
++#if PRINT_JLIU7
++ DRM_INFO
++ ("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d,"
++ "m = %x, p1 = %x. \n", clock.dot, clock.m,
++ clock.p1);
++#endif /* PRINT_JLIU7 */
++ }
++
++ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
++
++ dpll |= DPLL_VGA_MODE_DIS;
++
++
++ dpll |= DPLL_VCO_ENABLE;
++
++ if (is_lvds)
++ dpll |= DPLLA_MODE_LVDS;
++ else
++ dpll |= DPLLB_MODE_DAC_SERIAL;
++
++ if (is_sdvo) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ dpll |=
++ (sdvo_pixel_multiply -
++ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ }
++
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 2)) << 17;
++
++ dpll |= DPLL_VCO_ENABLE;
++
++#if PRINT_JLIU7
++ mrstPrintPll("chosen", &clock);
++#endif /* PRINT_JLIU7 */
++
++#if 0
++ if (!xf86ModesEqual(mode, adjusted_mode)) {
++ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
++ "Adjusted mode for pipe %c:\n",
++ pipe == 0 ? 'A' : 'B');
++ xf86PrintModeline(pScrn->scrnIndex, mode);
++ }
++ i830PrintPll("chosen", &clock);
++#endif
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
++ udelay(150);
++ }
++
++ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
++ * This is an exception to the general rule that mode_set doesn't turn
++ * things on.
++ */
++ if (is_lvds) {
++
++ /*lvdsport = 0x803003c0;*/
++ /*lvdsport = 0x813003c0;*/
++ lvdsport = dev_priv->gct_data.Panel_Port_Control;
++
++ REG_WRITE(LVDS, lvdsport);
++ }
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ /* write it again -- the BIOS does, after all */
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ /* Wait for for the pipe enable to take effect. */
++ mrstWaitForPipeEnable(dev);
++
++ REG_WRITE(dspcntr_reg, dspcntr);
++ psb_intel_wait_for_vblank(dev);
++
++mrst_crtc_mode_set_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++
++static const struct drm_crtc_helper_funcs mrst_helper_funcs = {
++ .dpms = mrst_crtc_dpms,
++ .mode_fixup = psb_intel_crtc_mode_fixup,
++ .mode_set = mrst_crtc_mode_set,
++ .mode_set_base = psb_intel_pipe_set_base,
++ .prepare = psb_intel_crtc_prepare,
++ .commit = psb_intel_crtc_commit,
++};
++
++/* MRST_PLATFORM end */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_display.h b/drivers/gpu/drm/mrst/drv/psb_intel_display.h
+new file mode 100644
+index 0000000..74e3b5e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_display.h
+@@ -0,0 +1,25 @@
++/* copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#ifndef _INTEL_DISPLAY_H_
++#define _INTEL_DISPLAY_H_
++
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_drv.h b/drivers/gpu/drm/mrst/drv/psb_intel_drv.h
+new file mode 100644
+index 0000000..9e77cce
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_drv.h
+@@ -0,0 +1,283 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __INTEL_DRV_H__
++#define __INTEL_DRV_H__
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++#include <drm/drm_crtc.h>
++
++#include <drm/drm_crtc_helper.h>
++
++/*
++ * MOORESTOWN defines
++ */
++#define MRST_I2C 0
++
++#define DUMP_REGISTER 0
++#define MRST_24BIT_LVDS 1
++#define MRST_24BIT_DOT_1 0
++#define MRST_24BIT_WA 0
++
++#define PRINT_JLIU7 0
++#define DELAY_TIME1 2000 /* 1000 = 1ms */
++
++/*
++ * Display related stuff
++ */
++
++/* store information about an Ixxx DVO */
++/* The i830->i865 use multiple DVOs with multiple i2cs */
++/* the i915, i945 have a single sDVO i2c bus - which is different */
++#define MAX_OUTPUTS 6
++/* maximum connectors per crtcs in the mode set */
++#define INTELFB_CONN_LIMIT 4
++
++#define INTEL_I2C_BUS_DVO 1
++#define INTEL_I2C_BUS_SDVO 2
++
++/* these are outputs from the chip - integrated only
++ * external chips are via DVO or SDVO output */
++#define INTEL_OUTPUT_UNUSED 0
++#define INTEL_OUTPUT_ANALOG 1
++#define INTEL_OUTPUT_DVO 2
++#define INTEL_OUTPUT_SDVO 3
++#define INTEL_OUTPUT_LVDS 4
++#define INTEL_OUTPUT_TVOUT 5
++#define INTEL_OUTPUT_MIPI 6
++
++#define INTEL_DVO_CHIP_NONE 0
++#define INTEL_DVO_CHIP_LVDS 1
++#define INTEL_DVO_CHIP_TMDS 2
++#define INTEL_DVO_CHIP_TVOUT 4
++
++enum mipi_panel_type {
++ NSC_800X480 = 1,
++ LGE_480X1024 = 2,
++ TPO_864X480 = 3
++};
++
++struct opregion_header {
++ u8 signature[16];
++ u32 size;
++ u32 opregion_ver;
++ u8 bios_ver[32];
++ u8 vbios_ver[16];
++ u8 driver_ver[16];
++ u32 mboxes;
++ u8 reserved[164];
++} __attribute__((packed));
++
++struct opregion_apci {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct opregion_swsci {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct opregion_acpi {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct psb_intel_opregion {
++ struct opregion_header *header;
++ struct opregion_acpi *acpi;
++ struct opregion_swsci *swsci;
++ struct opregion_asle *asle;
++ int enabled;
++};
++
++/**
++ * Hold information usually put in the device driver privates here,
++ * since it needs to be shared across multiple device drivers' privates.
++*/
++struct psb_intel_mode_device {
++
++ /*
++ * Abstracted memory manager operations
++ */
++ void *(*bo_from_handle) (struct drm_device *dev,
++ struct drm_file *file_priv,
++ unsigned int handle);
++ size_t(*bo_size) (struct drm_device *dev, void *bo);
++ size_t(*bo_offset) (struct drm_device *dev, void *bo);
++ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
++ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
++
++ /*
++ * Cursor
++ */
++ int cursor_needs_physical;
++
++ /*
++ * LVDS info
++ */
++ int backlight_duty_cycle; /* restore backlight to this value */
++ bool panel_wants_dither;
++ struct drm_display_mode *panel_fixed_mode;
++ struct drm_display_mode *vbt_mode; /* if any */
++
++ uint32_t saveBLC_PWM_CTL;
++};
++
++struct psb_intel_i2c_chan {
++ /* for getting at dev. private (mmio etc.) */
++ struct drm_device *drm_dev;
++ u32 reg; /* GPIO reg */
++ struct i2c_adapter adapter;
++ struct i2c_algo_bit_data algo;
++ u8 slave_addr;
++};
++
++struct psb_intel_output {
++ struct drm_connector base;
++
++ struct drm_encoder enc;
++ int type;
++ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
++ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
++ bool load_detect_temp;
++ void *dev_priv;
++
++ struct psb_intel_mode_device *mode_dev;
++
++};
++
++struct psb_intel_crtc_state {
++ uint32_t saveDSPCNTR;
++ uint32_t savePIPECONF;
++ uint32_t savePIPESRC;
++ uint32_t saveDPLL;
++ uint32_t saveFP0;
++ uint32_t saveFP1;
++ uint32_t saveHTOTAL;
++ uint32_t saveHBLANK;
++ uint32_t saveHSYNC;
++ uint32_t saveVTOTAL;
++ uint32_t saveVBLANK;
++ uint32_t saveVSYNC;
++ uint32_t saveDSPSTRIDE;
++ uint32_t saveDSPSIZE;
++ uint32_t saveDSPPOS;
++ uint32_t saveDSPBASE;
++ uint32_t savePalette[256];
++};
++
++struct psb_intel_crtc {
++ struct drm_crtc base;
++ int pipe;
++ int plane;
++ uint32_t cursor_addr;
++ u8 lut_r[256], lut_g[256], lut_b[256];
++ u8 lut_adj[256];
++ struct psb_intel_framebuffer *fbdev_fb;
++ /* a mode_set for fbdev users on this crtc */
++ struct drm_mode_set mode_set;
++
++ /* current bo we scanout from */
++ void *scanout_bo;
++
++ /* current bo we cursor from */
++ void *cursor_bo;
++
++ struct drm_display_mode saved_mode;
++ struct drm_display_mode saved_adjusted_mode;
++
++ struct psb_intel_mode_device *mode_dev;
++
++/*FIXME: Workaround to avoid MRST block.*/
++#ifndef CONFIG_X86_MRST
++ /* Saved Crtc HW states */
++ struct psb_intel_crtc_state *crtc_state;
++#endif
++};
++
++#define to_psb_intel_crtc(x) \
++ container_of(x, struct psb_intel_crtc, base)
++#define to_psb_intel_output(x) \
++ container_of(x, struct psb_intel_output, base)
++#define enc_to_psb_intel_output(x) \
++ container_of(x, struct psb_intel_output, enc)
++#define to_psb_intel_framebuffer(x) \
++ container_of(x, struct psb_framebuffer, base)
++
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++ const u32 reg, const char *name);
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
++int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
++extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
++
++extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++ struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_crt_init(struct drm_device *dev);
++extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
++extern void psb_intel_dvo_init(struct drm_device *dev);
++extern void psb_intel_tv_init(struct drm_device *dev);
++extern void psb_intel_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
++extern void mrst_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++
++extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
++extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
++extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
++
++extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
++ *connector);
++
++extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++ struct drm_crtc *crtc);
++extern void psb_intel_wait_for_vblank(struct drm_device *dev);
++extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
++ int pipe);
++extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
++ int sdvoB);
++extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
++extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
++ int enable);
++extern int intelfb_probe(struct drm_device *dev);
++extern int intelfb_remove(struct drm_device *dev,
++ struct drm_framebuffer *fb);
++extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
++ *dev, struct
++ drm_mode_fb_cmd
++ *mode_cmd,
++ void *mm_private);
++extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode);
++extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode);
++extern int psb_intel_lvds_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value);
++extern void psb_intel_lvds_destroy(struct drm_connector *connector);
++extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
++
++extern uint8_t blc_pol;
++extern uint8_t blc_freq;
++
++#endif /* __INTEL_DRV_H__ */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c b/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
+new file mode 100644
+index 0000000..3d45df8
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
+@@ -0,0 +1,2450 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++#include <linux/backlight.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include <asm/ipc_defs.h>
++
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "ospm_power.h"
++
++#define DRM_MODE_ENCODER_MIPI 5
++
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BLC_POLARITY_NORMAL 0
++
++#if DUMP_REGISTER
++extern void dump_dsi_registers(struct drm_device *dev);
++#endif /* DUMP_REGISTER */
++void mrst_init_TPO_MIPI(struct drm_device *dev);
++
++int dsi_backlight; /* restore backlight to this value */
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
++{
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n");
++#endif /* PRINT_JLIU7 */
++
++ return BRIGHTNESS_MAX_LEVEL;
++
++/* FIXME jliu7 need to revisit */
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_dsi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DRM_INFO("Enter mrst_dsi_set_power \n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ /* program MIPI DSI controller and Display Controller
++ * set the device ready bit + set 'turn on' bit b048
++ * wait for 100 ms ??
++ * set pipe enable bit */
++ REG_WRITE(DPI_CONTROL_REG, 2);
++ msleep(100);
++ if (dev_priv->panel_make == TPO_864X480)
++ dev_priv->init_drvIC(dev); /* initialize the panel */
++ /* Turn on backlight */
++ REG_WRITE(BLC_PWM_CTL, 0x2faf1fc9);
++ } else {
++ /* set the shutdown bit b048h
++ * de-assert pipe enable
++ * clear device ready bit unless DBI is to be left on */
++ REG_WRITE(BLC_PWM_CTL, 0x2faf0000); /* turn off backlight */
++ REG_WRITE(DPI_CONTROL_REG, 1); /* send shut down message */
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_dpms \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_dsi_set_power(dev, output, true);
++ else
++ mrst_dsi_set_power(dev, output, false);
++
++ /* XXX: We never power down the DSI pairs. */
++}
++
++static void mrst_dsi_save(struct drm_connector *connector)
++{
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_save \n");
++#endif /* PRINT_JLIU7 */
++
++ dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
++ dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
++ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++ dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
++ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ /*
++ * make backlight to full brightness
++ */
++ dsi_backlight = mrst_dsi_get_max_backlight(dev);
++#endif
++}
++
++static void mrst_dsi_restore(struct drm_connector *connector)
++{
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_restore \n");
++#endif /* PRINT_JLIU7 */
++
++ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
++ REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
++ REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
++ REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
++ REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
++ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
++ mrst_dsi_set_power(dev, to_psb_intel_output(connector), true);
++ else
++ mrst_dsi_set_power(dev, to_psb_intel_output(connector), false);
++#endif
++}
++
++static void mrst_dsi_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_prepare \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ mrst_dsi_set_power(dev, output, false);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_dsi_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_commit \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ mrst_dsi_get_max_backlight(dev);
++
++ mrst_dsi_set_power(dev, output, true);
++
++#if DUMP_REGISTER
++ dump_dsi_registers(dev);
++#endif /* DUMP_REGISTER */
++}
++
++#if 0
++/* ************************************************************************* *\
++FUNCTION: GetHS_TX_timeoutCount
++DESCRIPTION: In burst mode, value greater than one DPI line Time in byte clock
++ (txbyteclkhs). To timeout this timer 1+ of the
++ above said value is recommended.
++
++ In non-burst mode, Value greater than one DPI frame time
++ in byte clock(txbyteclkhs).
++
++ To timeout this timer 1+ of the above said value is recommended.
++
++\* ************************************************************************* */
++static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++
++ u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
++
++ /* Total pixels need to be transfer per line*/
++ HTotalPixel = (dev_priv->HsyncWidth +
++ dev_priv->HbackPorch +
++ dev_priv->HfrontPorch) *
++ dev_priv->laneCount +
++ dev_priv->HactiveArea;
++
++ /* byte count = (pixel count * bits per pixel) / 8 */
++ HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ timeoutCount = HTOT_count + 1;
++#if 1 /*FIXME remove it after power-on */
++ VTOT_count = dev_priv->VactiveArea +
++ dev_priv->VbackPorch +
++ dev_priv->VfrontPorch + dev_priv->VsyncWidth;
++
++ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
++ timeoutCount = (HTOT_count * VTOT_count) + 1;
++#endif
++ } else {
++ VTOT_count = dev_priv->VactiveArea +
++ dev_priv->VbackPorch +
++ dev_priv->VfrontPorch +
++ dev_priv->VsyncWidth;
++ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
++ timeoutCount = (HTOT_count * VTOT_count) + 1;
++ }
++
++ return timeoutCount & 0xFFFF;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetLP_RX_timeoutCount
++
++DESCRIPTION: The timeout value is protocol specific. Time out value is
++ calculated from txclkesc(50ns).
++
++ Minimum value =
++ Time to send one Trigger message = 4 X txclkesc
++ [Escape mode entry sequence)
++ + 8-bit trigger message (2x8xtxclkesc)
++ +1 txclksesc [stop_state]
++ = 21 X txclkesc [ 15h]
++
++ Maximum Value =
++ Time to send a long packet with maximum payload data
++ = 4 X txclkesc [Escape mode entry sequence)
++ + 8-bit Low power data transmission Command (2x8xtxclkesc)
++ + packet header [ 4X8X2X txclkesc]
++ +payload [ nX8X2Xtxclkesc]
++ +CRC[2X8X2txclkesc]
++ +1 txclksesc [stop_state]
++ = 117 txclkesc +n[payload in terms of bytes]X16txclkesc.
++
++\* ************************************************************************* */
++static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++
++ u32 timeoutCount = 0;
++
++ if (dev_priv->config_phase) {
++ /* Assuming 256 byte DDB data.*/
++ timeoutCount = 117 + 256 * 16;
++ } else {
++ /* For DPI video only mode use the minimum value.*/
++ timeoutCount = 0x15;
++#if 1 /*FIXME remove it after power-on */
++ /* Assuming 256 byte DDB data.*/
++ timeoutCount = 117 + 256 * 16;
++#endif
++ }
++
++ return timeoutCount;
++}
++#endif /* #if 0 - to avoid warnings */
++
++/* ************************************************************************* *\
++FUNCTION: GetHSA_Count
++
++DESCRIPTION: Shows the horizontal sync value in terms of byte clock
++ (txbyteclkhs)
++ Minimum HSA period should be sufficient to transmit a hsync start short
++ packet(4 bytes)
++ i) For Non-burst Mode with sync pulse, Min value 4 in decimal
++ [plus an optional 6 bytes for a zero payload blanking
++ packet]. But if the value is less than 10 but more
++ than 4, then this count will be added to the HBP s
++ count for one lane.
++ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA,
++ so you can program this to zero. If you program this
++ register, these byte values will be added to HBP.
++ iii) For Burst mode of operation, normally the values
++ programmed in terms of byte clock are based on the
++ principle - time for transfering
++ HSA in Burst mode is the same as in non-bust mode.
++\* ************************************************************************* */
++static u32 GetHSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HSA_count;
++ u32 HSA_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ /*HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HSA_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HSA_count = HSA_countX8 / 8;*/
++
++ /* since mode_set already computed Display Controller timings,
++ * read the register and compute mipi timings.
++ */
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ HSA_countX8 = REG_READ(HSYNC_A);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else
++ HSA_countX8 = dev_priv->saveHSYNC_A;
++
++ /* Get the hsync pulse width */
++ HSA_count = ((HSA_countX8 & 0xffff0000)>>16) - (HSA_countX8 & 0xffff);
++ /* compute HSA according to equation:
++ (hsync_end - hsync_start) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ HSA_count = (HSA_count * dev_priv->bpp)/(2 * 8 * 2);
++ if (HSA_count < 4) /* minimum value of 4 */
++ HSA_count = 4;
++
++ return HSA_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHBP_Count
++
++DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
++ Minimum HBP period should be sufficient to transmit a "hsync end short
++ packet(4 bytes) + Blanking packet overhead(6 bytes) +
++ RGB packet header(4 bytes)"
++ For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transferring HBP
++ in Burst mode is the same as in non-burst mode.
++
++ Min value - 14 in decimal
++ [accounted with zero payload for blanking packet] for one lane.
++ Max value - any value greater than 14 based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HBP_count;
++ u32 HBE, HSE;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ /*HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HBP_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HBP_count = HBP_countX8 / 8;*/
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ HBE = (REG_READ(HBLANK_A) & 0xffff0000) >> 16;
++ HSE = (REG_READ(HSYNC_A) & 0xffff0000) >> 16;
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ HBE = (dev_priv->saveHBLANK_A & 0xffff0000) >> 16;
++ HSE = (dev_priv->saveHSYNC_A & 0xffff0000) >> 16;
++ }
++
++ /* Get the hsync pulse width */
++ HBP_count = HBE - HSE;
++ /*compute HSA according to equation:
++ *(hblank_end - hsync_end) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ HBP_count = (HBP_count * dev_priv->bpp)/(2 * 8 * 2);
++ if (HBP_count < 8) /* minimum value of 8 */
++ HBP_count = 8;
++
++ return HBP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHFP_Count
++
++DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
++Minimum HFP period should be sufficient to transmit "RGB Data packet
++footer(2 bytes) + Blanking packet overhead(6 bytes)" for non burst mode.
++
++For burst mode, Minimum HFP period should be sufficient to transmit
++"Blanking packet overhead(6 bytes)".
++
++For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transferring HFP
++ in Burst mode is the same as in non-burst mode.
++
++Min value - 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++Min value - 6 in decimal for burst mode for one lane.
++
++Max value - any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HFP_count;
++ u32 HBS, HSS;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ /*HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HFP_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HFP_count = HFP_countX8 / 8;*/
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ HBS = REG_READ(HBLANK_A) & 0xffff;
++ HSS = REG_READ(HSYNC_A) & 0xffff;
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ HBS = dev_priv->saveHBLANK_A & 0xffff;
++ HSS = dev_priv->saveHSYNC_A & 0xffff;
++ }
++
++ /* Get the hsync pulse width */
++ HFP_count = HSS - HBS;
++ /*compute HSA according to equation:
++ *(hblank_end - hsync_end) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ HFP_count = (HFP_count * dev_priv->bpp)/(2 * 8 * 2);
++ if (HFP_count < 8) /* minimum value of 8 */
++ HFP_count = 8;
++
++ return HFP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHAdr_Count
++
++DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
++ In Non Burst Mode, Count equal to RGB word count value
++
++In Burst Mode, RGB pixel packets are time-compressed, leaving more time
++ during a scan line for LP mode (saving power) or for multiplexing
++ other transmissions onto the DSI link. Hence, the count equals the
++ time in txbyteclkhs for sending time compressed RGB pixels plus
++ the time needed for moving to power save mode or the time needed
++ for secondary channel to use the DSI link.
++
++But if the left out time for moving to low power mode is less than
++ 8 txbyteclkhs [2txbyteclkhs for RGB data packet footer and
++ 6txbyteclkhs for a blanking packet with zero payload], then
++ this count will be added to the HFP's count for one lane.
++
++Min value - 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++Min value - 6 in decimal for burst mode for one lane.
++
++Max value - any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHAdr_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HAdr_count;
++ u32 Hactive;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ /*HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HAdr_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HAdr_count = HAdr_countX8 / 8;*/
++
++ /* use HactiveArea instead of H_TOTAL register or else panel
++ centering won't work.*/
++ Hactive = dev_priv->HactiveArea;
++
++ /* compute HAdr according to equation:
++ * (hactive * 24 bpp/8) / 2 lanes)*/
++
++ HAdr_count = (Hactive * dev_priv->bpp/8) / 2;
++
++ return HAdr_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetVSA_Count
++
++DESCRIPTION: Shows the vertical sync value in terms of lines
++
++\* ************************************************************************* */
++static u32 GetVSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VSA_count;
++ u32 VSA_countX8;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ VSA_countX8 = REG_READ(VSYNC_A);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else
++ VSA_countX8 = dev_priv->saveVSYNC_A;
++
++ /* Get the vsync pulse width */
++ VSA_count = ((VSA_countX8 & 0xffff0000)>>16) - (VSA_countX8 & 0xffff);
++
++ if (VSA_count < 2) /* minimum value of 2 */
++ VSA_count = 2;
++
++ return VSA_count;
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: GetVBP_Count
++ *
++ * DESCRIPTION: Shows the vertical back porch value in lines.
++ *
++\* ************************************************************************* */
++static u32 GetVBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VBP_count;
++ u32 VBE, VSE;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ VBE = (REG_READ(VBLANK_A) & 0xffff0000) >> 16;
++ VSE = (REG_READ(VSYNC_A) & 0xffff0000) >> 16;
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ VBE = (dev_priv->saveVBLANK_A & 0xffff0000) >> 16;
++ VSE = (dev_priv->saveVSYNC_A & 0xffff0000) >> 16;
++ }
++
++ /* Get the hsync pulse width */
++ VBP_count = VBE - VSE;
++
++ if (VBP_count < 2) /* minimum value of 2 */
++ VBP_count = 2;
++
++ return VBP_count;
++}
++/* ************************************************************************* *\
++ * FUNCTION: GetVFP_Count
++ *
++ * DESCRIPTION: Shows the vertical front porch value in terms of lines.
++ *
++\* ************************************************************************* */
++static u32 GetVFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VFP_count;
++ u32 VBS, VSS;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ VBS = REG_READ(VBLANK_A) & 0xffff;
++ VSS = REG_READ(VSYNC_A) & 0xffff;
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ VBS = dev_priv->saveVBLANK_A & 0xffff;
++ VSS = dev_priv->saveVSYNC_A & 0xffff;
++ }
++
++ /* Get the hsync pulse width */
++ VFP_count = VSS - VBS;
++
++ if (VFP_count < 2) /* minimum value of 2 */
++ VFP_count = 2;
++
++ return VFP_count;
++}
++
++#if 0
++/* ************************************************************************* *\
++FUNCTION: GetHighLowSwitchCount
++
++DESCRIPTION: High speed to low power or Low power to high speed switching time
++ in terms byte clock (txbyteclkhs). This value is based on the
++ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
++
++Typical value - Number of byte clocks required to switch from low power mode
++ to high speed mode after "txrequesths" is asserted.
++
++The worst count value among the low to high or high to low switching time
++ in terms of txbyteclkhs has to be programmed in this register.
++
++Useful Formulae:
++ DDR clock period = 2 times UI
++ txbyteclkhs clock = 8 times UI
++ Tlpx = 1 / txclkesc
++ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE
++ (from Standard D-PHY spec)
++
++ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] +
++ 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
++
++ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE
++ (from Standard D-PHY spec)
++
++ Ths-trail = 1txbyteclkhs clock [8UI] +
++ 5DDR clock [10UI] + 4 Tlpx [Approx]
++\* ************************************************************************* */
++static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
++
++/* ************************************************************************* *\
++CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE
++(from Standard D-PHY spec)
++
++Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
++
++Tlpx = 50 ns, Using max txclkesc (20MHz)
++
++txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
++UI_period = 500 / dev_priv->DDR_Clock; in ns
++
++HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
++ = 9000 / dev_priv->DDR_Clock + 200;
++
++HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
++ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
++ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
++
++\* ************************************************************************* */
++ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
++
++/* ************************************************************************* *\
++CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE
++(from Standard D-PHY spec)
++
++LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] +
++1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
++
++ LP_to_HS = 10 * UI_period + 5 * Tlpx =
++ = 5000 / dev_priv->DDR_Clock + 250;
++
++ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
++ = (5000 / dev_priv->DDR_Clock + 250) /
++ (4000 / dev_priv->DDR_Clock)
++
++ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
++
++\* ************************************************************************* */
++ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
++
++ if (HighToLowSwitchCount > LowToHighSwitchCount)
++ HighLowSwitchCount = HighToLowSwitchCount;
++ else
++ HighLowSwitchCount = LowToHighSwitchCount;
++
++ /* FIXME jliu need to fine tune the above formulae and remove the
++ * following after power on */
++ if (HighLowSwitchCount < 0x1f)
++ HighLowSwitchCount = 0x1f;
++
++ return HighLowSwitchCount;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_gen_long_write
++DESCRIPTION:
++\* ************************************************************************* */
++static void mrst_gen_long_write(struct drm_device *dev,
++ u32 *data,
++ u16 wc,
++ u8 vc)
++{
++ u32 gen_data_reg = HS_GEN_DATA_REG;
++ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 date_full_bit = HS_DATA_FIFO_FULL;
++ u32 control_full_bit = HS_CTRL_FIFO_FULL;
++ u16 wc_saved = wc;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
++#endif /* PRINT_JLIU7 */
++
++ /* sanity check */
++ if (vc > 4) {
++ DRM_ERROR
++ (KERN_ERR "MIPI Virtual channel Can't greater than 4.\n");
++ return;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (0) { /* FIXME JLIU7 check if it is in LP*/
++ gen_data_reg = LP_GEN_DATA_REG;
++ gen_ctrl_reg = LP_GEN_CTRL_REG;
++ date_full_bit = LP_DATA_FIFO_FULL;
++ control_full_bit = LP_CTRL_FIFO_FULL;
++ }
++
++ while (wc >= 4) {
++ /* Check if MIPI IP generic data fifo is not full */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit)
++ == date_full_bit) {
++ /* Do Nothing Here */
++ /* This will make checkpatch work */
++ }
++
++ /* write to data buffer */
++ REG_WRITE(gen_data_reg, *data);
++
++ wc -= 4;
++ data++;
++ }
++
++ switch (wc) {
++ case 1:
++ REG_WRITE8(gen_data_reg, *((u8 *)data));
++ break;
++ case 2:
++ REG_WRITE16(gen_data_reg, *((u16 *)data));
++ break;
++ case 3:
++ REG_WRITE16(gen_data_reg, *((u16 *)data));
++ data = (u32 *)((u8 *) data + 2);
++ REG_WRITE8(gen_data_reg, *((u8 *)data));
++ break;
++ }
++
++ /* Check if MIPI IP generic control fifo is not full */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit)
++ == control_full_bit) {
++ /* Do Nothing Here */
++ /* This will make Checkpatch work */
++ }
++ /* write to control buffer */
++ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6));
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_init_HIMAX_MIPI_bridge
++DESCRIPTION:
++\* ************************************************************************* */
++static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev)
++{
++ u32 gen_data[2];
++ u16 wc = 0;
++ u8 vc = 0;
++ u32 gen_data_intel = 0x200105;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* exit sleep mode */
++ wc = 0x5;
++ gen_data[0] = gen_data_intel | (0x11 << 24);
++ gen_data[1] = 0;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_pixel_format */
++ gen_data[0] = gen_data_intel | (0x3A << 24);
++ gen_data[1] = 0x77;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* Set resolution for (800X480) */
++ wc = 0x8;
++ gen_data[0] = gen_data_intel | (0x2A << 24);
++ gen_data[1] = 0x1F030000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[0] = gen_data_intel | (0x2B << 24);
++ gen_data[1] = 0xDF010000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* System control */
++ wc = 0x6;
++ gen_data[0] = gen_data_intel | (0xEE << 24);
++ gen_data[1] = 0x10FA;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* INPUT TIMING FOR TEST PATTERN(800X480) */
++ /* H-size */
++ gen_data[1] = 0x2000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0301;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-size */
++ gen_data[1] = 0xE002;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0103;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-total */
++ gen_data[1] = 0x2004;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0405;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-total */
++ gen_data[1] = 0x0d06;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0207;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-blank */
++ gen_data[1] = 0x0308;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0009;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-blank */
++ gen_data[1] = 0x030A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-start */
++ gen_data[1] = 0xD80C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000D;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-start */
++ gen_data[1] = 0x230E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* RGB domain */
++ gen_data[1] = 0x0027;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* INP_FORM Setting */
++ /* set_1 */
++ gen_data[1] = 0x1C10;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_2 */
++ gen_data[1] = 0x0711;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_3 */
++ gen_data[1] = 0x0012;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_4 */
++ gen_data[1] = 0x0013;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_5 */
++ gen_data[1] = 0x2314;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_6 */
++ gen_data[1] = 0x0015;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_7 */
++ gen_data[1] = 0x2316;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_8 */
++ gen_data[1] = 0x0017;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_1 */
++ gen_data[1] = 0x0330;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC Setting */
++ /* FRC_set_2 */
++ gen_data[1] = 0x237A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_3 */
++ gen_data[1] = 0x4C7B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_4 */
++ gen_data[1] = 0x037C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_5 */
++ gen_data[1] = 0x3482;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_7 */
++ gen_data[1] = 0x1785;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++#if 0
++ /* FRC_set_8 */
++ gen_data[1] = 0xD08F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++#endif
++
++ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */
++ /* out_htotal */
++ gen_data[1] = 0x2090;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0491;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hsync */
++ gen_data[1] = 0x0392;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0093;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hstart */
++ gen_data[1] = 0xD894;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0095;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hsize */
++ gen_data[1] = 0x2096;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0397;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vtotal */
++ gen_data[1] = 0x0D98;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0299;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vsync */
++ gen_data[1] = 0x039A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x009B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vstart */
++ gen_data[1] = 0x239C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x009D;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vsize */
++ gen_data[1] = 0xE09E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x019F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_6 */
++ gen_data[1] = 0x9084;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* Other setting */
++ gen_data[1] = 0x0526;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* RBG domain */
++ gen_data[1] = 0x1177;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* rgbw */
++ /* set_1 */
++ gen_data[1] = 0xD28F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_2 */
++ gen_data[1] = 0x02D0;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_3 */
++ gen_data[1] = 0x08D1;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_4 */
++ gen_data[1] = 0x05D2;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_5 */
++ gen_data[1] = 0x24D4;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_6 */
++ gen_data[1] = 0x00D5;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x02D7;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x00D8;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ gen_data[1] = 0x48F3;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0xD4F2;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x3D8E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x60FD;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x00B5;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x48F4;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* inside patten */
++ gen_data[1] = 0x0060;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++#endif
++
++static void mrst_wait_for_LP_CTRL_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ LP_CTRL_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: LP CMD FIFO was never cleared!\n");
++}
++
++static void mrst_wait_for_HS_DATA_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ HS_DATA_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
++}
++
++static void mrst_wait_for_HS_CTRL_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ HS_CTRL_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++ if (timeout == 20000)
++ DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_init_NSC_MIPI_bridge
++DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ restore_display_registers. since this function does not
++ acquire the mutex, it is important that the calling function
++ does!
++\* ************************************************************************* */
++void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
++{
++
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DRM_INFO("Enter mrst_init_NSC_MIPI_bridge.\n");
++
++ /* Program MIPI IP to 100MHz DSI, Non-Burst mode with sync event,
++ 2 Data Lanes */
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable RGB24*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable all error reporting*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable 2 data lane; video shaping & error reporting */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* HS timeout */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable all virtual channels */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* set output strength to low-drive */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ if (dev_priv->sku_83) {
++ /* set escape clock to divede by 8 */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
++ } else if (dev_priv->sku_100L) {
++ /* set escape clock to divede by 16 */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
++ } else if (dev_priv->sku_100) {
++ /* set escape clock to divede by 32*/
++ /*REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
++
++ /*mrst_wait_for_LP_CTRL_FIFO(dev);*/
++ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
++ /*REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);*/
++ }
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* CFG_VALID=1; RGB_CLK_EN=1. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
++
++ /*ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);*/
++}
++
++static int mrst_check_mipi_error(struct drm_device *dev)
++{
++ u32 int_status_reg = 0;
++ u32 relevant_error_bits = 0x0fff; /* only care about error bits 0-11 */
++ u32 reported_errors = 0;
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(LP_GEN_CTRL_REG, 0x010524); /* 2-parameter gen short read */
++
++ /* sleep 100 microseconds */
++ udelay(100);
++
++ int_status_reg = REG_READ(INTR_STAT_REG);
++ printk(KERN_ALERT "MIPI Intr Status Reg: 0x%X\n", int_status_reg);
++
++ reported_errors = int_status_reg & relevant_error_bits;
++ if (reported_errors) {
++ printk(KERN_ALERT "MIPI Init sequence reported errs: 0x%X\n",
++ reported_errors);
++ /* Clear the error bits */
++ REG_WRITE(INTR_STAT_REG, reported_errors);
++ return reported_errors;
++ }
++
++ return 0;
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mrst_init_TPO_MIPI
++ *
++ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ * restore_display_registers. since this function does not
++ * acquire the mutex, it is important that the calling function
++ * does!
++\* ************************************************************************* */
++void mrst_init_TPO_MIPI(struct drm_device *dev)
++{
++ /*DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;*/
++
++ DRM_INFO("Enter mrst init TPO MIPI display.\n");
++
++ /* Flip page order */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00008036);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000229);
++
++ /* 0xF0 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5af0);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* Write protection key */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5af1);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xFC */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5afc);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xB7 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x770000b7);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000044);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000529);
++
++ /* 0xB6 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000a0ab6);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xF2 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x081010f2);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x4a070708);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000000c5);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xF8 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x024003f8);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x01030a04);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x0e020220);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000004);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000d29);
++
++ /* 0xE2 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x398fc3e2);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x0000916f);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000629);
++
++ /* 0xB0 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000000b0);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000229);
++
++ /* 0xF4 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x240242f4);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x78ee2002);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2a071050);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x507fee10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x10300710);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00001429);
++
++ /* 0xBA */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x19fe07ba);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x101c0a31);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000010);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xBB */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x28ff07bb);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x24280a31);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000034);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xFB */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535d05fb);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1b1a2130);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x221e180e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x131d2120);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535d0508);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1c1a2131);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x231f160d);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x111b2220);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535c2008);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1f1d2433);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2c251a10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2c34372d);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000023);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00003129);
++
++ /* 0xFA */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x525c0bfa);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1c1c232f);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2623190e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x18212625);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x545d0d0e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1e1d2333);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x26231a10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1a222725);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x545d280f);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x21202635);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x31292013);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x31393d33);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000029);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00003129);
++
++ /* Set DM */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000100f7);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++}
++
++static void panel_reset_on(void)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk(KERN_WARNING "pnl_rst_on: fail to read pmic 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value &= 0xbf;
++
++ if (ipc_pmic_register_write(&tmp_reg, TRUE))
++ printk(KERN_WARNING "pnl_rst_on: fail to write pmic 0xe6!\n");
++}
++
++static void panel_reset_off(void)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++
++ printk(KERN_INFO "panel_reset_off\n");
++
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk(KERN_WARNING "pnl_rst_off: fail to read pmic 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value |= 0x40;
++
++ if (ipc_pmic_register_write(&tmp_reg, TRUE))
++ printk(KERN_WARNING "pnl_rst_off: fail to write pmic 0xe6!\n");
++}
++
++static void panel_reset(void)
++{
++ printk(KERN_INFO "panel_reset\n");
++
++ panel_reset_on();
++ msleep(20);
++ panel_reset_off();
++ msleep(20);
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mrst_init_LGE_MIPI
++ *
++ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ * restore_display_registers. since this function does not
++ * acquire the mutex, it is important that the calling function
++ * does!
++\* ************************************************************************* */
++void mrst_init_LGE_MIPI(struct drm_device *dev)
++{
++ /*DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;*/
++ int i = 0;
++
++ DRM_INFO("Enter mrst init LGE MIPI display.\n");
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(0xb06c, 0x00870123);
++
++ /* LGE 480x1024 Panel Initialization sequence */
++ for (i = 0; i < 10; i++) {
++ /* Panel Characteristics Settings */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb2200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x0ec820);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x7 << 8 | 0x0 << 6);
++
++ /* Panel Driver Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* Display Mode Control */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* Display Mode and Frame Memory write Mode Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x000f0f12);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x9 << 8 | 0x0 << 6);
++
++ /* Display Control (GIP Specific) */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb6200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x40021803);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x3010);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xa << 8 | 0x0 << 6);
++
++ /* Power Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc0200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x1f01);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x6 << 8 | 0x0 << 6);
++
++ /* Power Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x03040407);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x07);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x9 << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x15154412);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x6d04);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xa << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x64);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc6200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x004024);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x7 << 8 | 0x0 << 6);
++
++ /* red */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd0200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd1200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ /* green */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd2200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ /* blue */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ for (i = 0; i < 10; i++) {
++ /* Sleep Out */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x11200105);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x4 << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ udelay(10000);
++
++ for (i = 0; i < 10; i++) {
++ /* Display On */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x29200105);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x4 << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ /*ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);*/
++}
++
++/*enum mipi_panel_type {
++ NSC_800X480 = 0,
++ LGE_480X1024 = 1,
++ TPO_864X480 = 2
++};*/
++
++static void mrst_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 dsiFuncPrgValue = 0;
++ u32 SupportedFormat = 0;
++ u32 channelNumber = 0;
++ u32 DBI_dataWidth = 0;
++ u32 resolution = 0;
++ u32 mipi_control_val = 0;
++ u32 intr_en_val = 0;
++ u32 turnaround_timeout_val = 0;
++ u32 device_reset_val = 0;
++ u32 init_count_val = 0;
++ u32 hs_tx_timeout_val = 0;
++ u32 lp_rx_timeout_val = 0;
++ u32 high_low_switch_count_val = 0;
++ u32 eot_disable_val = 0;
++ u32 lp_byteclk_val = 0;
++ u32 device_ready_val = 0;
++ /*u32 dpi_control_val = 0;*/
++ u32 vsa_count = 0;
++ u32 vbp_count = 0;
++ u32 vfp_count = 0;
++ u32 hsa_count = 0;
++ u32 hbp_count = 0;
++ u32 hfp_count = 0;
++ u32 haa_count = 0;
++ u32 video_mode_format = 0;
++ u32 max_ret_packet_size = 0;
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++ /*enum mipi_panel_type panel_make;*/
++ u32 mipi_port, tmp_VAL;
++
++ DRM_INFO("enter mrst_dsi_mode_set \n");
++#if 0
++ switch (dev_priv->gct_data.bpi) {
++ case 1:
++ panel_make = NSC_800X480;
++ break;
++ case 2:
++ panel_make = TPO_864X480;
++ break;
++ case 3:
++ panel_make = LGE_480X1024;
++ break;
++ default:
++ DRM_INFO("MIPI: unknown panel type! Setting NSC.\n");
++ panel_make = NSC_800X480; /* assume NSC */
++ }
++
++ /* Force TPO for Aava testing */
++ panel_make = TPO_864X480;
++#endif
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ switch (dev_priv->bpp) {
++ case 16:
++ SupportedFormat = RGB_565_FMT;
++ break;
++ case 18:
++ SupportedFormat = RGB_666_FMT;
++ break;
++ case 24:
++ SupportedFormat = RGB_888_FMT;
++ break;
++ default:
++ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
++ break;
++ }
++
++
++ if (dev_priv->dpi) {
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &curValue);
++
++ if (curValue == DRM_MODE_SCALE_CENTER)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (curValue == DRM_MODE_SCALE_ASPECT) {
++ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
++ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++ if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) == (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) > (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_PILLARBOX);
++ else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_LETTERBOX);
++ } else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++
++ switch (dev_priv->panel_make) {
++ case NSC_800X480:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x00000001;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = dev_priv->HactiveArea |
++ (dev_priv->VactiveArea << RES_V_POS);
++ SupportedFormat <<= FMT_DPI_POS;
++ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
++ vsa_count = GetVSA_Count(dev, dev_priv);
++ vbp_count = GetVBP_Count(dev, dev_priv);
++ vfp_count = GetVFP_Count(dev, dev_priv);
++ hsa_count = GetHSA_Count(dev, dev_priv);
++ hbp_count = GetHBP_Count(dev, dev_priv);
++ hfp_count = GetHFP_Count(dev, dev_priv);
++ haa_count = GetHAdr_Count(dev, dev_priv);
++ video_mode_format = dev_priv->videoModeFormat;
++ hs_tx_timeout_val = 0x00001000;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x46;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ case TPO_864X480:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x0000000a;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = 0x01e00360;
++ dsiFuncPrgValue = 0x00000202;
++ vsa_count = 0x00000004;
++ vbp_count = 0x00000008;
++ vfp_count = 0x00000008;
++ hsa_count = 0x00000006;
++ hbp_count = 0x0000000f;
++ hfp_count = 0x0000000f;
++ haa_count = 0x00000510;
++ video_mode_format = 0x00000003;
++ hs_tx_timeout_val = 0x00090000;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x00000046;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ case LGE_480X1024:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x00000012;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = 0x040001e0;
++ dsiFuncPrgValue = 0x00000202;
++ vsa_count = 0x00000005;
++ vbp_count = 0x0000000f;
++ vfp_count = 0x0000000f;
++ hsa_count = 0x00000008;
++ hbp_count = 0x00000018;
++ hfp_count = 0x0000000f;
++ haa_count = 0x00000320;
++ video_mode_format = 0x00000003;
++ hs_tx_timeout_val = 0x00ffffff;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x00000016;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ }
++
++ /* set 100 mhz dsi clk based on sku */
++ if (dev_priv->sku_83)
++ mipi_control_val = 0x0018; /* 100 mhz * 1 = 100 mhz */
++ else if (dev_priv->sku_100L)
++ mipi_control_val = 0x0019; /* 50 mhz * 2 = 100 mhz */
++ else if (dev_priv->sku_100)
++ mipi_control_val = 0x0018; /* 100 mhz * 1 = 100 mhz */
++
++ /* wait for PIPE A to disable */
++ while (REG_READ(0x70008) & 0x40000000) {
++ /* Do Nothing Here */
++ /* This should make checkpatch work */
++ }
++
++ /* wait for DPI FIFO to clear */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY) {
++ /* Do Nothing Here */
++ /* This should make checkpatch work */
++ }
++
++ /* Clear Device Ready Bit */
++ REG_WRITE(DEVICE_READY_REG, 0x00000000);
++
++ /* clear intr status register */
++ tmp_VAL = REG_READ(INTR_STAT_REG);
++ REG_WRITE(INTR_STAT_REG, tmp_VAL);
++
++ /* Reset Aava panel */
++ if (dev_priv->panel_make == TPO_864X480) {
++ panel_reset();
++ msleep(1000);
++ }
++
++ /* Enable MIPI Port */
++ mipi_port = MIPI_PORT_EN | MIPI_BORDER_EN;
++
++ /* Enable dithering if required */
++ if (mode_dev->panel_wants_dither)
++ mipi_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
++
++ REG_WRITE(MIPI, mipi_port);
++
++ /* set the lane speed */
++ REG_WRITE(MIPI_CONTROL_REG, mipi_control_val);
++
++ /* Enable all the error interrupt */
++ REG_WRITE(INTR_EN_REG, intr_en_val);
++ REG_WRITE(TURN_AROUND_TIMEOUT_REG, turnaround_timeout_val);
++ REG_WRITE(DEVICE_RESET_REG, device_reset_val);
++ REG_WRITE(INIT_COUNT_REG, init_count_val);
++
++ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
++
++ REG_WRITE(DPI_RESOLUTION_REG, resolution);
++ /*REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);*/
++
++ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, vsa_count);
++ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, vbp_count);
++ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, vfp_count);
++
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, hsa_count);
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, hbp_count);
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, hfp_count);
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, haa_count);
++
++ REG_WRITE(VIDEO_FMT_REG, video_mode_format);
++
++ REG_WRITE(HS_TX_TIMEOUT_REG, hs_tx_timeout_val);
++ REG_WRITE(LP_RX_TIMEOUT_REG, lp_rx_timeout_val);
++
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG,
++ high_low_switch_count_val);
++
++ REG_WRITE(EOT_DISABLE_REG, eot_disable_val);
++
++ REG_WRITE(LP_BYTECLK_REG, lp_byteclk_val);
++ REG_WRITE(MAX_RET_PAK_REG, max_ret_packet_size);
++
++ REG_WRITE(DEVICE_READY_REG, device_ready_val);
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
++ } else {
++ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or
++ * VIRTUAL_CHANNEL_NUMBER_0*/
++ channelNumber =
++ VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS;
++ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS;
++ dsiFuncPrgValue =
++ dev_priv->laneCount | channelNumber | DBI_dataWidth;
++ /* JLIU7 FIXME */
++ SupportedFormat <<= FMT_DBI_POS;
++ dsiFuncPrgValue |= SupportedFormat;
++ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
++
++ REG_WRITE(DPI_RESOLUTION_REG, 0x00000000);
++ REG_WRITE(DBI_RESOLUTION_REG, resolution);
++ }
++
++ dev_priv->dsi_device_ready = true;
++
++ if ((dev_priv->panel_make == NSC_800X480) || (dev_priv->panel_make == LGE_480X1024))
++ dev_priv->init_drvIC(dev); /* initialize the mipi panel */
++
++ /* set the dphy settings for 100 mhz */
++ REG_WRITE(0xb080, 0x0b061c04);
++
++ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
++ /* REG_READ(PIPEACONF); */
++
++ /* Wait for 20ms for the pipe enable to take effect. */
++ /*udelay(20000);*/
++
++ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ /*udelay(20000);*/
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/**
++ * Detect the MIPI connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the MIPI was actually connected anyway.
++ */
++static enum drm_connector_status mrst_dsi_detect(struct drm_connector
++ *connector)
++{
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_detect \n");
++#endif /* PRINT_JLIU7 */
++
++ return connector_status_connected;
++}
++
++/**
++ * Return the list of MIPI DDB modes if available.
++ */
++static int mrst_dsi_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++
++/* FIXME get the MIPI DDB modes */
++
++ /* Didn't get an DDB, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
++ .dpms = mrst_dsi_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = mrst_dsi_prepare,
++ .mode_set = mrst_dsi_mode_set,
++ .commit = mrst_dsi_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ mrst_dsi_connector_helper_funcs = {
++ .get_modes = mrst_dsi_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mrst_dsi_save,
++ .restore = mrst_dsi_restore,
++ .detect = mrst_dsi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revist it. */
++struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
++{
++ struct drm_display_mode *mode;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u8 panel_index = dev_priv->gct_data.bpi;
++ u8 panel_type = dev_priv->gct_data.pt;
++ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
++ bool use_gct = false;
++
++ DRM_INFO("Enter mrst_dsi_get_configuration_mode\n");
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ if (dev_priv->vbt_data.Size != 0x00) /*if non-zero, vbt is present*/
++ if ((1<<panel_index) & panel_type) /* if non-zero,*/
++ use_gct = true; /*then mipi panel.*/
++
++ if (use_gct) {
++ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++ mode->hsync_start = mode->hdisplay + \
++ ((ti->hsync_offset_hi << 8) | \
++ ti->hsync_offset_lo);
++ mode->hsync_end = mode->hsync_start + \
++ ((ti->hsync_pulse_width_hi << 8) | \
++ ti->hsync_pulse_width_lo);
++ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++ ti->hblank_lo);
++ mode->vsync_start = \
++ mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
++ ti->vsync_offset_lo);
++ mode->vsync_end = \
++ mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
++ ti->vsync_pulse_width_lo);
++ mode->vtotal = mode->vdisplay + \
++ ((ti->vblank_hi << 8) | ti->vblank_lo);
++ mode->clock = ti->pixel_clock * 10;
++#if 1
++ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
++ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
++ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
++ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
++ printk(KERN_INFO "htotal is %d\n", mode->htotal);
++ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
++ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
++ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
++ printk(KERN_INFO "clock is %d\n", mode->clock);
++#endif
++
++ } else {
++
++#if 0 /* LGE 480x1024 tentative timings */
++ mode->hdisplay = 480;
++ mode->vdisplay = 1024;
++ mode->hsync_start = 499;
++ mode->hsync_end = 506;
++ mode->htotal = 517;
++ mode->vsync_start = 1039;
++ mode->vsync_end = 1041;
++ mode->vtotal = 1047;
++ mode->clock = 33264;
++#endif
++#if 1 /*FIXME jliu7 remove it later */
++ /* copy from SV - hard coded fixed mode for
++ * DSI TPO TD043MTEA2 LCD panel */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++ mode->hsync_start = 873;
++ mode->hsync_end = 876;
++ mode->htotal = 887;
++ mode->vsync_start = 487;
++ mode->vsync_end = 490;
++ mode->vtotal = 499;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 836;
++ mode->hsync_end = 846;
++ mode->htotal = 1056;
++ mode->vsync_start = 489;
++ mode->vsync_end = 491;
++ mode->vtotal = 525;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 800x480 */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 801;
++ mode->hsync_end = 802;
++ mode->htotal = 1024;
++ mode->vsync_start = 481;
++ mode->vsync_end = 482;
++ mode->vtotal = 525;
++ mode->clock = 30994;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec*/
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1072;
++ mode->hsync_end = 1104;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1104;
++ mode->hsync_end = 1136;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1124;
++ mode->hsync_end = 1204;
++ mode->htotal = 1312;
++ mode->vsync_start = 607;
++ mode->vsync_end = 610;
++ mode->vtotal = 621;
++ mode->clock = 48885;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1024x768 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1048;
++ mode->hsync_end = 1184;
++ mode->htotal = 1344;
++ mode->vsync_start = 771;
++ mode->vsync_end = 777;
++ mode->vtotal = 806;
++ mode->clock = 65000;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1366x768 */
++ mode->hdisplay = 1366;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1430;
++ mode->hsync_end = 1558;
++ mode->htotal = 1664;
++ mode->vsync_start = 769;
++ mode->vsync_end = 770;
++ mode->vtotal = 776;
++ mode->clock = 77500;
++#endif /*FIXME jliu7 remove it later */
++ }
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrstDSI_clockInit
++DESCRIPTION:
++
++\* ************************************************************************* */
++static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
++static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
++static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
++#define MIPI_2XCLK_COUNT 0x04
++
++static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
++ u32 i = 0;
++ u32 *p_mipi_2xclk = NULL;
++
++#if 0 /* JLIU7_PO old values */
++ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->pixelClock = 33264; /*KHz*/
++ dev_priv->HsyncWidth = 10;
++ dev_priv->HbackPorch = 210;
++ dev_priv->HfrontPorch = 36;
++ dev_priv->HactiveArea = 800;
++ dev_priv->VsyncWidth = 2;
++ dev_priv->VbackPorch = 34;
++ dev_priv->VfrontPorch = 9;
++ dev_priv->VactiveArea = 480;
++ dev_priv->bpp = 24;
++
++ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->dbi_pixelClock = 33264; /*KHz*/
++ dev_priv->dbi_HsyncWidth = 10;
++ dev_priv->dbi_HbackPorch = 210;
++ dev_priv->dbi_HfrontPorch = 36;
++ dev_priv->dbi_HactiveArea = 800;
++ dev_priv->dbi_VsyncWidth = 2;
++ dev_priv->dbi_VbackPorch = 34;
++ dev_priv->dbi_VfrontPorch = 9;
++ dev_priv->dbi_VactiveArea = 480;
++ dev_priv->dbi_bpp = 24;
++#else /* JLIU7_PO old values */
++ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
++ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */
++ dev_priv->pixelClock = 33264; /*KHz*/
++ dev_priv->HsyncWidth = 10;
++ dev_priv->HbackPorch = 8;
++ dev_priv->HfrontPorch = 3;
++ dev_priv->HactiveArea = 800;
++ dev_priv->VsyncWidth = 2;
++ dev_priv->VbackPorch = 3;
++ dev_priv->VfrontPorch = 2;
++ dev_priv->VactiveArea = 480;
++ dev_priv->bpp = 24;
++
++ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->dbi_pixelClock = 33264; /*KHz*/
++ dev_priv->dbi_HsyncWidth = 10;
++ dev_priv->dbi_HbackPorch = 8;
++ dev_priv->dbi_HfrontPorch = 3;
++ dev_priv->dbi_HactiveArea = 800;
++ dev_priv->dbi_VsyncWidth = 2;
++ dev_priv->dbi_VbackPorch = 3;
++ dev_priv->dbi_VfrontPorch = 2;
++ dev_priv->dbi_VactiveArea = 480;
++ dev_priv->dbi_bpp = 24;
++#endif /* JLIU7_PO old values */
++
++ Htotal = dev_priv->HsyncWidth
++ + dev_priv->HbackPorch
++ + dev_priv->HfrontPorch
++ + dev_priv->HactiveArea;
++ Vtotal = dev_priv->VsyncWidth
++ + dev_priv->VbackPorch
++ + dev_priv->VfrontPorch
++ + dev_priv->VactiveArea;
++
++ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
++
++ dev_priv->RRate = RRate;
++
++ /* ddr clock frequence = (pixel clock frequence * bits per pixel)/2*/
++ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) /
++ dev_priv->laneCount; /* KHz */
++ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
++
++ DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n",
++ RRate, mipi_2xclk);
++
++ if (dev_priv->sku_100)
++ p_mipi_2xclk = sku_100_mipi_2xclk;
++ else if (dev_priv->sku_100L)
++ p_mipi_2xclk = sku_100L_mipi_2xclk;
++ else
++ p_mipi_2xclk = sku_83_mipi_2xclk;
++
++ for (; i < MIPI_2XCLK_COUNT; i++) {
++ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
++ break;
++ }
++
++ if (i == MIPI_2XCLK_COUNT) {
++ DRM_DEBUG("mrstDSI_clkInit DDR clk too big-DDR_Clk_Calcd=%d\n",
++ dev_priv->DDR_Clock_Calculated);
++ return false;
++ }
++
++ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
++ dev_priv->ClockBits = i;
++
++#if 1 /* FIXME remove it after power on*/
++ DRM_DEBUG("mrstDSI_clkInit mipi_2x_clk_divr=0x%x, DDR_Clk_Calcd=%d\n",
++ i,
++ dev_priv->DDR_Clock_Calculated);
++#endif /* FIXME remove it after power on*/
++
++ return true;
++}
++
++/**
++ * mrst_dsi_init - setup MIPI connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, try to figure out what
++ * modes we can display on the MIPI panel (if present).
++ */
++void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++
++ DRM_INFO("JLIU7 enter mrst_dsi_init \n");
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &mrst_dsi_connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_MIPI;
++
++ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
++ drm_connector_helper_add(connector,
++ &mrst_dsi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
++ blc_pol = BLC_POLARITY_NORMAL;
++ blc_freq = 0xc8;
++
++ mode_dev->panel_wants_dither = false;
++ if (dev_priv->vbt_data.Size != 0x00) {
++ mode_dev->panel_wants_dither = (dev_priv->gct_data.Panel_MIPI_Display_Descriptor & (BIT3 | BIT4));
++ switch (dev_priv->gct_data.bpi) { /* set panel make */
++ case 1:
++ dev_priv->panel_make = NSC_800X480;
++ break;
++ case 2:
++ dev_priv->panel_make = TPO_864X480;
++ break;
++ case 3:
++ dev_priv->panel_make = LGE_480X1024;
++ break;
++ default:
++ DRM_INFO("MIPI: unknown panel type! Setting NSC.\n");
++ dev_priv->panel_make = NSC_800X480; /* assume NSC */
++ }
++ } else {
++ DRM_INFO("MIPI: No GCT! Setting NSC.\n");
++ dev_priv->panel_make = NSC_800X480;
++ }
++
++ /* set panel initialize function */
++ switch (dev_priv->panel_make) {
++ case NSC_800X480:
++ dev_priv->init_drvIC = mrst_init_NSC_MIPI_bridge;
++ break;
++ case TPO_864X480:
++ dev_priv->init_drvIC = mrst_init_TPO_MIPI;
++ break;
++ case LGE_480X1024:
++ dev_priv->init_drvIC = mrst_init_LGE_MIPI;
++ break;
++ }
++
++ /*
++ * MIPI discovery:
++ * 1) check for DDB data
++ * 2) check for VBT data
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* FIXME jliu7 we only support DPI */
++ dev_priv->dpi = true;
++
++ /* FIXME hard coded 4 lanes for Himax HX8858-A,
++ * 2 lanes for NSC LM2550 */
++ dev_priv->laneCount = 2;
++
++ /* FIXME hard coded for NSC PO. */
++ /* We only support BUST_MODE */
++ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS;
++ /* FIXME change it to true if GET_DDB works */
++ dev_priv->config_phase = false;
++
++ if (!mrstDSI_clockInit(dev_priv)) {
++ DRM_DEBUG("Can't iniitialize MRST DSI clock.\n");
++#if 0 /* FIXME JLIU7 */
++ goto failed_find;
++#endif /* FIXME JLIU7 */
++ }
++
++ /*
++ * If we didn't get DDB data, try geting panel timing
++ * from configuration data
++ */
++ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
++
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No MIIP modes found, disabling.\n");
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c b/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
+new file mode 100644
+index 0000000..6c21480
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
+@@ -0,0 +1,996 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++/* This enables setting backlights on with a delay at startup,
++ should be removed after resolving issue with backlights going off
++ after setting them on in initial mrst_dsi_set_power call */
++#define AAVA_BACKLIGHT_HACK
++
++#include <linux/backlight.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++
++#include <asm/ipc_defs.h>
++
++#ifdef AAVA_BACKLIGHT_HACK
++#include <linux/workqueue.h>
++#endif /* AAVA_BACKLIGHT_HACK */
++
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "ospm_power.h"
++
++#define DRM_MODE_ENCODER_MIPI 5
++
++//#define DBG_PRINTS 1
++#define DBG_PRINTS 0
++
++#define NEW_CRAP_SAMPLE_SETTINGS
++
++#define AAVA_EV_0_5
++
++#define VSIZE 480
++#define HSIZE 864
++#define HFP_DOTS 10
++#define HBP_DOTS 10
++#define HSYNC_DOTS 4
++#define VFP_LINES 8
++#define VBP_LINES 8
++#define VSYNC_LINES 4
++
++#define MIPI_LANES 2
++#define MIPI_HACT ((HSIZE * 3) / MIPI_LANES)
++#define MIPI_HFP ((HFP_DOTS * 3) / MIPI_LANES)
++#define MIPI_HBP ((HBP_DOTS * 3) / MIPI_LANES)
++#define MIPI_HSPAD ((HSYNC_DOTS * 3) / MIPI_LANES)
++#define MIPI_VFP VFP_LINES
++#define MIPI_VSPAD VSYNC_LINES
++#define MIPI_VBP VBP_LINES
++
++#define DISP_HPIX (HSIZE - 1)
++#define DISP_VPIX (VSIZE - 1)
++#define DISP_HBLANK_START DISP_HPIX
++#define DISP_HBLANK_END (DISP_HBLANK_START + HFP_DOTS + HSYNC_DOTS + HBP_DOTS - 1)
++#define DISP_HSYNC_START (DISP_HBLANK_START + HFP_DOTS - 1)
++#define DISP_HSYNC_END (DISP_HSYNC_START + HSYNC_DOTS - 1)
++#define DISP_VBLANK_START DISP_VPIX
++#define DISP_VBLANK_END (DISP_VBLANK_START + VFP_LINES + VSYNC_LINES + VBP_LINES - 1)
++#define DISP_VSYNC_START (DISP_VBLANK_START + VFP_LINES - 1)
++#define DISP_VSYNC_END (DISP_VSYNC_START + VSYNC_LINES - 1)
++
++#define BRIGHTNESS_MAX_LEVEL 100
++
++static unsigned int dphy_reg = 0x0d0a7f06;
++static unsigned int mipi_clock = 0x2;
++
++#ifdef AAVA_BACKLIGHT_HACK
++static void bl_work_handler(struct work_struct *work);
++DECLARE_DELAYED_WORK(bl_work, bl_work_handler);
++#endif /* AAVA_BACKLIGHT_HACK */
++
++// Temporary access from sysfs begin
++static struct drm_encoder *orig_encoder;
++static void mrst_dsi_prepare(struct drm_encoder *encoder);
++static void mrst_dsi_commit(struct drm_encoder *encoder);
++static void mrst_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode);
++static void panel_reset(void);
++
++static ssize_t dphy_store(struct class *class, const char *buf, size_t len)
++{
++ ssize_t status;
++ unsigned long value;
++
++ status = strict_strtoul(buf, 16, &value);
++ dphy_reg = value;
++ printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
++
++ return len;
++}
++
++static ssize_t clock_store(struct class *class, const char *buf, size_t len)
++{
++ ssize_t status;
++ unsigned long value;
++
++ status = strict_strtoul(buf, 0, &value);
++ mipi_clock = value;
++ printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
++
++ return len;
++}
++
++static ssize_t apply_settings(struct class *class, const char *buf, size_t len)
++{
++ ssize_t status;
++ long value;
++
++ printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
++
++ status = strict_strtoul(buf, 0, &value);
++ if (value > 0) {
++ mrst_dsi_prepare(orig_encoder);
++ msleep(500);
++ if (value > 1) {
++ panel_reset();
++ msleep(500);
++ }
++ mrst_dsi_mode_set(orig_encoder, NULL, NULL);
++ msleep(500);
++ mrst_dsi_commit(orig_encoder);
++ }
++
++ return len;
++}
++// Temporary access from sysfs end
++
++static void panel_init(struct drm_device *dev)
++{
++#if DBG_PRINTS
++ printk("panel_init\n");
++#endif /* DBG_PRINTS */
++
++ /* Flip page order */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x00008036);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000229);
++
++#ifdef NEW_CRAP_SAMPLE_SETTINGS
++ // 0xF0, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x005a5af0);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++#endif
++
++ /* Write protection key */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x005a5af1);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++
++#ifdef NEW_CRAP_SAMPLE_SETTINGS
++ // 0xFC, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x005a5afc);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++
++ // 0xB7, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++#ifdef DOES_NOT_WORK
++ /* Suggested by TPO, doesn't work as usual */
++ REG_WRITE(0xb068, 0x110000b7);
++ REG_WRITE(0xb068, 0x00000044);
++#else
++ REG_WRITE(0xb068, 0x770000b7);
++ REG_WRITE(0xb068, 0x00000044);
++#endif
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000529);
++
++ // 0xB6, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x000a0ab6);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++
++ // 0xF2, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x081010f2);
++ REG_WRITE(0xb068, 0x4a070708);
++ REG_WRITE(0xb068, 0x000000c5);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000929);
++
++ // 0xF8, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x024003f8);
++ REG_WRITE(0xb068, 0x01030a04);
++ REG_WRITE(0xb068, 0x0e020220);
++ REG_WRITE(0xb068, 0x00000004);
++
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000d29);
++
++ // 0xE2, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x398fc3e2);
++ REG_WRITE(0xb068, 0x0000916f);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000629);
++
++#ifdef DOES_NOT_WORK
++ /* Suggested by TPO, doesn't work as usual */
++ // 0xE3, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x20f684e3);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000429);
++
++ msleep(50);
++#endif
++
++ // 0xB0, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x000000b0);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000229);
++
++ // 0xF4, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x240242f4);
++ REG_WRITE(0xb068, 0x78ee2002);
++ REG_WRITE(0xb068, 0x2a071050);
++ REG_WRITE(0xb068, 0x507fee10);
++ REG_WRITE(0xb068, 0x10300710);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00001429);
++
++ // 0xBA, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x19fe07ba);
++ REG_WRITE(0xb068, 0x101c0a31);
++ REG_WRITE(0xb068, 0x00000010);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000929);
++
++ // 0xBB, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x28ff07bb);
++ REG_WRITE(0xb068, 0x24280a31);
++ REG_WRITE(0xb068, 0x00000034);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000929);
++
++ // 0xFB, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x535d05fb);
++ REG_WRITE(0xb068, 0x1b1a2130);
++ REG_WRITE(0xb068, 0x221e180e);
++ REG_WRITE(0xb068, 0x131d2120);
++ REG_WRITE(0xb068, 0x535d0508);
++ REG_WRITE(0xb068, 0x1c1a2131);
++ REG_WRITE(0xb068, 0x231f160d);
++ REG_WRITE(0xb068, 0x111b2220);
++ REG_WRITE(0xb068, 0x535c2008);
++ REG_WRITE(0xb068, 0x1f1d2433);
++ REG_WRITE(0xb068, 0x2c251a10);
++ REG_WRITE(0xb068, 0x2c34372d);
++ REG_WRITE(0xb068, 0x00000023);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00003129);
++
++ // 0xFA, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x525c0bfa);
++ REG_WRITE(0xb068, 0x1c1c232f);
++ REG_WRITE(0xb068, 0x2623190e);
++ REG_WRITE(0xb068, 0x18212625);
++ REG_WRITE(0xb068, 0x545d0d0e);
++ REG_WRITE(0xb068, 0x1e1d2333);
++ REG_WRITE(0xb068, 0x26231a10);
++ REG_WRITE(0xb068, 0x1a222725);
++ REG_WRITE(0xb068, 0x545d280f);
++ REG_WRITE(0xb068, 0x21202635);
++ REG_WRITE(0xb068, 0x31292013);
++ REG_WRITE(0xb068, 0x31393d33);
++ REG_WRITE(0xb068, 0x00000029);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00003129);
++#endif
++
++ /* Set DM */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x000100f7);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++}
++
++
++static void panel_reset_on(void)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++#if DBG_PRINTS
++ printk("panel_reset_on\n");
++#endif /* DBG_PRINTS */
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++#ifdef AAVA_EV_0_5
++ tmp_reg.pmic_reg_data[0].register_address = 0xe6;
++ tmp_reg.pmic_reg_data[0].value = 0x01;
++#else /* CDK */
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk("panel_reset_on: failed to read pmic reg 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value &= 0xbf;
++#endif /* AAVA_EV_0_5 */
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ printk("panel_reset_on: failed to write pmic reg 0xe6!\n");
++ }
++}
++
++
++static void panel_reset_off(void)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++#if DBG_PRINTS
++ printk("panel_reset_off\n");
++#endif /* DBG_PRINTS */
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++#ifdef AAVA_EV_0_5
++ tmp_reg.pmic_reg_data[0].register_address = 0xe6;
++ tmp_reg.pmic_reg_data[0].value = 0x09;
++#else /* CDK */
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk("panel_reset_off: failed to read pmic reg 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value |= 0x40;
++#endif /* AAVA_EV_0_5 */
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ printk("panel_reset_off: failed to write pmic reg 0xe6!\n");
++ }
++}
++
++
++static void panel_reset(void)
++{
++#if DBG_PRINTS
++ printk("panel_reset\n");
++#endif /* DBG_PRINTS */
++
++ panel_reset_on();
++ msleep(20);
++ panel_reset_off();
++ msleep(20);
++}
++
++
++static void backlight_state(bool on)
++{
++ struct ipc_pmic_reg_data tmp_reg;
++
++#if DBG_PRINTS
++ printk("backlight_state\n");
++#endif /* DBG_PRINTS */
++
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 2;
++ tmp_reg.pmic_reg_data[0].register_address = 0x2a;
++ tmp_reg.pmic_reg_data[1].register_address = 0x28;
++
++ if( on ) {
++#if DBG_PRINTS
++ printk("backlight_state: ON\n");
++#endif /* DBG_PRINTS */
++ tmp_reg.pmic_reg_data[0].value = 0xaa;
++#ifdef AAVA_EV_0_5
++ tmp_reg.pmic_reg_data[1].value = 0x30;
++#else /* CDK */
++ tmp_reg.pmic_reg_data[1].value = 0x60;
++#endif /* AAVA_EV_0_5 */
++ } else {
++#if DBG_PRINTS
++ printk("backlight_state: OFF\n");
++#endif /* DBG_PRINTS */
++ tmp_reg.pmic_reg_data[0].value = 0x00;
++ tmp_reg.pmic_reg_data[1].value = 0x00;
++ }
++
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ printk("backlight_state: failed to write pmic regs 0x2a and 0x28!\n");
++ }
++}
++
++#ifdef AAVA_BACKLIGHT_HACK
++static void bl_work_handler(struct work_struct *work)
++{
++ backlight_state(true);
++}
++#endif /* AAVA_BACKLIGHT_HACK */
++
++
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_dsi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 pp_status;
++
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power\n");
++#endif /* DBG_PRINTS */
++
++ /*
++	 * The DSI device must be ready before we can change power state.
++ */
++ if (!dev_priv->dsi_device_ready)
++ {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: !dev_priv->dsi_device_ready!\n");
++#endif /* DBG_PRINTS */
++ return;
++ }
++
++ /*
++ * We don't support dual DSI yet. May be in POR in the future.
++ */
++ if (dev_priv->dual_display)
++ {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: dev_priv->dual_display!\n");
++#endif /* DBG_PRINTS */
++ return;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: on\n");
++#endif /* DBG_PRINTS */
++ if (dev_priv->dpi && !dev_priv->dpi_panel_on) {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: dpi\n");
++#endif /* DBG_PRINTS */
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
++ REG_WRITE(PP_CONTROL,
++ (REG_READ(PP_CONTROL) | POWER_TARGET_ON));
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
++
++ /* Run TPO display specific initialisations */
++// MiKo TBD, this delay may need to be tuned
++ msleep(50);
++ panel_init(dev);
++
++ /* Set backlights on */
++ backlight_state( true );
++ dev_priv->dpi_panel_on = true;
++ }
++ } else {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: off\n");
++#endif /* DBG_PRINTS */
++ if (dev_priv->dpi && dev_priv->dpi_panel_on) {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: dpi\n");
++#endif /* DBG_PRINTS */
++ /* Set backlights off */
++ backlight_state( false );
++
++// MiKo TBD, something clever could be done here to save power, for example:
++// -Set display to sleep mode, or
++// -Set display to HW reset, or
++// -Shutdown the voltages to display
++
++ REG_WRITE(PP_CONTROL,
++ (REG_READ(PP_CONTROL) & ~POWER_TARGET_ON));
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++
++ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
++
++ dev_priv->dpi_panel_on = false;
++ }
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if DBG_PRINTS
++ printk("mrst_dsi_dpms\n");
++#endif /* DBG_PRINTS */
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_dsi_set_power(dev, output, true);
++ else
++ mrst_dsi_set_power(dev, output, false);
++}
++
++
++static void mrst_dsi_save(struct drm_connector *connector)
++{
++#if DBG_PRINTS
++ printk("mrst_dsi_save\n");
++#endif /* DBG_PRINTS */
++ // MiKo TBD
++}
++
++
++static void mrst_dsi_restore(struct drm_connector *connector)
++{
++#if DBG_PRINTS
++ printk("mrst_dsi_restore\n");
++#endif /* DBG_PRINTS */
++ // MiKo TBD
++}
++
++
++static void mrst_dsi_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if DBG_PRINTS
++ printk("mrst_dsi_prepare\n");
++#endif /* DBG_PRINTS */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ mrst_dsi_set_power(dev, output, false);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static void mrst_dsi_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if DBG_PRINTS
++ printk("mrst_dsi_commit\n");
++#endif /* DBG_PRINTS */
++
++ mrst_dsi_set_power(dev, output, true);
++}
++
++
++static void mrst_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 SupportedFormat = 0;
++ u32 resolution = 0;
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++
++#if DBG_PRINTS
++ printk("mrst_dsi_mode_set\n");
++#endif /* DBG_PRINTS */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* Sleep to ensure that the graphics engine is ready
++ * since its mode_set is called before ours
++ */
++ msleep(100);
++
++ switch (dev_priv->bpp)
++ {
++ case 24:
++ SupportedFormat = RGB_888_FMT;
++ break;
++ default:
++ printk("mrst_dsi_mode_set, invalid bpp!\n");
++ break;
++ }
++
++ if (dev_priv->dpi) {
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &curValue);
++ if (curValue == DRM_MODE_SCALE_CENTER) {
++ REG_WRITE(PFIT_CONTROL, 0);
++ } else if (curValue == DRM_MODE_SCALE_FULLSCREEN) {
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else {
++ printk("mrst_dsi_mode_set, scaling not supported!\n");
++ REG_WRITE(PFIT_CONTROL, 0);
++ }
++
++
++ /* MIPI clock ratio 1:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
++ //REG_WRITE(0xb080, 0x0b061a02);
++
++ /* MIPI clock ratio 2:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x00000019);
++ //REG_WRITE(0xb080, 0x3f1f1c04);
++
++ /* MIPI clock ratio 3:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x0000001a);
++ //REG_WRITE(0xb080, 0x091f7f08);
++
++		/* MIPI clock ratio: 4:1 base (0x18), low bits from the sysfs-tunable mipi_clock */
++ REG_WRITE(MIPI_CONTROL_REG, (0x00000018 | mipi_clock));
++ REG_WRITE(0xb080, dphy_reg);
++
++ /* Enable all interrupts */
++ REG_WRITE(INTR_EN_REG, 0xffffffff);
++
++ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000A);
++ REG_WRITE(DEVICE_RESET_REG, 0x000000ff);
++ REG_WRITE(INIT_COUNT_REG, 0x00000fff);
++ REG_WRITE(HS_TX_TIMEOUT_REG, 0x90000);
++ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
++ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
++ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
++
++ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
++
++ SupportedFormat <<= FMT_DPI_POS;
++ REG_WRITE(DSI_FUNC_PRG_REG,
++ (dev_priv->laneCount | SupportedFormat));
++
++ resolution = dev_priv->HactiveArea |
++ (dev_priv->VactiveArea << RES_V_POS);
++ REG_WRITE(DPI_RESOLUTION_REG, resolution);
++
++ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth);
++ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch);
++ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch);
++
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, dev_priv->HsyncWidth);
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, dev_priv->HbackPorch);
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, dev_priv->HfrontPorch);
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, MIPI_HACT);
++ }
++
++ /* Enable MIPI Port */
++ REG_WRITE(MIPI, MIPI_PORT_EN);
++
++ REG_WRITE(DEVICE_READY_REG, 0x00000001);
++ REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */
++
++ dev_priv->dsi_device_ready = true;
++
++ /* Enable pipe */
++ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
++ REG_READ(PIPEACONF);
++
++ /* Wait for 20ms for the pipe enable to take effect. */
++ udelay(20000);
++
++ /* Enable plane */
++ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ udelay(20000);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++/**
++ * Detect the MIPI connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the MIPI was actually connected anyway.
++ */
++static enum drm_connector_status mrst_dsi_detect(struct drm_connector
++ *connector)
++{
++#if DBG_PRINTS
++ printk("mrst_dsi_detect\n");
++#endif /* DBG_PRINTS */
++ return connector_status_connected;
++}
++
++
++/**
++ * Return the list of MIPI DDB modes if available.
++ */
++static int mrst_dsi_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++
++	/* Didn't get a DDB, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++ return 0;
++}
++
++
++static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
++ .dpms = mrst_dsi_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = mrst_dsi_prepare,
++ .mode_set = mrst_dsi_mode_set,
++ .commit = mrst_dsi_commit,
++};
++
++
++static const struct drm_connector_helper_funcs
++ mrst_dsi_connector_helper_funcs = {
++ .get_modes = mrst_dsi_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++
++static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mrst_dsi_save,
++ .restore = mrst_dsi_restore,
++ .detect = mrst_dsi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++
++/** Returns the panel fixed mode from configuration. */
++struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
++{
++ struct drm_display_mode *mode;
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ /* MiKo, fixed mode for TPO display
++ Note: Using defined values for easier match with ITP scripts
++ and adding 1 since psb_intel_display.c decreases by 1
++ */
++ mode->hdisplay = (DISP_HPIX + 1);
++ mode->vdisplay = (DISP_VPIX + 1);
++ mode->hsync_start = (DISP_HSYNC_START + 1);
++ mode->hsync_end = (DISP_HSYNC_END + 1);
++ mode->htotal = (DISP_HBLANK_END + 1);
++ mode->vsync_start = (DISP_VSYNC_START + 1);
++ mode->vsync_end = (DISP_VSYNC_END + 1);
++ mode->vtotal = (DISP_VBLANK_END + 1);
++ mode->clock = 33264;
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++
++/* ************************************************************************* *\
++FUNCTION: mrst_mipi_settings_init
++
++DESCRIPTION: Set fixed MIPI/DSI timing and mode parameters for the TPO panel.
++
++\* ************************************************************************* */
++static bool mrst_mipi_settings_init(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ /* MiKo, fixed values for TPO display */
++ dev_priv->pixelClock = 33264;
++ dev_priv->HsyncWidth = MIPI_HSPAD;
++ dev_priv->HbackPorch = MIPI_HBP;
++ dev_priv->HfrontPorch = MIPI_HFP;
++ dev_priv->HactiveArea = HSIZE;
++ dev_priv->VsyncWidth = MIPI_VSPAD;
++ dev_priv->VbackPorch = MIPI_VBP;
++ dev_priv->VfrontPorch = MIPI_VFP;
++ dev_priv->VactiveArea = VSIZE;
++ dev_priv->bpp = 24;
++
++ /* video mode */
++ dev_priv->dpi = true;
++
++ /* MiKo, set these true by default to ensure that first mode set is done
++ cleanly
++ */
++ dev_priv->dpi_panel_on = true;
++ dev_priv->dsi_device_ready = true;
++
++ /* 2 lanes */
++ dev_priv->laneCount = MIPI_LANES;
++
++ /* Burst mode */
++ dev_priv->videoModeFormat = BURST_MODE;
++
++ return true;
++}
++
++
++/**
++ * mrst_dsi_init - setup MIPI connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, try to figure out what
++ * modes we can display on the MIPI panel (if present).
++ */
++void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++
++#if DBG_PRINTS
++ printk("mrst_dsi_init\n");
++#endif /* DBG_PRINTS */
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ panel_reset();
++
++#ifdef AAVA_BACKLIGHT_HACK
++ schedule_delayed_work(&bl_work, 2*HZ);
++#endif /* AAVA_BACKLIGHT_HACK */
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev,
++ &psb_intel_output->base,
++ &mrst_dsi_connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev,
++ &psb_intel_output->enc,
++ &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_MIPI;
++
++ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
++ drm_connector_helper_add(connector, &mrst_dsi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ if (!mrst_mipi_settings_init(dev_priv))
++ printk("Can't initialize MIPI settings\n");
++
++ /* No config phase */
++ dev_priv->config_phase = false;
++
++ /* Get the fixed mode */
++ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
++ } else {
++ printk("Found no modes for MIPI!\n");
++ goto failed_find;
++ }
++// Temporary access from sysfs begin
++ orig_encoder = encoder;
++// Temporary access from sysfs end
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++// Temporary access from sysfs begin
++static struct class_attribute miko_class_attrs[] = {
++ __ATTR(dphy, 0644, NULL, dphy_store),
++ __ATTR(clock, 0644, NULL, clock_store),
++ __ATTR(apply, 0200, NULL, apply_settings),
++ __ATTR_NULL,
++};
++
++static struct class miko_class = {
++ .name = "miko",
++ .owner = THIS_MODULE,
++
++ .class_attrs = miko_class_attrs,
++};
++
++static int __init miko_sysfs_init(void)
++{
++ int status;
++
++ status = class_register(&miko_class);
++ if (status < 0)
++ return status;
++
++ return status;
++}
++postcore_initcall(miko_sysfs_init);
++// Temporary access from sysfs end
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_i2c.c b/drivers/gpu/drm/mrst/drv/psb_intel_i2c.c
+new file mode 100644
+index 0000000..415847d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_i2c.c
+@@ -0,0 +1,172 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++/*
++ * Intel GPIO access functions
++ */
++
++#define I2C_RISEFALL_TIME 20
++
++static int get_clock(void *data)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 val;
++
++ val = REG_READ(chan->reg);
++ return (val & GPIO_CLOCK_VAL_IN) != 0;
++}
++
++static int get_data(void *data)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 val;
++
++ val = REG_READ(chan->reg);
++ return (val & GPIO_DATA_VAL_IN) != 0;
++}
++
++static void set_clock(void *data, int state_high)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 reserved = 0, clock_bits;
++
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved =
++ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ if (state_high)
++ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
++ else
++ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
++ GPIO_CLOCK_VAL_MASK;
++ REG_WRITE(chan->reg, reserved | clock_bits);
++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++}
++
++static void set_data(void *data, int state_high)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 reserved = 0, data_bits;
++
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved =
++ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ if (state_high)
++ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
++ else
++ data_bits =
++ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
++ GPIO_DATA_VAL_MASK;
++
++ REG_WRITE(chan->reg, reserved | data_bits);
++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++}
++
++/**
++ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
++ * @dev: DRM device
++ * @output: driver specific output device
++ * @reg: GPIO reg to use
++ * @name: name for this bus
++ *
++ * Creates and registers a new i2c bus with the Linux i2c layer, for use
++ * in output probing and control (e.g. DDC or SDVO control functions).
++ *
++ * Possible values for @reg include:
++ * %GPIOA
++ * %GPIOB
++ * %GPIOC
++ * %GPIOD
++ * %GPIOE
++ * %GPIOF
++ * %GPIOG
++ * %GPIOH
++ * see PRM for details on how these different busses are used.
++ */
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++ const u32 reg, const char *name)
++{
++ struct psb_intel_i2c_chan *chan;
++
++ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
++ if (!chan)
++ goto out_free;
++
++ chan->drm_dev = dev;
++ chan->reg = reg;
++ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
++ chan->adapter.owner = THIS_MODULE;
++ chan->adapter.algo_data = &chan->algo;
++ chan->adapter.dev.parent = &dev->pdev->dev;
++ chan->algo.setsda = set_data;
++ chan->algo.setscl = set_clock;
++ chan->algo.getsda = get_data;
++ chan->algo.getscl = get_clock;
++ chan->algo.udelay = 20;
++ chan->algo.timeout = usecs_to_jiffies(2200);
++ chan->algo.data = chan;
++
++ i2c_set_adapdata(&chan->adapter, chan);
++
++ if (i2c_bit_add_bus(&chan->adapter))
++ goto out_free;
++
++ /* JJJ: raise SCL and SDA? */
++ set_data(chan, 1);
++ set_clock(chan, 1);
++ udelay(20);
++
++ return chan;
++
++out_free:
++ kfree(chan);
++ return NULL;
++}
++
++/**
++ * psb_intel_i2c_destroy - unregister and free i2c bus resources
++ * @output: channel to free
++ *
++ * Unregister the adapter from the i2c layer, then free the structure.
++ */
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
++{
++ if (!chan)
++ return;
++
++ i2c_del_adapter(&chan->adapter);
++ kfree(chan);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_lvds.c b/drivers/gpu/drm/mrst/drv/psb_intel_lvds.c
+new file mode 100644
+index 0000000..b426b53
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_lvds.c
+@@ -0,0 +1,1385 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ * Dave Airlie <airlied@linux.ie>
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++/* #include <drm/drm_crtc.h> */
++/* #include <drm/drm_edid.h> */
++#include <drm/drmP.h>
++
++#include "psb_intel_bios.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "ospm_power.h"
++
++/* MRST defines start */
++uint8_t blc_freq;
++uint8_t blc_minbrightness;
++uint8_t blc_i2caddr;
++uint8_t blc_brightnesscmd;
++int lvds_backlight; /* restore backlight to this value */
++
++u32 CoreClock;
++u32 PWMControlRegFreq;
++
++/**
++ * LVDS I2C backlight control macros
++ */
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK 0xFF
++#define BLC_I2C_TYPE 0x01
++#define BLC_PWM_TYPT 0x02
++
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
++#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
++#define PSB_BLC_PWM_PRECISION_FACTOR (10)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++
++struct psb_intel_lvds_priv {
++ /**
++ * Saved LVDO output states
++ */
++ uint32_t savePP_ON;
++ uint32_t savePP_OFF;
++ uint32_t saveLVDS;
++ uint32_t savePP_CONTROL;
++ uint32_t savePP_CYCLE;
++ uint32_t savePFIT_CONTROL;
++ uint32_t savePFIT_PGM_RATIOS;
++ uint32_t saveBLC_PWM_CTL;
++};
++
++/* MRST defines end */
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ u32 retVal;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ retVal = ((REG_READ(BLC_PWM_CTL) &
++ BACKLIGHT_MODULATION_FREQ_MASK) >>
++ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else
++ retVal = ((dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_MODULATION_FREQ_MASK) >>
++ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++ return retVal;
++}
++
++/**
++ * Set LVDS backlight level by I2C command
++ */
++static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
++ unsigned int level)
++ {
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
++ u8 out_buf[2];
++ unsigned int blc_i2c_brightness;
++
++ struct i2c_msg msgs[] = {
++ {
++ .addr = lvds_i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ }
++ };
++
++ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
++ BRIGHTNESS_MASK /
++ BRIGHTNESS_MAX_LEVEL);
++
++ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
++
++ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
++ out_buf[1] = (u8)blc_i2c_brightness;
++
++ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
++ DRM_DEBUG("I2C set brightness.(command, value) (%d, %d)\n",
++ blc_brightnesscmd,
++ blc_i2c_brightness);
++ return 0;
++ }
++
++ DRM_ERROR("I2C transfer error\n");
++ return -1;
++}
++
++
++static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ u32 max_pwm_blc;
++ u32 blc_pwm_duty_cycle;
++
++ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
++
++ /*BLC_PWM_CTL Should be initiated while backlight device init*/
++ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
++
++ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
++
++ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++ REG_WRITE(BLC_PWM_CTL,
++ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++ (blc_pwm_duty_cycle));
++
++ return 0;
++}
++
++/**
++ * Set LVDS backlight level either by I2C or PWM
++ */
++void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
++{
++ /*u32 blc_pwm_ctl;*/
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ DRM_DEBUG("backlight level is %d\n", level);
++
++ if (!dev_priv->lvds_bl) {
++ DRM_ERROR("NO LVDS Backlight Info\n");
++ return;
++ }
++
++ if (IS_MRST(dev)) {
++ DRM_ERROR(
++ "psb_intel_lvds_set_brightness called...not expected\n");
++ return;
++ }
++
++ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
++ psb_lvds_i2c_set_brightness(dev, level);
++ else
++ psb_lvds_pwm_set_brightness(dev, level);
++}
++
++/**
++ * Sets the backlight level.
++ *
++ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
++ */
++static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ u32 blc_pwm_ctl;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ blc_pwm_ctl =
++ REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
++ REG_WRITE(BLC_PWM_CTL,
++ (blc_pwm_ctl |
++ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
++ ~BACKLIGHT_DUTY_CYCLE_MASK;
++ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
++ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
++ }
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void psb_intel_lvds_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ u32 pp_status;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++
++ psb_intel_lvds_set_backlight(dev,
++ output->
++ mode_dev->backlight_duty_cycle);
++ } else {
++ psb_intel_lvds_set_backlight(dev, 0);
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++ if (mode == DRM_MODE_DPMS_ON)
++ psb_intel_lvds_set_power(dev, output, true);
++ else
++ psb_intel_lvds_set_power(dev, output, false);
++
++ /* XXX: We never power down the LVDS pairs. */
++}
++
++static void psb_intel_lvds_save(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_lvds_priv *lvds_priv =
++ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
++
++ if (IS_POULSBO(dev)) {
++ lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
++ lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
++ lvds_priv->saveLVDS = REG_READ(LVDS);
++ lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++ lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
++ /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
++ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
++ lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
++
++ /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ /*
++ * If the light is off at server startup,
++ * just make it full brightness
++ */
++ if (dev_priv->backlight_duty_cycle == 0)
++ dev_priv->backlight_duty_cycle =
++ psb_intel_lvds_get_max_backlight(dev);
++
++ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
++ lvds_priv->savePP_ON,
++ lvds_priv->savePP_OFF,
++ lvds_priv->saveLVDS,
++ lvds_priv->savePP_CONTROL,
++ lvds_priv->savePP_CYCLE,
++ lvds_priv->saveBLC_PWM_CTL);
++ }
++}
++
++static void psb_intel_lvds_restore(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ u32 pp_status;
++
++ /*struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;*/
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_lvds_priv *lvds_priv =
++ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
++
++ if (IS_POULSBO(dev)) {
++ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
++ lvds_priv->savePP_ON,
++ lvds_priv->savePP_OFF,
++ lvds_priv->saveLVDS,
++ lvds_priv->savePP_CONTROL,
++ lvds_priv->savePP_CYCLE,
++ lvds_priv->saveBLC_PWM_CTL);
++
++ REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
++ REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
++ REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
++ REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
++ REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
++ /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
++ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
++ REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
++ REG_WRITE(LVDS, lvds_priv->saveLVDS);
++
++ if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++ } else {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++ }
++}
++
++int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_display_mode *fixed_mode =
++ psb_intel_output->mode_dev->panel_fixed_mode;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n");
++#endif /* PRINT_JLIU7 */
++
++ /* just in case */
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ /* just in case */
++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++ return MODE_NO_INTERLACE;
++
++ if (fixed_mode) {
++ if (mode->hdisplay > fixed_mode->hdisplay)
++ return MODE_PANEL;
++ if (mode->vdisplay > fixed_mode->vdisplay)
++ return MODE_PANEL;
++ }
++ return MODE_OK;
++}
++
++bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(encoder->crtc);
++ struct drm_encoder *tmp_encoder;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n");
++#endif /* PRINT_JLIU7 */
++
++ /* Should never happen!! */
++ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
++ printk(KERN_ERR
++ "Can't support LVDS/MIPI on pipe B on MRST\n");
++ return false;
++ } else if (!IS_MRST(dev) && !IS_I965G(dev)
++ && psb_intel_crtc->pipe == 0) {
++ printk(KERN_ERR "Can't support LVDS on pipe A\n");
++ return false;
++ }
++ /* Should never happen!! */
++ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
++ head) {
++ if (tmp_encoder != encoder
++ && tmp_encoder->crtc == encoder->crtc) {
++ printk(KERN_ERR "Can't enable LVDS and another "
++ "encoder on the same pipe\n");
++ return false;
++ }
++ }
++
++ /*
++ * If we have timings from the BIOS for the panel, put them in
++ * to the adjusted mode. The CRTC will be set up for this mode,
++ * with the panel scaling set up to source from the H/VDisplay
++ * of the original mode.
++ */
++ if (mode_dev->panel_fixed_mode != NULL) {
++ adjusted_mode->hdisplay =
++ mode_dev->panel_fixed_mode->hdisplay;
++ adjusted_mode->hsync_start =
++ mode_dev->panel_fixed_mode->hsync_start;
++ adjusted_mode->hsync_end =
++ mode_dev->panel_fixed_mode->hsync_end;
++ adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal;
++ adjusted_mode->vdisplay =
++ mode_dev->panel_fixed_mode->vdisplay;
++ adjusted_mode->vsync_start =
++ mode_dev->panel_fixed_mode->vsync_start;
++ adjusted_mode->vsync_end =
++ mode_dev->panel_fixed_mode->vsync_end;
++ adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal;
++ adjusted_mode->clock = mode_dev->panel_fixed_mode->clock;
++ drm_mode_set_crtcinfo(adjusted_mode,
++ CRTC_INTERLACE_HALVE_V);
++ }
++
++ /*
++ * XXX: It would be nice to support lower refresh rates on the
++ * panels to reduce power consumption, and perhaps match the
++ * user's requested refresh rate.
++ */
++
++ return true;
++}
++
++static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ psb_intel_lvds_set_power(dev, output, false);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void psb_intel_lvds_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ psb_intel_lvds_get_max_backlight(dev);
++
++ psb_intel_lvds_set_power(dev, output, true);
++}
++
++static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
++ encoder->crtc);
++ u32 pfit_control;
++
++ /*
++ * The LVDS pin pair will already have been turned on in the
++ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++ * settings.
++ */
++
++ /*
++ * Enable automatic panel scaling so that non-native modes fill the
++ * screen. Should be enabled before the pipe is enabled, according to
++ * register description and PRM.
++ */
++ if (mode->hdisplay != adjusted_mode->hdisplay ||
++ mode->vdisplay != adjusted_mode->vdisplay)
++ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
++ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
++ HORIZ_INTERP_BILINEAR);
++ else
++ pfit_control = 0;
++
++ if (!IS_I965G(dev)) {
++ if (mode_dev->panel_wants_dither)
++ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
++ } else
++ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
++
++ REG_WRITE(PFIT_CONTROL, pfit_control);
++}
++
++/**
++ * Detect the LVDS connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the LVDS was actually connected anyway.
++ */
++static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
++ *connector)
++{
++ return connector_status_connected;
++}
++
++/**
++ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
++ */
++static int psb_intel_lvds_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev =
++ psb_intel_output->mode_dev;
++ int ret = 0;
++
++ if (!IS_MRST(dev))
++ ret = psb_intel_ddc_get_modes(psb_intel_output);
++
++ if (ret)
++ return ret;
++
++ /* Didn't get an EDID, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++/**
++ * psb_intel_lvds_destroy - unregister and free LVDS structures
++ * @connector: connector to free
++ *
++ * Unregister the DDC bus for this connector then free the driver private
++ * structure.
++ */
++void psb_intel_lvds_destroy(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++int psb_intel_lvds_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value)
++{
++ struct drm_encoder *pEncoder = connector->encoder;
++
++ if (!strcmp(property->name, "scaling mode") && pEncoder) {
++ struct psb_intel_crtc *pPsbCrtc =
++ to_psb_intel_crtc(pEncoder->crtc);
++ uint64_t curValue;
++
++ if (!pPsbCrtc)
++ goto set_prop_error;
++
++ switch (value) {
++ case DRM_MODE_SCALE_FULLSCREEN:
++ break;
++ case DRM_MODE_SCALE_CENTER:
++ break;
++ case DRM_MODE_SCALE_ASPECT:
++ break;
++ default:
++ goto set_prop_error;
++ }
++
++ if (drm_connector_property_get_value(connector,
++ property,
++ &curValue))
++ goto set_prop_error;
++
++ if (curValue == value)
++ goto set_prop_done;
++
++ if (drm_connector_property_set_value(connector,
++ property,
++ value))
++ goto set_prop_error;
++
++ if (pPsbCrtc->saved_mode.hdisplay != 0 &&
++ pPsbCrtc->saved_mode.vdisplay != 0) {
++ if (!drm_crtc_helper_set_mode(pEncoder->crtc,
++ &pPsbCrtc->saved_mode,
++ pEncoder->crtc->x,
++ pEncoder->crtc->y,
++ pEncoder->crtc->fb))
++ goto set_prop_error;
++ }
++ } else if (!strcmp(property->name, "backlight") && pEncoder) {
++ if (drm_connector_property_set_value(connector,
++ property,
++ value))
++ goto set_prop_error;
++ else {
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ struct backlight_device bd;
++ bd.props.brightness = value;
++ psb_set_brightness(&bd);
++#endif
++ }
++ }
++
++set_prop_done:
++ return 0;
++set_prop_error:
++ return -1;
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
++ .dpms = psb_intel_lvds_encoder_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = psb_intel_lvds_prepare,
++ .mode_set = psb_intel_lvds_mode_set,
++ .commit = psb_intel_lvds_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ psb_intel_lvds_connector_helper_funcs = {
++ .get_modes = psb_intel_lvds_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = psb_intel_lvds_save,
++ .restore = psb_intel_lvds_restore,
++ .detect = psb_intel_lvds_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++
++static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
++{
++ drm_encoder_cleanup(encoder);
++}
++
++const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
++ .destroy = psb_intel_lvds_enc_destroy,
++};
++
++
++
++/**
++ * psb_intel_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void psb_intel_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_output *psb_intel_output;
++ struct psb_intel_lvds_priv *lvds_priv;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct drm_display_mode *scan; /* *modes, *bios_mode; */
++ struct drm_crtc *crtc;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ u32 lvds;
++ int pipe;
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
++ if (!lvds_priv) {
++ kfree(psb_intel_output);
++ DRM_DEBUG("LVDS private allocation error\n");
++ return;
++ }
++
++ psb_intel_output->dev_priv = lvds_priv;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &psb_intel_lvds_connector_funcs,
++ DRM_MODE_CONNECTOR_LVDS);
++
++ drm_encoder_init(dev, &psb_intel_output->enc,
++ &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_LVDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_LVDS;
++
++ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
++ drm_connector_helper_add(connector,
++ &psb_intel_lvds_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ /*Attach connector properties*/
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ /**
++ * Set up I2C bus
++ * FIXME: destroy i2c_bus on exit
++ */
++ psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
++ GPIOB,
++ "LVDSBLC_B");
++ if (!psb_intel_output->i2c_bus) {
++ dev_printk(KERN_ERR,
++ &dev->pdev->dev, "I2C bus registration failed.\n");
++ goto failed_blc_i2c;
++ }
++ psb_intel_output->i2c_bus->slave_addr = 0x2C;
++ dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
++
++ /*
++ * LVDS discovery:
++ * 1) check for EDID on DDC
++ * 2) check for VBT data
++ * 3) check to see if LVDS is already on
++ * if none of the above, no panel
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* Set up the DDC bus. */
++ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
++ GPIOC,
++ "LVDSDDC_C");
++ if (!psb_intel_output->ddc_bus) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "DDC bus registration " "failed.\n");
++ goto failed_ddc;
++ }
++
++ /*
++ * Attempt to get the fixed panel mode from DDC. Assume that the
++ * preferred mode is the right one.
++ */
++ psb_intel_ddc_get_modes(psb_intel_output);
++ list_for_each_entry(scan, &connector->probed_modes, head) {
++ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, scan);
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /* Failed to get EDID, what about VBT? do we need this?*/
++ if (mode_dev->vbt_mode)
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, mode_dev->vbt_mode);
++
++ if (!mode_dev->panel_fixed_mode)
++ if (dev_priv->lfp_lvds_vbt_mode)
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev,
++ dev_priv->lfp_lvds_vbt_mode);
++
++ /*
++ * If we didn't get EDID, try checking if the panel is already turned
++ * on. If so, assume that whatever is currently programmed is the
++ * correct mode.
++ */
++ lvds = REG_READ(LVDS);
++ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
++ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
++
++ if (crtc && (lvds & LVDS_PORT_EN)) {
++ mode_dev->panel_fixed_mode =
++ psb_intel_crtc_mode_get(dev, crtc);
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ /* FIXME: detect aopen & mac mini type stuff automatically? */
++ /*
++ * Blacklist machines with BIOSes that list an LVDS panel without
++ * actually having one.
++ */
++ if (IS_I945GM(dev)) {
++ /* aopen mini pc */
++ if (dev->pdev->subsystem_vendor == 0xa0a0) {
++ DRM_DEBUG
++ ("Suspected AOpen Mini PC, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ if ((dev->pdev->subsystem_vendor == 0x8086) &&
++ (dev->pdev->subsystem_device == 0x7270)) {
++ /* It's a Mac Mini or Macbook Pro. */
++
++ if (mode_dev->panel_fixed_mode != NULL &&
++ mode_dev->panel_fixed_mode->hdisplay == 800 &&
++ mode_dev->panel_fixed_mode->vdisplay == 600) {
++ DRM_DEBUG
++ ("Suspected Mac Mini, ignoring the LVDS\n");
++ goto failed_find;
++ }
++ }
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++
++#if PRINT_JLIU7
++ DRM_INFO("PRINT_JLIU7 hdisplay = %d\n",
++ mode_dev->panel_fixed_mode->hdisplay);
++ DRM_INFO("PRINT_JLIU7 vdisplay = %d\n",
++ mode_dev->panel_fixed_mode->vdisplay);
++ DRM_INFO("PRINT_JLIU7 hsync_start = %d\n",
++ mode_dev->panel_fixed_mode->hsync_start);
++ DRM_INFO("PRINT_JLIU7 hsync_end = %d\n",
++ mode_dev->panel_fixed_mode->hsync_end);
++ DRM_INFO("PRINT_JLIU7 htotal = %d\n",
++ mode_dev->panel_fixed_mode->htotal);
++ DRM_INFO("PRINT_JLIU7 vsync_start = %d\n",
++ mode_dev->panel_fixed_mode->vsync_start);
++ DRM_INFO("PRINT_JLIU7 vsync_end = %d\n",
++ mode_dev->panel_fixed_mode->vsync_end);
++ DRM_INFO("PRINT_JLIU7 vtotal = %d\n",
++ mode_dev->panel_fixed_mode->vtotal);
++ DRM_INFO("PRINT_JLIU7 clock = %d\n",
++ mode_dev->panel_fixed_mode->clock);
++#endif /* PRINT_JLIU7 */
++ return;
++
++failed_find:
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++failed_ddc:
++ if (psb_intel_output->i2c_bus)
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++failed_blc_i2c:
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++/* MRST platform start */
++
++/*
++ * FIXME need to move to register define head file
++ */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
++
++/* The max/min PWM frequency in BPCR[31:17] - */
++/* The smallest number that can fit in the 15-bit field
++ * is 1 (not 0); it is then */
++/* shifted left by one bit to get the actual 16-bit
++ * value that the 15 bits correspond to. */
++#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
++
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++/**
++ * Calculate PWM control register value.
++ */
++#if 0
++static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
++{
++ unsigned long value = 0;
++ if (blc_freq == 0) {
++ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
++ * Frequency Requested is 0.\n"); */
++ return false;
++ }
++
++ value = (CoreClock * MHz);
++ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
++ value = (value * BLC_PWM_PRECISION_FACTOR);
++ value = (value / blc_freq);
++ value = (value / BLC_PWM_PRECISION_FACTOR);
++
++ if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) {
++ return 0;
++ } else {
++ PWMControlRegFreq = (u32) value;
++ return 1;
++ }
++}
++#endif
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_lvds_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ u32 pp_status;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_set_power \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
++ } else {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_dpms \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_lvds_set_power(dev, output, true);
++ else
++ mrst_lvds_set_power(dev, output, false);
++
++ /* XXX: We never power down the LVDS pairs. */
++}
++
++static void mrst_lvds_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ u32 lvds_port;
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /*
++ * The LVDS pin pair will already have been turned on in the
++ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++ * settings.
++ */
++ /*FIXME JLIU7 Get panel power delay parameters from config data */
++ REG_WRITE(0x61208, 0x25807d0);
++ REG_WRITE(0x6120c, 0x1f407d0);
++ REG_WRITE(0x61210, 0x270f04);
++
++ lvds_port = (REG_READ(LVDS) &
++ (~LVDS_PIPEB_SELECT)) |
++ LVDS_PORT_EN |
++ LVDS_BORDER_EN;
++
++ if (mode_dev->panel_wants_dither)
++ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
++
++ REG_WRITE(LVDS, lvds_port);
++
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &curValue);
++
++ if (curValue == DRM_MODE_SCALE_CENTER)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (curValue == DRM_MODE_SCALE_ASPECT) {
++ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
++ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
++ (mode->hdisplay * adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) > (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_PILLARBOX);
++ else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_LETTERBOX);
++ } else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
++ .dpms = mrst_lvds_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = psb_intel_lvds_prepare,
++ .mode_set = mrst_lvds_mode_set,
++ .commit = psb_intel_lvds_commit,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revisit it. */
++struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device
++ *dev)
++{
++ struct drm_display_mode *mode;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ if (dev_priv->vbt_data.Size != 0x00) { /*if non-zero, then use vbt*/
++
++ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++ mode->hsync_start = mode->hdisplay + \
++ ((ti->hsync_offset_hi << 8) | \
++ ti->hsync_offset_lo);
++ mode->hsync_end = mode->hsync_start + \
++ ((ti->hsync_pulse_width_hi << 8) | \
++ ti->hsync_pulse_width_lo);
++ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++ ti->hblank_lo);
++ mode->vsync_start = \
++ mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
++ ti->vsync_offset_lo);
++ mode->vsync_end = \
++ mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
++ ti->vsync_pulse_width_lo);
++ mode->vtotal = mode->vdisplay + \
++ ((ti->vblank_hi << 8) | ti->vblank_lo);
++ mode->clock = ti->pixel_clock * 10;
++#if 0
++ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
++ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
++ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
++ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
++ printk(KERN_INFO "htotal is %d\n", mode->htotal);
++ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
++ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
++ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
++ printk(KERN_INFO "clock is %d\n", mode->clock);
++#endif
++ } else {
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 836;
++ mode->hsync_end = 846;
++ mode->htotal = 1056;
++ mode->vsync_start = 489;
++ mode->vsync_end = 491;
++ mode->vtotal = 525;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 800x480 */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 801;
++ mode->hsync_end = 802;
++ mode->htotal = 1024;
++ mode->vsync_start = 481;
++ mode->vsync_end = 482;
++ mode->vtotal = 525;
++ mode->clock = 30994;
++#endif /*FIXME jliu7 remove it later */
++
++#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec*/
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1072;
++ mode->hsync_end = 1104;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1104;
++ mode->hsync_end = 1136;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1124;
++ mode->hsync_end = 1204;
++ mode->htotal = 1312;
++ mode->vsync_start = 607;
++ mode->vsync_end = 610;
++ mode->vtotal = 621;
++ mode->clock = 48885;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1024x768 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1048;
++ mode->hsync_end = 1184;
++ mode->htotal = 1344;
++ mode->vsync_start = 771;
++ mode->vsync_end = 777;
++ mode->vtotal = 806;
++ mode->clock = 65000;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1366x768 */
++ mode->hdisplay = 1366;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1430;
++ mode->hsync_end = 1558;
++ mode->htotal = 1664;
++ mode->vsync_start = 769;
++ mode->vsync_end = 770;
++ mode->vtotal = 776;
++ mode->clock = 77500;
++#endif /*FIXME jliu7 remove it later */
++ }
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/**
++ * mrst_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void mrst_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct edid *edid;
++ int ret = 0;
++ struct i2c_adapter *i2c_adap;
++ struct drm_display_mode *scan; /* *modes, *bios_mode; */
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_init \n");
++#endif /* PRINT_JLIU7 */
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &psb_intel_lvds_connector_funcs,
++ DRM_MODE_CONNECTOR_LVDS);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_LVDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_LVDS;
++
++ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
++ drm_connector_helper_add(connector,
++ &psb_intel_lvds_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
++
++ mode_dev->panel_wants_dither = false;
++ if (dev_priv->vbt_data.Size != 0x00)
++ mode_dev->panel_wants_dither = (dev_priv->gct_data.Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
++
++ /*
++ * LVDS discovery:
++ * 1) check for EDID on DDC
++ * 2) check for VBT data
++ * 3) check to see if LVDS is already on
++ * if none of the above, no panel
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++ i2c_adap = i2c_get_adapter(2);
++ if (i2c_adap == NULL)
++ printk(KERN_ALERT "No ddc adapter available!\n");
++ /* Set up the DDC bus. */
++/* psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
++ GPIOC,
++ "LVDSDDC_C");
++ if (!psb_intel_output->ddc_bus) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "DDC bus registration " "failed.\n");
++ goto failed_ddc;
++ }*/
++
++ /*
++ * Attempt to get the fixed panel mode from DDC. Assume that the
++ * preferred mode is the right one.
++ */
++ edid = drm_get_edid(connector, i2c_adap);
++ if (edid) {
++ drm_mode_connector_update_edid_property(connector, edid);
++ ret = drm_add_edid_modes(connector, edid);
++ kfree(edid);
++ }
++
++ list_for_each_entry(scan, &connector->probed_modes, head) {
++ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, scan);
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /*
++ * If we didn't get EDID, try getting panel timing
++ * from configuration data
++ */
++ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
++
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No LVDS modes found, disabling.\n");
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++
++/* failed_ddc: */
++
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++/* MRST platform end */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_modes.c b/drivers/gpu/drm/mrst/drv/psb_intel_modes.c
+new file mode 100644
+index 0000000..e248aed
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_modes.c
+@@ -0,0 +1,77 @@
++/*
++ * Copyright (c) 2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <linux/fb.h>
++#include <drm/drmP.h>
++#include "psb_intel_drv.h"
++
++/**
++ * psb_intel_ddc_probe
++ *
++ */
++bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
++{
++ u8 out_buf[] = { 0x0, 0x0 };
++ u8 buf[2];
++ int ret;
++ struct i2c_msg msgs[] = {
++ {
++ .addr = 0x50,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = 0x50,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = buf,
++ }
++ };
++
++ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
++ if (ret == 2)
++ return true;
++
++ return false;
++}
++
++/**
++ * psb_intel_ddc_get_modes - get modelist from monitor
++ * @connector: DRM connector device to use
++ *
++ * Fetch the EDID information from @connector using the DDC bus.
++ */
++int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
++{
++ struct edid *edid;
++ int ret = 0;
++
++ edid =
++ drm_get_edid(&psb_intel_output->base,
++ &psb_intel_output->ddc_bus->adapter);
++ if (edid) {
++ drm_mode_connector_update_edid_property(&psb_intel_output->
++ base, edid);
++ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
++ kfree(edid);
++ }
++ return ret;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_reg.h b/drivers/gpu/drm/mrst/drv/psb_intel_reg.h
+new file mode 100644
+index 0000000..d6b8921
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_reg.h
+@@ -0,0 +1,1099 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#define BLC_PWM_CTL 0x61254
++#define BLC_PWM_CTL2 0x61250
++#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
++/**
++ * This is the most significant 15 bits of the number of backlight cycles in a
++ * complete cycle of the modulated backlight control.
++ *
++ * The actual value is this field multiplied by two.
++ */
++#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
++#define BLM_LEGACY_MODE (1 << 16)
++/**
++ * This is the number of cycles out of the backlight modulation cycle for which
++ * the backlight is on.
++ *
++ * This field must be no greater than the number of cycles in the complete
++ * backlight modulation cycle.
++ */
++#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
++#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
++
++#define I915_GCFGC 0xf0
++#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
++#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
++#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
++#define I915_DISPLAY_CLOCK_MASK (7 << 4)
++
++#define I855_HPLLCC 0xc0
++#define I855_CLOCK_CONTROL_MASK (3 << 0)
++#define I855_CLOCK_133_200 (0 << 0)
++#define I855_CLOCK_100_200 (1 << 0)
++#define I855_CLOCK_100_133 (2 << 0)
++#define I855_CLOCK_166_250 (3 << 0)
++
++/* I830 CRTC registers */
++#define HTOTAL_A 0x60000
++#define HBLANK_A 0x60004
++#define HSYNC_A 0x60008
++#define VTOTAL_A 0x6000c
++#define VBLANK_A 0x60010
++#define VSYNC_A 0x60014
++#define PIPEASRC 0x6001c
++#define BCLRPAT_A 0x60020
++#define VSYNCSHIFT_A 0x60028
++
++#define HTOTAL_B 0x61000
++#define HBLANK_B 0x61004
++#define HSYNC_B 0x61008
++#define VTOTAL_B 0x6100c
++#define VBLANK_B 0x61010
++#define VSYNC_B 0x61014
++#define PIPEBSRC 0x6101c
++#define BCLRPAT_B 0x61020
++#define VSYNCSHIFT_B 0x61028
++
++#define PP_STATUS 0x61200
++# define PP_ON (1 << 31)
++/**
++ * Indicates that all dependencies of the panel are on:
++ *
++ * - PLL enabled
++ * - pipe enabled
++ * - LVDS/DVOB/DVOC on
++ */
++# define PP_READY (1 << 30)
++# define PP_SEQUENCE_NONE (0 << 28)
++# define PP_SEQUENCE_ON (1 << 28)
++# define PP_SEQUENCE_OFF (2 << 28)
++# define PP_SEQUENCE_MASK 0x30000000
++#define PP_CONTROL 0x61204
++# define POWER_TARGET_ON (1 << 0)
++
++#define LVDSPP_ON 0x61208
++#define LVDSPP_OFF 0x6120c
++#define PP_CYCLE 0x61210
++
++#define PFIT_CONTROL 0x61230
++# define PFIT_ENABLE (1 << 31)
++# define PFIT_PIPE_MASK (3 << 29)
++# define PFIT_PIPE_SHIFT 29
++# define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
++# define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
++# define VERT_INTERP_DISABLE (0 << 10)
++# define VERT_INTERP_BILINEAR (1 << 10)
++# define VERT_INTERP_MASK (3 << 10)
++# define VERT_AUTO_SCALE (1 << 9)
++# define HORIZ_INTERP_DISABLE (0 << 6)
++# define HORIZ_INTERP_BILINEAR (1 << 6)
++# define HORIZ_INTERP_MASK (3 << 6)
++# define HORIZ_AUTO_SCALE (1 << 5)
++# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
++
++#define PFIT_PGM_RATIOS 0x61234
++# define PFIT_VERT_SCALE_MASK 0xfff00000
++# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
++
++#define PFIT_AUTO_RATIOS 0x61238
++
++
++#define DPLL_A 0x06014
++#define DPLL_B 0x06018
++# define DPLL_VCO_ENABLE (1 << 31)
++# define DPLL_DVO_HIGH_SPEED (1 << 30)
++# define DPLL_SYNCLOCK_ENABLE (1 << 29)
++# define DPLL_VGA_MODE_DIS (1 << 28)
++# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
++# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
++# define DPLL_MODE_MASK (3 << 26)
++# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
++# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
++# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
++# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
++# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
++# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
++/**
++ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
++ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
++/**
++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
++ * this field (only one bit may be set).
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
++# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
++# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
++ * in DVO non-gang */
++# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
++# define PLL_REF_INPUT_DREFCLK (0 << 13)
++# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
++# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
++ * TVCLKIN */
++# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
++# define PLL_REF_INPUT_MASK (3 << 13)
++# define PLL_LOAD_PULSE_PHASE_SHIFT 9
++/*
++ * Parallel to Serial Load Pulse phase selection.
++ * Selects the phase for the 10X DPLL clock for the PCIe
++ * digital display port. The range is 4 to 13; 10 or more
++ * is just a flip delay. The default is 6
++ */
++# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
++# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
++
++/**
++ * SDVO multiplier for 945G/GM. Not used on 965.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++# define SDVO_MULTIPLIER_MASK 0x000000ff
++# define SDVO_MULTIPLIER_SHIFT_HIRES 4
++# define SDVO_MULTIPLIER_SHIFT_VGA 0
++
++/** @defgroup DPLL_MD
++ * @{
++ */
++/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_A_MD 0x0601c
++/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_B_MD 0x06020
++/**
++ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
++ *
++ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
++ */
++# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
++# define DPLL_MD_UDI_DIVIDER_SHIFT 24
++/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
++# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
++# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
++/**
++ * SDVO/UDI pixel multiplier.
++ *
++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
++ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
++ * dummy bytes in the datastream at an increased clock rate, with both sides of
++ * the link knowing how many bytes are fill.
++ *
++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
++ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
++ * through an SDVO command.
++ *
++ * This register field has values of multiplication factor minus 1, with
++ * a maximum multiplier of 5 for SDVO.
++ */
++# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
++# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
++/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
++ * This best be set to the default value (3) or the CRT won't work. No,
++ * I don't entirely understand what this does...
++ */
++# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
++# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
++/** @} */
++
++#define DPLL_TEST 0x606c
++# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
++# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
++# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
++# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
++# define DPLLB_TEST_N_BYPASS (1 << 19)
++# define DPLLB_TEST_M_BYPASS (1 << 18)
++# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
++# define DPLLA_TEST_N_BYPASS (1 << 3)
++# define DPLLA_TEST_M_BYPASS (1 << 2)
++# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
++
++#define ADPA 0x61100
++#define ADPA_DAC_ENABLE (1<<31)
++#define ADPA_DAC_DISABLE 0
++#define ADPA_PIPE_SELECT_MASK (1<<30)
++#define ADPA_PIPE_A_SELECT 0
++#define ADPA_PIPE_B_SELECT (1<<30)
++#define ADPA_USE_VGA_HVPOLARITY (1<<15)
++#define ADPA_SETS_HVPOLARITY 0
++#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
++#define ADPA_VSYNC_CNTL_ENABLE 0
++#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
++#define ADPA_HSYNC_CNTL_ENABLE 0
++#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
++#define ADPA_VSYNC_ACTIVE_LOW 0
++#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
++#define ADPA_HSYNC_ACTIVE_LOW 0
++
++#define FPA0 0x06040
++#define FPA1 0x06044
++#define FPB0 0x06048
++#define FPB1 0x0604c
++# define FP_N_DIV_MASK 0x003f0000
++# define FP_N_DIV_SHIFT 16
++# define FP_M1_DIV_MASK 0x00003f00
++# define FP_M1_DIV_SHIFT 8
++# define FP_M2_DIV_MASK 0x0000003f
++# define FP_M2_DIV_SHIFT 0
++
++
++#define PORT_HOTPLUG_EN 0x61110
++# define SDVOB_HOTPLUG_INT_EN (1 << 26)
++# define SDVOC_HOTPLUG_INT_EN (1 << 25)
++# define TV_HOTPLUG_INT_EN (1 << 18)
++# define CRT_HOTPLUG_INT_EN (1 << 9)
++# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
++
++#define PORT_HOTPLUG_STAT 0x61114
++# define CRT_HOTPLUG_INT_STATUS (1 << 11)
++# define TV_HOTPLUG_INT_STATUS (1 << 10)
++# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
++# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
++# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
++# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
++# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
++# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
++
++#define SDVOB 0x61140
++#define SDVOC 0x61160
++#define SDVO_ENABLE (1 << 31)
++#define SDVO_PIPE_B_SELECT (1 << 30)
++#define SDVO_STALL_SELECT (1 << 29)
++#define SDVO_INTERRUPT_ENABLE (1 << 26)
++/**
++ * 915G/GM SDVO pixel multiplier.
++ *
++ * Programmed value is multiplier - 1, up to 5x.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
++#define SDVO_PORT_MULTIPLY_SHIFT 23
++#define SDVO_PHASE_SELECT_MASK (15 << 19)
++#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
++#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
++#define SDVOC_GANG_MODE (1 << 16)
++#define SDVO_BORDER_ENABLE (1 << 7)
++#define SDVOB_PCIE_CONCURRENCY (1 << 3)
++#define SDVO_DETECTED (1 << 2)
++/* Bits to be preserved when writing */
++#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
++#define SDVOC_PRESERVE_MASK (1 << 17)
++
++/** @defgroup LVDS
++ * @{
++ */
++/**
++ * This register controls the LVDS output enable, pipe selection, and data
++ * format selection.
++ *
++ * All of the clock/data pairs are force powered down by power sequencing.
++ */
++#define LVDS 0x61180
++/**
++ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
++ * the DPLL semantics change when the LVDS is assigned to that pipe.
++ */
++# define LVDS_PORT_EN (1 << 31)
++/** Selects pipe B for LVDS data. Must be set on pre-965. */
++# define LVDS_PIPEB_SELECT (1 << 30)
++
++/** Turns on border drawing to allow centered display. */
++# define LVDS_BORDER_EN (1 << 15)
++
++/**
++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
++ * pixel.
++ */
++# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
++# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
++# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
++/**
++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
++ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
++ * on.
++ */
++# define LVDS_A3_POWER_MASK (3 << 6)
++# define LVDS_A3_POWER_DOWN (0 << 6)
++# define LVDS_A3_POWER_UP (3 << 6)
++/**
++ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
++ * is set.
++ */
++# define LVDS_CLKB_POWER_MASK (3 << 4)
++# define LVDS_CLKB_POWER_DOWN (0 << 4)
++# define LVDS_CLKB_POWER_UP (3 << 4)
++
++/**
++ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
++ * setting for whether we are in dual-channel mode. The B3 pair will
++ * additionally only be powered up when LVDS_A3_POWER_UP is set.
++ */
++# define LVDS_B0B3_POWER_MASK (3 << 2)
++# define LVDS_B0B3_POWER_DOWN (0 << 2)
++# define LVDS_B0B3_POWER_UP (3 << 2)
++
++#define PIPEACONF 0x70008
++#define PIPEACONF_ENABLE (1<<31)
++#define PIPEACONF_DISABLE 0
++#define PIPEACONF_DOUBLE_WIDE (1<<30)
++#define I965_PIPECONF_ACTIVE (1<<30)
++#define PIPEACONF_SINGLE_WIDE 0
++#define PIPEACONF_PIPE_UNLOCKED 0
++#define PIPEACONF_PIPE_LOCKED (1<<25)
++#define PIPEACONF_PALETTE 0
++#define PIPEACONF_GAMMA (1<<24)
++#define PIPECONF_FORCE_BORDER (1<<25)
++#define PIPECONF_PROGRESSIVE (0 << 21)
++#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
++#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
++
++#define PIPEBCONF 0x71008
++#define PIPEBCONF_ENABLE (1<<31)
++#define PIPEBCONF_DISABLE 0
++#define PIPEBCONF_DOUBLE_WIDE (1<<30)
++#define PIPEBCONF_DISABLE 0
++#define PIPEBCONF_GAMMA (1<<24)
++#define PIPEBCONF_PALETTE 0
++
++#define PIPEBGCMAXRED 0x71010
++#define PIPEBGCMAXGREEN 0x71014
++#define PIPEBGCMAXBLUE 0x71018
++
++#define PIPEASTAT 0x70024
++#define PIPEBSTAT 0x71024
++#define PIPE_VBLANK_CLEAR (1 << 1)
++#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18)
++#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
++
++#define PIPE_VSYNC_ENABL (1UL<<25)
++#define PIPE_VSYNC_CLEAR (1UL<<9)
++#define HISTOGRAM_INT_CONTROL 0x61268
++#define HISTOGRAM_BIN_DATA 0X61264
++#define HISTOGRAM_LOGIC_CONTROL 0x61260
++#define PWM_CONTROL_LOGIC 0x61250
++#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
++#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
++#define PIPE_DPST_EVENT_STATUS (1UL<<7)
++#define HISTOGRAM_INTERRUPT_ENABLE (1UL<<31)
++#define HISTOGRAM_LOGIC_ENABLE (1UL<<31)
++#define PWM_LOGIC_ENABLE (1UL<<31)
++#define PWM_PHASEIN_ENABLE (1UL<<25)
++#define PWM_PHASEIN_INT_ENABLE (1UL<<24)
++#define PWM_PHASEIN_VB_COUNT 0x00001f00
++#define PWM_PHASEIN_INC 0x0000001f
++#define HISTOGRAM_INT_CTRL_CLEAR (1UL<<30)
++#define DPST_YUV_LUMA_MODE 0
++
++struct dpst_ie_histogram_control {
++ union {
++ uint32_t data;
++ struct {
++ uint32_t bin_reg_index:7;
++ uint32_t reserved:4;
++ uint32_t bin_reg_func_select:1;
++ uint32_t sync_to_phase_in:1;
++ uint32_t alt_enhancement_mode:2;
++ uint32_t reserved1:1;
++ uint32_t sync_to_phase_in_count:8;
++ uint32_t histogram_mode_select:1;
++ uint32_t reserved2:4;
++ uint32_t ie_pipe_assignment:1;
++ uint32_t ie_mode_table_enabled:1;
++ uint32_t ie_histogram_enable:1;
++ };
++ };
++};
++
++struct dpst_guardband {
++ union {
++ uint32_t data;
++ struct {
++ uint32_t guardband:22;
++ uint32_t guardband_interrupt_delay:8;
++ uint32_t interrupt_status:1;
++ uint32_t interrupt_enable:1;
++ };
++ };
++};
++
++#define PIPEAFRAMEHIGH 0x70040
++#define PIPEAFRAMEPIXEL 0x70044
++#define PIPEBFRAMEHIGH 0x71040
++#define PIPEBFRAMEPIXEL 0x71044
++#define PIPE_FRAME_HIGH_MASK 0x0000ffff
++#define PIPE_FRAME_HIGH_SHIFT 0
++#define PIPE_FRAME_LOW_MASK 0xff000000
++#define PIPE_FRAME_LOW_SHIFT 24
++#define PIPE_PIXEL_MASK 0x00ffffff
++#define PIPE_PIXEL_SHIFT 0
++
++#define DSPARB 0x70030
++#define DSPFW1 0x70034
++#define DSPFW2 0x70038
++#define DSPFW3 0x7003c
++#define DSPFW4 0x70050
++#define DSPFW5 0x70054
++#define DSPFW6 0x70058
++#define DSPCHICKENBIT 0x70400
++#define DSPACNTR 0x70180
++#define DSPBCNTR 0x71180
++#define DISPLAY_PLANE_ENABLE (1<<31)
++#define DISPLAY_PLANE_DISABLE 0
++#define DISPPLANE_GAMMA_ENABLE (1<<30)
++#define DISPPLANE_GAMMA_DISABLE 0
++#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
++#define DISPPLANE_8BPP (0x2<<26)
++#define DISPPLANE_15_16BPP (0x4<<26)
++#define DISPPLANE_16BPP (0x5<<26)
++#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
++#define DISPPLANE_32BPP (0x7<<26)
++#define DISPPLANE_STEREO_ENABLE (1<<25)
++#define DISPPLANE_STEREO_DISABLE 0
++#define DISPPLANE_SEL_PIPE_MASK (1<<24)
++#define DISPPLANE_SEL_PIPE_A 0
++#define DISPPLANE_SEL_PIPE_B (1<<24)
++#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
++#define DISPPLANE_SRC_KEY_DISABLE 0
++#define DISPPLANE_LINE_DOUBLE (1<<20)
++#define DISPPLANE_NO_LINE_DOUBLE 0
++#define DISPPLANE_STEREO_POLARITY_FIRST 0
++#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
++/* plane B only */
++#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
++#define DISPPLANE_ALPHA_TRANS_DISABLE 0
++#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
++#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
++
++#define DSPABASE 0x70184
++#define DSPALINOFF 0x70184
++#define DSPASTRIDE 0x70188
++
++#define DSPBBASE 0x71184
++#define DSPBLINOFF 0X71184
++#define DSPBADDR DSPBBASE
++#define DSPBSTRIDE 0x71188
++
++#define DSPAKEYVAL 0x70194
++#define DSPAKEYMASK 0x70198
++
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#define DSPBPOS 0x7118C
++#define DSPBSIZE 0x71190
++
++#define DSPASURF 0x7019C
++#define DSPATILEOFF 0x701A4
++
++#define DSPBSURF 0x7119C
++#define DSPBTILEOFF 0x711A4
++
++/* plane C only */
++#define DSPCCNTR 0x72180
++#define DSPCLINOFF 0x72184
++#define DSPCSTRIDE 0x72188
++#define DSPCPOS 0x7218C
++#define DSPCSIZE 0x72190
++#define DSPCSURF 0x7219C
++#define DSPCKEYMAXVAL 0x721A0
++#define DSPCKEYMINVAL 0x72194
++#define DSPCKEYMSK 0x72198
++
++#define VGACNTRL 0x71400
++# define VGA_DISP_DISABLE (1 << 31)
++# define VGA_2X_MODE (1 << 30)
++# define VGA_PIPE_B_SELECT (1 << 29)
++
++/*
++ * Overlay registers
++ */
++#define OV_OVADD 0x30000
++#define OV_OGAMC5 0x30010
++#define OV_OGAMC4 0x30014
++#define OV_OGAMC3 0x30018
++#define OV_OGAMC2 0x3001C
++#define OV_OGAMC1 0x30020
++#define OV_OGAMC0 0x30024
++
++/*
++ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
++ * of video memory available to the BIOS in SWF1.
++ */
++
++#define SWF0 0x71410
++#define SWF1 0x71414
++#define SWF2 0x71418
++#define SWF3 0x7141c
++#define SWF4 0x71420
++#define SWF5 0x71424
++#define SWF6 0x71428
++
++/*
++ * 855 scratch registers.
++ */
++#define SWF00 0x70410
++#define SWF01 0x70414
++#define SWF02 0x70418
++#define SWF03 0x7041c
++#define SWF04 0x70420
++#define SWF05 0x70424
++#define SWF06 0x70428
++
++#define SWF10 SWF0
++#define SWF11 SWF1
++#define SWF12 SWF2
++#define SWF13 SWF3
++#define SWF14 SWF4
++#define SWF15 SWF5
++#define SWF16 SWF6
++
++#define SWF30 0x72414
++#define SWF31 0x72418
++#define SWF32 0x7241c
++
++
++/*
++ * Palette registers
++ */
++#define PALETTE_A 0x0a000
++#define PALETTE_B 0x0a800
++
++#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
++#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
++#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
++#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
++#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
++
++
++/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
++#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
++#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
++#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
++#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
++
++#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
++ (dev)->pci_device == 0x2982 || \
++ (dev)->pci_device == 0x2992 || \
++ (dev)->pci_device == 0x29A2 || \
++ (dev)->pci_device == 0x2A02 || \
++ (dev)->pci_device == 0x2A12)
++
++#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
++
++#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
++ (dev)->pci_device == 0x29B2 || \
++ (dev)->pci_device == 0x29D2)
++
++#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
++ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
++ IS_MRST(dev))
++
++#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
++ IS_I945GM(dev) || IS_I965GM(dev) || \
++ IS_POULSBO(dev) || IS_MRST(dev))
++
++/* Cursor A & B regs */
++#define CURACNTR 0x70080
++#define CURSOR_MODE_DISABLE 0x00
++#define CURSOR_MODE_64_32B_AX 0x07
++#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define MCURSOR_GAMMA_ENABLE (1 << 26)
++#define CURABASE 0x70084
++#define CURAPOS 0x70088
++#define CURSOR_POS_MASK 0x007FF
++#define CURSOR_POS_SIGN 0x8000
++#define CURSOR_X_SHIFT 0
++#define CURSOR_Y_SHIFT 16
++#define CURBCNTR 0x700c0
++#define CURBBASE 0x700c4
++#define CURBPOS 0x700c8
++
++/*
++ * Interrupt Registers
++ */
++#define IER 0x020a0
++#define IIR 0x020a4
++#define IMR 0x020a8
++#define ISR 0x020ac
++
++/*
++ * MOORESTOWN delta registers
++ */
++#define MRST_DPLL_A 0x0f014
++#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
++#define MRST_FPA0 0x0f040
++#define MRST_FPA1 0x0f044
++#define MRST_PERF_MODE 0x020f4
++
++/* #define LVDS 0x61180 */
++# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
++# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
++# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
++
++#define MIPI 0x61190
++# define MIPI_PORT_EN (1 << 31)
++/** Turns on border drawing to allow centered display. */
++# define MIPI_BORDER_EN (1 << 15)
++
++/* #define PP_CONTROL 0x61204 */
++# define POWER_DOWN_ON_RESET (1 << 1)
++
++/* #define PFIT_CONTROL 0x61230 */
++# define PFIT_PIPE_SELECT (3 << 29)
++# define PFIT_PIPE_SELECT_SHIFT (29)
++
++/* #define BLC_PWM_CTL 0x61254 */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
++
++/* #define PIPEACONF 0x70008 */
++#define PIPEACONF_PIPE_STATE (1<<30)
++/* #define DSPACNTR 0x70180 */
++#if 0 /*FIXME JLIU7 need to define the following */
++1000 = 32 - bit RGBX(10 : 10 : 10 : 2)
++pixel format.Ignore alpha.1010 = BGRX 10 : 10 : 10 : 2 1100 = 64 - bit RGBX
++(16 : 16 : 16 : 16) 16 bit floating point pixel format.
++Ignore alpha.1110 = 32 - bit RGBX(8 : 8 : 8 : 8) pixel format.
++ Ignore
++ alpha.
++#endif /*FIXME JLIU7 need to define the following */
++
++#define MRST_DSPABASE 0x7019c
++
++/*
++ * MOORESTOWN reserved registers
++ */
++#if 0
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#endif
++/*
++ * Moorestown registers.
++ */
++/*===========================================================================
++; General Constants
++;--------------------------------------------------------------------------*/
++#define BIT0 0x00000001
++#define BIT1 0x00000002
++#define BIT2 0x00000004
++#define BIT3 0x00000008
++#define BIT4 0x00000010
++#define BIT5 0x00000020
++#define BIT6 0x00000040
++#define BIT7 0x00000080
++#define BIT8 0x00000100
++#define BIT9 0x00000200
++#define BIT10 0x00000400
++#define BIT11 0x00000800
++#define BIT12 0x00001000
++#define BIT13 0x00002000
++#define BIT14 0x00004000
++#define BIT15 0x00008000
++#define BIT16 0x00010000
++#define BIT17 0x00020000
++#define BIT18 0x00040000
++#define BIT19 0x00080000
++#define BIT20 0x00100000
++#define BIT21 0x00200000
++#define BIT22 0x00400000
++#define BIT23 0x00800000
++#define BIT24 0x01000000
++#define BIT25 0x02000000
++#define BIT26 0x04000000
++#define BIT27 0x08000000
++#define BIT28 0x10000000
++#define BIT29 0x20000000
++#define BIT30 0x40000000
++#define BIT31 0x80000000
++/*===========================================================================
++; MIPI IP registers
++;--------------------------------------------------------------------------*/
++#define DEVICE_READY_REG 0xb000
++#define INTR_STAT_REG 0xb004
++#define RX_SOT_ERROR BIT0
++#define RX_SOT_SYNC_ERROR BIT1
++#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
++#define RX_LP_TX_SYNC_ERROR BIT4
++#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
++#define RX_FALSE_CONTROL_ERROR BIT6
++#define RX_ECC_SINGLE_BIT_ERROR BIT7
++#define RX_ECC_MULTI_BIT_ERROR BIT8
++#define RX_CHECKSUM_ERROR BIT9
++#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
++#define RX_DSI_VC_ID_INVALID BIT11
++#define TX_FALSE_CONTROL_ERROR BIT12
++#define TX_ECC_SINGLE_BIT_ERROR BIT13
++#define TX_ECC_MULTI_BIT_ERROR BIT14
++#define TX_CHECKSUM_ERROR BIT15
++#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
++#define TX_DSI_VC_ID_INVALID BIT17
++#define HIGH_CONTENTION BIT18
++#define LOW_CONTENTION BIT19
++#define DPI_FIFO_UNDER_RUN BIT20
++#define HS_TX_TIMEOUT BIT21
++#define LP_RX_TIMEOUT BIT22
++#define TURN_AROUND_ACK_TIMEOUT BIT23
++#define ACK_WITH_NO_ERROR BIT24
++#define INTR_EN_REG 0xb008
++#define DSI_FUNC_PRG_REG 0xb00c
++#define DPI_CHANNEL_NUMBER_POS 0x03
++#define DBI_CHANNEL_NUMBER_POS 0x05
++#define FMT_DPI_POS 0x07
++#define FMT_DBI_POS 0x0A
++#define DBI_DATA_WIDTH_POS 0x0D
++#define HS_TX_TIMEOUT_REG 0xb010
++#define LP_RX_TIMEOUT_REG 0xb014
++#define TURN_AROUND_TIMEOUT_REG 0xb018
++#define DEVICE_RESET_REG 0xb01C
++#define DPI_RESOLUTION_REG 0xb020
++#define RES_V_POS 0x10
++#define DBI_RESOLUTION_REG 0xb024
++#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
++#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
++#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
++#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
++#define VERT_SYNC_PAD_COUNT_REG 0xb038
++#define VERT_BACK_PORCH_COUNT_REG 0xb03c
++#define VERT_FRONT_PORCH_COUNT_REG 0xb040
++#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
++#define DPI_CONTROL_REG 0xb048
++#define DPI_SHUT_DOWN BIT0
++#define DPI_TURN_ON BIT1
++#define DPI_COLOR_MODE_ON BIT2
++#define DPI_COLOR_MODE_OFF BIT3
++#define DPI_BACK_LIGHT_ON BIT4
++#define DPI_BACK_LIGHT_OFF BIT5
++#define DPI_LP BIT6
++#define DPI_DATA_REG 0xb04c
++#define DPI_BACK_LIGHT_ON_DATA 0x07
++#define DPI_BACK_LIGHT_OFF_DATA 0x17
++#define INIT_COUNT_REG 0xb050
++#define MAX_RET_PAK_REG 0xb054
++#define VIDEO_FMT_REG 0xb058
++#define EOT_DISABLE_REG 0xb05c
++#define LP_BYTECLK_REG 0xb060
++#define LP_GEN_DATA_REG 0xb064
++#define HS_GEN_DATA_REG 0xb068
++#define LP_GEN_CTRL_REG 0xb06C
++#define HS_GEN_CTRL_REG 0xb070
++#define GEN_FIFO_STAT_REG 0xb074
++#define HS_DATA_FIFO_FULL BIT0
++#define HS_DATA_FIFO_HALF_EMPTY BIT1
++#define HS_DATA_FIFO_EMPTY BIT2
++#define LP_DATA_FIFO_FULL BIT8
++#define LP_DATA_FIFO_HALF_EMPTY BIT9
++#define LP_DATA_FIFO_EMPTY BIT10
++#define HS_CTRL_FIFO_FULL BIT16
++#define HS_CTRL_FIFO_HALF_EMPTY BIT17
++#define HS_CTRL_FIFO_EMPTY BIT18
++#define LP_CTRL_FIFO_FULL BIT24
++#define LP_CTRL_FIFO_HALF_EMPTY BIT25
++#define LP_CTRL_FIFO_EMPTY BIT26
++#define DBI_FIFO_EMPTY BIT27
++#define DPI_FIFO_EMPTY BIT28
++#define HS_LS_DBI_ENABLE_REG 0xb078
++#define TXCLKESC_REG 0xb07c
++#define DPHY_PARAM_REG 0xb080
++/*===========================================================================
++; MIPI Adapter registers
++;--------------------------------------------------------------------------*/
++#define MIPI_CONTROL_REG 0xb104
++#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
++#define MIPI_DATA_ADDRESS_REG 0xb108
++#define MIPI_DATA_LENGTH_REG 0xb10C
++#define MIPI_COMMAND_ADDRESS_REG 0xb110
++#define MIPI_COMMAND_LENGTH_REG 0xb114
++#define MIPI_READ_DATA_RETURN_REG0 0xb118
++#define MIPI_READ_DATA_RETURN_REG1 0xb11C
++#define MIPI_READ_DATA_RETURN_REG2 0xb120
++#define MIPI_READ_DATA_RETURN_REG3 0xb124
++#define MIPI_READ_DATA_RETURN_REG4 0xb128
++#define MIPI_READ_DATA_RETURN_REG5 0xb12C
++#define MIPI_READ_DATA_RETURN_REG6 0xb130
++#define MIPI_READ_DATA_RETURN_REG7 0xb134
++#define MIPI_READ_DATA_VALID_REG 0xb138
++/* DBI COMMANDS */
++#define soft_reset 0x01
++/* ************************************************************************* *\
++The display module performs a software reset.
++Registers are written with their SW Reset default values.
++\* ************************************************************************* */
++#define get_power_mode 0x0a
++/* ************************************************************************* *\
++The display module returns the current power mode
++\* ************************************************************************* */
++#define get_address_mode 0x0b
++/* ************************************************************************* *\
++The display module returns the current status.
++\* ************************************************************************* */
++#define get_pixel_format 0x0c
++/* ************************************************************************* *\
++This command gets the pixel format for the RGB image data
++used by the interface.
++\* ************************************************************************* */
++#define get_display_mode 0x0d
++/* ************************************************************************* *\
++The display module returns the Display Image Mode status.
++\* ************************************************************************* */
++#define get_signal_mode 0x0e
++/* ************************************************************************* *\
++The display module returns the Display Signal Mode.
++\* ************************************************************************* */
++#define get_diagnostic_result 0x0f
++/* ************************************************************************* *\
++The display module returns the self-diagnostic results following
++a Sleep Out command.
++\* ************************************************************************* */
++#define enter_sleep_mode 0x10
++/* ************************************************************************* *\
++This command causes the display module to enter the Sleep mode.
++In this mode, all unnecessary blocks inside the display module are disabled
++except interface communication. This is the lowest power mode
++the display module supports.
++\* ************************************************************************* */
++#define exit_sleep_mode 0x11
++/* ************************************************************************* *\
++This command causes the display module to exit Sleep mode.
++All blocks inside the display module are enabled.
++\* ************************************************************************* */
++#define enter_partial_mode 0x12
++/* ************************************************************************* *\
++This command causes the display module to enter the Partial Display Mode.
++The Partial Display Mode window is described by the set_partial_area command.
++\* ************************************************************************* */
++#define enter_normal_mode 0x13
++/* ************************************************************************* *\
++This command causes the display module to enter the Normal mode.
++Normal Mode is defined as Partial Display mode and Scroll mode are off
++\* ************************************************************************* */
++#define exit_invert_mode 0x20
++/* ************************************************************************* *\
++This command causes the display module to stop inverting the image data on
++the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define enter_invert_mode 0x21
++/* ************************************************************************* *\
++This command causes the display module to invert the image data only on
++the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_gamma_curve 0x26
++/* ************************************************************************* *\
++This command selects the desired gamma curve for the display device.
++Four fixed gamma curves are defined in section DCS spec.
++\* ************************************************************************* */
++#define set_display_off 0x28
++/* ************************************************************************* *\
++This command causes the display module to stop displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_display_on 0x29
++/* ************************************************************************* *\
++This command causes the display module to start displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_column_address 0x2a
++/* ************************************************************************* *\
++This command defines the column extent of the frame memory accessed by the
++hostprocessor with the read_memory_continue and write_memory_continue commands.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_page_address 0x2b
++/* ************************************************************************* *\
++This command defines the page extent of the frame memory accessed by the host
++processor with the write_memory_continue and read_memory_continue command.
++No status bits are changed.
++\* ************************************************************************* */
++#define write_mem_start 0x2c
++/* ************************************************************************* *\
++This command transfers image data from the host processor to the display
++module s frame memory starting at the pixel location specified by
++preceding set_column_address and set_page_address commands.
++\* ************************************************************************* */
++#define set_partial_area 0x30
++/* ************************************************************************* *\
++This command defines the Partial Display mode s display area.
++There are two parameters associated with
++this command, the first defines the Start Row (SR) and the second the End Row
++(ER). SR and ER refer to the Frame Memory Line Pointer.
++\* ************************************************************************* */
++#define set_scroll_area 0x33
++/* ************************************************************************* *\
++This command defines the display modules Vertical Scrolling Area.
++\* ************************************************************************* */
++#define set_tear_off 0x34
++/* ************************************************************************* *\
++This command turns off the display modules Tearing Effect output signal on
++the TE signal line.
++\* ************************************************************************* */
++#define set_tear_on 0x35
++/* ************************************************************************* *\
++This command turns on the display modules Tearing Effect output signal
++on the TE signal line.
++\* ************************************************************************* */
++#define set_address_mode 0x36
++/* ************************************************************************* *\
++This command sets the data order for transfers from the host processor to
++display modules frame memory,bits B[7:5] and B3, and from the display
++modules frame memory to the display device, bits B[2:0] and B4.
++\* ************************************************************************* */
++#define set_scroll_start 0x37
++/* ************************************************************************* *\
++This command sets the start of the vertical scrolling area in the frame memory.
++The vertical scrolling area is fully defined when this command is used with
++the set_scroll_area command The set_scroll_start command has one parameter,
++the Vertical Scroll Pointer. The VSP defines the line in the frame memory
++that is written to the display device as the first line of the vertical
++scroll area.
++\* ************************************************************************* */
++#define exit_idle_mode 0x38
++/* ************************************************************************* *\
++This command causes the display module to exit Idle mode.
++\* ************************************************************************* */
++#define enter_idle_mode 0x39
++/* ************************************************************************* *\
++This command causes the display module to enter Idle Mode.
++In Idle Mode, color expression is reduced. Colors are shown on the display
++device using the MSB of each of the R, G and B color components in the frame
++memory
++\* ************************************************************************* */
++#define set_pixel_format 0x3a
++/* ************************************************************************* *\
++This command sets the pixel format for the RGB image data used by the interface.
++Bits D[6:4] DPI Pixel Format Definition
++Bits D[2:0] DBI Pixel Format Definition
++Bits D7 and D3 are not used.
++\* ************************************************************************* */
++#define write_mem_cont 0x3c
++/* ************************************************************************* *\
++This command transfers image data from the host processor to the display
++module's frame memory continuing from the pixel location following the
++previous write_memory_continue or write_memory_start command.
++\* ************************************************************************* */
++#define set_tear_scanline 0x44
++/* ************************************************************************* *\
++This command turns on the display modules Tearing Effect output signal on the
++TE signal line when the display module reaches line N.
++\* ************************************************************************* */
++#define get_scanline 0x45
++/* ************************************************************************* *\
++The display module returns the current scanline, N, used to update the
++display device. The total number of scanlines on a display device is
++defined as VSYNC + VBP + VACT + VFP.The first scanline is defined as
++the first line of V Sync and is denoted as Line 0.
++When in Sleep Mode, the value returned by get_scanline is undefined.
++\* ************************************************************************* */
++/* DCS Interface Pixel Formats */
++#define DCS_PIXEL_FORMAT_3BPP 0x1
++#define DCS_PIXEL_FORMAT_8BPP 0x2
++#define DCS_PIXEL_FORMAT_12BPP 0x3
++#define DCS_PIXEL_FORMAT_16BPP 0x5
++#define DCS_PIXEL_FORMAT_18BPP 0x6
++#define DCS_PIXEL_FORMAT_24BPP 0x7
++/* ONE PARAMETER READ DATA */
++#define addr_mode_data 0xfc
++#define diag_res_data 0x00
++#define disp_mode_data 0x23
++#define pxl_fmt_data 0x77
++#define pwr_mode_data 0x74
++#define sig_mode_data 0x00
++/* TWO PARAMETERS READ DATA */
++#define scanline_data1 0xff
++#define scanline_data2 0xff
++/* DPI PIXEL FORMATS */
++#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
++#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
++#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
++ * 666 FORMAT
++ */
++#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
++#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
++ * with Sync Pulse
++ */
++#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
++ * with Sync events
++ */
++#define BURST_MODE 0x03 /* Burst Mode */
++#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
++#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
++#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
++#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
++#define DBI_NOT_SUPPORTED 0x00 /* command mode
++ * is not supported
++ */
++#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
++#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
++#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
++#define DBI_COMMAND_BUFFER_SIZE 0x120 /* Allocate at least
++ * 0x100 Byte with 32
++ * byte alignment
++ */
++#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
++ * 0x100 Byte with 32
++ * byte alignment
++ */
++#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
++#define SKU_83 0x01
++#define SKU_100 0x02
++#define SKU_100L 0x04
++#define SKU_BYPASS 0x08
++#if 0
++/* ************************************************************************* *\
++DSI command data structure
++\* ************************************************************************* */
++union DSI_LONG_PACKET_HEADER {
++ u32 DSI_longPacketHeader;
++ struct {
++ u8 dataID;
++ u16 wordCount;
++ u8 ECC;
++ };
++#if 0 /*FIXME JLIU7 */
++ struct {
++ u8 DT:6;
++ u8 VC:2;
++ };
++#endif /*FIXME JLIU7 */
++};
++
++union MIPI_ADPT_CMD_LNG_REG {
++ u32 commnadLengthReg;
++ struct {
++ u8 command0;
++ u8 command1;
++ u8 command2;
++ u8 command3;
++ };
++};
++
++struct SET_COLUMN_ADDRESS_DATA {
++ u8 command;
++ u16 SC; /* Start Column */
++ u16 EC; /* End Column */
++};
++
++struct SET_PAGE_ADDRESS_DATA {
++ u8 command;
++ u16 SP; /* Start Page */
++ u16 EP; /* End Page */
++};
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c b/drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c
+new file mode 100644
+index 0000000..87696ed
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c
+@@ -0,0 +1,1408 @@
++/*
++ * Copyright (c) 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/delay.h>
++/* #include <drm/drm_crtc.h> */
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_sdvo_regs.h"
++
++struct psb_intel_sdvo_priv {
++ struct psb_intel_i2c_chan *i2c_bus;
++ int slaveaddr;
++ int output_device;
++
++ u16 active_outputs;
++
++ struct psb_intel_sdvo_caps caps;
++ int pixel_clock_min, pixel_clock_max;
++
++ int save_sdvo_mult;
++ u16 save_active_outputs;
++ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
++ struct psb_intel_sdvo_dtd save_output_dtd[16];
++ u32 save_SDVOX;
++ u8 in_out_map[4];
++
++ u8 by_input_wiring;
++ u32 active_device;
++};
++
++/**
++ * Writes the SDVOB or SDVOC with the given value, but always writes both
++ * SDVOB and SDVOC to work around apparent hardware issues (according to
++ * comments in the BIOS).
++ */
++void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
++ u32 val)
++{
++ struct drm_device *dev = psb_intel_output->base.dev;
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u32 bval = val, cval = val;
++ int i;
++
++ if (sdvo_priv->output_device == SDVOB)
++ cval = REG_READ(SDVOC);
++ else
++ bval = REG_READ(SDVOB);
++ /*
++ * Write the registers twice for luck. Sometimes,
++ * writing them only once doesn't appear to 'stick'.
++ * The BIOS does this too. Yay, magic
++ */
++ for (i = 0; i < 2; i++) {
++ REG_WRITE(SDVOB, bval);
++ REG_READ(SDVOB);
++ REG_WRITE(SDVOC, cval);
++ REG_READ(SDVOC);
++ }
++}
++
++static bool psb_intel_sdvo_read_byte(
++ struct psb_intel_output *psb_intel_output,
++ u8 addr, u8 *ch)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u8 out_buf[2];
++ u8 buf[2];
++ int ret;
++
++ struct i2c_msg msgs[] = {
++ {
++ .addr = sdvo_priv->i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = sdvo_priv->i2c_bus->slave_addr,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = 0;
++
++ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
++ if (ret == 2) {
++ /* DRM_DEBUG("got back from addr %02X = %02x\n",
++ * out_buf[0], buf[0]);
++ */
++ *ch = buf[0];
++ return true;
++ }
++
++ DRM_DEBUG("i2c transfer returned %d\n", ret);
++ return false;
++}
++
++static bool psb_intel_sdvo_write_byte(
++ struct psb_intel_output *psb_intel_output,
++ int addr, u8 ch)
++{
++ u8 out_buf[2];
++ struct i2c_msg msgs[] = {
++ {
++ .addr = psb_intel_output->i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = ch;
++
++ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
++ return true;
++ return false;
++}
++
++#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
++/** Mapping of command numbers to names, for debug output */
++const static struct _sdvo_cmd_name {
++ u8 cmd;
++ char *name;
++} sdvo_cmd_names[] = {
++SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
++
++#define SDVO_NAME(dev_priv) \
++ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
++#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
++
++static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
++ u8 cmd,
++ void *args,
++ int args_len)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int i;
++
++ if (1) {
++ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
++ for (i = 0; i < args_len; i++)
++ printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
++ for (; i < 8; i++)
++ printk(" ");
++ for (i = 0;
++ i <
++ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
++ i++) {
++ if (cmd == sdvo_cmd_names[i].cmd) {
++ printk("(%s)", sdvo_cmd_names[i].name);
++ break;
++ }
++ }
++ if (i ==
++ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
++ printk("(%02X)", cmd);
++ printk("\n");
++ }
++
++ for (i = 0; i < args_len; i++) {
++ psb_intel_sdvo_write_byte(psb_intel_output,
++ SDVO_I2C_ARG_0 - i,
++ ((u8 *) args)[i]);
++ }
++
++ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
++}
++
++static const char *cmd_status_names[] = {
++ "Power on",
++ "Success",
++ "Not supported",
++ "Invalid arg",
++ "Pending",
++ "Target not specified",
++ "Scaling not supported"
++};
++
++static u8 psb_intel_sdvo_read_response(
++ struct psb_intel_output *psb_intel_output,
++ void *response, int response_len)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int i;
++ u8 status;
++ u8 retry = 50;
++
++ while (retry--) {
++ /* Read the command response */
++ for (i = 0; i < response_len; i++) {
++ psb_intel_sdvo_read_byte(psb_intel_output,
++ SDVO_I2C_RETURN_0 + i,
++ &((u8 *) response)[i]);
++ }
++
++ /* read the return status */
++ psb_intel_sdvo_read_byte(psb_intel_output,
++ SDVO_I2C_CMD_STATUS,
++ &status);
++
++ if (1) {
++ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
++ for (i = 0; i < response_len; i++)
++ printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
++ for (; i < 8; i++)
++ printk(" ");
++ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
++ printk(KERN_INFO"(%s)",
++ cmd_status_names[status]);
++ else
++ printk(KERN_INFO"(??? %d)", status);
++ printk("\n");
++ }
++
++ if (status != SDVO_CMD_STATUS_PENDING)
++ return status;
++
++ mdelay(50);
++ }
++
++ return status;
++}
++
++int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
++{
++ if (mode->clock >= 100000)
++ return 1;
++ else if (mode->clock >= 50000)
++ return 2;
++ else
++ return 4;
++}
++
++/**
++ * Don't check status code from this as it switches the bus back to the
++ * SDVO chips which defeats the purpose of doing a bus switch in the first
++ * place.
++ */
++void psb_intel_sdvo_set_control_bus_switch(
++ struct psb_intel_output *psb_intel_output,
++ u8 target)
++{
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
++ &target,
++ 1);
++}
++
++static bool psb_intel_sdvo_set_target_input(
++ struct psb_intel_output *psb_intel_output,
++ bool target_0, bool target_1)
++{
++ struct psb_intel_sdvo_set_target_input_args targets = { 0 };
++ u8 status;
++
++ if (target_0 && target_1)
++ return SDVO_CMD_STATUS_NOTSUPP;
++
++ if (target_1)
++ targets.target_1 = 1;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
++ &targets, sizeof(targets));
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++/**
++ * Return whether each input is trained.
++ *
++ * This function is making an assumption about the layout of the response,
++ * which should be checked against the docs.
++ */
++static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
++ *psb_intel_output, bool *input_1,
++ bool *input_2)
++{
++ struct psb_intel_sdvo_get_trained_inputs_response response;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &response,
++ sizeof(response));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ *input_1 = response.input0_trained;
++ *input_2 = response.input1_trained;
++ return true;
++}
++
++static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
++ *psb_intel_output, u16 *outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, outputs,
++ sizeof(*outputs));
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
++ *psb_intel_output, u16 outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
++ &outputs, sizeof(outputs));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
++ *psb_intel_output, int mode)
++{
++ u8 status, state = SDVO_ENCODER_STATE_ON;
++
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ state = SDVO_ENCODER_STATE_ON;
++ break;
++ case DRM_MODE_DPMS_STANDBY:
++ state = SDVO_ENCODER_STATE_STANDBY;
++ break;
++ case DRM_MODE_DPMS_SUSPEND:
++ state = SDVO_ENCODER_STATE_SUSPEND;
++ break;
++ case DRM_MODE_DPMS_OFF:
++ state = SDVO_ENCODER_STATE_OFF;
++ break;
++ }
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
++ sizeof(state));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
++ *psb_intel_output,
++ int *clock_min,
++ int *clock_max)
++{
++ struct psb_intel_sdvo_pixel_clock_range clocks;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
++ 0);
++
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &clocks,
++ sizeof(clocks));
++
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ /* Convert the values from units of 10 kHz to kHz. */
++ *clock_min = clocks.min * 10;
++ *clock_max = clocks.max * 10;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_set_target_output(
++ struct psb_intel_output *psb_intel_output,
++ u16 outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
++ &outputs, sizeof(outputs));
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
++ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
++ sizeof(dtd->part1));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
++ sizeof(dtd->part2));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_get_input_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_get_timing(psb_intel_output,
++ SDVO_CMD_GET_INPUT_TIMINGS_PART1,
++ dtd);
++}
++#if 0
++static bool psb_intel_sdvo_get_output_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_get_timing(psb_intel_output,
++ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1,
++ dtd);
++}
++#endif
++static bool psb_intel_sdvo_set_timing(
++ struct psb_intel_output *psb_intel_output,
++ u8 cmd,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
++ sizeof(dtd->part1));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
++ sizeof(dtd->part2));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_set_input_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_set_timing(psb_intel_output,
++ SDVO_CMD_SET_INPUT_TIMINGS_PART1,
++ dtd);
++}
++
++static bool psb_intel_sdvo_set_output_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_set_timing(psb_intel_output,
++ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
++ dtd);
++}
++
++#if 0
++static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output
++ *psb_intel_output,
++ struct psb_intel_sdvo_dtd
++ *dtd)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
++ NULL, 0);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
++ sizeof(dtd->part1));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
++ sizeof(dtd->part2));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++#endif
++
++static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
++ *psb_intel_output)
++{
++ u8 response, status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_CLOCK_RATE_MULT,
++ NULL,
++ 0);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
++
++ if (status != SDVO_CMD_STATUS_SUCCESS) {
++ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
++ return SDVO_CLOCK_RATE_MULT_1X;
++ } else {
++ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
++ }
++
++ return response;
++}
++
++static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
++ *psb_intel_output, u8 val)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_CLOCK_RATE_MULT,
++ &val,
++ 1);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output,
++ u32 in0outputmask,
++ u32 in1outputmask)
++{
++ u8 byArgs[4];
++ u8 status;
++ int i;
++ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
++
++ /* Make all fields of the args/ret to zero */
++ memset(byArgs, 0, sizeof(byArgs));
++
++ /* Fill up the arguement values; */
++ byArgs[0] = (u8) (in0outputmask & 0xFF);
++ byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
++ byArgs[2] = (u8) (in1outputmask & 0xFF);
++ byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
++
++
++ /*save inoutmap arg here*/
++ for (i = 0; i < 4; i++)
++ sdvo_priv->in_out_map[i] = byArgs[0];
++
++ psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
++ status = psb_intel_sdvo_read_response(output, NULL, 0);
++
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++ return true;
++}
++
++
++static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output)
++{
++ u32 dwCurrentSDVOIn0 = 0;
++ u32 dwCurrentSDVOIn1 = 0;
++ u32 dwDevMask = 0;
++
++
++ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
++
++ /* Please DO NOT change the following code. */
++ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
++ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
++ if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
++ switch (sdvo_priv->active_device) {
++ case SDVO_DEVICE_LVDS:
++ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
++ break;
++ case SDVO_DEVICE_TMDS:
++ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
++ break;
++ case SDVO_DEVICE_TV:
++ dwDevMask =
++ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
++ SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
++ SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
++ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
++ break;
++ case SDVO_DEVICE_CRT:
++ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
++ break;
++ }
++ dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
++ } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
++ switch (sdvo_priv->active_device) {
++ case SDVO_DEVICE_LVDS:
++ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
++ break;
++ case SDVO_DEVICE_TMDS:
++ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
++ break;
++ case SDVO_DEVICE_TV:
++ dwDevMask =
++ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
++ SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
++ SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
++ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
++ break;
++ case SDVO_DEVICE_CRT:
++ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
++ break;
++ }
++ dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
++ }
++
++ psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
++ dwCurrentSDVOIn1);
++}
++
++
++static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
++ * device will be told of the multiplier during mode_set.
++ */
++ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
++ return true;
++}
++
++static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct drm_crtc *crtc = encoder->crtc;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_output *psb_intel_output =
++ enc_to_psb_intel_output(encoder);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u16 width, height;
++ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
++ u16 h_sync_offset, v_sync_offset;
++ u32 sdvox;
++ struct psb_intel_sdvo_dtd output_dtd;
++ int sdvo_pixel_multiply;
++
++ if (!mode)
++ return;
++
++ psb_intel_sdvo_set_target_output(psb_intel_output, 0);
++
++ width = mode->crtc_hdisplay;
++ height = mode->crtc_vdisplay;
++
++ /* do some mode translations */
++ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
++ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
++
++ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
++ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
++
++ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
++ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
++
++ output_dtd.part1.clock = mode->clock / 10;
++ output_dtd.part1.h_active = width & 0xff;
++ output_dtd.part1.h_blank = h_blank_len & 0xff;
++ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
++ ((h_blank_len >> 8) & 0xf);
++ output_dtd.part1.v_active = height & 0xff;
++ output_dtd.part1.v_blank = v_blank_len & 0xff;
++ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
++ ((v_blank_len >> 8) & 0xf);
++
++ output_dtd.part2.h_sync_off = h_sync_offset;
++ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
++ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
++ (v_sync_len & 0xf);
++ output_dtd.part2.sync_off_width_high =
++ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
++ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
++
++ output_dtd.part2.dtd_flags = 0x18;
++ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
++ output_dtd.part2.dtd_flags |= 0x2;
++ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
++ output_dtd.part2.dtd_flags |= 0x4;
++
++ output_dtd.part2.sdvo_flags = 0;
++ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
++ output_dtd.part2.reserved = 0;
++
++ /* Set the output timing to the screen */
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ sdvo_priv->active_outputs);
++
++ /* Set the input timing to the screen. Assume always input 0. */
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++
++ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
++
++ /* We would like to use i830_sdvo_create_preferred_input_timing() to
++ * provide the device with a timing it can support, if it supports that
++ * feature. However, presumably we would need to adjust the CRTC to
++ * output the preferred timing, and we don't support that currently.
++ */
++#if 0
++ success =
++ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output,
++ clock,
++ width,
++ height);
++ if (success) {
++ struct psb_intel_sdvo_dtd *input_dtd;
++
++ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
++ &input_dtd);
++ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
++ }
++#else
++ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
++#endif
++
++ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
++ case 1:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_1X);
++ break;
++ case 2:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_2X);
++ break;
++ case 4:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_4X);
++ break;
++ }
++
++ /* Set the SDVO control regs. */
++ if (0 /*IS_I965GM(dev) */) {
++ sdvox = SDVO_BORDER_ENABLE;
++ } else {
++ sdvox = REG_READ(sdvo_priv->output_device);
++ switch (sdvo_priv->output_device) {
++ case SDVOB:
++ sdvox &= SDVOB_PRESERVE_MASK;
++ break;
++ case SDVOC:
++ sdvox &= SDVOC_PRESERVE_MASK;
++ break;
++ }
++ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
++ }
++ if (psb_intel_crtc->pipe == 1)
++ sdvox |= SDVO_PIPE_B_SELECT;
++
++ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
++
++#if 0
++ if (IS_I965G(dev)) {
++ /* done in crtc_mode_set as the dpll_md reg must be written
++ * early */
++ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
++ /* done in crtc_mode_set as it lives inside the
++ * dpll register */
++ } else {
++ sdvox |=
++ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
++ }
++#endif
++
++ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
++
++ psb_intel_sdvo_set_iomap(psb_intel_output);
++}
++
++static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *psb_intel_output =
++ enc_to_psb_intel_output(encoder);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u32 temp;
++
++ if (mode != DRM_MODE_DPMS_ON) {
++ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
++ if (0)
++ psb_intel_sdvo_set_encoder_power_state(
++ psb_intel_output,
++ mode);
++
++ if (mode == DRM_MODE_DPMS_OFF) {
++ temp = REG_READ(sdvo_priv->output_device);
++ if ((temp & SDVO_ENABLE) != 0) {
++ psb_intel_sdvo_write_sdvox(psb_intel_output,
++ temp &
++ ~SDVO_ENABLE);
++ }
++ }
++ } else {
++ bool input1, input2;
++ int i;
++ u8 status;
++
++ temp = REG_READ(sdvo_priv->output_device);
++ if ((temp & SDVO_ENABLE) == 0)
++ psb_intel_sdvo_write_sdvox(psb_intel_output,
++ temp | SDVO_ENABLE);
++ for (i = 0; i < 2; i++)
++ psb_intel_wait_for_vblank(dev);
++
++ status =
++ psb_intel_sdvo_get_trained_inputs(psb_intel_output,
++ &input1,
++ &input2);
++
++
++ /* Warn if the device reported failure to sync.
++ * A lot of SDVO devices fail to notify of sync, but it's
++ * a given it the status is a success, we succeeded.
++ */
++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
++ DRM_DEBUG
++ ("First %s output reported failure to sync\n",
++ SDVO_NAME(sdvo_priv));
++ }
++
++ if (0)
++ psb_intel_sdvo_set_encoder_power_state(
++ psb_intel_output,
++ mode);
++ psb_intel_sdvo_set_active_outputs(psb_intel_output,
++ sdvo_priv->active_outputs);
++ }
++ return;
++}
++
++static void psb_intel_sdvo_save(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ /*int o;*/
++
++ sdvo_priv->save_sdvo_mult =
++ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
++ psb_intel_sdvo_get_active_outputs(psb_intel_output,
++ &sdvo_priv->save_active_outputs);
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
++ psb_intel_sdvo_set_target_input(psb_intel_output,
++ true,
++ false);
++ psb_intel_sdvo_get_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_1);
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
++ psb_intel_sdvo_set_target_input(psb_intel_output,
++ false,
++ true);
++ psb_intel_sdvo_get_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_2);
++ }
++
++#if 0
++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
++ u16 this_output = (1 << o);
++ if (sdvo_priv->caps.output_flags & this_output) {
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ this_output);
++ psb_intel_sdvo_get_output_timing(psb_intel_output,
++ &sdvo_priv->
++ save_output_dtd[o]);
++ }
++ }
++#endif
++
++ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
++
++ /*TODO: save the in_out_map state*/
++}
++
++static void psb_intel_sdvo_restore(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ /*int o;*/
++ int i;
++ bool input1, input2;
++ u8 status;
++
++ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
++
++#if 0
++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
++ u16 this_output = (1 << o);
++ if (sdvo_priv->caps.output_flags & this_output) {
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ this_output);
++ psb_intel_sdvo_set_output_timing(psb_intel_output,
++ &sdvo_priv->
++ save_output_dtd[o]);
++ }
++ }
++#endif
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++ psb_intel_sdvo_set_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_1);
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
++ psb_intel_sdvo_set_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_2);
++ }
++
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ sdvo_priv->save_sdvo_mult);
++
++ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
++
++ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
++ for (i = 0; i < 2; i++)
++ psb_intel_wait_for_vblank(dev);
++ status =
++ psb_intel_sdvo_get_trained_inputs(psb_intel_output,
++ &input1,
++ &input2);
++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
++ DRM_DEBUG
++ ("First %s output reported failure to sync\n",
++ SDVO_NAME(sdvo_priv));
++ }
++
++ psb_intel_sdvo_set_active_outputs(psb_intel_output,
++ sdvo_priv->save_active_outputs);
++
++ /*TODO: restore in_out_map*/
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_IN_OUT_MAP,
++ sdvo_priv->in_out_map,
++ 4);
++
++ psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++}
++
++static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ if (sdvo_priv->pixel_clock_min > mode->clock)
++ return MODE_CLOCK_LOW;
++
++ if (sdvo_priv->pixel_clock_max < mode->clock)
++ return MODE_CLOCK_HIGH;
++
++ return MODE_OK;
++}
++
++static bool psb_intel_sdvo_get_capabilities(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_caps *caps)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_DEVICE_CAPS,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ caps,
++ sizeof(*caps));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
++{
++ struct drm_connector *connector = NULL;
++ struct psb_intel_output *iout = NULL;
++ struct psb_intel_sdvo_priv *sdvo;
++
++ /* find the sdvo connector */
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ iout = to_psb_intel_output(connector);
++
++ if (iout->type != INTEL_OUTPUT_SDVO)
++ continue;
++
++ sdvo = iout->dev_priv;
++
++ if (sdvo->output_device == SDVOB && sdvoB)
++ return connector;
++
++ if (sdvo->output_device == SDVOC && !sdvoB)
++ return connector;
++
++ }
++
++ return NULL;
++}
++
++int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output;
++ DRM_DEBUG("\n");
++
++ if (!connector)
++ return 0;
++
++ psb_intel_output = to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_HOT_PLUG_SUPPORT,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ &response,
++ 2);
++
++ if (response[0] != 0)
++ return 1;
++
++ return 0;
++}
++
++void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ACTIVE_HOT_PLUG,
++ NULL,
++ 0);
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ if (on) {
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ &response,
++ 2);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
++ &response, 2);
++ } else {
++ response[0] = 0;
++ response[1] = 0;
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
++ &response, 2);
++ }
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ACTIVE_HOT_PLUG,
++ NULL,
++ 0);
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++}
++
++static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
++ *connector)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ATTACHED_DISPLAYS,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
++ if ((response[0] != 0) || (response[1] != 0))
++ return connector_status_connected;
++ else
++ return connector_status_disconnected;
++}
++
++static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ /* set the bus switch and get the modes */
++ psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
++ SDVO_CONTROL_BUS_DDC2);
++ psb_intel_ddc_get_modes(psb_intel_output);
++
++ if (list_empty(&connector->probed_modes))
++ return 0;
++ return 1;
++#if 0
++ /* Mac mini hack. On this device, I get DDC through the analog, which
++ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
++ * but it does load-detect as connected. So, just steal the DDC bits
++ * from analog when we fail at finding it the right way.
++ */
++ /* TODO */
++ return NULL;
++
++ return NULL;
++#endif
++}
++
++static void psb_intel_sdvo_destroy(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (psb_intel_output->i2c_bus)
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++ kfree(psb_intel_output);
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
++ .dpms = psb_intel_sdvo_dpms,
++ .mode_fixup = psb_intel_sdvo_mode_fixup,
++ .prepare = psb_intel_encoder_prepare,
++ .mode_set = psb_intel_sdvo_mode_set,
++ .commit = psb_intel_encoder_commit,
++};
++
++static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = psb_intel_sdvo_save,
++ .restore = psb_intel_sdvo_restore,
++ .detect = psb_intel_sdvo_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .destroy = psb_intel_sdvo_destroy,
++};
++
++static const struct drm_connector_helper_funcs
++ psb_intel_sdvo_connector_helper_funcs = {
++ .get_modes = psb_intel_sdvo_get_modes,
++ .mode_valid = psb_intel_sdvo_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
++{
++ drm_encoder_cleanup(encoder);
++}
++
++static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
++ .destroy = psb_intel_sdvo_enc_destroy,
++};
++
++
++void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
++{
++ struct drm_connector *connector;
++ struct psb_intel_output *psb_intel_output;
++ struct psb_intel_sdvo_priv *sdvo_priv;
++ struct psb_intel_i2c_chan *i2cbus = NULL;
++ int connector_type;
++ u8 ch[0x40];
++ int i;
++ int encoder_type, output_id;
++
++ psb_intel_output =
++ kcalloc(sizeof(struct psb_intel_output) +
++ sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ connector = &psb_intel_output->base;
++
++ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
++ DRM_MODE_CONNECTOR_Unknown);
++ drm_connector_helper_add(connector,
++ &psb_intel_sdvo_connector_helper_funcs);
++ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
++ psb_intel_output->type = INTEL_OUTPUT_SDVO;
++
++ connector->interlace_allowed = 0;
++ connector->doublescan_allowed = 0;
++
++ /* setup the DDC bus. */
++ if (output_device == SDVOB)
++ i2cbus =
++ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
++ else
++ i2cbus =
++ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
++
++ if (!i2cbus)
++ goto err_connector;
++
++ sdvo_priv->i2c_bus = i2cbus;
++
++ if (output_device == SDVOB) {
++ output_id = 1;
++ sdvo_priv->by_input_wiring = SDVOB_IN0;
++ sdvo_priv->i2c_bus->slave_addr = 0x38;
++ } else {
++ output_id = 2;
++ sdvo_priv->i2c_bus->slave_addr = 0x39;
++ }
++
++ sdvo_priv->output_device = output_device;
++ psb_intel_output->i2c_bus = i2cbus;
++ psb_intel_output->dev_priv = sdvo_priv;
++
++
++ /* Read the regs to test if we can talk to the device */
++ for (i = 0; i < 0x40; i++) {
++ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
++ DRM_DEBUG("No SDVO device found on SDVO%c\n",
++ output_device == SDVOB ? 'B' : 'C');
++ goto err_i2c;
++ }
++ }
++
++ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
++
++ memset(&sdvo_priv->active_outputs, 0,
++ sizeof(sdvo_priv->active_outputs));
++
++ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
++ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
++ sdvo_priv->active_device = SDVO_DEVICE_CRT;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_DAC;
++ connector_type = DRM_MODE_CONNECTOR_VGA;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
++ sdvo_priv->active_outputs = SDVO_DEVICE_CRT;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_DAC;
++ connector_type = DRM_MODE_CONNECTOR_VGA;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
++ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_TMDS;
++ connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
++ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_TMDS;
++ connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else {
++ unsigned char bytes[2];
++
++ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
++ DRM_DEBUG
++ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
++ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
++ goto err_i2c;
++ }
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
++ encoder_type);
++ drm_encoder_helper_add(&psb_intel_output->enc,
++ &psb_intel_sdvo_helper_funcs);
++ connector->connector_type = connector_type;
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ drm_sysfs_connector_add(connector);
++
++ /* Set the input timing to the screen. Assume always input 0. */
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++
++ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
++ &sdvo_priv->pixel_clock_min,
++ &sdvo_priv->
++ pixel_clock_max);
++
++
++ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
++ "clock range %dMHz - %dMHz, "
++ "input 1: %c, input 2: %c, "
++ "output 1: %c, output 2: %c\n",
++ SDVO_NAME(sdvo_priv),
++ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
++ sdvo_priv->caps.device_rev_id,
++ sdvo_priv->pixel_clock_min / 1000,
++ sdvo_priv->pixel_clock_max / 1000,
++ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
++ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
++ /* check currently supported outputs */
++ sdvo_priv->caps.output_flags &
++ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
++ sdvo_priv->caps.output_flags &
++ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
++
++ psb_intel_output->ddc_bus = i2cbus;
++
++ return;
++
++err_i2c:
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++err_connector:
++ drm_connector_cleanup(connector);
++ kfree(psb_intel_output);
++
++ return;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h b/drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h
+new file mode 100644
+index 0000000..ed2f136
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h
+@@ -0,0 +1,338 @@
++/*
++ * SDVO command definitions and structures.
++ *
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#define SDVO_OUTPUT_FIRST (0)
++#define SDVO_OUTPUT_TMDS0 (1 << 0)
++#define SDVO_OUTPUT_RGB0 (1 << 1)
++#define SDVO_OUTPUT_CVBS0 (1 << 2)
++#define SDVO_OUTPUT_SVID0 (1 << 3)
++#define SDVO_OUTPUT_YPRPB0 (1 << 4)
++#define SDVO_OUTPUT_SCART0 (1 << 5)
++#define SDVO_OUTPUT_LVDS0 (1 << 6)
++#define SDVO_OUTPUT_TMDS1 (1 << 8)
++#define SDVO_OUTPUT_RGB1 (1 << 9)
++#define SDVO_OUTPUT_CVBS1 (1 << 10)
++#define SDVO_OUTPUT_SVID1 (1 << 11)
++#define SDVO_OUTPUT_YPRPB1 (1 << 12)
++#define SDVO_OUTPUT_SCART1 (1 << 13)
++#define SDVO_OUTPUT_LVDS1 (1 << 14)
++#define SDVO_OUTPUT_LAST (14)
++
++struct psb_intel_sdvo_caps {
++ u8 vendor_id;
++ u8 device_id;
++ u8 device_rev_id;
++ u8 sdvo_version_major;
++ u8 sdvo_version_minor;
++ unsigned int sdvo_inputs_mask:2;
++ unsigned int smooth_scaling:1;
++ unsigned int sharp_scaling:1;
++ unsigned int up_scaling:1;
++ unsigned int down_scaling:1;
++ unsigned int stall_support:1;
++ unsigned int pad:1;
++ u16 output_flags;
++} __attribute__ ((packed));
++
++/** This matches the EDID DTD structure, more or less */
++struct psb_intel_sdvo_dtd {
++ struct {
++ u16 clock; /**< pixel clock, in 10kHz units */
++ u8 h_active; /**< lower 8 bits (pixels) */
++ u8 h_blank; /**< lower 8 bits (pixels) */
++ u8 h_high; /**< upper 4 bits each h_active, h_blank */
++ u8 v_active; /**< lower 8 bits (lines) */
++ u8 v_blank; /**< lower 8 bits (lines) */
++ u8 v_high; /**< upper 4 bits each v_active, v_blank */
++ } part1;
++
++ struct {
++ u8 h_sync_off;
++ /**< lower 8 bits, from hblank start */
++ u8 h_sync_width;/**< lower 8 bits (pixels) */
++ /** lower 4 bits each vsync offset, vsync width */
++ u8 v_sync_off_width;
++ /**
++ * 2 high bits of hsync offset, 2 high bits of hsync width,
++ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
++ */
++ u8 sync_off_width_high;
++ u8 dtd_flags;
++ u8 sdvo_flags;
++ /** bits 6-7 of vsync offset at bits 6-7 */
++ u8 v_sync_off_high;
++ u8 reserved;
++ } part2;
++} __attribute__ ((packed));
++
++struct psb_intel_sdvo_pixel_clock_range {
++ u16 min; /**< pixel clock, in 10kHz units */
++ u16 max; /**< pixel clock, in 10kHz units */
++} __attribute__ ((packed));
++
++struct psb_intel_sdvo_preferred_input_timing_args {
++ u16 clock;
++ u16 width;
++ u16 height;
++} __attribute__ ((packed));
++
++/* I2C registers for SDVO */
++#define SDVO_I2C_ARG_0 0x07
++#define SDVO_I2C_ARG_1 0x06
++#define SDVO_I2C_ARG_2 0x05
++#define SDVO_I2C_ARG_3 0x04
++#define SDVO_I2C_ARG_4 0x03
++#define SDVO_I2C_ARG_5 0x02
++#define SDVO_I2C_ARG_6 0x01
++#define SDVO_I2C_ARG_7 0x00
++#define SDVO_I2C_OPCODE 0x08
++#define SDVO_I2C_CMD_STATUS 0x09
++#define SDVO_I2C_RETURN_0 0x0a
++#define SDVO_I2C_RETURN_1 0x0b
++#define SDVO_I2C_RETURN_2 0x0c
++#define SDVO_I2C_RETURN_3 0x0d
++#define SDVO_I2C_RETURN_4 0x0e
++#define SDVO_I2C_RETURN_5 0x0f
++#define SDVO_I2C_RETURN_6 0x10
++#define SDVO_I2C_RETURN_7 0x11
++#define SDVO_I2C_VENDOR_BEGIN 0x20
++
++/* Status results */
++#define SDVO_CMD_STATUS_POWER_ON 0x0
++#define SDVO_CMD_STATUS_SUCCESS 0x1
++#define SDVO_CMD_STATUS_NOTSUPP 0x2
++#define SDVO_CMD_STATUS_INVALID_ARG 0x3
++#define SDVO_CMD_STATUS_PENDING 0x4
++#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
++#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
++
++/* SDVO commands, argument/result registers */
++
++#define SDVO_CMD_RESET 0x01
++
++/** Returns a struct psb_intel_sdvo_caps */
++#define SDVO_CMD_GET_DEVICE_CAPS 0x02
++
++#define SDVO_CMD_GET_FIRMWARE_REV 0x86
++# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
++# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
++# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
++
++/**
++ * Reports which inputs are trained (managed to sync).
++ *
++ * Devices must have trained within 2 vsyncs of a mode change.
++ */
++#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
++struct psb_intel_sdvo_get_trained_inputs_response {
++ unsigned int input0_trained:1;
++ unsigned int input1_trained:1;
++ unsigned int pad:6;
++} __attribute__ ((packed));
++
++/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
++#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
++
++/**
++ * Sets the current set of active outputs.
++ *
++ * Takes a struct psb_intel_sdvo_output_flags.
++ * Must be preceded by a SET_IN_OUT_MAP
++ * on multi-output devices.
++ */
++#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
++
++/**
++ * Returns the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Returns two struct psb_intel_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_GET_IN_OUT_MAP 0x06
++
++/**
++ * Sets the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Takes two struct i380_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_SET_IN_OUT_MAP 0x07
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
++ */
++#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
++
++/**
++ * Returns a struct psb_intel_sdvo_ouptut_flags of displays supporting hot plugging.
++ */
++#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
++
++/**
++ * Takes a struct psb_intel_sdvo_output_flags.
++ */
++#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
++ * interrupts enabled.
++ */
++#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
++
++#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
++struct psb_intel_sdvo_get_interrupt_event_source_response {
++ u16 interrupt_status;
++ unsigned int ambient_light_interrupt:1;
++ unsigned int pad:7;
++} __attribute__ ((packed));
++
++/**
++ * Selects which input is affected by future input commands.
++ *
++ * Commands affected include SET_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
++ */
++#define SDVO_CMD_SET_TARGET_INPUT 0x10
++struct psb_intel_sdvo_set_target_input_args {
++ unsigned int target_1:1;
++ unsigned int pad:7;
++} __attribute__ ((packed));
++
++/**
++ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targetted by
++ * future output commands.
++ *
++ * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12],
++ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
++ */
++#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
++
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
++/* Part 1 */
++# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
++# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
++# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
++# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
++# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
++# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
++# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
++# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
++/* Part 2 */
++# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
++# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
++# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
++# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
++# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
++# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
++# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
++# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
++# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
++# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
++# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
++# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
++# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
++# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
++# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
++
++/**
++ * Generates a DTD based on the given width, height, and flags.
++ *
++ * This will be supported by any device supporting scaling or interlaced
++ * modes.
++ */
++#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
++
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
++
++/** Returns a struct psb_intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
++/** Returns a struct psb_intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
++
++/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
++#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
++
++/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
++/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
++# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
++# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
++# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
++
++#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
++
++#define SDVO_CMD_GET_TV_FORMAT 0x28
++
++#define SDVO_CMD_SET_TV_FORMAT 0x29
++
++#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
++#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
++#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
++# define SDVO_ENCODER_STATE_ON (1 << 0)
++# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
++# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
++# define SDVO_ENCODER_STATE_OFF (1 << 3)
++
++#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
++
++#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
++# define SDVO_CONTROL_BUS_PROM 0x0
++# define SDVO_CONTROL_BUS_DDC1 0x1
++# define SDVO_CONTROL_BUS_DDC2 0x2
++# define SDVO_CONTROL_BUS_DDC3 0x3
++
++/* SDVO Bus & SDVO Inputs wiring details*/
++/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no*/
++/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no*/
++/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no*/
++/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no*/
++#define SDVOB_IN0 0x01
++#define SDVOB_IN1 0x02
++#define SDVOC_IN0 0x04
++#define SDVOC_IN1 0x08
++
++#define SDVO_DEVICE_NONE 0x00
++#define SDVO_DEVICE_CRT 0x01
++#define SDVO_DEVICE_TV 0x02
++#define SDVO_DEVICE_LVDS 0x04
++#define SDVO_DEVICE_TMDS 0x08
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_mmu.c b/drivers/gpu/drm/mrst/drv/psb_mmu.c
+new file mode 100644
+index 0000000..cced0a8
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_mmu.c
+@@ -0,0 +1,1010 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++
++/*
++ * Code for the SGX MMU:
++ */
++
++/*
++ * clflush on one processor only:
++ * clflush should apparently flush the cache line on all processors in an
++ * SMP system.
++ */
++
++/*
++ * kmap atomic:
++ * The usage of the slots must be completely encapsulated within a spinlock, and
++ * no other functions that may be using the locks for other purposed may be
++ * called from within the locked region.
++ * Since the slots are per processor, this will guarantee that we are the only
++ * user.
++ */
++
++/*
++ * TODO: Inserting ptes from an interrupt handler:
++ * This may be desirable for some SGX functionality where the GPU can fault in
++ * needed pages. For that, we need to make an atomic insert_pages function, that
++ * may fail.
++ * If it fails, the caller need to insert the page using a workqueue function,
++ * but on average it should be fast.
++ */
++
++struct psb_mmu_driver {
++ /* protects driver- and pd structures. Always take in read mode
++ * before taking the page table spinlock.
++ */
++ struct rw_semaphore sem;
++
++ /* protects page tables, directory tables and pt tables.
++ * and pt structures.
++ */
++ spinlock_t lock;
++
++ atomic_t needs_tlbflush;
++
++ uint8_t __iomem *register_map;
++ struct psb_mmu_pd *default_pd;
++ /*uint32_t bif_ctrl;*/
++ int has_clflush;
++ int clflush_add;
++ unsigned long clflush_mask;
++
++ struct drm_psb_private *dev_priv;
++};
++
++struct psb_mmu_pd;
++
++struct psb_mmu_pt {
++ struct psb_mmu_pd *pd;
++ uint32_t index;
++ uint32_t count;
++ struct page *p;
++ uint32_t *v;
++};
++
++struct psb_mmu_pd {
++ struct psb_mmu_driver *driver;
++ int hw_context;
++ struct psb_mmu_pt **tables;
++ struct page *p;
++ struct page *dummy_pt;
++ struct page *dummy_page;
++ uint32_t pd_mask;
++ uint32_t invalid_pde;
++ uint32_t invalid_pte;
++};
++
++static inline uint32_t psb_mmu_pt_index(uint32_t offset)
++{
++ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
++}
++
++static inline uint32_t psb_mmu_pd_index(uint32_t offset)
++{
++ return offset >> PSB_PDE_SHIFT;
++}
++
++#if defined(CONFIG_X86)
++static inline void psb_clflush(void *addr)
++{
++ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
++}
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++ void *addr)
++{
++ if (!driver->has_clflush)
++ return;
++
++ mb();
++ psb_clflush(addr);
++ mb();
++}
++#else
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++ void *addr)
++{;
++}
++
++#endif
++
++static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
++ int force)
++{
++ if (atomic_read(&driver->needs_tlbflush) || force) {
++ if (driver->dev_priv) {
++ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
++ if (IS_MRST(driver->dev_priv->dev))
++ atomic_set( \
++ &driver->dev_priv->topaz_mmu_invaldc, 1);
++ }
++ }
++ atomic_set(&driver->needs_tlbflush, 0);
++}
++
++static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
++{
++ down_write(&driver->sem);
++ psb_mmu_flush_pd_locked(driver, force);
++ up_write(&driver->sem);
++}
++
++void psb_mmu_flush(struct psb_mmu_driver *driver)
++{
++ down_write(&driver->sem);
++ if (driver->dev_priv) {
++ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
++ if (IS_MRST(driver->dev_priv->dev))
++ atomic_set(&driver->dev_priv->topaz_mmu_invaldc, 1);
++ }
++
++ up_write(&driver->sem);
++}
++
++void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
++{
++ ttm_tt_cache_flush(&pd->p, 1);
++ down_write(&pd->driver->sem);
++ wmb();
++ psb_mmu_flush_pd_locked(pd->driver, 1);
++ pd->hw_context = hw_context;
++ up_write(&pd->driver->sem);
++
++}
++
++static inline unsigned long psb_pd_addr_end(unsigned long addr,
++ unsigned long end)
++{
++
++ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
++ return (addr < end) ? addr : end;
++}
++
++static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
++{
++ uint32_t mask = PSB_PTE_VALID;
++
++ if (type & PSB_MMU_CACHED_MEMORY)
++ mask |= PSB_PTE_CACHED;
++ if (type & PSB_MMU_RO_MEMORY)
++ mask |= PSB_PTE_RO;
++ if (type & PSB_MMU_WO_MEMORY)
++ mask |= PSB_PTE_WO;
++
++ return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++ int trap_pagefaults, int invalid_type)
++{
++ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
++ uint32_t *v;
++ int i;
++
++ if (!pd)
++ return NULL;
++
++ pd->p = alloc_page(GFP_DMA32);
++ if (!pd->p)
++ goto out_err1;
++ pd->dummy_pt = alloc_page(GFP_DMA32);
++ if (!pd->dummy_pt)
++ goto out_err2;
++ pd->dummy_page = alloc_page(GFP_DMA32);
++ if (!pd->dummy_page)
++ goto out_err3;
++
++ if (!trap_pagefaults) {
++ pd->invalid_pde =
++ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
++ invalid_type);
++ pd->invalid_pte =
++ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
++ invalid_type);
++ } else {
++ pd->invalid_pde = 0;
++ pd->invalid_pte = 0;
++ }
++
++ v = kmap(pd->dummy_pt);
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ v[i] = pd->invalid_pte;
++
++ kunmap(pd->dummy_pt);
++
++ v = kmap(pd->p);
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ v[i] = pd->invalid_pde;
++
++ kunmap(pd->p);
++
++ clear_page(kmap(pd->dummy_page));
++ kunmap(pd->dummy_page);
++
++ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
++ if (!pd->tables)
++ goto out_err4;
++
++ pd->hw_context = -1;
++ pd->pd_mask = PSB_PTE_VALID;
++ pd->driver = driver;
++
++ return pd;
++
++out_err4:
++ __free_page(pd->dummy_page);
++out_err3:
++ __free_page(pd->dummy_pt);
++out_err2:
++ __free_page(pd->p);
++out_err1:
++ kfree(pd);
++ return NULL;
++}
++
++void psb_mmu_free_pt(struct psb_mmu_pt *pt)
++{
++ __free_page(pt->p);
++ kfree(pt);
++}
++
++void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
++{
++ struct psb_mmu_driver *driver = pd->driver;
++ struct psb_mmu_pt *pt;
++ int i;
++
++ down_write(&driver->sem);
++ if (pd->hw_context != -1)
++ psb_mmu_flush_pd_locked(driver, 1);
++
++ /* Should take the spinlock here, but we don't need to do that
++ since we have the semaphore in write mode. */
++
++ for (i = 0; i < 1024; ++i) {
++ pt = pd->tables[i];
++ if (pt)
++ psb_mmu_free_pt(pt);
++ }
++
++ vfree(pd->tables);
++ __free_page(pd->dummy_page);
++ __free_page(pd->dummy_pt);
++ __free_page(pd->p);
++ kfree(pd);
++ up_write(&driver->sem);
++}
++
++static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
++{
++ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
++ void *v;
++ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
++ uint32_t clflush_count = PAGE_SIZE / clflush_add;
++ spinlock_t *lock = &pd->driver->lock;
++ uint8_t *clf;
++ uint32_t *ptes;
++ int i;
++
++ if (!pt)
++ return NULL;
++
++ pt->p = alloc_page(GFP_DMA32);
++ if (!pt->p) {
++ kfree(pt);
++ return NULL;
++ }
++
++ spin_lock(lock);
++
++ v = kmap_atomic(pt->p, KM_USER0);
++ clf = (uint8_t *) v;
++ ptes = (uint32_t *) v;
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ *ptes++ = pd->invalid_pte;
++
++
++#if defined(CONFIG_X86)
++ if (pd->driver->has_clflush && pd->hw_context != -1) {
++ mb();
++ for (i = 0; i < clflush_count; ++i) {
++ psb_clflush(clf);
++ clf += clflush_add;
++ }
++ mb();
++ }
++#endif
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(lock);
++
++ pt->count = 0;
++ pt->pd = pd;
++ pt->index = 0;
++
++ return pt;
++}
++
++struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
++ unsigned long addr)
++{
++ uint32_t index = psb_mmu_pd_index(addr);
++ struct psb_mmu_pt *pt;
++ uint32_t *v;
++ spinlock_t *lock = &pd->driver->lock;
++
++ spin_lock(lock);
++ pt = pd->tables[index];
++ while (!pt) {
++ spin_unlock(lock);
++ pt = psb_mmu_alloc_pt(pd);
++ if (!pt)
++ return NULL;
++ spin_lock(lock);
++
++ if (pd->tables[index]) {
++ spin_unlock(lock);
++ psb_mmu_free_pt(pt);
++ spin_lock(lock);
++ pt = pd->tables[index];
++ continue;
++ }
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ pd->tables[index] = pt;
++ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
++ pt->index = index;
++ kunmap_atomic((void *) v, KM_USER0);
++
++ if (pd->hw_context != -1) {
++ psb_mmu_clflush(pd->driver, (void *) &v[index]);
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++ }
++ }
++ pt->v = kmap_atomic(pt->p, KM_USER0);
++ return pt;
++}
++
++static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
++ unsigned long addr)
++{
++ uint32_t index = psb_mmu_pd_index(addr);
++ struct psb_mmu_pt *pt;
++ spinlock_t *lock = &pd->driver->lock;
++
++ spin_lock(lock);
++ pt = pd->tables[index];
++ if (!pt) {
++ spin_unlock(lock);
++ return NULL;
++ }
++ pt->v = kmap_atomic(pt->p, KM_USER0);
++ return pt;
++}
++
++static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
++{
++ struct psb_mmu_pd *pd = pt->pd;
++ uint32_t *v;
++
++ kunmap_atomic(pt->v, KM_USER0);
++ if (pt->count == 0) {
++ v = kmap_atomic(pd->p, KM_USER0);
++ v[pt->index] = pd->invalid_pde;
++ pd->tables[pt->index] = NULL;
++
++ if (pd->hw_context != -1) {
++ psb_mmu_clflush(pd->driver,
++ (void *) &v[pt->index]);
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++ }
++ kunmap_atomic(pt->v, KM_USER0);
++ spin_unlock(&pd->driver->lock);
++ psb_mmu_free_pt(pt);
++ return;
++ }
++ spin_unlock(&pd->driver->lock);
++}
++
++static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
++ unsigned long addr, uint32_t pte)
++{
++ pt->v[psb_mmu_pt_index(addr)] = pte;
++}
++
++static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
++ unsigned long addr)
++{
++ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
++}
++
++#if 0
++static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset)
++{
++ uint32_t *v;
++ uint32_t pfn;
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ if (!v) {
++ printk(KERN_INFO "Could not kmap pde page.\n");
++ return 0;
++ }
++ pfn = v[psb_mmu_pd_index(mmu_offset)];
++ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */
++ kunmap_atomic(v, KM_USER0);
++ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
++ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
++ mmu_offset, pfn);
++ }
++ v = ioremap(pfn & 0xFFFFF000, 4096);
++ if (!v) {
++ printk(KERN_INFO "Could not kmap pte page.\n");
++ return 0;
++ }
++ pfn = v[psb_mmu_pt_index(mmu_offset)];
++ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
++ iounmap(v);
++ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
++ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
++ mmu_offset, pfn);
++ }
++ return pfn >> PAGE_SHIFT;
++}
++
++static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset,
++ uint32_t gtt_pages)
++{
++ uint32_t start;
++ uint32_t next;
++
++ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
++ mmu_offset, gtt_pages);
++ down_read(&pd->driver->sem);
++ start = psb_mmu_check_pte_locked(pd, mmu_offset);
++ mmu_offset += PAGE_SIZE;
++ gtt_pages -= 1;
++ while (gtt_pages--) {
++ next = psb_mmu_check_pte_locked(pd, mmu_offset);
++ if (next != start + 1) {
++ printk(KERN_INFO
++ "Ptes out of order: 0x%08x, 0x%08x.\n",
++ start, next);
++ }
++ start = next;
++ mmu_offset += PAGE_SIZE;
++ }
++ up_read(&pd->driver->sem);
++}
++
++#endif
++
++void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset, uint32_t gtt_start,
++ uint32_t gtt_pages)
++{
++ uint32_t *v;
++ uint32_t start = psb_mmu_pd_index(mmu_offset);
++ struct psb_mmu_driver *driver = pd->driver;
++ int num_pages = gtt_pages;
++
++ down_read(&driver->sem);
++ spin_lock(&driver->lock);
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ v += start;
++
++ while (gtt_pages--) {
++ *v++ = gtt_start | pd->pd_mask;
++ gtt_start += PAGE_SIZE;
++ }
++
++ ttm_tt_cache_flush(&pd->p, num_pages);
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(&driver->lock);
++
++ if (pd->hw_context != -1)
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++
++ up_read(&pd->driver->sem);
++ psb_mmu_flush_pd(pd->driver, 0);
++}
++
++struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
++{
++ struct psb_mmu_pd *pd;
++
++ down_read(&driver->sem);
++ pd = driver->default_pd;
++ up_read(&driver->sem);
++
++ return pd;
++}
++
++/* Returns the physical address of the PD shared by sgx/msvdx */
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
++{
++ struct psb_mmu_pd *pd;
++
++ pd = psb_mmu_get_default_pd(driver);
++ return page_to_pfn(pd->p) << PAGE_SHIFT;
++}
++
++void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
++{
++ psb_mmu_free_pagedir(driver->default_pd);
++ kfree(driver);
++}
++
++struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++ int trap_pagefaults,
++ int invalid_type,
++ struct drm_psb_private *dev_priv)
++{
++ struct psb_mmu_driver *driver;
++
++ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
++
++ if (!driver)
++ return NULL;
++ driver->dev_priv = dev_priv;
++
++ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
++ invalid_type);
++ if (!driver->default_pd)
++ goto out_err1;
++
++ spin_lock_init(&driver->lock);
++ init_rwsem(&driver->sem);
++ down_write(&driver->sem);
++ driver->register_map = registers;
++ atomic_set(&driver->needs_tlbflush, 1);
++
++ driver->has_clflush = 0;
++
++#if defined(CONFIG_X86)
++ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
++ uint32_t tfms, misc, cap0, cap4, clflush_size;
++
++ /*
++ * clflush size is determined at kernel setup for x86_64
++ * but not for i386. We have to do it here.
++ */
++
++ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
++ clflush_size = ((misc >> 8) & 0xff) * 8;
++ driver->has_clflush = 1;
++ driver->clflush_add =
++ PAGE_SIZE * clflush_size / sizeof(uint32_t);
++ driver->clflush_mask = driver->clflush_add - 1;
++ driver->clflush_mask = ~driver->clflush_mask;
++ }
++#endif
++
++ up_write(&driver->sem);
++ return driver;
++
++out_err1:
++ kfree(driver);
++ return NULL;
++}
++
++#if defined(CONFIG_X86)
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long clflush_add = pd->driver->clflush_add;
++ unsigned long clflush_mask = pd->driver->clflush_mask;
++
++ if (!pd->driver->has_clflush) {
++ ttm_tt_cache_flush(&pd->p, num_pages);
++ return;
++ }
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++ mb();
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_map_lock(pd, addr);
++ if (!pt)
++ continue;
++ do {
++ psb_clflush(&pt->v
++ [psb_mmu_pt_index(addr)]);
++ } while (addr +=
++ clflush_add,
++ (addr & clflush_mask) < next);
++
++ psb_mmu_pt_unmap_unlock(pt);
++ } while (addr = next, next != end);
++ address += row_add;
++ }
++ mb();
++}
++#else
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ drm_ttm_cache_flush(&pd->p, num_pages);
++}
++#endif
++
++void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages)
++{
++ struct psb_mmu_pt *pt;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long f_address = address;
++
++ down_read(&pd->driver->sem);
++
++ addr = address;
++ end = addr + (num_pages << PAGE_SHIFT);
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt)
++ goto out;
++ do {
++ psb_mmu_invalidate_pte(pt, addr);
++ --pt->count;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return;
++}
++
++void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
++ uint32_t num_pages, uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long f_address = address;
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++
++ down_read(&pd->driver->sem);
++
++ /* Make sure we only need to flush this processor's cache */
++
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_map_lock(pd, addr);
++ if (!pt)
++ continue;
++ do {
++ psb_mmu_invalidate_pte(pt, addr);
++ --pt->count;
++
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++ address += row_add;
++ }
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages,
++ desired_tile_stride, hw_tile_stride);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++}
++
++int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
++ unsigned long address, uint32_t num_pages,
++ int type)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t pte;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long f_address = address;
++ int ret = 0;
++
++ down_read(&pd->driver->sem);
++
++ addr = address;
++ end = addr + (num_pages << PAGE_SHIFT);
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ do {
++ pte = psb_mmu_mask_pte(start_pfn++, type);
++ psb_mmu_set_pte(pt, addr, pte);
++ pt->count++;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return ret;
++}
++
++int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride, int type)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ uint32_t pte;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long f_address = address;
++ int ret = 0;
++
++ if (hw_tile_stride) {
++ if (num_pages % desired_tile_stride != 0)
++ return -EINVAL;
++ rows = num_pages / desired_tile_stride;
++ } else {
++ desired_tile_stride = num_pages;
++ }
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++
++ down_read(&pd->driver->sem);
++
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ do {
++ pte =
++ psb_mmu_mask_pte(page_to_pfn(*pages++),
++ type);
++ psb_mmu_set_pte(pt, addr, pte);
++ pt->count++;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++ address += row_add;
++ }
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages,
++ desired_tile_stride, hw_tile_stride);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return ret;
++}
++#if 0 /*comented out, only used in mmu test now*/
++void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
++{
++ mask &= _PSB_MMU_ER_MASK;
++ psb_iowrite32(driver,
++ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++}
++
++void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
++ uint32_t mask)
++{
++ mask &= _PSB_MMU_ER_MASK;
++ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++}
++#endif
++int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++ unsigned long *pfn)
++{
++ int ret;
++ struct psb_mmu_pt *pt;
++ uint32_t tmp;
++ spinlock_t *lock = &pd->driver->lock;
++
++ down_read(&pd->driver->sem);
++ pt = psb_mmu_pt_map_lock(pd, virtual);
++ if (!pt) {
++ uint32_t *v;
++
++ spin_lock(lock);
++ v = kmap_atomic(pd->p, KM_USER0);
++ tmp = v[psb_mmu_pd_index(virtual)];
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(lock);
++
++ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
++ !(pd->invalid_pte & PSB_PTE_VALID)) {
++ ret = -EINVAL;
++ goto out;
++ }
++ ret = 0;
++ *pfn = pd->invalid_pte >> PAGE_SHIFT;
++ goto out;
++ }
++ tmp = pt->v[psb_mmu_pt_index(virtual)];
++ if (!(tmp & PSB_PTE_VALID)) {
++ ret = -EINVAL;
++ } else {
++ ret = 0;
++ *pfn = tmp >> PAGE_SHIFT;
++ }
++ psb_mmu_pt_unmap_unlock(pt);
++out:
++ up_read(&pd->driver->sem);
++ return ret;
++}
++#if 0
++void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
++{
++ struct page *p;
++ unsigned long pfn;
++ int ret = 0;
++ struct psb_mmu_pd *pd;
++ uint32_t *v;
++ uint32_t *vmmu;
++
++ pd = driver->default_pd;
++ if (!pd)
++ printk(KERN_WARNING "Could not get default pd\n");
++
++
++ p = alloc_page(GFP_DMA32);
++
++ if (!p) {
++ printk(KERN_WARNING "Failed allocating page\n");
++ return;
++ }
++
++ v = kmap(p);
++ memset(v, 0x67, PAGE_SIZE);
++
++ pfn = (offset >> PAGE_SHIFT);
++
++ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
++ if (ret) {
++ printk(KERN_WARNING "Failed inserting mmu page\n");
++ goto out_err1;
++ }
++
++ /* Ioremap the page through the GART aperture */
++
++ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++ if (!vmmu) {
++ printk(KERN_WARNING "Failed ioremapping page\n");
++ goto out_err2;
++ }
++
++ /* Read from the page with mmu disabled. */
++ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
++
++ /* Enable the mmu for host accesses and read again. */
++ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
++
++ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
++ ioread32(vmmu));
++ *v = 0x15243705;
++ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
++ ioread32(vmmu));
++ iowrite32(0x16243355, vmmu);
++ (void) ioread32(vmmu);
++ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
++
++ printk(KERN_INFO "Int stat is 0x%08x\n",
++ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
++ printk(KERN_INFO "Fault is 0x%08x\n",
++ psb_ioread32(driver, PSB_CR_BIF_FAULT));
++
++ /* Disable MMU for host accesses and clear page fault register */
++ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
++ iounmap(vmmu);
++out_err2:
++ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
++out_err1:
++ kunmap(p);
++ __free_page(p);
++}
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_msvdx.c b/drivers/gpu/drm/mrst/drv/psb_msvdx.c
+new file mode 100644
+index 0000000..4ad5b31
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_msvdx.c
+@@ -0,0 +1,1063 @@
++/**************************************************************************
++ * MSVDX I/O operations and IRQ handling
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "ospm_power.h"
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#ifndef list_first_entry
++#define list_first_entry(ptr, type, member) \
++ list_entry((ptr)->next, type, member)
++#endif
++
++
++static int psb_msvdx_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size);
++
++static int psb_msvdx_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
++ int ret = 0;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ if (list_empty(&msvdx_priv->msvdx_queue)) {
++ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
++ msvdx_priv->msvdx_busy = 0;
++ return -EINVAL;
++ }
++ msvdx_cmd = list_first_entry(&msvdx_priv->msvdx_queue,
++ struct psb_msvdx_cmd_queue, head);
++ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
++ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
++ ret = -EINVAL;
++ }
++ list_del(&msvdx_cmd->head);
++ kfree(msvdx_cmd->cmd);
++ kfree(msvdx_cmd);
++
++ return ret;
++}
++
++static int psb_msvdx_map_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int ret = 0;
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ unsigned long cmd_size_remaining;
++ struct ttm_bo_kmap_obj cmd_kmap, regio_kmap;
++ void *cmd, *tmp, *cmd_start;
++ bool is_iomem;
++
++ /* command buffers may not exceed page boundary */
++ if (cmd_size + cmd_page_offset > PAGE_SIZE)
++ return -EINVAL;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 1, &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
++ return ret;
++ }
++
++ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
++ + cmd_page_offset;
++ cmd = cmd_start;
++ cmd_size_remaining = cmd_size;
++
++ while (cmd_size_remaining > 0) {
++ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
++ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
++ uint32_t mmu_ptd = 0, tmp = 0;
++ struct psb_msvdx_deblock_queue *msvdx_deblock;
++ unsigned long irq_flags;
++
++ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
++ " cur_cmd_id = %02x fence = %08x\n",
++ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
++ if ((cur_cmd_size % sizeof(uint32_t))
++ || (cur_cmd_size > cmd_size_remaining)) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++
++ switch (cur_cmd_id) {
++ case VA_MSGID_RENDER:
++ /* Fence ID */
++ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
++ sequence);
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
++ 1, 0);
++ if (tmp == 1) {
++ mmu_ptd |= 1;
++ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
++ }
++
++ /* PTD */
++ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
++ break;
++
++ case VA_MSGID_DEBLOCK:
++ /* Fence ID */
++ MEMIO_WRITE_FIELD(cmd, FW_DXVA_DEBLOCK_FENCE_VALUE,
++ sequence);
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
++ 1, 0);
++ if (tmp == 1) {
++ mmu_ptd |= 1;
++ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
++ }
++
++ /* PTD */
++ MEMIO_WRITE_FIELD(cmd,
++ FW_DXVA_DEBLOCK_MMUPTD,
++ mmu_ptd);
++
++ /* printk("Got deblock msg\n"); */
++ /* Deblock message is followed by 32 */
++ /* bytes of deblock params */
++ msvdx_deblock = kmalloc(
++ sizeof(struct psb_msvdx_deblock_queue),
++ GFP_KERNEL);
++
++ if (msvdx_deblock == NULL) {
++ DRM_ERROR("DEBLOCK QUE: Out of memory...\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ memcpy(&msvdx_deblock->dbParams, cmd + 16, 32);
++
++ ret = ttm_bo_kmap(
++ (struct ttm_buffer_object *)
++ msvdx_deblock->dbParams.handle,
++ 0,
++ (msvdx_deblock->dbParams.buffer_size +
++ PAGE_SIZE - 1) >> PAGE_SHIFT,
++ &regio_kmap);
++
++ /* printk("deblock regio buffer size is 0x%x\n",
++ msvdx_deblock->dbParams.buffer_size); */
++
++ if (likely(!ret)) {
++ msvdx_deblock->dbParams.pPicparams = kmalloc(
++ msvdx_deblock->dbParams.buffer_size,
++ GFP_KERNEL);
++
++ if (msvdx_deblock->dbParams.pPicparams != NULL)
++ memcpy(
++ msvdx_deblock->dbParams.pPicparams,
++ regio_kmap.virtual,
++ msvdx_deblock->dbParams.buffer_size);
++ ttm_bo_kunmap(&regio_kmap);
++ }
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ list_add_tail(&msvdx_deblock->head,
++ &msvdx_priv->deblock_queue);
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock,
++ irq_flags);
++
++ cmd += 32;
++ cmd_size_remaining -= 32;
++ break;
++
++
++ default:
++ /* Msg not supported */
++ ret = -EINVAL;
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++
++ cmd += cur_cmd_size;
++ cmd_size_remaining -= cur_cmd_size;
++ }
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
++
++ tmp = kzalloc(cmd_size, GFP_KERNEL);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ DRM_ERROR("MSVDX: fail to callc,ret=:%d\n", ret);
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *msvdx_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
++ ret = psb_msvdx_send(dev, cmd_start, cmd_size);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++int psb_submit_video_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
++ unsigned long irq_flags;
++ int ret = 0;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int offset = 0;
++
++ /* psb_schedule_watchdog(dev_priv); */
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ if (msvdx_priv->msvdx_needs_reset) {
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
++ if (psb_msvdx_reset(dev_priv)) {
++ ret = -EBUSY;
++ DRM_ERROR("MSVDX: Reset failed\n");
++ return ret;
++ }
++ msvdx_priv->msvdx_needs_reset = 0;
++ msvdx_priv->msvdx_busy = 0;
++
++ psb_msvdx_init(dev);
++
++ /* restore vec local mem if needed */
++ if (msvdx_priv->vec_local_mem_saved) {
++ for (offset = 0; offset < VEC_LOCAL_MEM_BYTE_SIZE / 4; ++offset)
++ PSB_WMSVDX32(msvdx_priv->vec_local_mem_data[offset],
++ VEC_LOCAL_MEM_OFFSET + offset * 4);
++
++ msvdx_priv->vec_local_mem_saved = 0;
++ }
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ if (!msvdx_priv->msvdx_fw_loaded) {
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX:reload FW to MTX\n");
++
++ ret = psb_setup_fw(dev);
++ if (ret) {
++ DRM_ERROR("MSVDX:fail to load FW\n");
++ /* FIXME: find a proper return value */
++ return -EFAULT;
++ }
++ msvdx_priv->msvdx_fw_loaded = 1;
++
++ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ if (!msvdx_priv->msvdx_busy) {
++ msvdx_priv->msvdx_busy = 1;
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
++ sequence);
++ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
++ return ret;
++ }
++ } else {
++ struct psb_msvdx_cmd_queue *msvdx_cmd;
++ void *cmd = NULL;
++
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ /* queue the command to be sent when the h/w is ready */
++ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
++ sequence);
++ msvdx_cmd = kzalloc(sizeof(struct psb_msvdx_cmd_queue),
++ GFP_KERNEL);
++ if (msvdx_cmd == NULL) {
++ DRM_ERROR("MSVDXQUE: Out of memory...\n");
++ return -ENOMEM;
++ }
++
++ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
++ kfree(msvdx_cmd
++ );
++ return ret;
++ }
++ msvdx_cmd->cmd = cmd;
++ msvdx_cmd->cmd_size = cmd_size;
++ msvdx_cmd->sequence = sequence;
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ list_add_tail(&msvdx_cmd->head, &msvdx_priv->msvdx_queue);
++ if (!msvdx_priv->msvdx_busy) {
++ msvdx_priv->msvdx_busy = 1;
++ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
++ psb_msvdx_dequeue_send(dev);
++ }
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ return ret;
++}
++
++int psb_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence;
++ int ret;
++
++ /*
++ * Check this. Doesn't seem right. Have fencing done AFTER command
++ * submission and make sure drm_psb_idle idles the MSVDX completely.
++ */
++ ret =
++ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, NULL);
++ if (ret)
++ return ret;
++
++
++ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */
++ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
++ arg->fence_flags, validate_list, fence_arg,
++ &fence);
++
++ ttm_fence_object_unref(&fence);
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++
++ return 0;
++}
++
++
++static int psb_msvdx_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size)
++{
++ int ret = 0;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ while (cmd_size > 0) {
++ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
++ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
++ if (cur_cmd_size > cmd_size) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
++ cmd_size, (unsigned long)cur_cmd_size);
++ goto out;
++ }
++
++ /* Send the message to h/w */
++ ret = psb_mtx_send(dev_priv, cmd);
++ if (ret) {
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++ cmd += cur_cmd_size;
++ cmd_size -= cur_cmd_size;
++ if (cur_cmd_id == VA_MSGID_DEBLOCK) {
++ cmd += 32;
++ cmd_size -= 32;
++ }
++ }
++
++out:
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ return ret;
++}
++
++int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
++{
++ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
++ const uint32_t *p_msg = (uint32_t *) msg;
++ uint32_t msg_num, words_free, ridx, widx, buf_size, buf_offset;
++ int ret = 0;
++
++ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
++
++ /* we need clocks enabled before we touch VEC local ram */
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
++
++ if (msg_num > buf_size) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: message exceed maximum,ret:%d\n", ret);
++ goto out;
++ }
++
++ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
++ /*0x2000 is VEC Local Ram offset*/
++ buf_offset =
++ (PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) >> 16) + 0x2000;
++
++ /* message would wrap, need to send a pad message */
++ if (widx + msg_num > buf_size) {
++ /* Shouldn't happen for a PAD message itself */
++ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
++ == FWRK_MSGID_PADDING);
++
++ /* if the read pointer is at zero then we must wait for it to
++ * change otherwise the write pointer will equal the read
++ * pointer,which should only happen when the buffer is empty
++ *
++ * This will only happens if we try to overfill the queue,
++ * queue management should make
++ * sure this never happens in the first place.
++ */
++ BUG_ON(0 == ridx);
++ if (0 == ridx) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
++ goto out;
++ }
++
++ /* Send a pad message */
++ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
++ (buf_size - widx) << 2);
++ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
++ FWRK_MSGID_PADDING);
++ psb_mtx_send(dev_priv, pad_msg);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++ }
++
++ if (widx >= ridx)
++ words_free = buf_size - (widx - ridx);
++ else
++ words_free = ridx - widx;
++
++ BUG_ON(msg_num > words_free);
++ if (msg_num > words_free) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
++ goto out;
++ }
++ while (msg_num > 0) {
++ PSB_WMSVDX32(*p_msg++, buf_offset + (widx << 2));
++ msg_num--;
++ widx++;
++ if (buf_size == widx)
++ widx = 0;
++ }
++
++ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++ /* Make sure clocks are enabled before we kick */
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* signal an interrupt to let the mtx know there is a new message */
++ /* PSB_WMSVDX32(1, MSVDX_MTX_KICKI); */
++ PSB_WMSVDX32(1, MSVDX_MTX_KICK);
++
++ /* Read MSVDX Register several times in case Idle signal assert */
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++
++
++out:
++ return ret;
++}
++
++static int psb_msvdx_towpass_deblock(struct drm_device *dev,
++ uint32_t *pPicparams)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t cmd_size, cmd_count = 0;
++ uint32_t cmd_id, reg, value, wait, tmp, read = 0, ret = 0;
++
++ cmd_size = *pPicparams++;
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: deblock get cmd size %d\n", cmd_size);
++ /* printk("MSVDX DEBLOCK: deblock get cmd size %d\n", cmd_size); */
++
++ do {
++ cmd_id = (*pPicparams) & 0xf0000000;
++ reg = (*pPicparams++) & 0x0fffffff;
++ switch (cmd_id) {
++ case MSVDX_DEBLOCK_REG_SET: {
++ value = *pPicparams++;
++ PSB_WMSVDX32(value, reg);
++ cmd_count += 2;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_GET: {
++ read = PSB_RMSVDX32(reg);
++ cmd_count += 1;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_POLLn: {
++ value = *pPicparams++;
++ wait = 0;
++
++ do {
++ tmp = PSB_RMSVDX32(reg);
++ } while ((wait++ < 20000) && (value > tmp));
++
++ if (wait >= 20000) {
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: polln cmd space time out!\n");
++ goto finish_deblock;
++ }
++ cmd_count += 2;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_POLLx: {
++ wait = 0;
++
++ do {
++ tmp = PSB_RMSVDX32(reg);
++ } while ((wait++ < 20000) && (read > tmp));
++
++ if (wait >= 20000) {
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: pollx cmd space time out!\n");
++ goto finish_deblock;
++ }
++
++ cmd_count += 1;
++ break;
++ }
++ default:
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: get error cmd_id: 0x%x!\n",
++ cmd_id);
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: execute cmd num is %d\n",
++ cmd_count);
++ /* printk("MSVDX DEBLOCK: get error cmd_id: 0x%x!\n",
++ cmd_id); */
++ /* printk("MSVDX DEBLOCK: execute cmd num is %d\n",
++ cmd_count); */
++ goto finish_deblock;
++ }
++ } while (cmd_count < cmd_size);
++
++
++finish_deblock:
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: execute cmd num is %d\n", cmd_count);
++ return ret;
++}
++
++/*
++ * MSVDX MTX interrupt
++ */
++static void psb_msvdx_mtx_interrupt(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ static uint32_t buf[128]; /* message buffer */
++ uint32_t ridx, widx, buf_size, buf_offset;
++ uint32_t num, ofs; /* message num and offset */
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
++
++ /* Are clocks enabled - If not enable before
++ * attempting to read from VLR
++ */
++ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
++ PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interupt set\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++ }
++
++loop: /* just for coding style check */
++ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
++
++ /* Get out of here if nothing */
++ if (ridx == widx)
++ goto done;
++
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) & ((1 << 16) - 1);
++ /*0x2000 is VEC Local Ram offset*/
++ buf_offset =
++ (PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) >> 16) + 0x2000;
++
++ ofs = 0;
++ buf[ofs] = PSB_RMSVDX32(buf_offset + (ridx << 2));
++
++ /* round to nearest word */
++ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
++
++ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
++
++ if (++ridx >= buf_size)
++ ridx = 0;
++
++ for (ofs++; ofs < num; ofs++) {
++ buf[ofs] = PSB_RMSVDX32(buf_offset + (ridx << 2));
++
++ if (++ridx >= buf_size)
++ ridx = 0;
++ }
++
++ /* Update the Read index */
++ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
++
++ if (msvdx_priv->msvdx_needs_reset)
++ goto loop;
++
++ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
++ case VA_MSGID_CMD_HW_PANIC:
++ case VA_MSGID_CMD_FAILED: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_FAILED_FENCE_VALUE);
++ uint32_t fault = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_FAILED_IRQSTATUS);
++ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
++ uint32_t diff = 0;
++
++ (void) fault;
++ if (msg_id == VA_MSGID_CMD_HW_PANIC)
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
++ "Fault detected"
++ " - Fence: %08x, Status: %08x"
++ " - resetting and ignoring error\n",
++ fence, fault);
++ else
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
++ "Fault detected"
++ " - Fence: %08x, Status: %08x"
++ " - resetting and ignoring error\n",
++ fence, fault);
++
++ msvdx_priv->msvdx_needs_reset = 1;
++
++ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
++ diff = msvdx_priv->msvdx_current_sequence
++ - dev_priv->sequence[PSB_ENGINE_VIDEO];
++
++ if (diff > 0x0FFFFFFF)
++ msvdx_priv->msvdx_current_sequence++;
++
++ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
++ "assuming %08x\n",
++ msvdx_priv->msvdx_current_sequence);
++ } else {
++ msvdx_priv->msvdx_current_sequence = fence;
++ }
++
++ psb_fence_error(dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
++
++ /* Flush the command queue */
++ psb_msvdx_flush_cmd_queue(dev);
++
++ goto done;
++ }
++ case VA_MSGID_CMD_COMPLETED: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FENCE_VALUE);
++ uint32_t flags = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FLAGS);
++
++ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
++ "FenceID: %08x, flags: 0x%x\n",
++ fence, flags);
++
++ msvdx_priv->msvdx_current_sequence = fence;
++
++ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
++
++ if (flags & FW_VA_RENDER_HOST_INT) {
++ /*Now send the next command from the msvdx cmd queue */
++ psb_msvdx_dequeue_send(dev);
++ goto done;
++ }
++
++ break;
++ }
++ case VA_MSGID_CMD_COMPLETED_BATCH: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FENCE_VALUE);
++ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_NO_TICKS);
++ (void)tickcnt;
++ /* we have the fence value in the message */
++ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
++ " FenceID: %08x, TickCount: %08x\n",
++ fence, tickcnt);
++ msvdx_priv->msvdx_current_sequence = fence;
++
++ break;
++ }
++ case VA_MSGID_ACK:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
++ break;
++
++ case VA_MSGID_TEST1:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
++ break;
++
++ case VA_MSGID_TEST2:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
++ break;
++ /* Don't need to do anything with these messages */
++
++ case VA_MSGID_DEBLOCK_REQUIRED: {
++ uint32_t ctxid = MEMIO_READ_FIELD(buf,
++ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
++ struct psb_msvdx_deblock_queue *msvdx_deblock;
++
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
++ " Context=%08x\n", ctxid);
++ if (list_empty(&msvdx_priv->deblock_queue)) {
++ PSB_DEBUG_GENERAL(
++ "DEBLOCKQUE: deblock param list is empty\n");
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ goto done;
++ }
++ msvdx_deblock = list_first_entry(&msvdx_priv->deblock_queue,
++ struct psb_msvdx_deblock_queue, head);
++
++ if (0) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: by pass \n");
++ /* try to unblock rendec */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ list_del(&msvdx_deblock->head);
++ goto done;
++ }
++
++
++ if (ctxid != msvdx_deblock->dbParams.ctxid) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: wrong ctxid, may "
++ "caused by multiple context since "
++ "it's not supported yet\n");
++ /* try to unblock rendec */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ list_del(&msvdx_deblock->head);
++ goto done;
++ }
++
++ if (msvdx_deblock->dbParams.pPicparams) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: start deblocking\n");
++ /* printk("MSVDX DEBLOCK: start deblocking\n"); */
++
++ if (psb_msvdx_towpass_deblock(dev,
++ msvdx_deblock->dbParams.pPicparams)) {
++
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: deblock fail!\n");
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ }
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ } else {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: deblock abort!\n");
++ /* printk("MSVDX DEBLOCK: deblock abort!\n"); */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ }
++
++ list_del(&msvdx_deblock->head);
++ kfree(msvdx_deblock);
++ break;
++ }
++ default:
++ DRM_ERROR("ERROR: msvdx Unknown message from MTX \n");
++ goto done;
++ }
++
++done:
++ /* we get a frame/slice done, try to save some power*/
++ if (drm_msvdx_pmpolicy != PSB_PMPOLICY_NOPM)
++ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0);
++
++ DRM_MEMORYBARRIER(); /* TBD check this... */
++}
++
++
++/*
++ * MSVDX interrupt.
++ */
++IMG_BOOL psb_msvdx_interrupt(IMG_VOID *pvData)
++{
++ struct drm_device *dev;
++ struct drm_psb_private *dev_priv;
++ struct msvdx_private *msvdx_priv;
++ uint32_t msvdx_stat;
++
++ if (pvData == IMG_NULL) {
++ DRM_ERROR("ERROR: msvdx %s, Invalid params\n", __func__);
++ return IMG_FALSE;
++ }
++
++ if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
++ return IMG_FALSE;
++ }
++
++ dev = (struct drm_device *)pvData;
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ msvdx_priv = dev_priv->msvdx_private;
++
++ msvdx_priv->msvdx_hw_busy = REG_READ(0x20D0) & (0x1 << 9);
++
++ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++
++ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
++ /*Ideally we should we should never get to this */
++ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x\n", msvdx_stat);
++
++ /* Pause MMU */
++ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
++ MSVDX_MMU_CONTROL0);
++ DRM_WRITEMEMORYBARRIER();
++
++ /* Clear this interupt bit only */
++ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
++ MSVDX_INTERRUPT_CLEAR);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
++ DRM_READMEMORYBARRIER();
++
++ msvdx_priv->msvdx_needs_reset = 1;
++ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
++ PSB_DEBUG_IRQ
++ ("MSVDX: msvdx_stat: 0x%x(MTX)\n", msvdx_stat);
++
++ /* Clear all interupt bits */
++ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
++ DRM_READMEMORYBARRIER();
++
++ psb_msvdx_mtx_interrupt(dev);
++ }
++
++ return IMG_TRUE;
++}
++
++
++void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
++ int *msvdx_lockup, int *msvdx_idle)
++{
++ int tmp;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ *msvdx_lockup = 0;
++ *msvdx_idle = 1;
++
++#if 0
++ PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
++ "last_sequence:%d and last_submitted_sequence :%d\n",
++ msvdx_priv->msvdx_current_sequence,
++ msvdx_priv->msvdx_last_sequence,
++ dev_priv->sequence[PSB_ENGINE_VIDEO]);
++#endif
++
++ tmp = msvdx_priv->msvdx_current_sequence -
++ dev_priv->sequence[PSB_ENGINE_VIDEO];
++
++ if (tmp > 0x0FFFFFFF) {
++ if (msvdx_priv->msvdx_current_sequence ==
++ msvdx_priv->msvdx_last_sequence) {
++ DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
++ msvdx_priv->msvdx_current_sequence);
++ *msvdx_lockup = 1;
++ } else {
++ PSB_DEBUG_GENERAL("MSVDXTimer: "
++ "msvdx responded fine so far\n");
++ msvdx_priv->msvdx_last_sequence =
++ msvdx_priv->msvdx_current_sequence;
++ *msvdx_idle = 0;
++ }
++ }
++}
++
++int psb_check_msvdx_idle(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ if (msvdx_priv->msvdx_fw_loaded == 0)
++ return 0;
++
++ if (msvdx_priv->msvdx_busy) {
++ PSB_DEBUG_PM("MSVDX: psb_check_msvdx_idle returns busy\n");
++ return -EBUSY;
++ }
++
++ if (msvdx_priv->msvdx_hw_busy) {
++ PSB_DEBUG_PM("MSVDX: %s, HW is busy\n", __func__);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++int lnc_video_getparam(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_lnc_video_getparam_arg *arg = data;
++ int ret = 0;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
++#if defined(CONFIG_MRST_RAR_HANDLER)
++ struct RAR_buffer rar_buf;
++ size_t rar_status;
++#endif
++ void *rar_handler;
++ uint32_t offset = 0;
++ uint32_t device_info = 0;
++ uint32_t rar_ci_info[2];
++
++ switch (arg->key) {
++ case LNC_VIDEO_GETPARAM_RAR_INFO:
++ rar_ci_info[0] = dev_priv->rar_region_start;
++ rar_ci_info[1] = dev_priv->rar_region_size;
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &rar_ci_info[0],
++ sizeof(rar_ci_info));
++ break;
++ case LNC_VIDEO_GETPARAM_CI_INFO:
++ rar_ci_info[0] = dev_priv->ci_region_start;
++ rar_ci_info[1] = dev_priv->ci_region_size;
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &rar_ci_info[0],
++ sizeof(rar_ci_info));
++ break;
++ case LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET:
++ ret = copy_from_user(&rar_handler,
++ (void __user *)((unsigned long)arg->arg),
++ sizeof(rar_handler));
++ if (ret)
++ break;
++
++#if defined(CONFIG_MRST_RAR_HANDLER)
++ rar_buf.info.handle = (__u32)rar_handler;
++ rar_buf.bus_address = (dma_addr_t)dev_priv->rar_region_start;
++ rar_status = 1;
++
++ rar_status = rar_handle_to_bus(&rar_buf, 1);
++ if (rar_status != 1) {
++ DRM_ERROR("MSVDX:rar_handle_to_bus failed\n");
++ ret = -1;
++ break;
++ }
++ rar_status = rar_release(&rar_buf, 1);
++ if (rar_status != 1)
++ DRM_ERROR("MSVDX:rar_release failed\n");
++
++ offset = (uint32_t) rar_buf.bus_address - dev_priv->rar_region_start;
++ PSB_DEBUG_GENERAL("MSVDX:RAR handler %p, bus address=0x%08x,"
++ "RAR region=0x%08x\n",
++ rar_handler,
++ (uint32_t)rar_buf.bus_address,
++ dev_priv->rar_region_start);
++#endif
++ ret = copy_to_user((void __user *)((unsigned long)arg->value),
++ &offset,
++ sizeof(offset));
++ break;
++ case LNC_VIDEO_FRAME_SKIP:
++ ret = lnc_video_frameskip(dev, arg->value);
++ break;
++ case LNC_VIDEO_DEVICE_INFO:
++ device_info = 0xffff & dev_priv->video_device_fuse;
++ device_info |= (0xffff & dev->pci_device) << 16;
++
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &device_info, sizeof(device_info));
++ break;
++ default:
++ ret = -EFAULT;
++ break;
++ }
++
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++}
++
++inline int psb_try_power_down_msvdx(struct drm_device *dev)
++{
++ ospm_apm_power_down_msvdx(dev);
++ return 0;
++}
++
++int psb_msvdx_save_context(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int offset = 0;
++
++ msvdx_priv->msvdx_needs_reset = 1;
++
++ for (offset = 0; offset < VEC_LOCAL_MEM_BYTE_SIZE / 4; ++offset)
++ msvdx_priv->vec_local_mem_data[offset] =
++ PSB_RMSVDX32(VEC_LOCAL_MEM_OFFSET + offset * 4);
++
++ msvdx_priv->vec_local_mem_saved = 1;
++
++ return 0;
++}
++
++int psb_msvdx_restore_context(struct drm_device *dev)
++{
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_msvdx.h b/drivers/gpu/drm/mrst/drv/psb_msvdx.h
+new file mode 100644
+index 0000000..c067203
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_msvdx.h
+@@ -0,0 +1,610 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_MSVDX_H_
++#define _PSB_MSVDX_H_
++
++#include "psb_drv.h"
++#include "img_types.h"
++
++#if defined(CONFIG_MRST_RAR_HANDLER)
++#include "rar/memrar.h"
++#endif
++
++extern int drm_msvdx_pmpolicy;
++
++int psb_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t offset,
++ uint32_t value,
++ uint32_t enable);
++
++IMG_BOOL psb_msvdx_interrupt(IMG_VOID *pvData);
++
++int psb_msvdx_init(struct drm_device *dev);
++int psb_msvdx_uninit(struct drm_device *dev);
++int psb_msvdx_reset(struct drm_psb_private *dev_priv);
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
++void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
++void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
++ int *msvdx_lockup, int *msvdx_idle);
++int psb_setup_fw(struct drm_device *dev);
++int psb_check_msvdx_idle(struct drm_device *dev);
++int psb_wait_msvdx_idle(struct drm_device *dev);
++int psb_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++int psb_msvdx_save_context(struct drm_device *dev);
++int psb_msvdx_restore_context(struct drm_device *dev);
++
++bool
++psb_host_second_pass(struct drm_device *dev,
++ uint32_t ui32OperatingModeCmd,
++ void *pvParamBase,
++ uint32_t PicWidthInMbs,
++ uint32_t FrameHeightInMbs,
++ uint32_t ui32DeblockSourceY,
++ uint32_t ui32DeblockSourceUV);
++
++/* Non-Optimal Invalidation is not default */
++#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++
++#define FW_VA_RENDER_HOST_INT 0x00004000
++#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
++
++/* There is no work currently underway on the hardware */
++#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
++#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
++#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
++ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \
++ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
++ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
++
++#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
++ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
++ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
++
++#define POULSBO_D0 0x5
++#define POULSBO_D1 0x6
++#define PSB_REVID_OFFSET 0x8
++
++#define MTX_CODE_BASE (0x80900000)
++#define MTX_DATA_BASE (0x82880000)
++#define PC_START_ADDRESS (0x80900000)
++
++#define MTX_CORE_CODE_MEM (0x10)
++#define MTX_CORE_DATA_MEM (0x18)
++
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \
++ (0x00010000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \
++ (0x00100000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \
++ (0x01000000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \
++ (0x10000000)
++
++#define clk_enable_all \
++(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
++
++#define clk_enable_minimal \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++#define clk_enable_auto \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++#define msvdx_sw_reset_all \
++(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
++
++#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \
++ (((R_SPECIFIER)<<4) | (U_SPECIFIER))
++#define MTX_PC MTX_INTERNAL_REG(0, 5)
++
++#define RENDEC_A_SIZE (4 * 1024 * 1024)
++#define RENDEC_B_SIZE (1024 * 1024)
++
++#define MEMIO_READ_FIELD(vpMem, field) \
++ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
++ & field##_MASK) >> field##_SHIFT))
++
++#define MEMIO_WRITE_FIELD(vpMem, field, value) \
++ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
++ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
++ & (field##_TYPE)~field##_MASK) | \
++ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK);
++
++#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \
++ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
++ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \
++ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));
++
++#define REGIO_READ_FIELD(reg_val, reg, field) \
++ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
++
++#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \
++ (reg_val) = \
++ ((reg_val) & ~(reg##_##field##_MASK)) | \
++ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
++
++#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \
++ (reg_val) = \
++ ((reg_val) | ((value) << (reg##_##field##_SHIFT)));
++
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \
++ (0x00000001)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \
++ (0x00000002)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \
++ (0x00000004)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \
++ (0x00000008)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \
++ (0x00000010)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \
++ (0x00000020)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \
++ (0x00000040)
++
++#define clk_enable_all \
++ (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
++
++#define clk_enable_minimal \
++ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++/* MTX registers */
++#define MSVDX_MTX_ENABLE (0x0000)
++#define MSVDX_MTX_KICKI (0x0088)
++#define MSVDX_MTX_KICK (0x0080)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
++#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
++#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
++#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
++#define MSVDX_MTX_SOFT_RESET (0x0200)
++
++/* MSVDX registers */
++#define MSVDX_CONTROL (0x0600)
++#define MSVDX_INTERRUPT_CLEAR (0x060C)
++#define MSVDX_INTERRUPT_STATUS (0x0608)
++#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
++#define MSVDX_MMU_CONTROL0 (0x0680)
++#define MSVDX_MTX_RAM_BANK (0x06F0)
++#define MSVDX_MAN_CLK_ENABLE (0x0620)
++
++/* RENDEC registers */
++#define MSVDX_RENDEC_CONTROL0 (0x0868)
++#define MSVDX_RENDEC_CONTROL1 (0x086C)
++#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
++#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
++#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
++#define MSVDX_RENDEC_READ_DATA (0x0898)
++#define MSVDX_RENDEC_CONTEXT0 (0x0950)
++#define MSVDX_RENDEC_CONTEXT1 (0x0954)
++#define MSVDX_RENDEC_CONTEXT2 (0x0958)
++#define MSVDX_RENDEC_CONTEXT3 (0x095C)
++#define MSVDX_RENDEC_CONTEXT4 (0x0960)
++#define MSVDX_RENDEC_CONTEXT5 (0x0964)
++
++/* CMD */
++#define MSVDX_CMDS_END_SLICE_PICTURE (0x1404)
++
++/*
++ * This defines the MSVDX communication buffer
++ */
++#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
++/*!< Host buffer size (in 32-bit words) */
++#define NUM_WORDS_HOST_BUF (100)
++/*!< MTX buffer size (in 32-bit words) */
++#define NUM_WORDS_MTX_BUF (100)
++
++/* There is no work currently underway on the hardware */
++#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
++
++#define MSVDX_COMMS_AREA_ADDR (0x02fe0)
++
++#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
++#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
++#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
++#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
++#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
++#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
++#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
++#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
++#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
++#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18)
++#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
++#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
++#define MSVDX_COMMS_TO_MTX_BUF \
++ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
++
++/*
++#define MSVDX_COMMS_AREA_END \
++ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
++*/
++#define MSVDX_COMMS_AREA_END 0x03000
++
++#if (MSVDX_COMMS_AREA_END != 0x03000)
++#error
++#endif
++
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
++
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
++
++#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
++#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
++
++#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
++#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
++
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
++
++#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
++#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
++
++#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
++#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
++
++#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
++#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
++
++#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
++#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
++
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
++
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
++
++#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
++#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
++
++#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_MASK (0x00000300)
++#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_SHIFT (8)
++
++/* Start of parser specific Host->MTX messages. */
++#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80)
++
++/* Start of parser specific MTX->Host messages. */
++#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0)
++
++#define FWRK_MSGID_PADDING (0)
++
++#define FWRK_GENMSG_SIZE_TYPE uint8_t
++#define FWRK_GENMSG_SIZE_MASK (0xFF)
++#define FWRK_GENMSG_SIZE_SHIFT (0)
++#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
++#define FWRK_GENMSG_ID_TYPE uint8_t
++#define FWRK_GENMSG_ID_MASK (0xFF)
++#define FWRK_GENMSG_ID_SHIFT (0)
++#define FWRK_GENMSG_ID_OFFSET (0x0001)
++#define FWRK_PADMSG_SIZE (2)
++
++/* Deblock CMD_ID */
++#define MSVDX_DEBLOCK_REG_SET 0x10000000
++#define MSVDX_DEBLOCK_REG_GET 0x20000000
++#define MSVDX_DEBLOCK_REG_POLLn 0x30000000
++#define MSVDX_DEBLOCK_REG_POLLx 0x40000000
++
++/* vec local MEM save/restore */
++#define VEC_LOCAL_MEM_BYTE_SIZE (4 * 1024)
++#define VEC_LOCAL_MEM_OFFSET 0x2000
++
++/* This type defines the framework specified message ids */
++enum {
++ /* ! Sent by the DXVA driver on the host to the mtx firmware.
++ */
++ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
++ VA_MSGID_RENDER,
++ VA_MSGID_DEBLOCK,
++ VA_MSGID_BUBBLE,
++
++ /* Test Messages */
++ VA_MSGID_TEST1,
++ VA_MSGID_TEST2,
++
++ /*! Sent by the mtx firmware to itself.
++ */
++ VA_MSGID_RENDER_MC_INTERRUPT,
++
++ /*! Sent by the DXVA firmware on the MTX to the host.
++ */
++ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
++ VA_MSGID_CMD_COMPLETED_BATCH,
++ VA_MSGID_DEBLOCK_REQUIRED,
++ VA_MSGID_TEST_RESPONCE,
++ VA_MSGID_ACK,
++
++ VA_MSGID_CMD_FAILED,
++ VA_MSGID_CMD_UNSUPPORTED,
++ VA_MSGID_CMD_HW_PANIC,
++};
++
++/* Deblock parameters */
++struct DEBLOCKPARAMS {
++ uint32_t handle; /* struct ttm_buffer_object * of REGIO */
++ uint32_t buffer_size;
++ uint32_t ctxid;
++
++ uint32_t *pPicparams;
++ struct ttm_bo_kmap_obj *regio_kmap; /* virtual of regio */
++ uint32_t pad[3];
++};
++
++struct psb_msvdx_deblock_queue {
++
++ struct list_head head;
++ struct DEBLOCKPARAMS dbParams;
++};
++
++/* MSVDX private structure */
++struct msvdx_private {
++ int msvdx_needs_reset;
++
++ unsigned int pmstate;
++
++ struct sysfs_dirent *sysfs_pmstate;
++
++ uint32_t msvdx_current_sequence;
++ uint32_t msvdx_last_sequence;
++
++ /*
++ *MSVDX Rendec Memory
++ */
++ struct ttm_buffer_object *ccb0;
++ uint32_t base_addr0;
++ struct ttm_buffer_object *ccb1;
++ uint32_t base_addr1;
++
++ /*
++ *msvdx command queue
++ */
++ spinlock_t msvdx_lock;
++ struct mutex msvdx_mutex;
++ struct list_head msvdx_queue;
++ int msvdx_busy;
++ int msvdx_fw_loaded;
++ void *msvdx_fw;
++ int msvdx_fw_size;
++
++ struct list_head deblock_queue; /* deblock parameter list */
++
++ uint32_t msvdx_hw_busy;
++
++ uint32_t *vec_local_mem_data;
++ uint32_t vec_local_mem_size;
++ uint32_t vec_local_mem_saved;
++};
++
++/* MSVDX Firmware interface */
++#define FW_VA_INIT_SIZE (8)
++#define FW_VA_DEBUG_TEST2_SIZE (4)
++
++/* FW_VA_DEBUG_TEST2 MSG_SIZE */
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF)
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000)
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0)
++
++/* FW_VA_DEBUG_TEST2 ID */
++#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t
++#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF)
++#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001)
++#define FW_VA_DEBUG_TEST2_ID_SHIFT (0)
++
++/* FW_VA_CMD_FAILED FENCE_VALUE */
++#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
++#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_CMD_FAILED IRQSTATUS */
++#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
++#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
++#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED FENCE_VALUE */
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED FLAGS */
++#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
++#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
++#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
++#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED NO_TICKS */
++#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t
++#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF)
++#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002)
++#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0)
++
++/* FW_VA_DEBLOCK_REQUIRED CONTEXT */
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
++
++/* FW_VA_INIT GLOBAL_PTD */
++#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t
++#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF)
++#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004)
++#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0)
++
++/* FW_VA_RENDER FENCE_VALUE */
++#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
++#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_RENDER MMUPTD */
++#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
++#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
++#define FW_VA_RENDER_MMUPTD_SHIFT (0)
++
++/* FW_VA_RENDER BUFFER_ADDRESS */
++#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t
++#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008)
++#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0)
++
++/* FW_VA_RENDER BUFFER_SIZE */
++#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
++#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
++#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
++#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
++
++ /* FW_DXVA_DEBLOCK MSG_SIZE */
++#define FW_DXVA_DEBLOCK_MSG_SIZE_ALIGNMENT (1)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_TYPE uint8_t
++#define FW_DXVA_DEBLOCK_MSG_SIZE_MASK (0xFF)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_LSBMASK (0xFF)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_OFFSET (0x0000)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK ID */
++#define FW_DXVA_DEBLOCK_ID_ALIGNMENT (1)
++#define FW_DXVA_DEBLOCK_ID_TYPE uint8_t
++#define FW_DXVA_DEBLOCK_ID_MASK (0xFF)
++#define FW_DXVA_DEBLOCK_ID_LSBMASK (0xFF)
++#define FW_DXVA_DEBLOCK_ID_OFFSET (0x0001)
++#define FW_DXVA_DEBLOCK_ID_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK FENCE_VALUE */
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_ALIGNMENT (4)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_TYPE uint32_t
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_OFFSET (0x0008)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK MMUPTD */
++#define FW_DXVA_DEBLOCK_MMUPTD_ALIGNMENT (4)
++#define FW_DXVA_DEBLOCK_MMUPTD_TYPE uint32_t
++#define FW_DXVA_DEBLOCK_MMUPTD_MASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_MMUPTD_LSBMASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_MMUPTD_OFFSET (0x000C)
++#define FW_DXVA_DEBLOCK_MMUPTD_SHIFT (0)
++
++
++static inline void psb_msvdx_clearirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long mtx_int = 0;
++
++ PSB_DEBUG_IRQ("MSVDX: clear IRQ\n");
++
++ /* Clear MTX interrupt */
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++}
++
++
++static inline void psb_msvdx_disableirq(struct drm_device *dev)
++{
++ /* nothing */
++}
++
++
++static inline void psb_msvdx_enableirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long enables = 0;
++
++ PSB_DEBUG_IRQ("MSVDX: enable MSVDX MTX IRQ\n");
++ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
++}
++
++#define MSVDX_NEW_PMSTATE(drm_dev, msvdx_priv, new_state) \
++do { \
++ msvdx_priv->pmstate = new_state; \
++ sysfs_notify_dirent(msvdx_priv->sysfs_pmstate); \
++ PSB_DEBUG_PM("MSVDX: %s\n", \
++ (new_state == PSB_PMSTATE_POWERUP) ? "powerup": "powerdown"); \
++} while (0)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_msvdxinit.c b/drivers/gpu/drm/mrst/drv/psb_msvdxinit.c
+new file mode 100644
+index 0000000..a543778
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_msvdxinit.c
+@@ -0,0 +1,770 @@
++/**************************************************************************
++ * psb_msvdxinit.c
++ * MSVDX initialization and mtx-firmware upload
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include <linux/firmware.h>
++
++#define MSVDX_REG (dev_priv->msvdx_reg)
++uint8_t psb_rev_id;
++/*MSVDX FW header*/
++struct msvdx_fw {
++ uint32_t ver;
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++};
++
++int psb_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t offset, uint32_t value, uint32_t enable)
++{
++ uint32_t tmp;
++ uint32_t poll_cnt = 10000;
++ while (poll_cnt) {
++ tmp = PSB_RMSVDX32(offset);
++ if (value == (tmp & enable)) /* All the bits are reset */
++ return 0; /* So exit */
++
++ /* Wait a bit */
++ DRM_UDELAY(1000);
++ poll_cnt--;
++ }
++ DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
++ " expecting %08x (mask %08x), got %08x\n",
++ offset, value, enable, tmp);
++
++ return 1;
++}
++
++int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
++{
++ int ret = 0;
++ uint32_t mtx_int = 0;
++
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++
++ ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
++ /* Required value */
++ mtx_int,
++ /* Enabled bits */
++ mtx_int);
++
++ if (ret) {
++ DRM_ERROR("MSVDX: Error Mtx did not return"
++ " int within a resonable time\n");
++ return ret;
++ }
++
++ PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
++
++ /* Got it so clear the bit */
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++
++ return ret;
++}
++
++void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
++ const uint32_t core_reg, const uint32_t val)
++{
++ uint32_t reg = 0;
++
++ /* Put data in MTX_RW_DATA */
++ PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
++
++ /* DREADY is set to 0 and request a write */
++ reg = core_reg;
++ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MTX_RNW, 0);
++ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MTX_DREADY, 0);
++ PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
++
++ psb_wait_for_register(dev_priv,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++}
++
++void psb_upload_fw(struct drm_psb_private *dev_priv,
++ const uint32_t data_mem, uint32_t ram_bank_size,
++ uint32_t address, const unsigned int words,
++ const uint32_t * const data)
++{
++ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
++ uint32_t access_ctrl;
++
++ /* Save the access control register... */
++ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ /* Wait for MCMSTAT to become be idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ for (loop = 0; loop < words; loop++) {
++ ram_id = data_mem + (address / ram_bank_size);
++ if (ram_id != cur_bank) {
++ addr = address >> 2;
++ ctrl = 0;
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMID, ram_id);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCM_ADDR, addr);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMAI, 1);
++ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++ cur_bank = ram_id;
++ }
++ address += 4;
++
++ PSB_WMSVDX32(data[loop],
++ MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
++
++ /* Wait for MCMSTAT to become be idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++ }
++ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
++
++ /* Restore the access control register... */
++ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++}
++
++static int psb_verify_fw(struct drm_psb_private *dev_priv,
++ const uint32_t ram_bank_size,
++ const uint32_t data_mem, uint32_t address,
++ const uint32_t words, const uint32_t * const data)
++{
++ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
++ uint32_t access_ctrl;
++ int ret = 0;
++
++ /* Save the access control register... */
++ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ /* Wait for MCMSTAT to become be idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ for (loop = 0; loop < words; loop++) {
++ uint32_t tmp;
++ ram_id = data_mem + (address / ram_bank_size);
++
++ if (ram_id != cur_bank) {
++ addr = address >> 2;
++ ctrl = 0;
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMID, ram_id);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCM_ADDR, addr);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMAI, 1);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMR, 1);
++
++ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ cur_bank = ram_id;
++ }
++ address += 4;
++
++ /* Wait for MCMSTAT to become be idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
++ if (data[loop] != tmp) {
++ DRM_ERROR("psb: Firmware validation fails"
++ " at index=%08x\n", loop);
++ ret = 1;
++ break;
++ }
++ }
++
++ /* Restore the access control register... */
++ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ return ret;
++}
++
++static uint32_t *msvdx_get_fw(struct drm_device *dev,
++ const struct firmware **raw, uint8_t *name)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int rc, fw_size;
++ int *ptr = NULL;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ rc = request_firmware(raw, name, &dev->pdev->dev);
++ if (rc < 0) {
++ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
++ name, rc);
++ return NULL;
++ }
++
++ if ((*raw)->size < sizeof(struct msvdx_fw)) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return NULL;
++ }
++
++ ptr = (int *) ((*raw))->data;
++
++ if (!ptr) {
++ DRM_ERROR("MSVDX: Failed to load %s\n", name);
++ return NULL;
++ }
++
++ /* another sanity check... */
++ fw_size = sizeof(struct msvdx_fw) +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
++ if ((*raw)->size != fw_size) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return NULL;
++ }
++ msvdx_priv->msvdx_fw = kzalloc(fw_size, GFP_KERNEL);
++ if (msvdx_priv->msvdx_fw == NULL)
++ DRM_ERROR("MSVDX: allocate FW buffer failed\n");
++ else {
++ memcpy(msvdx_priv->msvdx_fw, ptr, fw_size);
++ msvdx_priv->msvdx_fw_size = fw_size;
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: releasing firmware resouces\n");
++ release_firmware(*raw);
++
++ return msvdx_priv->msvdx_fw;
++}
++
++int psb_setup_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++
++ uint32_t ram_bank_size;
++ struct msvdx_fw *fw;
++ uint32_t *fw_ptr = NULL;
++ uint32_t *text_ptr = NULL;
++ uint32_t *data_ptr = NULL;
++ const struct firmware *raw = NULL;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /* todo : Assert the clock is on - if not turn it on to upload code */
++ PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* Reset MTX */
++ PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
++ MSVDX_MTX_SOFT_RESET);
++
++ /* Initialses Communication controll area to 0 */
++/*
++ if (psb_rev_id >= POULSBO_D1) {
++ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
++ " or later revision.\n");
++ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
++ MSVDX_COMMS_OFFSET_FLAGS);
++ } else {
++ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
++ " or earlier revision.\n");
++ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
++ MSVDX_COMMS_OFFSET_FLAGS);
++ }
++*/
++
++ PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
++ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
++ PSB_WMSVDX32(0, MSVDX_COMMS_OFFSET_FLAGS);
++ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
++ /* read register bank size */
++ {
++ uint32_t bank_size, reg;
++ reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
++ bank_size =
++ REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
++ CR_MTX_RAM_BANK_SIZE);
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
++ ram_bank_size);
++
++ /* if FW already loaded from storage */
++ if (msvdx_priv->msvdx_fw)
++ fw_ptr = msvdx_priv->msvdx_fw;
++ else {
++ PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw.bin by udevd\n");
++ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
++ }
++
++ if (!fw_ptr) {
++ DRM_ERROR("MSVDX:load msvdx_fw.bin failed,is udevd running?\n");
++ ret = 1;
++ goto out;
++ }
++
++ fw = (struct msvdx_fw *) fw_ptr;
++ if (fw->ver != 0x02) {
++ DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
++ "got version=%02x expected version=%02x\n",
++ fw->ver, 0x02);
++ ret = 1;
++ goto out;
++ }
++
++ text_ptr =
++ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
++ data_ptr = text_ptr + fw->text_size;
++
++ if (fw->text_size == 2858)
++ PSB_DEBUG_GENERAL(
++ "MSVDX: FW ver 1.00.10.0187 of SliceSwitch variant\n");
++ else if (fw->text_size == 3021)
++ PSB_DEBUG_GENERAL(
++ "MSVDX: FW ver 1.00.10.0187 of FrameSwitch variant\n");
++ else if (fw->text_size == 2841)
++ PSB_DEBUG_GENERAL("MSVDX: FW ver 1.00.10.0788\n");
++ else
++ PSB_DEBUG_GENERAL("MSVDX: FW ver unknown\n");
++
++
++ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
++ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
++ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
++ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
++ fw->data_location);
++ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
++ *text_ptr);
++ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
++ *data_ptr);
++
++ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
++ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
++ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
++ text_ptr);
++ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
++ fw->data_location - MTX_DATA_BASE, fw->data_size,
++ data_ptr);
++
++#if 0
++ /* todo : Verify code upload possibly only in debug */
++ ret = psb_verify_fw(dev_priv, ram_bank_size,
++ MTX_CORE_CODE_MEM,
++ PC_START_ADDRESS - MTX_CODE_BASE,
++ fw->text_size, text_ptr);
++ if (ret) {
++ /* Firmware code upload failed */
++ ret = 1;
++ goto out;
++ }
++
++ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
++ fw->data_location - MTX_DATA_BASE,
++ fw->data_size, data_ptr);
++ if (ret) {
++ /* Firmware data upload failed */
++ ret = 1;
++ goto out;
++ }
++#else
++ (void)psb_verify_fw;
++#endif
++ /* -- Set starting PC address */
++ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
++
++ /* -- Turn on the thread */
++ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
++
++ /* Wait for the signature value to be written back */
++ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
++ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
++ 0xffffffff /* Enabled bits */);
++ if (ret) {
++ DRM_ERROR("MSVDX: firmware fails to initialize.\n");
++ goto out;
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
++ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
++ MSVDX_COMMS_AREA_ADDR);
++#if 0
++
++ /* Send test message */
++ {
++ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
++
++ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
++ FW_VA_DEBUG_TEST2_SIZE);
++ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
++ VA_MSGID_TEST2);
++
++ ret = psb_mtx_send(dev_priv, msg_buf);
++ if (ret) {
++ DRM_ERROR("psb: MSVDX sending fails.\n");
++ goto out;
++ }
++
++ /* Wait for Mtx to ack this message */
++ psb_poll_mtx_irq(dev_priv);
++
++ }
++#endif
++out:
++
++ return ret;
++}
++
++
++static void psb_free_ccb(struct ttm_buffer_object **ccb)
++{
++ ttm_bo_unref(ccb);
++ *ccb = NULL;
++}
++
++/**
++ * Reset chip and disable interrupts.
++ * Return 0 success, 1 failure
++ */
++int psb_msvdx_reset(struct drm_psb_private *dev_priv)
++{
++ int ret = 0;
++
++ /* Issue software reset */
++ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
++
++ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
++ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
++
++ if (!ret) {
++ /* Clear interrupt enabled flag */
++ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
++
++ /* Clear any pending interrupt flags */
++ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
++ }
++
++ /* mutex_destroy(&msvdx_priv->msvdx_mutex); */
++
++ return ret;
++}
++
++static int psb_allocate_ccb(struct drm_device *dev,
++ struct ttm_buffer_object **ccb,
++ uint32_t *base_addr, unsigned long size)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++
++ PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
++
++ ret = ttm_buffer_object_create(bdev, size,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
++ NULL, ccb);
++ if (ret) {
++ DRM_ERROR("MSVDX:failed to allocate CCB.\n");
++ *ccb = NULL;
++ return 1;
++ }
++
++ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
++ ttm_bo_unref(ccb);
++ *ccb = NULL;
++ return 1;
++ }
++/*
++ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
++ RENDEC_A_SIZE);
++*/
++ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
++ size);
++ ttm_bo_kunmap(&tmp_kmap);
++
++ *base_addr = (*ccb)->offset;
++ return 0;
++}
++
++static ssize_t psb_msvdx_pmstate_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct drm_psb_private *dev_priv;
++ struct msvdx_private *msvdx_priv;
++ unsigned int pmstate;
++ unsigned long flags;
++ int ret = -EINVAL;
++
++ if (drm_dev == NULL)
++ return 0;
++
++ dev_priv = drm_dev->dev_private;
++ msvdx_priv = dev_priv->msvdx_private;
++ pmstate = msvdx_priv->pmstate;
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, flags);
++ ret = sprintf(buf, "%s\n",
++ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup" : "powerdown");
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, flags);
++
++ return ret;
++}
++
++static DEVICE_ATTR(msvdx_pmstate, 0444, psb_msvdx_pmstate_show, NULL);
++
++int psb_msvdx_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t clk_gate_ctrl = clk_enable_all; */
++ uint32_t cmd;
++ int ret;
++ struct msvdx_private *msvdx_priv;
++
++ if (!dev_priv->msvdx_private) {
++ msvdx_priv = kmalloc(sizeof(struct msvdx_private), GFP_KERNEL);
++ if (msvdx_priv == NULL)
++ goto err_exit;
++
++ dev_priv->msvdx_private = msvdx_priv;
++ memset(msvdx_priv, 0, sizeof(struct msvdx_private));
++
++ /* get device --> drm_device --> drm_psb_private --> msvdx_priv
++ * for psb_msvdx_pmstate_show: msvdx_pmpolicy
++ * if not pci_set_drvdata, can't get drm_device from device
++ */
++ /* pci_set_drvdata(dev->pdev, dev); */
++ if (device_create_file(&dev->pdev->dev,
++ &dev_attr_msvdx_pmstate))
++ DRM_ERROR("MSVDX: could not create sysfs file\n");
++ msvdx_priv->sysfs_pmstate = sysfs_get_dirent(
++ dev->pdev->dev.kobj.sd, "msvdx_pmstate");
++ }
++
++ msvdx_priv = dev_priv->msvdx_private;
++ if (!msvdx_priv->ccb0) { /* one for the first time */
++ /* Initialize comand msvdx queueing */
++ INIT_LIST_HEAD(&msvdx_priv->msvdx_queue);
++ INIT_LIST_HEAD(&msvdx_priv->deblock_queue);
++ mutex_init(&msvdx_priv->msvdx_mutex);
++ spin_lock_init(&msvdx_priv->msvdx_lock);
++ /*figure out the stepping */
++ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
++ }
++
++ msvdx_priv->vec_local_mem_size = VEC_LOCAL_MEM_BYTE_SIZE;
++ if (!msvdx_priv->vec_local_mem_data) {
++ msvdx_priv->vec_local_mem_data =
++ kmalloc(msvdx_priv->vec_local_mem_size, GFP_KERNEL);
++ memset(msvdx_priv->vec_local_mem_data, 0, msvdx_priv->vec_local_mem_size);
++ }
++
++ msvdx_priv->msvdx_busy = 0;
++ msvdx_priv->msvdx_hw_busy = 1;
++
++ /* Enable Clocks */
++ PSB_DEBUG_GENERAL("Enabling clocks\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++
++ /* Enable MMU by removing all bypass bits */
++ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0);
++
++ /* move firmware loading to the place receiving first command buffer */
++
++ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n");
++ /* Allocate device virtual memory as required by rendec.... */
++ if (!msvdx_priv->ccb0) {
++ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb0,
++ &msvdx_priv->base_addr0,
++ RENDEC_A_SIZE);
++ if (ret) {
++ PSB_DEBUG_GENERAL("Allocate Rendec A fail\n");
++ goto err_exit;
++ }
++ }
++
++ if (!msvdx_priv->ccb1) {
++ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb1,
++ &msvdx_priv->base_addr1,
++ RENDEC_B_SIZE);
++ if (ret)
++ goto err_exit;
++ }
++
++
++ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
++ msvdx_priv->base_addr0, msvdx_priv->base_addr1);
++
++ PSB_WMSVDX32(msvdx_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
++ PSB_WMSVDX32(msvdx_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
++ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
++ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE);
++
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_DECODE_START_SIZE, 0);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_BURST_SIZE_W, 1);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_BURST_SIZE_R, 1);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_EXTERNAL_MEMORY, 1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1);
++
++ cmd = 0x00101010;
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
++ 1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0);
++
++ /* PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
++ PSB_DEBUG_INIT("MSVDX:defer firmware loading to the"
++ " place when receiving user space commands\n");
++
++ msvdx_priv->msvdx_fw_loaded = 0; /* need to load firware */
++
++ psb_msvdx_clearirq(dev);
++ psb_msvdx_enableirq(dev);
++
++ if (IS_MRST(dev)) {
++ PSB_DEBUG_INIT("MSDVX:old clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_MSVDX_CLOCKGATING));
++ }
++
++ {
++ cmd = 0;
++ cmd = PSB_RMSVDX32(0x818); /* VEC_SHIFTREG_CONTROL */
++ REGIO_WRITE_FIELD(cmd,
++ VEC_SHIFTREG_CONTROL,
++ SR_MASTER_SELECT,
++ 1); /* Host */
++ PSB_WMSVDX32(cmd, 0x818);
++ }
++
++#if 0
++ ret = psb_setup_fw(dev);
++ if (ret)
++ goto err_exit;
++ /* Send Initialisation message to firmware */
++ if (0) {
++ uint32_t msg_init[FW_VA_INIT_SIZE >> 2];
++ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE,
++ FW_VA_INIT_SIZE);
++ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT);
++
++ /* Need to set this for all but A0 */
++ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD,
++ psb_get_default_pd_addr(dev_priv->mmu));
++
++ ret = psb_mtx_send(dev_priv, msg_init);
++ if (ret)
++ goto err_exit;
++
++ psb_poll_mtx_irq(dev_priv);
++ }
++#endif
++
++ return 0;
++
++err_exit:
++ DRM_ERROR("MSVDX: initialization failed\n");
++ if (msvdx_priv->ccb0)
++ psb_free_ccb(&msvdx_priv->ccb0);
++ if (msvdx_priv->ccb1)
++ psb_free_ccb(&msvdx_priv->ccb1);
++ kfree(dev_priv->msvdx_private);
++
++ return 1;
++}
++
++int psb_msvdx_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /* Reset MSVDX chip */
++ psb_msvdx_reset(dev_priv);
++
++ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
++ PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n");
++ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE);
++
++ if (msvdx_priv->ccb0)
++ psb_free_ccb(&msvdx_priv->ccb0);
++ if (msvdx_priv->ccb1)
++ psb_free_ccb(&msvdx_priv->ccb1);
++ if (msvdx_priv->msvdx_fw)
++ kfree(msvdx_priv->msvdx_fw
++ );
++ if (msvdx_priv->vec_local_mem_data)
++ kfree(msvdx_priv->vec_local_mem_data);
++
++ if (msvdx_priv) {
++ /* pci_set_drvdata(dev->pdev, NULL); */
++ device_remove_file(&dev->pdev->dev, &dev_attr_msvdx_pmstate);
++ sysfs_put(msvdx_priv->sysfs_pmstate);
++ msvdx_priv->sysfs_pmstate = NULL;
++
++ kfree(msvdx_priv);
++ dev_priv->msvdx_private = NULL;
++ }
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_pvr_glue.c b/drivers/gpu/drm/mrst/drv/psb_pvr_glue.c
+new file mode 100644
+index 0000000..cb11475
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_pvr_glue.c
+@@ -0,0 +1,74 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "psb_pvr_glue.h"
++
++/**
++ * FIXME: should NOT use these file under env/linux directly
++ */
++#include "mm.h"
++
++int psb_get_meminfo_by_handle(IMG_HANDLE hKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ PVRSRV_PER_PROCESS_DATA *psPerProc = IMG_NULL;
++ PVRSRV_ERROR eError;
++
++ psPerProc = PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID *)&psKernelMemInfo,
++ hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK) {
++ DRM_ERROR("Cannot find kernel meminfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return -EINVAL;
++ }
++
++ *ppsKernelMemInfo = psKernelMemInfo;
++
++ DRM_DEBUG("Got Kernel MemInfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return 0;
++}
++
++IMG_UINT32 psb_get_tgid(void)
++{
++ return OSGetCurrentProcessIDKM();
++}
++
++int psb_get_pages_by_mem_handle(IMG_HANDLE hOSMemHandle, struct page ***pages)
++{
++ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++ struct page **page_list;
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_ALLOC_PAGES) {
++ DRM_ERROR("MemArea type is not LINUX_MEM_AREA_ALLOC_PAGES\n");
++ return -EINVAL;
++ }
++
++ page_list = psLinuxMemArea->uData.sPageList.pvPageList;
++ if (!page_list) {
++ DRM_DEBUG("Page List is NULL\n");
++ return -ENOMEM;
++ }
++
++ *pages = page_list;
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_pvr_glue.h b/drivers/gpu/drm/mrst/drv/psb_pvr_glue.h
+new file mode 100644
+index 0000000..3c2ae45
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_pvr_glue.h
+@@ -0,0 +1,26 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "psb_drv.h"
++#include "services_headers.h"
++
++extern int psb_get_meminfo_by_handle(IMG_HANDLE hKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++extern IMG_UINT32 psb_get_tgid(void);
++extern int psb_get_pages_by_mem_handle(IMG_HANDLE hOSMemHandle,
++ struct page ***pages);
+diff --git a/drivers/gpu/drm/mrst/drv/psb_reg.h b/drivers/gpu/drm/mrst/drv/psb_reg.h
+new file mode 100644
+index 0000000..ea1e812
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_reg.h
+@@ -0,0 +1,570 @@
++/**************************************************************************
++ *
++ * Copyright (c) (2005-2007) Imagination Technologies Limited.
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
++ *
++ **************************************************************************/
++
++#ifndef _PSB_REG_H_
++#define _PSB_REG_H_
++
++#define PSB_CR_CLKGATECTL 0x0000
++#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
++#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
++#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
++#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
++#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
++#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
++#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
++#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
++#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
++#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
++
++#define PSB_CR_CORE_ID 0x0010
++#define _PSB_CC_ID_ID_SHIFT (16)
++#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
++#define _PSB_CC_ID_CONFIG_SHIFT (0)
++#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
++
++#define PSB_CR_CORE_REVISION 0x0014
++#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
++#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
++#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
++#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
++#define _PSB_CC_REVISION_MINOR_SHIFT (8)
++#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
++#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
++#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
++
++#define PSB_CR_SOFT_RESET 0x0080
++#define _PSB_CS_RESET_TSP_RESET (1 << 6)
++#define _PSB_CS_RESET_ISP_RESET (1 << 5)
++#define _PSB_CS_RESET_USE_RESET (1 << 4)
++#define _PSB_CS_RESET_TA_RESET (1 << 3)
++#define _PSB_CS_RESET_DPM_RESET (1 << 2)
++#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
++#define _PSB_CS_RESET_BIF_RESET (1 << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
++
++#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
++
++#define PSB_CR_EVENT_STATUS2 0x0118
++
++#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
++#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
++
++#define PSB_CR_EVENT_STATUS 0x012C
++
++#define PSB_CR_EVENT_HOST_ENABLE 0x0130
++
++#define PSB_CR_EVENT_HOST_CLEAR 0x0134
++#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
++#define _PSB_CE_TA_DPM_FAULT (1 << 28)
++#define _PSB_CE_TWOD_COMPLETE (1 << 27)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
++#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
++#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
++#define _PSB_CE_SW_EVENT (1 << 14)
++#define _PSB_CE_TA_FINISHED (1 << 13)
++#define _PSB_CE_TA_TERMINATE (1 << 12)
++#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
++#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
++
++
++#define PSB_USE_OFFSET_MASK 0x0007FFFF
++#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
++#define PSB_CR_USE_CODE_BASE0 0x0A0C
++#define PSB_CR_USE_CODE_BASE1 0x0A10
++#define PSB_CR_USE_CODE_BASE2 0x0A14
++#define PSB_CR_USE_CODE_BASE3 0x0A18
++#define PSB_CR_USE_CODE_BASE4 0x0A1C
++#define PSB_CR_USE_CODE_BASE5 0x0A20
++#define PSB_CR_USE_CODE_BASE6 0x0A24
++#define PSB_CR_USE_CODE_BASE7 0x0A28
++#define PSB_CR_USE_CODE_BASE8 0x0A2C
++#define PSB_CR_USE_CODE_BASE9 0x0A30
++#define PSB_CR_USE_CODE_BASE10 0x0A34
++#define PSB_CR_USE_CODE_BASE11 0x0A38
++#define PSB_CR_USE_CODE_BASE12 0x0A3C
++#define PSB_CR_USE_CODE_BASE13 0x0A40
++#define PSB_CR_USE_CODE_BASE14 0x0A44
++#define PSB_CR_USE_CODE_BASE15 0x0A48
++#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
++#define _PSB_CUC_BASE_DM_SHIFT (25)
++#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
++#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
++#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
++#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
++#define _PSB_CUC_DM_VERTEX (0)
++#define _PSB_CUC_DM_PIXEL (1)
++#define _PSB_CUC_DM_RESERVED (2)
++#define _PSB_CUC_DM_EDM (3)
++
++#define PSB_CR_PDS_EXEC_BASE 0x0AB8
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
++
++#define PSB_CR_EVENT_KICKER 0x0AC4
++#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
++
++#define PSB_CR_EVENT_KICK 0x0AC8
++#define _PSB_CE_KICK_NOW (1 << 0)
++
++
++#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
++
++#define PSB_CR_BIF_CTRL 0x0C00
++#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
++#define _PSB_CB_CTRL_INVALDC (1 << 3)
++#define _PSB_CB_CTRL_FLUSH (1 << 2)
++
++#define PSB_CR_BIF_INT_STAT 0x0C04
++
++#define PSB_CR_BIF_FAULT 0x0C08
++#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
++#define _PSB_CBI_STAT_FAULT_SHIFT (0)
++#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
++#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
++#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
++#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
++#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
++#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
++#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
++#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
++#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
++#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
++
++#define PSB_CR_BIF_BANK0 0x0C78
++
++#define PSB_CR_BIF_BANK1 0x0C7C
++
++#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
++
++#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
++
++#define PSB_CR_2D_SOCIF 0x0E18
++#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
++#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
++#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
++
++#define PSB_CR_2D_BLIT_STATUS 0x0E04
++#define _PSB_C2B_STATUS_BUSY (1 << 24)
++#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
++#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
++
++/*
++ * 2D defs.
++ */
++
++/*
++ * 2D Slave Port Data : Block Header's Object Type
++ */
++
++#define PSB_2D_CLIP_BH (0x00000000)
++#define PSB_2D_PAT_BH (0x10000000)
++#define PSB_2D_CTRL_BH (0x20000000)
++#define PSB_2D_SRC_OFF_BH (0x30000000)
++#define PSB_2D_MASK_OFF_BH (0x40000000)
++#define PSB_2D_RESERVED1_BH (0x50000000)
++#define PSB_2D_RESERVED2_BH (0x60000000)
++#define PSB_2D_FENCE_BH (0x70000000)
++#define PSB_2D_BLIT_BH (0x80000000)
++#define PSB_2D_SRC_SURF_BH (0x90000000)
++#define PSB_2D_DST_SURF_BH (0xA0000000)
++#define PSB_2D_PAT_SURF_BH (0xB0000000)
++#define PSB_2D_SRC_PAL_BH (0xC0000000)
++#define PSB_2D_PAT_PAL_BH (0xD0000000)
++#define PSB_2D_MASK_SURF_BH (0xE0000000)
++#define PSB_2D_FLUSH_BH (0xF0000000)
++
++/*
++ * Clip Definition block (PSB_2D_CLIP_BH)
++ */
++#define PSB_2D_CLIPCOUNT_MAX (1)
++#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
++#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
++#define PSB_2D_CLIPCOUNT_SHIFT (0)
++/* clip rectangle min & max */
++#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
++#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
++#define PSB_2D_CLIP_XMAX_SHIFT (12)
++#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
++#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
++#define PSB_2D_CLIP_XMIN_SHIFT (0)
++/* clip rectangle offset */
++#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
++#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
++#define PSB_2D_CLIP_YMAX_SHIFT (12)
++#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
++#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
++#define PSB_2D_CLIP_YMIN_SHIFT (0)
++
++/*
++ * Pattern Control (PSB_2D_PAT_BH)
++ */
++#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
++#define PSB_2D_PAT_HEIGHT_SHIFT (0)
++#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
++#define PSB_2D_PAT_WIDTH_SHIFT (5)
++#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
++#define PSB_2D_PAT_YSTART_SHIFT (10)
++#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
++#define PSB_2D_PAT_XSTART_SHIFT (15)
++
++/*
++ * 2D Control block (PSB_2D_CTRL_BH)
++ */
++/* Present Flags */
++#define PSB_2D_SRCCK_CTRL (0x00000001)
++#define PSB_2D_DSTCK_CTRL (0x00000002)
++#define PSB_2D_ALPHA_CTRL (0x00000004)
++/* Colour Key Colour (SRC/DST)*/
++#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
++#define PSB_2D_CK_COL_CLRMASK (0x00000000)
++#define PSB_2D_CK_COL_SHIFT (0)
++/* Colour Key Mask (SRC/DST)*/
++#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
++#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
++#define PSB_2D_CK_MASK_SHIFT (0)
++/* Alpha Control (Alpha/RGB)*/
++#define PSB_2D_GBLALPHA_MASK (0x000FF000)
++#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
++#define PSB_2D_GBLALPHA_SHIFT (12)
++#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
++#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
++#define PSB_2D_SRCALPHA_OP_SHIFT (20)
++#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
++#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
++#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
++#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
++#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
++#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
++#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
++#define PSB_2D_SRCALPHA_INVERT (0x00800000)
++#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
++#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
++#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
++#define PSB_2D_DSTALPHA_OP_SHIFT (24)
++#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
++#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
++#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
++#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
++#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
++#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
++#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
++#define PSB_2D_DSTALPHA_INVERT (0x08000000)
++#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
++
++#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
++#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
++#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
++#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
++
++/*
++ *Source Offset (PSB_2D_SRC_OFF_BH)
++ */
++#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
++#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
++#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
++#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
++
++/*
++ * Mask Offset (PSB_2D_MASK_OFF_BH)
++ */
++#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
++#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
++#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
++#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
++
++/*
++ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
++ */
++
++/*
++ *Blit Rectangle (PSB_2D_BLIT_BH)
++ */
++
++#define PSB_2D_ROT_MASK (3<<25)
++#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
++#define PSB_2D_ROT_NONE (0<<25)
++#define PSB_2D_ROT_90DEGS (1<<25)
++#define PSB_2D_ROT_180DEGS (2<<25)
++#define PSB_2D_ROT_270DEGS (3<<25)
++
++#define PSB_2D_COPYORDER_MASK (3<<23)
++#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
++#define PSB_2D_COPYORDER_TL2BR (0<<23)
++#define PSB_2D_COPYORDER_BR2TL (1<<23)
++#define PSB_2D_COPYORDER_TR2BL (2<<23)
++#define PSB_2D_COPYORDER_BL2TR (3<<23)
++
++#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
++#define PSB_2D_DSTCK_DISABLE (0x00000000)
++#define PSB_2D_DSTCK_PASS (0x00200000)
++#define PSB_2D_DSTCK_REJECT (0x00400000)
++
++#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
++#define PSB_2D_SRCCK_DISABLE (0x00000000)
++#define PSB_2D_SRCCK_PASS (0x00080000)
++#define PSB_2D_SRCCK_REJECT (0x00100000)
++
++#define PSB_2D_CLIP_ENABLE (0x00040000)
++
++#define PSB_2D_ALPHA_ENABLE (0x00020000)
++
++#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
++#define PSB_2D_PAT_MASK (0x00010000)
++#define PSB_2D_USE_PAT (0x00010000)
++#define PSB_2D_USE_FILL (0x00000000)
++/*
++ * Tungsten Graphics note on rop codes: If rop A and rop B are
++ * identical, the mask surface will not be read and need not be
++ * set up.
++ */
++
++#define PSB_2D_ROP3B_MASK (0x0000FF00)
++#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
++#define PSB_2D_ROP3B_SHIFT (8)
++/* rop code A */
++#define PSB_2D_ROP3A_MASK (0x000000FF)
++#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
++#define PSB_2D_ROP3A_SHIFT (0)
++
++#define PSB_2D_ROP4_MASK (0x0000FFFF)
++/*
++ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
++ * Fill Colour RGBA8888
++ */
++#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
++#define PSB_2D_FILLCOLOUR_SHIFT (0)
++/*
++ * DWORD1: (Always Present)
++ * X Start (Dest)
++ * Y Start (Dest)
++ */
++#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
++#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
++#define PSB_2D_DST_XSTART_SHIFT (12)
++#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
++#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
++#define PSB_2D_DST_YSTART_SHIFT (0)
++/*
++ * DWORD2: (Always Present)
++ * X Size (Dest)
++ * Y Size (Dest)
++ */
++#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
++#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
++#define PSB_2D_DST_XSIZE_SHIFT (12)
++#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
++#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
++#define PSB_2D_DST_YSIZE_SHIFT (0)
++
++/*
++ * Source Surface (PSB_2D_SRC_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
++#define PSB_2D_SRC_1_PAL (0x00000000)
++#define PSB_2D_SRC_2_PAL (0x00008000)
++#define PSB_2D_SRC_4_PAL (0x00010000)
++#define PSB_2D_SRC_8_PAL (0x00018000)
++#define PSB_2D_SRC_8_ALPHA (0x00020000)
++#define PSB_2D_SRC_4_ALPHA (0x00028000)
++#define PSB_2D_SRC_332RGB (0x00030000)
++#define PSB_2D_SRC_4444ARGB (0x00038000)
++#define PSB_2D_SRC_555RGB (0x00040000)
++#define PSB_2D_SRC_1555ARGB (0x00048000)
++#define PSB_2D_SRC_565RGB (0x00050000)
++#define PSB_2D_SRC_0888ARGB (0x00058000)
++#define PSB_2D_SRC_8888ARGB (0x00060000)
++#define PSB_2D_SRC_8888UYVY (0x00068000)
++#define PSB_2D_SRC_RESERVED (0x00070000)
++#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
++
++
++#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_SRC_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_SRC_ADDR_SHIFT (2)
++#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Pattern Surface (PSB_2D_PAT_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
++#define PSB_2D_PAT_1_PAL (0x00000000)
++#define PSB_2D_PAT_2_PAL (0x00008000)
++#define PSB_2D_PAT_4_PAL (0x00010000)
++#define PSB_2D_PAT_8_PAL (0x00018000)
++#define PSB_2D_PAT_8_ALPHA (0x00020000)
++#define PSB_2D_PAT_4_ALPHA (0x00028000)
++#define PSB_2D_PAT_332RGB (0x00030000)
++#define PSB_2D_PAT_4444ARGB (0x00038000)
++#define PSB_2D_PAT_555RGB (0x00040000)
++#define PSB_2D_PAT_1555ARGB (0x00048000)
++#define PSB_2D_PAT_565RGB (0x00050000)
++#define PSB_2D_PAT_0888ARGB (0x00058000)
++#define PSB_2D_PAT_8888ARGB (0x00060000)
++
++#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_PAT_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_PAT_ADDR_SHIFT (2)
++#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Destination Surface (PSB_2D_DST_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_DST_FORMAT_MASK (0x00078000)
++#define PSB_2D_DST_332RGB (0x00030000)
++#define PSB_2D_DST_4444ARGB (0x00038000)
++#define PSB_2D_DST_555RGB (0x00040000)
++#define PSB_2D_DST_1555ARGB (0x00048000)
++#define PSB_2D_DST_565RGB (0x00050000)
++#define PSB_2D_DST_0888ARGB (0x00058000)
++#define PSB_2D_DST_8888ARGB (0x00060000)
++#define PSB_2D_DST_8888AYUV (0x00070000)
++
++#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_DST_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_DST_ADDR_SHIFT (2)
++#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Mask Surface (PSB_2D_MASK_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_MASK_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_MASK_ADDR_SHIFT (2)
++#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Source Palette (PSB_2D_SRC_PAL_BH)
++ */
++
++#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
++#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
++#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
++#define PSB_2D_SRCPAL_BYTEALIGN (1024)
++
++/*
++ * Pattern Palette (PSB_2D_PAT_PAL_BH)
++ */
++
++#define PSB_2D_PATPAL_ADDR_SHIFT (0)
++#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
++#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
++#define PSB_2D_PATPAL_BYTEALIGN (1024)
++
++/*
++ * Rop3 Codes (2 LS bytes)
++ */
++
++#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
++#define PSB_2D_ROP3_PATCOPY (0xF0F0)
++#define PSB_2D_ROP3_WHITENESS (0xFFFF)
++#define PSB_2D_ROP3_BLACKNESS (0x0000)
++#define PSB_2D_ROP3_SRC (0xCC)
++#define PSB_2D_ROP3_PAT (0xF0)
++#define PSB_2D_ROP3_DST (0xAA)
++
++
++/*
++ * Sizes.
++ */
++
++#define PSB_SCENE_HW_COOKIE_SIZE 16
++#define PSB_TA_MEM_HW_COOKIE_SIZE 16
++
++/*
++ * Scene stuff.
++ */
++
++#define PSB_NUM_HW_SCENES 2
++
++/*
++ * Scheduler completion actions.
++ */
++
++#define PSB_RASTER_BLOCK 0
++#define PSB_RASTER 1
++#define PSB_RETURN 2
++#define PSB_TA 3
++
++
++/*Power management*/
++#define PSB_PUNIT_PORT 0x04
++#define PSB_APMBA 0x7a
++#define PSB_APM_CMD 0x0
++#define PSB_APM_STS 0x04
++#define PSB_PWRGT_GFX_MASK 0x3
++#define PSB_PWRGT_VID_ENC_MASK 0x30
++#define PSB_PWRGT_VID_DEC_MASK 0xc
++
++#define PSB_PM_SSC 0x20
++#define PSB_PM_SSS 0x30
++#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_reset.c b/drivers/gpu/drm/mrst/drv/psb_reset.c
+new file mode 100644
+index 0000000..eba85ea
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_reset.c
+@@ -0,0 +1,209 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include <linux/spinlock.h>
++
++
++void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *wt = &dev_priv->watchdog_timer;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ if (dev_priv->timer_available && !timer_pending(wt)) {
++ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
++ add_timer(wt);
++ }
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++}
++
++
++static void psb_watchdog_func(unsigned long data)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
++ int msvdx_lockup;
++ int msvdx_idle;
++ unsigned long irq_flags;
++
++ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
++
++ if (msvdx_lockup) {
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 0;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
++ irq_flags);
++ if (msvdx_lockup)
++ schedule_work(&dev_priv->msvdx_watchdog_wq);
++ }
++ if (!msvdx_idle)
++ psb_schedule_watchdog(dev_priv);
++}
++
++void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_msvdx_cmd_queue *msvdx_cmd;
++ struct list_head *list, *next;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /*Flush the msvdx cmd queue and signal all fences in the queue */
++ list_for_each_safe(list, next, &msvdx_priv->msvdx_queue) {
++ msvdx_cmd =
++ list_entry(list, struct psb_msvdx_cmd_queue, head);
++ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
++ msvdx_cmd->sequence);
++ msvdx_priv->msvdx_current_sequence = msvdx_cmd->sequence;
++ psb_fence_error(dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
++ list_del(list);
++ kfree(msvdx_cmd->cmd);
++ kfree(msvdx_cmd
++ );
++ }
++}
++
++static void psb_msvdx_reset_wq(struct work_struct *work)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ mutex_lock(&msvdx_priv->msvdx_mutex);
++ msvdx_priv->msvdx_needs_reset = 1;
++ msvdx_priv->msvdx_current_sequence++;
++ PSB_DEBUG_GENERAL
++ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
++ msvdx_priv->msvdx_current_sequence);
++
++ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ psb_msvdx_flush_cmd_queue(scheduler->dev);
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++
++ psb_schedule_watchdog(dev_priv);
++ mutex_unlock(&msvdx_priv->msvdx_mutex);
++}
++
++void psb_watchdog_init(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *wt = &dev_priv->watchdog_timer;
++ unsigned long irq_flags;
++
++ spin_lock_init(&dev_priv->watchdog_lock);
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ init_timer(wt);
++ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
++ wt->data = (unsigned long) dev_priv;
++ wt->function = &psb_watchdog_func;
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++}
++
++void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
++{
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 0;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++ (void) del_timer_sync(&dev_priv->watchdog_timer);
++}
++
++static void psb_lid_timer_func(unsigned long data)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
++ struct drm_device *dev = (struct drm_device *)dev_priv->dev;
++ struct timer_list *lid_timer = &dev_priv->lid_timer;
++ unsigned long irq_flags;
++ u32 *lid_state = dev_priv->lid_state;
++ u32 pp_status;
++
++ if (*lid_state == dev_priv->lid_last_state)
++ goto lid_timer_schedule;
++
++ if ((*lid_state) & 0x01) {
++ /*lid state is open*/
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++
++ /*FIXME: should be backlight level before*/
++ psb_intel_lvds_set_brightness(dev, 100);
++ } else {
++ psb_intel_lvds_set_brightness(dev, 0);
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++ }
++ /* printk(KERN_INFO"%s: lid: closed\n", __FUNCTION__); */
++
++ dev_priv->lid_last_state = *lid_state;
++
++lid_timer_schedule:
++ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
++ if (!timer_pending(lid_timer)) {
++ lid_timer->expires = jiffies + PSB_LID_DELAY;
++ add_timer(lid_timer);
++ }
++ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
++}
++
++void psb_lid_timer_init(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *lid_timer = &dev_priv->lid_timer;
++ unsigned long irq_flags;
++
++ spin_lock_init(&dev_priv->lid_lock);
++ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
++
++ init_timer(lid_timer);
++
++ lid_timer->data = (unsigned long)dev_priv;
++ lid_timer->function = psb_lid_timer_func;
++ lid_timer->expires = jiffies + PSB_LID_DELAY;
++
++ add_timer(lid_timer);
++ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
++}
++
++void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
++{
++ del_timer_sync(&dev_priv->lid_timer);
++}
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_schedule.c b/drivers/gpu/drm/mrst/drv/psb_schedule.c
+new file mode 100644
+index 0000000..4e2127c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_schedule.c
+@@ -0,0 +1,70 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "ttm/ttm_execbuf_util.h"
++
++
++static void psb_powerdown_topaz(struct work_struct *work)
++{
++ struct psb_scheduler *scheduler =
++ container_of(work, struct psb_scheduler, topaz_suspend_wq.work);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) scheduler->dev->dev_private;
++
++ if (!dev_priv->topaz_disabled) {
++ if (!mutex_trylock(&scheduler->topaz_power_mutex))
++ return;
++
++ psb_try_power_down_topaz(scheduler->dev);
++ mutex_unlock(&scheduler->topaz_power_mutex);
++ }
++}
++
++static void psb_powerdown_msvdx(struct work_struct *work)
++{
++ struct psb_scheduler *scheduler =
++ container_of(work, struct psb_scheduler, msvdx_suspend_wq.work);
++
++ if (!mutex_trylock(&scheduler->msvdx_power_mutex))
++ return;
++
++ psb_try_power_down_msvdx(scheduler->dev);
++ mutex_unlock(&scheduler->msvdx_power_mutex);
++}
++
++int psb_scheduler_init(struct drm_device *dev,
++ struct psb_scheduler *scheduler)
++{
++ memset(scheduler, 0, sizeof(*scheduler));
++ scheduler->dev = dev;
++ mutex_init(&scheduler->topaz_power_mutex);
++ mutex_init(&scheduler->msvdx_power_mutex);
++
++ INIT_DELAYED_WORK(&scheduler->topaz_suspend_wq,
++ &psb_powerdown_topaz);
++ INIT_DELAYED_WORK(&scheduler->msvdx_suspend_wq,
++ &psb_powerdown_msvdx);
++
++ return 0;
++}
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_schedule.h b/drivers/gpu/drm/mrst/drv/psb_schedule.h
+new file mode 100644
+index 0000000..764eb29
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_schedule.h
+@@ -0,0 +1,81 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ **************************************************************************/
++#ifndef _PSB_SCHEDULE_H_
++#define _PSB_SCHEDULE_H_
++
++#include <drm/drmP.h>
++
++struct psb_context;
++
++enum psb_task_type {
++ psb_flip_task
++};
++
++struct drm_psb_private;
++
++/*struct psb_scheduler_seq {
++ uint32_t sequence;
++ int reported;
++};*/
++
++struct psb_scheduler {
++ struct drm_device *dev;
++ /*struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
++ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
++ struct mutex task_wq_mutex;*/
++ struct mutex topaz_power_mutex;
++ struct mutex msvdx_power_mutex;
++ /*spinlock_t lock;
++ struct list_head hw_scenes;
++ struct list_head ta_queue;
++ struct list_head raster_queue;
++ struct list_head hp_raster_queue;
++ struct list_head task_done_queue;
++ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
++ struct psb_task *feedback_task;
++ int ta_state;
++ struct psb_hw_scene *pending_hw_scene;
++ uint32_t pending_hw_scene_seq;
++ struct delayed_work wq*/;
++ struct delayed_work topaz_suspend_wq;
++ struct delayed_work msvdx_suspend_wq;
++ /*struct psb_scene_pool *pool;
++ uint32_t idle_count;
++ int idle;
++ wait_queue_head_t idle_queue;
++ unsigned long ta_end_jiffies;
++ unsigned long total_ta_jiffies;
++ unsigned long raster_end_jiffies;
++ unsigned long total_raster_jiffies;*/
++};
++
++/*#define PSB_RF_FIRE_TA (1 << 0)
++#define PSB_RF_OOM (1 << 1)
++#define PSB_RF_OOM_REPLY (1 << 2)
++#define PSB_RF_TERMINATE (1 << 3)
++#define PSB_RF_TA_DONE (1 << 4)
++#define PSB_RF_FIRE_RASTER (1 << 5)
++#define PSB_RF_RASTER_DONE (1 << 6)
++#define PSB_RF_DEALLOC (1 << 7)
++*/
++
++extern int psb_scheduler_init(struct drm_device *dev,
++ struct psb_scheduler *scheduler);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_setup.c b/drivers/gpu/drm/mrst/drv/psb_setup.c
+new file mode 100644
+index 0000000..7bf2dcf
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_setup.c
+@@ -0,0 +1,35 @@
++/*
++ * Copyright (c) 2009, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include "psb_intel_drv.h"
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++/* Fixed name */
++#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
++#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
++
++#include "psb_intel_i2c.c"
++#include "psb_intel_sdvo.c"
++#include "psb_intel_modes.c"
++#include "psb_intel_lvds.c"
++#include "psb_intel_dsi.c"
++#include "psb_intel_display.c"
+diff --git a/drivers/gpu/drm/mrst/drv/psb_sgx.c b/drivers/gpu/drm/mrst/drv/psb_sgx.c
+new file mode 100644
+index 0000000..6bc821a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_sgx.c
+@@ -0,0 +1,929 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "psb_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "ttm/ttm_bo_api.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_userobj_api.h"
++#include "ttm/ttm_placement_common.h"
++#include "psb_sgx.h"
++#include "psb_intel_reg.h"
++#include "ospm_power.h"
++
++
++static inline int psb_same_page(unsigned long offset,
++ unsigned long offset2)
++{
++ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
++}
++
++static inline unsigned long psb_offset_end(unsigned long offset,
++ unsigned long end)
++{
++ offset = (offset + PAGE_SIZE) & PAGE_MASK;
++ return (end < offset) ? end : offset;
++}
++
++static void psb_idle_engine(struct drm_device *dev, int engine);
++
++struct psb_dstbuf_cache {
++ unsigned int dst;
++ struct ttm_buffer_object *dst_buf;
++ unsigned long dst_offset;
++ uint32_t *dst_page;
++ unsigned int dst_page_offset;
++ struct ttm_bo_kmap_obj dst_kmap;
++ bool dst_is_iomem;
++};
++
++struct psb_validate_buffer {
++ struct ttm_validate_buffer base;
++ struct psb_validate_req req;
++ int ret;
++ struct psb_validate_arg __user *user_val_arg;
++ uint32_t flags;
++ uint32_t offset;
++ int po_correct;
++};
++
++static int psb_check_presumed(struct psb_validate_req *req,
++ struct ttm_buffer_object *bo,
++ struct psb_validate_arg __user *data,
++ int *presumed_ok)
++{
++ struct psb_validate_req __user *user_req = &(data->d.req);
++
++ *presumed_ok = 0;
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
++ *presumed_ok = 1;
++ return 0;
++ }
++
++ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
++ return 0;
++
++ if (bo->offset == req->presumed_gpu_offset) {
++ *presumed_ok = 1;
++ return 0;
++ }
++
++ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
++ &user_req->presumed_flags);
++}
++
++
++static void psb_unreference_buffers(struct psb_context *context)
++{
++ struct ttm_validate_buffer *entry, *next;
++ struct psb_validate_buffer *vbuf;
++ struct list_head *list = &context->validate_list;
++
++ list_for_each_entry_safe(entry, next, list, head) {
++ vbuf =
++ container_of(entry, struct psb_validate_buffer, base);
++ list_del(&entry->head);
++ ttm_bo_unref(&entry->bo);
++ }
++
++ list = &context->kern_validate_list;
++
++ list_for_each_entry_safe(entry, next, list, head) {
++ vbuf =
++ container_of(entry, struct psb_validate_buffer, base);
++ list_del(&entry->head);
++ ttm_bo_unref(&entry->bo);
++ }
++}
++
++
++static int psb_lookup_validate_buffer(struct drm_file *file_priv,
++ uint64_t data,
++ struct psb_validate_buffer *item)
++{
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++
++ item->user_val_arg =
++ (struct psb_validate_arg __user *) (unsigned long) data;
++
++ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
++ sizeof(item->req)) != 0)) {
++ DRM_ERROR("Lookup copy fault.\n");
++ return -EFAULT;
++ }
++
++ item->base.bo =
++ ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
++
++ if (unlikely(item->base.bo == NULL)) {
++ DRM_ERROR("Bo lookup fault.\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int psb_reference_buffers(struct drm_file *file_priv,
++ uint64_t data,
++ struct psb_context *context)
++{
++ struct psb_validate_buffer *item;
++ int ret;
++
++ while (likely(data != 0)) {
++ if (unlikely(context->used_buffers >=
++ PSB_NUM_VALIDATE_BUFFERS)) {
++ DRM_ERROR("Too many buffers "
++ "on validate list.\n");
++ ret = -EINVAL;
++ goto out_err0;
++ }
++
++ item = &context->buffers[context->used_buffers];
++
++ ret = psb_lookup_validate_buffer(file_priv, data, item);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ item->base.reserved = 0;
++ list_add_tail(&item->base.head, &context->validate_list);
++ context->used_buffers++;
++ data = item->req.next;
++ }
++ return 0;
++
++out_err0:
++ psb_unreference_buffers(context);
++ return ret;
++}
++
++static int
++psb_placement_fence_type(struct ttm_buffer_object *bo,
++ uint64_t set_val_flags,
++ uint64_t clr_val_flags,
++ uint32_t new_fence_class,
++ uint32_t *new_fence_type)
++{
++ int ret;
++ uint32_t n_fence_type;
++ uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
++ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
++ struct ttm_fence_object *old_fence;
++ uint32_t old_fence_type;
++
++ if (unlikely
++ (!(set_val_flags &
++ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
++ DRM_ERROR
++ ("GPU access type (read / write) is not indicated.\n");
++ return -EINVAL;
++ }
++
++ ret = ttm_bo_check_placement(bo, set_flags, clr_flags);
++ if (unlikely(ret != 0))
++ return ret;
++
++ switch (new_fence_class) {
++ default:
++ n_fence_type = _PSB_FENCE_TYPE_EXE;
++ }
++
++ *new_fence_type = n_fence_type;
++ old_fence = (struct ttm_fence_object *) bo->sync_obj;
++ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;
++
++ if (old_fence && ((new_fence_class != old_fence->fence_class) ||
++ ((n_fence_type ^ old_fence_type) &
++ old_fence_type))) {
++ ret = ttm_bo_wait(bo, 0, 1, 0);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ bo->proposed_flags = (bo->proposed_flags | set_flags)
++ & ~clr_flags & TTM_PL_MASK_MEMTYPE;
++
++ return 0;
++}
++
++int psb_validate_kernel_buffer(struct psb_context *context,
++ struct ttm_buffer_object *bo,
++ uint32_t fence_class,
++ uint64_t set_flags, uint64_t clr_flags)
++{
++ struct psb_validate_buffer *item;
++ uint32_t cur_fence_type;
++ int ret;
++
++ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
++ DRM_ERROR("Out of free validation buffer entries for "
++ "kernel buffer validation.\n");
++ return -ENOMEM;
++ }
++
++ item = &context->buffers[context->used_buffers];
++ item->user_val_arg = NULL;
++ item->base.reserved = 0;
++
++ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ mutex_lock(&bo->mutex);
++ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
++ &cur_fence_type);
++ if (unlikely(ret != 0)) {
++ ttm_bo_unreserve(bo);
++ goto out_unlock;
++ }
++
++ item->base.bo = ttm_bo_reference(bo);
++ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
++ item->base.reserved = 1;
++
++ list_add_tail(&item->base.head, &context->kern_validate_list);
++ context->used_buffers++;
++
++ ret = ttm_buffer_object_validate(bo, 1, 0);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ item->offset = bo->offset;
++ item->flags = bo->mem.flags;
++ context->fence_types |= cur_fence_type;
++
++out_unlock:
++ mutex_unlock(&bo->mutex);
++ return ret;
++}
++
++
++static int psb_validate_buffer_list(struct drm_file *file_priv,
++ uint32_t fence_class,
++ struct psb_context *context,
++ int *po_correct)
++{
++ struct psb_validate_buffer *item;
++ struct ttm_buffer_object *bo;
++ int ret;
++ struct psb_validate_req *req;
++ uint32_t fence_types = 0;
++ uint32_t cur_fence_type;
++ struct ttm_validate_buffer *entry;
++ struct list_head *list = &context->validate_list;
++
++ *po_correct = 1;
++
++ list_for_each_entry(entry, list, head) {
++ item =
++ container_of(entry, struct psb_validate_buffer, base);
++ bo = entry->bo;
++ item->ret = 0;
++ req = &item->req;
++
++ mutex_lock(&bo->mutex);
++ ret = psb_placement_fence_type(bo,
++ req->set_flags,
++ req->clear_flags,
++ fence_class,
++ &cur_fence_type);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ ret = ttm_buffer_object_validate(bo, 1, 0);
++
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ fence_types |= cur_fence_type;
++ entry->new_sync_obj_arg = (void *)
++ (unsigned long) cur_fence_type;
++
++ item->offset = bo->offset;
++ item->flags = bo->mem.flags;
++ mutex_unlock(&bo->mutex);
++
++ ret =
++ psb_check_presumed(&item->req, bo, item->user_val_arg,
++ &item->po_correct);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ if (unlikely(!item->po_correct))
++ *po_correct = 0;
++
++ item++;
++ }
++
++ context->fence_types |= fence_types;
++
++ return 0;
++out_err:
++ mutex_unlock(&bo->mutex);
++ item->ret = ret;
++ return ret;
++}
++
++static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
++{
++ if (dst_cache->dst_page) {
++ ttm_bo_kunmap(&dst_cache->dst_kmap);
++ dst_cache->dst_page = NULL;
++ }
++ dst_cache->dst_buf = NULL;
++ dst_cache->dst = ~0;
++}
++
++static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
++ struct psb_validate_buffer *buffers,
++ unsigned int dst,
++ unsigned long dst_offset)
++{
++ int ret;
++
++ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
++
++ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
++ psb_clear_dstbuf_cache(dst_cache);
++ dst_cache->dst = dst;
++ dst_cache->dst_buf = buffers[dst].base.bo;
++ }
++
++ if (unlikely
++ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
++ DRM_ERROR("Relocation destination out of bounds.\n");
++ return -EINVAL;
++ }
++
++ if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
++ NULL == dst_cache->dst_page) {
++ if (NULL != dst_cache->dst_page) {
++ ttm_bo_kunmap(&dst_cache->dst_kmap);
++ dst_cache->dst_page = NULL;
++ }
++
++ ret =
++ ttm_bo_kmap(dst_cache->dst_buf,
++ dst_offset >> PAGE_SHIFT, 1,
++ &dst_cache->dst_kmap);
++ if (ret) {
++ DRM_ERROR("Could not map destination buffer for "
++ "relocation.\n");
++ return ret;
++ }
++
++ dst_cache->dst_page =
++ ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
++ &dst_cache->dst_is_iomem);
++ dst_cache->dst_offset = dst_offset & PAGE_MASK;
++ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
++ }
++ return 0;
++}
++
++static int psb_apply_reloc(struct drm_psb_private *dev_priv,
++ uint32_t fence_class,
++ const struct drm_psb_reloc *reloc,
++ struct psb_validate_buffer *buffers,
++ int num_buffers,
++ struct psb_dstbuf_cache *dst_cache,
++ int no_wait, int interruptible)
++{
++ uint32_t val;
++ uint32_t background;
++ unsigned int index;
++ int ret;
++ unsigned int shift;
++ unsigned int align_shift;
++ struct ttm_buffer_object *reloc_bo;
++
++
++ PSB_DEBUG_GENERAL("Reloc type %d\n"
++ "\t where 0x%04x\n"
++ "\t buffer 0x%04x\n"
++ "\t mask 0x%08x\n"
++ "\t shift 0x%08x\n"
++ "\t pre_add 0x%08x\n"
++ "\t background 0x%08x\n"
++ "\t dst_buffer 0x%08x\n"
++ "\t arg0 0x%08x\n"
++ "\t arg1 0x%08x\n",
++ reloc->reloc_op,
++ reloc->where,
++ reloc->buffer,
++ reloc->mask,
++ reloc->shift,
++ reloc->pre_add,
++ reloc->background,
++ reloc->dst_buffer, reloc->arg0, reloc->arg1);
++
++ if (unlikely(reloc->buffer >= num_buffers)) {
++ DRM_ERROR("Illegal relocation buffer %d.\n",
++ reloc->buffer);
++ return -EINVAL;
++ }
++
++ if (buffers[reloc->buffer].po_correct)
++ return 0;
++
++ if (unlikely(reloc->dst_buffer >= num_buffers)) {
++ DRM_ERROR
++ ("Illegal destination buffer for relocation %d.\n",
++ reloc->dst_buffer);
++ return -EINVAL;
++ }
++
++ ret =
++ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
++ reloc->where << 2);
++ if (ret)
++ return ret;
++
++ reloc_bo = buffers[reloc->buffer].base.bo;
++
++ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
++ DRM_ERROR("Illegal relocation offset add.\n");
++ return -EINVAL;
++ }
++
++ switch (reloc->reloc_op) {
++ case PSB_RELOC_OP_OFFSET:
++ val = reloc_bo->offset + reloc->pre_add;
++ break;
++ default:
++ DRM_ERROR("Unimplemented relocation.\n");
++ return -EINVAL;
++ }
++
++ shift =
++ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
++ align_shift =
++ (reloc->
++ shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
++
++ val = ((val >> align_shift) << shift);
++ index = reloc->where - dst_cache->dst_page_offset;
++
++ background = reloc->background;
++ val = (background & ~reloc->mask) | (val & reloc->mask);
++ dst_cache->dst_page[index] = val;
++
++ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
++ reloc->dst_buffer, index,
++ dst_cache->dst_page[index]);
++
++ return 0;
++}
++
++static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
++ unsigned int num_pages)
++{
++ int ret = 0;
++
++ spin_lock(&dev_priv->reloc_lock);
++ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
++ dev_priv->rel_mapped_pages += num_pages;
++ ret = 1;
++ }
++ spin_unlock(&dev_priv->reloc_lock);
++ return ret;
++}
++
++static int psb_fixup_relocs(struct drm_file *file_priv,
++ uint32_t fence_class,
++ unsigned int num_relocs,
++ unsigned int reloc_offset,
++ uint32_t reloc_handle,
++ struct psb_context *context,
++ int no_wait, int interruptible)
++{
++ struct drm_device *dev = file_priv->minor->dev;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_buffer_object *reloc_buffer = NULL;
++ unsigned int reloc_num_pages;
++ unsigned int reloc_first_page;
++ unsigned int reloc_last_page;
++ struct psb_dstbuf_cache dst_cache;
++ struct drm_psb_reloc *reloc;
++ struct ttm_bo_kmap_obj reloc_kmap;
++ bool reloc_is_iomem;
++ int count;
++ int ret = 0;
++ int registered = 0;
++ uint32_t num_buffers = context->used_buffers;
++
++ if (num_relocs == 0)
++ return 0;
++
++ memset(&dst_cache, 0, sizeof(dst_cache));
++ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
++
++ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
++ if (!reloc_buffer)
++ goto out;
++
++ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
++ DRM_ERROR("Relocation buffer was not on validate list.\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ reloc_first_page = reloc_offset >> PAGE_SHIFT;
++ reloc_last_page =
++ (reloc_offset +
++ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
++ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
++ reloc_offset &= ~PAGE_MASK;
++
++ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
++ DRM_ERROR("Relocation buffer is too large\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
++ (registered =
++ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
++
++ if (ret == -EINTR) {
++ ret = -ERESTART;
++ goto out;
++ }
++ if (ret) {
++ DRM_ERROR("Error waiting for space to map "
++ "relocation buffer.\n");
++ goto out;
++ }
++
++ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
++ reloc_num_pages, &reloc_kmap);
++
++ if (ret) {
++ DRM_ERROR("Could not map relocation buffer.\n"
++ "\tReloc buffer id 0x%08x.\n"
++ "\tReloc first page %d.\n"
++ "\tReloc num pages %d.\n",
++ reloc_handle, reloc_first_page, reloc_num_pages);
++ goto out;
++ }
++
++ reloc = (struct drm_psb_reloc *)
++ ((unsigned long)
++ ttm_kmap_obj_virtual(&reloc_kmap,
++ &reloc_is_iomem) + reloc_offset);
++
++ for (count = 0; count < num_relocs; ++count) {
++ ret = psb_apply_reloc(dev_priv, fence_class,
++ reloc, context->buffers,
++ num_buffers, &dst_cache,
++ no_wait, interruptible);
++ if (ret)
++ goto out1;
++ reloc++;
++ }
++
++out1:
++ ttm_bo_kunmap(&reloc_kmap);
++out:
++ if (registered) {
++ spin_lock(&dev_priv->reloc_lock);
++ dev_priv->rel_mapped_pages -= reloc_num_pages;
++ spin_unlock(&dev_priv->reloc_lock);
++ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
++ }
++
++ psb_clear_dstbuf_cache(&dst_cache);
++ if (reloc_buffer)
++ ttm_bo_unref(&reloc_buffer);
++ return ret;
++}
++
++void psb_fence_or_sync(struct drm_file *file_priv,
++ uint32_t engine,
++ uint32_t fence_types,
++ uint32_t fence_flags,
++ struct list_head *list,
++ struct psb_ttm_fence_rep *fence_arg,
++ struct ttm_fence_object **fence_p)
++{
++ struct drm_device *dev = file_priv->minor->dev;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ int ret;
++ struct ttm_fence_object *fence;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ uint32_t handle;
++
++ ret = ttm_fence_user_create(fdev, tfile,
++ engine, fence_types,
++ TTM_FENCE_FLAG_EMIT, &fence, &handle);
++ if (ret) {
++
++ /*
++ * Fence creation failed.
++ * Fall back to synchronous operation and idle the engine.
++ */
++
++ psb_idle_engine(dev, engine);
++ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
++
++ /*
++ * Communicate to user-space that
++ * fence creation has failed and that
++ * the engine is idle.
++ */
++
++ fence_arg->handle = ~0;
++ fence_arg->error = ret;
++ }
++
++ ttm_eu_backoff_reservation(list);
++ if (fence_p)
++ *fence_p = NULL;
++ return;
++ }
++
++ ttm_eu_fence_buffer_objects(list, fence);
++ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
++ struct ttm_fence_info info = ttm_fence_get_info(fence);
++ fence_arg->handle = handle;
++ fence_arg->fence_class = ttm_fence_class(fence);
++ fence_arg->fence_type = ttm_fence_types(fence);
++ fence_arg->signaled_types = info.signaled_types;
++ fence_arg->error = 0;
++ } else {
++ ret =
++ ttm_ref_object_base_unref(tfile, handle,
++ ttm_fence_type);
++ BUG_ON(ret);
++ }
++
++ if (fence_p)
++ *fence_p = fence;
++ else if (fence)
++ ttm_fence_object_unref(&fence);
++}
++
++
++#if 0
++static int psb_dump_page(struct ttm_buffer_object *bo,
++ unsigned int page_offset, unsigned int num)
++{
++ struct ttm_bo_kmap_obj kmobj;
++ int is_iomem;
++ uint32_t *p;
++ int ret;
++ unsigned int i;
++
++ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
++ if (ret)
++ return ret;
++
++ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
++ for (i = 0; i < num; ++i)
++ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
++
++ ttm_bo_kunmap(&kmobj);
++ return 0;
++}
++#endif
++
++static void psb_idle_engine(struct drm_device *dev, int engine)
++{
++ /* FIXME: add video engine support */
++ return;
++}
++
++static int psb_handle_copyback(struct drm_device *dev,
++ struct psb_context *context,
++ int ret)
++{
++ int err = ret;
++ struct ttm_validate_buffer *entry;
++ struct psb_validate_arg arg;
++ struct list_head *list = &context->validate_list;
++
++ if (ret) {
++ ttm_eu_backoff_reservation(list);
++ ttm_eu_backoff_reservation(&context->kern_validate_list);
++ }
++
++
++ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
++ list_for_each_entry(entry, list, head) {
++ struct psb_validate_buffer *vbuf =
++ container_of(entry, struct psb_validate_buffer,
++ base);
++ arg.handled = 1;
++ arg.ret = vbuf->ret;
++ if (!arg.ret) {
++ struct ttm_buffer_object *bo = entry->bo;
++ mutex_lock(&bo->mutex);
++ arg.d.rep.gpu_offset = bo->offset;
++ arg.d.rep.placement = bo->mem.flags;
++ arg.d.rep.fence_type_mask =
++ (uint32_t) (unsigned long)
++ entry->new_sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ }
++
++ if (__copy_to_user(vbuf->user_val_arg,
++ &arg, sizeof(arg)))
++ err = -EFAULT;
++
++ if (arg.ret)
++ break;
++ }
++ }
++
++ return err;
++}
++
++int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_cmdbuf_arg *arg = data;
++ int ret = 0;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct ttm_buffer_object *cmd_buffer = NULL;
++ struct psb_ttm_fence_rep fence_arg;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
++ int engine;
++ int po_correct;
++ struct psb_context *context;
++ unsigned num_buffers;
++
++ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
++
++ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (arg->engine == PSB_ENGINE_VIDEO) {
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_DEC_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return -EBUSY;
++ } else if (arg->engine == LNC_ENGINE_ENCODE) {
++ if (dev_priv->topaz_disabled)
++ return -ENODEV;
++
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_ENC_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return -EBUSY;
++ }
++
++
++ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++
++ context = &dev_priv->context;
++ context->used_buffers = 0;
++ context->fence_types = 0;
++ BUG_ON(!list_empty(&context->validate_list));
++ BUG_ON(!list_empty(&context->kern_validate_list));
++
++ if (unlikely(context->buffers == NULL)) {
++ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
++ sizeof(*context->buffers));
++ if (unlikely(context->buffers == NULL)) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++ }
++
++ ret = psb_reference_buffers(file_priv,
++ arg->buffer_list,
++ context);
++
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
++
++ ret = ttm_eu_reserve_buffers(&context->validate_list,
++ context->val_seq);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ engine = arg->engine;
++ ret = psb_validate_buffer_list(file_priv, engine,
++ context, &po_correct);
++ if (unlikely(ret != 0))
++ goto out_err3;
++
++ if (!po_correct) {
++ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
++ arg->reloc_offset,
++ arg->reloc_handle, context, 0, 1);
++ if (unlikely(ret != 0))
++ goto out_err3;
++
++ }
++
++ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
++ if (unlikely(cmd_buffer == NULL)) {
++ ret = -EINVAL;
++ goto out_err4;
++ }
++
++ switch (arg->engine) {
++ case PSB_ENGINE_VIDEO:
++ if (arg->cmdbuf_size == (16 + 32)) {
++ /* Identify deblock msg cmdbuf */
++ /* according to cmdbuf_size */
++ struct ttm_bo_kmap_obj cmd_kmap;
++ struct ttm_buffer_object *deblock;
++ uint32_t *cmd;
++ bool is_iomem;
++
++ /* write regIO BO's address after deblock msg */
++ ret = ttm_bo_kmap(cmd_buffer, 0, 1, &cmd_kmap);
++ if (unlikely(ret != 0))
++ goto out_err4;
++ cmd = (uint32_t *)(ttm_kmap_obj_virtual(&cmd_kmap,
++ &is_iomem) + 16);
++ deblock = ttm_buffer_object_lookup(tfile,
++ (uint32_t)(*cmd));
++ *cmd = (uint32_t)deblock;
++ ttm_bo_kunmap(&cmd_kmap);
++ }
++
++ ret = psb_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++ case LNC_ENGINE_ENCODE:
++ ret = lnc_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++
++
++ default:
++ DRM_ERROR
++ ("Unimplemented command submission mechanism (%x).\n",
++ arg->engine);
++ ret = -EINVAL;
++ goto out_err4;
++ }
++
++ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
++ ret = copy_to_user((void __user *)
++ ((unsigned long) arg->fence_arg),
++ &fence_arg, sizeof(fence_arg));
++ }
++
++out_err4:
++ if (cmd_buffer)
++ ttm_bo_unref(&cmd_buffer);
++out_err3:
++ ret = psb_handle_copyback(dev, context, ret);
++out_err2:
++ psb_unreference_buffers(context);
++out_err1:
++ mutex_unlock(&dev_priv->cmdbuf_mutex);
++out_err0:
++ ttm_read_unlock(&dev_priv->ttm_lock);
++
++ if (arg->engine == PSB_ENGINE_VIDEO)
++ ospm_power_using_hw_end(OSPM_VIDEO_DEC_ISLAND);
++
++ if (arg->engine == LNC_ENGINE_ENCODE)
++ ospm_power_using_hw_end(OSPM_VIDEO_ENC_ISLAND);
++
++ return ret;
++}
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_sgx.h b/drivers/gpu/drm/mrst/drv/psb_sgx.h
+new file mode 100644
+index 0000000..2934e5d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_sgx.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ **/
++#ifndef _PSB_SGX_H_
++#define _PSB_SGX_H_
++
++extern int psb_submit_video_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ struct ttm_fence_object *fence);
++
++extern int drm_idle_check_interval;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_socket.c b/drivers/gpu/drm/mrst/drv/psb_socket.c
+new file mode 100644
+index 0000000..8bb12cf
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_socket.c
+@@ -0,0 +1,376 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2004 Novell, Inc. All rights reserved.
++ * Copyright (C) 2004 IBM, Inc. All rights reserved.
++ * Copyright (C) 2009 Intel Corporation. All rights reserved.
++ *
++ * Licensed under the GNU GPL v2.
++ *
++ * Authors:
++ * Robert Love <rml@novell.com>
++ * Kay Sievers <kay.sievers@vrfy.org>
++ * Arjan van de Ven <arjanv@redhat.com>
++ * Greg Kroah-Hartman <greg@kroah.com>
++ *
++ * Notes:
++ * Adapted from existing kobj event socket code to enable
++ * multicast usermode communication for gfx driver to multiple
++ * usermode threads via different socket broadcast groups.
++ * Original kobject uevent code does not allow for different
++ * broadcast groups. Due to the frequency of usermode events
++ * generated by some gfx subsystems it is necessary to open
++ * a new dedicated socket with multicast group support. In
++ * the future it is hoped that this code can be removed
++ * and either a new netlink protocol type added for graphics
++ * or conversely to simply enable group routing to be leveraged
++ * on the existing kobject uevent infrastructure.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/kobject.h>
++#include <linux/module.h>
++#include <linux/socket.h>
++#include <linux/skbuff.h>
++#include <linux/netlink.h>
++#include <net/sock.h>
++#include "psb_umevents.h"
++
++#define NETLINK_PSB_KOBJECT_UEVENT 31
++
++u64 psb_uevent_seqnum;
++char psb_uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
++static DEFINE_SPINLOCK(sequence_lock);
++#if defined(CONFIG_NET)
++static struct sock *uevent_sock;
++#endif
++
++/* the strings here must match the enum in include/linux/kobject.h */
++static const char *psb_kobject_actions[] = {
++ [KOBJ_ADD] = "add",
++ [KOBJ_REMOVE] = "remove",
++ [KOBJ_CHANGE] = "change",
++ [KOBJ_MOVE] = "move",
++ [KOBJ_ONLINE] = "online",
++ [KOBJ_OFFLINE] = "offline",
++};
++
++/**
++ * psb_kobject_action_type - translate action string to numeric type
++ *
++ * @buf: buffer containing the action string, newline is ignored
++ * @count: length of buffer
++ * @type: pointer to the location to store the action type
++ *
++ * Returns 0 if the action string was recognized.
++ */
++int psb_kobject_action_type(const char *buf, size_t count,
++ enum kobject_action *type)
++{
++ enum kobject_action action;
++ int ret = -EINVAL;
++
++ if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
++ count--;
++
++ if (!count)
++ goto out;
++
++ for (action = 0; action < ARRAY_SIZE(psb_kobject_actions); action++) {
++ if (strncmp(psb_kobject_actions[action], buf, count) != 0)
++ continue;
++ if (psb_kobject_actions[action][count] != '\0')
++ continue;
++ *type = action;
++ ret = 0;
++ break;
++ }
++out:
++ return ret;
++}
++
++/**
++ * psb_kobject_uevent_env - send an uevent with environmental data
++ *
++ * @action: action that is happening
++ * @kobj: struct kobject that the action is happening to
++ * @envp_ext: pointer to environmental data
++ *
++ * Returns 0 if psb_kobject_uevent_env() is completed with success or the
++ * corresponding error when it fails.
++ */
++int psb_kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
++ char *envp_ext[], int dst_group_id)
++{
++ struct kobj_uevent_env *env;
++ const char *action_string = psb_kobject_actions[action];
++ const char *devpath = NULL;
++ const char *subsystem;
++ struct kobject *top_kobj;
++ struct kset *kset;
++ struct kset_uevent_ops *uevent_ops;
++ u64 seq;
++ int i = 0;
++ int retval = 0;
++
++ pr_debug("kobject: '%s' (%p): %s\n",
++ kobject_name(kobj), kobj, __func__);
++
++ /* search the kset we belong to */
++ top_kobj = kobj;
++ while (!top_kobj->kset && top_kobj->parent)
++ top_kobj = top_kobj->parent;
++
++ if (!top_kobj->kset) {
++ pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
++ "without kset!\n", kobject_name(kobj), kobj,
++ __func__);
++ return -EINVAL;
++ }
++
++ kset = top_kobj->kset;
++ uevent_ops = kset->uevent_ops;
++
++ /* skip the event, if uevent_suppress is set*/
++ if (kobj->uevent_suppress) {
++ pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
++ "caused the event to drop!\n",
++ kobject_name(kobj), kobj, __func__);
++ return 0;
++ }
++ /* skip the event, if the filter returns zero. */
++ if (uevent_ops && uevent_ops->filter)
++ if (!uevent_ops->filter(kset, kobj)) {
++ pr_debug("kobject: '%s' (%p): %s: filter function "
++ "caused the event to drop!\n",
++ kobject_name(kobj), kobj, __func__);
++ return 0;
++ }
++
++ /* originating subsystem */
++ if (uevent_ops && uevent_ops->name)
++ subsystem = uevent_ops->name(kset, kobj);
++ else
++ subsystem = kobject_name(&kset->kobj);
++ if (!subsystem) {
++ pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
++ "event to drop!\n", kobject_name(kobj), kobj,
++ __func__);
++ return 0;
++ }
++
++ /* environment buffer */
++ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
++ if (!env)
++ return -ENOMEM;
++
++ /* complete object path */
++ devpath = kobject_get_path(kobj, GFP_KERNEL);
++ if (!devpath) {
++ retval = -ENOENT;
++ goto exit;
++ }
++
++ /* default keys */
++ retval = add_uevent_var(env, "ACTION=%s", action_string);
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env, "DEVPATH=%s", devpath);
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
++ if (retval)
++ goto exit;
++
++ /* keys passed in from the caller */
++ if (envp_ext) {
++ for (i = 0; envp_ext[i]; i++) {
++ retval = add_uevent_var(env, "%s", envp_ext[i]);
++ if (retval)
++ goto exit;
++ }
++ }
++
++ /* let the kset specific function add its stuff */
++ if (uevent_ops && uevent_ops->uevent) {
++ retval = uevent_ops->uevent(kset, kobj, env);
++ if (retval) {
++ pr_debug("kobject: '%s' (%p): %s: uevent() returned "
++ "%d\n", kobject_name(kobj), kobj,
++ __func__, retval);
++ goto exit;
++ }
++ }
++
++ /*
++ * Mark "add" and "remove" events in the object to ensure proper
++ * events to userspace during automatic cleanup. If the object did
++ * send an "add" event, "remove" will be automatically generated by
++ * the core, if not already done by the caller.
++ */
++ if (action == KOBJ_ADD)
++ kobj->state_add_uevent_sent = 1;
++ else if (action == KOBJ_REMOVE)
++ kobj->state_remove_uevent_sent = 1;
++
++ /* we will send an event, so request a new sequence number */
++ spin_lock(&sequence_lock);
++ seq = ++psb_uevent_seqnum;
++ spin_unlock(&sequence_lock);
++ retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
++ if (retval)
++ goto exit;
++
++#if defined(CONFIG_NET)
++ /* send netlink message */
++ if (uevent_sock) {
++ struct sk_buff *skb;
++ size_t len;
++
++ /* allocate message with the maximum possible size */
++ len = strlen(action_string) + strlen(devpath) + 2;
++ skb = alloc_skb(len + env->buflen, GFP_KERNEL);
++ if (skb) {
++ char *scratch;
++
++ /* add header */
++ scratch = skb_put(skb, len);
++ sprintf(scratch, "%s@%s", action_string, devpath);
++
++ /* copy keys to our continuous event payload buffer */
++ for (i = 0; i < env->envp_idx; i++) {
++ len = strlen(env->envp[i]) + 1;
++ scratch = skb_put(skb, len);
++ strcpy(scratch, env->envp[i]);
++ }
++
++ NETLINK_CB(skb).dst_group = dst_group_id;
++ retval = netlink_broadcast(uevent_sock, skb, 0,
++ dst_group_id,
++ GFP_KERNEL);
++
++ /* ENOBUFS should be handled in userspace */
++ if (retval == -ENOBUFS)
++ retval = 0;
++ } else
++ retval = -ENOMEM;
++ }
++#endif
++
++ /* call psb_uevent_helper, usually only enabled during early boot */
++ if (psb_uevent_helper[0]) {
++ char *argv[3];
++
++ argv[0] = psb_uevent_helper;
++ argv[1] = (char *)subsystem;
++ argv[2] = NULL;
++ retval = add_uevent_var(env, "HOME=/");
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env,
++ "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
++ if (retval)
++ goto exit;
++
++ retval = call_usermodehelper(argv[0], argv,
++ env->envp, UMH_WAIT_EXEC);
++ }
++
++exit:
++ kfree(devpath);
++ kfree(env);
++ return retval;
++}
++EXPORT_SYMBOL_GPL(psb_kobject_uevent_env);
++
++/**
++ * psb_kobject_uevent - notify userspace by sending an uevent
++ *
++ * @action: action that is happening
++ * @kobj: struct kobject that the action is happening to
++ *
++ * Returns 0 if psb_kobject_uevent() is completed with success or the
++ * corresponding error when it fails.
++ */
++int psb_kobject_uevent(struct kobject *kobj, enum kobject_action action,
++ int dst_group_id)
++{
++ return psb_kobject_uevent_env(kobj, action, NULL, dst_group_id);
++}
++EXPORT_SYMBOL_GPL(psb_kobject_uevent);
++
++/**
++ * psb_add_uevent_var - add key value string to the environment buffer
++ * @env: environment buffer structure
++ * @format: printf format for the key=value pair
++ *
++ * Returns 0 if environment variable was added successfully or -ENOMEM
++ * if no space was available.
++ */
++int psb_add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
++{
++ va_list args;
++ int len;
++
++ if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
++ WARN(1, KERN_ERR "psb_add_uevent_var: too many keys\n");
++ return -ENOMEM;
++ }
++
++ va_start(args, format);
++ len = vsnprintf(&env->buf[env->buflen],
++ sizeof(env->buf) - env->buflen,
++ format, args);
++ va_end(args);
++
++ if (len >= (sizeof(env->buf) - env->buflen)) {
++ WARN(1,
++ KERN_ERR "psb_add_uevent_var: buffer size too small\n");
++ return -ENOMEM;
++ }
++
++ env->envp[env->envp_idx++] = &env->buf[env->buflen];
++ env->buflen += len + 1;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(psb_add_uevent_var);
++
++#if defined(CONFIG_NET)
++static int __init psb_kobject_uevent_init(void)
++{
++ /* This should be the 15, but 3 seems to work better. Why? WHY!? */
++ /* uevent_sock = netlink_kernel_create(&init_net,
++ NETLINK_PSB_KOBJECT_UEVENT,
++ DRM_GFX_SOCKET_GROUPS,
++ NULL, NULL, THIS_MODULE); */
++ uevent_sock = netlink_kernel_create(&init_net,
++ NETLINK_PSB_KOBJECT_UEVENT,
++ 0x1, /* 3 is for hotplug & dpst */
++ NULL, NULL, THIS_MODULE);
++
++ if (!uevent_sock) {
++ printk(KERN_ERR "psb_kobject_uevent: failed create socket!\n");
++ return -ENODEV;
++ }
++ netlink_set_nonroot(NETLINK_PSB_KOBJECT_UEVENT, NL_NONROOT_RECV);
++
++ return 0;
++}
++
++postcore_initcall(psb_kobject_uevent_init);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_ttm_glue.c b/drivers/gpu/drm/mrst/drv/psb_ttm_glue.c
+new file mode 100644
+index 0000000..ad0e6ee
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_ttm_glue.c
+@@ -0,0 +1,344 @@
++/**************************************************************************
++ * Copyright (c) 2008, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "ttm/ttm_userobj_api.h"
++#include <linux/io.h>
++
++/*IMG Headers*/
++#include "private_data.h"
++
++extern int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma);
++
++static struct vm_operations_struct psb_ttm_vm_ops;
++
++/**
++ * NOTE: driver_private of drm_file is now a PVRSRV_FILE_PRIVATE_DATA struct
++ * pPriv in PVRSRV_FILE_PRIVATE_DATA contains the original psb_fpriv;
++ */
++int psb_open(struct inode *inode, struct file *filp)
++{
++ struct drm_file *file_priv;
++ struct drm_psb_private *dev_priv;
++ struct psb_fpriv *psb_fp;
++ PVRSRV_FILE_PRIVATE_DATA *pvr_file_priv;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ ret = drm_open(inode, filp);
++ if (unlikely(ret))
++ return ret;
++
++ psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL);
++
++ if (unlikely(psb_fp == NULL))
++ goto out_err0;
++
++ file_priv = (struct drm_file *) filp->private_data;
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ DRM_DEBUG("is_master %d\n", file_priv->is_master ? 1 : 0);
++
++ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
++ PSB_FILE_OBJECT_HASH_ORDER);
++ if (unlikely(psb_fp->tfile == NULL))
++ goto out_err1;
++
++ pvr_file_priv = (PVRSRV_FILE_PRIVATE_DATA *)file_priv->driver_priv;
++ if (!pvr_file_priv) {
++ DRM_ERROR("drm file private is NULL\n");
++ goto out_err1;
++ }
++
++ pvr_file_priv->pPriv = psb_fp;
++
++ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
++ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
++
++ return 0;
++
++out_err1:
++ kfree(psb_fp);
++out_err0:
++ (void) drm_release(inode, filp);
++ return ret;
++}
++
++int psb_release(struct inode *inode, struct file *filp)
++{
++ struct drm_file *file_priv;
++ struct psb_fpriv *psb_fp;
++ struct drm_psb_private *dev_priv;
++ int ret;
++ uint32_t ui32_reg_value = 0;
++
++ file_priv = (struct drm_file *) filp->private_data;
++ psb_fp = psb_fpriv(file_priv);
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ ttm_object_file_release(&psb_fp->tfile);
++ kfree(psb_fp);
++
++ if (IS_MRST(dev_priv->dev))
++ {
++ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 10);
++ /* FIXME: workaround for HSD3469585
++ * re-enable DRAM Self Refresh Mode
++ * by setting DUNIT.DPMC0
++ */
++ ui32_reg_value = MSG_READ32(0x1, 0x4);
++ MSG_WRITE32(0x1, 0x4, (ui32_reg_value | (0x1 << 7)));
++ }
++
++ if (IS_MRST(dev_priv->dev))
++ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 10);
++
++ ret = drm_release(inode, filp);
++
++ return ret;
++}
++
++int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++
++ return ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
++ &psb_priv(dev)->ttm_lock, data);
++
++}
++
++int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
++
++}
++
++int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
++
++}
++
++int psb_pl_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++
++ return ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
++ &dev_priv->bdev, &dev_priv->ttm_lock, data);
++
++}
++
++/**
++ * psb_ttm_fault - Wrapper around the ttm fault method.
++ *
++ * @vma: The struct vm_area_struct as in the vm fault() method.
++ * @vmf: The struct vm_fault as in the vm fault() method.
++ *
++ * Since ttm_fault() will reserve buffers while faulting,
++ * we need to take the ttm read lock around it, as this driver
++ * relies on the ttm_lock in write mode to exclude all threads from
++ * reserving and thus validating buffers in aperture- and memory shortage
++ * situations.
++ */
++
++static int psb_ttm_fault(struct vm_area_struct *vma,
++ struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct drm_psb_private *dev_priv =
++ container_of(bo->bdev, struct drm_psb_private, bdev);
++ int ret;
++
++ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
++ if (unlikely(ret != 0))
++ return VM_FAULT_NOPAGE;
++
++ ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
++
++ ttm_read_unlock(&dev_priv->ttm_lock);
++ return ret;
++}
++
++/**
++ * if vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET call directly to
++ * PVRMMap
++ */
++int psb_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ struct drm_file *file_priv;
++ struct drm_psb_private *dev_priv;
++ int ret;
++
++ if (vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET ||
++ vma->vm_pgoff > 2 * DRM_PSB_FILE_PAGE_OFFSET)
++ return PVRMMap(filp, vma);
++
++ file_priv = (struct drm_file *) filp->private_data;
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
++ dev_priv->ttm_vm_ops = (struct vm_operations_struct *)vma->vm_ops;
++ psb_ttm_vm_ops = *vma->vm_ops;
++ psb_ttm_vm_ops.fault = &psb_ttm_fault;
++ }
++
++ vma->vm_ops = &psb_ttm_vm_ops;
++
++ return 0;
++}
++
++ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
++
++ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
++}
++
++ssize_t psb_ttm_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
++
++ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
++}
++
++int psb_verify_access(struct ttm_buffer_object *bo,
++ struct file *filp)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++
++ if (capable(CAP_SYS_ADMIN))
++ return 0;
++
++ if (unlikely(!file_priv->authenticated))
++ return -EPERM;
++
++ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
++}
++
++static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
++{
++ return ttm_mem_global_init(ref->object);
++}
++
++static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
++{
++ ttm_mem_global_release(ref->object);
++}
++
++int psb_ttm_global_init(struct drm_psb_private *dev_priv)
++{
++ struct drm_global_reference *global_ref;
++ int ret;
++
++ global_ref = &dev_priv->mem_global_ref;
++ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
++ global_ref->size = sizeof(struct ttm_mem_global);
++ global_ref->init = &psb_ttm_mem_global_init;
++ global_ref->release = &psb_ttm_mem_global_release;
++
++ ret = drm_global_item_ref(global_ref);
++ if (unlikely(ret != 0)) {
++ DRM_ERROR("Failed referencing a global TTM memory object.\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++void psb_ttm_global_release(struct drm_psb_private *dev_priv)
++{
++ drm_global_item_unref(&dev_priv->mem_global_ref);
++}
++
++int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_getpageaddrs_arg *arg = data;
++ struct ttm_buffer_object *bo;
++ struct ttm_tt *ttm;
++ struct page **tt_pages;
++ unsigned long i, num_pages;
++ unsigned long *p = arg->page_addrs;
++ int ret = 0;
++
++ bo = ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
++ arg->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for getpageaddrs.\n");
++ return -EINVAL;
++ }
++
++ arg->gtt_offset = bo->offset;
++ ttm = bo->ttm;
++ num_pages = ttm->num_pages;
++ tt_pages = ttm->pages;
++
++ for (i = 0; i < num_pages; i++)
++ p[i] = (unsigned long)page_to_phys(tt_pages[i]);
++
++ return ret;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_umevents.c b/drivers/gpu/drm/mrst/drv/psb_umevents.c
+new file mode 100644
+index 0000000..d9bf3c1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_umevents.c
+@@ -0,0 +1,485 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * define sysfs operations supported by umevent objects.
++ *
++ */
++static struct sysfs_ops umevent_obj_sysfs_ops = {
++ .show = psb_umevent_attr_show,
++ .store = psb_umevent_attr_store,
++};
++/**
++ * define the data attributes we will expose through sysfs.
++ *
++ */
++static struct umevent_attribute data_0 =
++ __ATTR(data_0_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_1 =
++ __ATTR(data_1_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_2 =
++ __ATTR(data_2_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_3 =
++ __ATTR(data_3_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_4 =
++ __ATTR(data_4_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_5 =
++ __ATTR(data_5_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_6 =
++ __ATTR(data_6_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_7 =
++ __ATTR(data_7_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++/**
++ * define the structure used to seed our ktype.
++ *
++ */
++static struct attribute *umevent_obj_default_attrs[] = {
++ &data_0.attr,
++ &data_1.attr,
++ &data_2.attr,
++ &data_3.attr,
++ &data_4.attr,
++ &data_5.attr,
++ &data_6.attr,
++ &data_7.attr,
++ NULL, /* need to NULL terminate the list of attributes */
++};
++/**
++ * specify the ktype for our kobjects.
++ *
++ */
++static struct kobj_type umevent_obj_ktype = {
++ .sysfs_ops = &umevent_obj_sysfs_ops,
++ .release = psb_umevent_obj_release,
++ .default_attrs = umevent_obj_default_attrs,
++};
++/**
++ * psb_umevent_attr_show - default kobject show function
++ *
++ * @kobj: kobject associated with the show operation
++ * @attr: attribute being requested
++ * @buf: pointer to the return buffer
++ *
++ */
++ssize_t psb_umevent_attr_show(struct kobject *kobj,
++ struct attribute *attr,
++ char *buf)
++{
++ struct umevent_attribute *attribute;
++ struct umevent_obj *any_umevent_obj;
++ attribute = to_umevent_attr(attr);
++ any_umevent_obj = to_umevent_obj(kobj);
++ if (!attribute->show)
++ return -EIO;
++
++ return attribute->show(any_umevent_obj, attribute, buf);
++}
++/**
++ * psb_umevent_attr_store - default kobject store function
++ *
++ * @kobj: kobject associated with the store operation
++ * @attr: attribute being requested
++ * @buf: input data to write to attribute
++ * @len: character count
++ *
++ */
++ssize_t psb_umevent_attr_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buf, size_t len)
++{
++ struct umevent_attribute *attribute;
++ struct umevent_obj *any_umevent_obj;
++ attribute = to_umevent_attr(attr);
++ any_umevent_obj = to_umevent_obj(kobj);
++ if (!attribute->store)
++ return -EIO;
++
++ return attribute->store(any_umevent_obj, attribute, buf, len);
++}
++/**
++ * psb_umevent_obj_release - kobject release function
++ *
++ * @kobj: kobject to be released.
++ */
++void psb_umevent_obj_release(struct kobject *kobj)
++{
++ struct umevent_obj *any_umevent_obj;
++ any_umevent_obj = to_umevent_obj(kobj);
++ kfree(any_umevent_obj);
++}
++/**
++ * psb_umevent_attr_show_imp - attribute show implementation
++ *
++ * @any_umevent_obj: kobject managed data to read from
++ * @attr: attribute being requested
++ * @buf: pointer to the return buffer
++ *
++ */
++ssize_t psb_umevent_attr_show_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ char *buf)
++{
++ int var;
++
++ if (strcmp(attr->attr.name, "data_0_val") == 0)
++ var = any_umevent_obj->data_0_val;
++ else if (strcmp(attr->attr.name, "data_1_val") == 0)
++ var = any_umevent_obj->data_1_val;
++ else if (strcmp(attr->attr.name, "data_2_val") == 0)
++ var = any_umevent_obj->data_2_val;
++ else if (strcmp(attr->attr.name, "data_3_val") == 0)
++ var = any_umevent_obj->data_3_val;
++ else if (strcmp(attr->attr.name, "data_4_val") == 0)
++ var = any_umevent_obj->data_4_val;
++ else if (strcmp(attr->attr.name, "data_5_val") == 0)
++ var = any_umevent_obj->data_5_val;
++ else if (strcmp(attr->attr.name, "data_6_val") == 0)
++ var = any_umevent_obj->data_6_val;
++ else
++ var = any_umevent_obj->data_7_val;
++
++ return sprintf(buf, "%d\n", var);
++}
++/**
++ * psb_umevent_attr_store_imp - attribute store implementation
++ *
++ * @any_umevent_obj: kobject managed data to write to
++ * @attr: attribute being requested
++ * @buf: input data to write to attribute
++ * @count: character count
++ *
++ */
++ssize_t psb_umevent_attr_store_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count)
++{
++ int var;
++
++ sscanf(buf, "%du", &var);
++ if (strcmp(attr->attr.name, "data_0_val") == 0)
++ any_umevent_obj->data_0_val = var;
++ else if (strcmp(attr->attr.name, "data_1_val") == 0)
++ any_umevent_obj->data_1_val = var;
++ else if (strcmp(attr->attr.name, "data_2_val") == 0)
++ any_umevent_obj->data_2_val = var;
++ else if (strcmp(attr->attr.name, "data_3_val") == 0)
++ any_umevent_obj->data_3_val = var;
++ else if (strcmp(attr->attr.name, "data_4_val") == 0)
++ any_umevent_obj->data_4_val = var;
++ else if (strcmp(attr->attr.name, "data_5_val") == 0)
++ any_umevent_obj->data_5_val = var;
++ else if (strcmp(attr->attr.name, "data_6_val") == 0)
++ any_umevent_obj->data_6_val = var;
++ else
++ any_umevent_obj->data_7_val = var;
++ return count;
++}
++/**
++ * psb_create_umevent_obj - create and track new event objects
++ *
++ * @name: name to give to new sysfs / kobject entry
++ * @list: event object list to track the kobject in
++ */
++struct umevent_obj *psb_create_umevent_obj(const char *name,
++ struct umevent_list
++ *list)
++{
++ struct umevent_obj *new_umevent_obj;
++ int retval;
++ new_umevent_obj = kzalloc(sizeof(*new_umevent_obj),
++ GFP_KERNEL);
++ if (!new_umevent_obj)
++ return NULL;
++
++ new_umevent_obj->kobj.kset = list->umevent_disp_pool;
++ retval = kobject_init_and_add(&new_umevent_obj->kobj,
++ &umevent_obj_ktype, NULL,
++ "%s", name);
++ if (retval) {
++ kobject_put(&new_umevent_obj->kobj);
++ return NULL;
++ }
++ psb_umevent_add_to_list(list, new_umevent_obj);
++ return new_umevent_obj;
++}
++EXPORT_SYMBOL(psb_create_umevent_obj);
++/**
++ * psb_umevent_notify - info user mode of a new device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify(struct umevent_obj *notify_disp_obj)
++{
++ kobject_uevent(&notify_disp_obj->kobj, KOBJ_ADD);
++}
++EXPORT_SYMBOL(psb_umevent_notify);
++/**
++ * psb_umevent_notify_change - notify user mode of a change to a device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj)
++{
++ kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE);
++}
++EXPORT_SYMBOL(psb_umevent_notify_change);
++/**
++ * psb_umevent_notify_change_gfxsock - notify user mode of a change to a device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify_change_gfxsock(struct umevent_obj *notify_disp_obj,
++ int dst_group_id)
++{
++ psb_kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE, dst_group_id);
++}
++EXPORT_SYMBOL(psb_umevent_notify_change_gfxsock);
++/**
++ * psb_destroy_umevent_obj - decrement ref count on event so kernel can kill it
++ *
++ * @any_umevent_obj: event object to destroy
++ *
++ */
++void psb_destroy_umevent_obj(struct umevent_obj
++ *any_umevent_obj)
++{
++ kobject_put(&any_umevent_obj->kobj);
++}
++/**
++ *
++ * psb_umevent_init - init the event pool
++ *
++ * @parent_kobj: parent kobject to associate new kset with
++ * @new_umevent_list: event list to associate kset with
++ * @name: name to give to new sysfs entry
++ *
++ */
++int psb_umevent_init(struct kobject *parent_kobj,
++ struct umevent_list *new_umevent_list,
++ const char *name)
++{
++ psb_umevent_init_list(new_umevent_list);
++ new_umevent_list->umevent_disp_pool = kset_create_and_add(name, NULL,
++ parent_kobj);
++ if (!new_umevent_list->umevent_disp_pool)
++ return -ENOMEM;
++
++ return 0;
++}
++EXPORT_SYMBOL(psb_umevent_init);
++/**
++ *
++ * psb_umevent_cleanup - cleanup all event objects
++ *
++ * @kill_list: list of events to destroy
++ *
++ */
++void psb_umevent_cleanup(struct umevent_list *kill_list)
++{
++ psb_umevent_destroy_list(kill_list);
++}
++EXPORT_SYMBOL(psb_umevent_cleanup);
++/**
++ * psb_umevent_add_to_list - add an event to the event list
++ *
++ * @list: list to add the event to
++ * @umevent_obj_to_add: event to add
++ *
++ */
++void psb_umevent_add_to_list(struct umevent_list *list,
++ struct umevent_obj *umevent_obj_to_add)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&list->list_lock, flags);
++ list_add(&umevent_obj_to_add->head, &list->head);
++ spin_unlock_irqrestore(&list->list_lock, flags);
++}
++/**
++ * psb_umevent_init_list - initialize event list
++ *
++ * @list: list to initialize
++ *
++ */
++void psb_umevent_init_list(struct umevent_list *list)
++{
++ spin_lock_init(&list->list_lock);
++ INIT_LIST_HEAD(&list->head);
++}
++/**
++ * psb_umevent_create_list - allocate an event list
++ *
++ */
++struct umevent_list *psb_umevent_create_list()
++{
++ struct umevent_list *new_umevent_list;
++ new_umevent_list = NULL;
++ new_umevent_list = kmalloc(sizeof(struct umevent_list),
++ GFP_ATOMIC);
++ return new_umevent_list;
++}
++EXPORT_SYMBOL(psb_umevent_create_list);
++/**
++ * psb_umevent_destroy_list - destroy a list and clean up all mem
++ *
++ * @list: list to destroy and clean up after
++ *
++ */
++void psb_umevent_destroy_list(struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr;
++ struct list_head *node;
++ struct list_head *node_kill;
++ int i;
++ i = 0;
++ node = NULL;
++ node_kill = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj,
++ head);
++ node_kill = node;
++ node = umevent_obj_curr->head.next;
++ psb_destroy_umevent_obj(umevent_obj_curr);
++ umevent_obj_curr = NULL;
++ list_del(node_kill);
++ i++;
++ }
++ kset_unregister(list->umevent_disp_pool);
++ kfree(list);
++}
++/**
++ * psb_umevent_remove_from_list - remove an event from tracking list
++ *
++ * @list: list to remove the event from
++ * @disp_to_remove: name of event to remove.
++ *
++ */
++void psb_umevent_remove_from_list(struct umevent_list *list,
++ const char *disp_to_remove)
++{
++ struct umevent_obj *umevent_obj_curr = NULL;
++ struct list_head *node = NULL;
++ struct list_head *node_kill = NULL;
++ int i = 0;
++ int found_match = 0;
++ i = 0;
++ node = NULL;
++ node_kill = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj, head);
++ if (strcmp(umevent_obj_curr->kobj.name,
++ disp_to_remove) == 0) {
++ found_match = 1;
++ break;
++ }
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ if (found_match == 1) {
++ node_kill = node;
++ node = umevent_obj_curr->head.next;
++ psb_destroy_umevent_obj(umevent_obj_curr);
++ umevent_obj_curr = NULL;
++ list_del(node_kill);
++ }
++}
++EXPORT_SYMBOL(psb_umevent_remove_from_list);
++/**
++ * psb_umevent_find_obj - find an event in a tracking list
++ *
++ * @name: name of the event to find
++ * @list: list to find the event in
++ *
++ */
++struct umevent_obj *psb_umevent_find_obj(const char *name,
++ struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr = NULL;
++ struct list_head *node = NULL;
++ struct list_head *node_find = NULL;
++ int i = 0;
++ int found_match = 0;
++ i = 0;
++ node = NULL;
++ node_find = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj, head);
++ if (strcmp(umevent_obj_curr->kobj.name,
++ name) == 0) {
++ found_match = 1;
++ break;
++ }
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ if (found_match == 1)
++ return umevent_obj_curr;
++
++ return NULL;
++}
++EXPORT_SYMBOL(psb_umevent_find_obj);
++/**
++ * psb_umevent_debug_dump_list - debug list dump
++ *
++ * @list: list to dump
++ *
++ */
++void psb_umevent_debug_dump_list(struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr;
++ unsigned long flags;
++ struct list_head *node;
++ int i;
++ spin_lock_irqsave(&list->list_lock, flags);
++ i = 0;
++ node = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj,
++ head);
++ /*TBD: DUMP ANY REQUIRED VALUES WITH PRINTK*/
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ spin_unlock_irqrestore(&list->list_lock, flags);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_umevents.h b/drivers/gpu/drm/mrst/drv/psb_umevents.h
+new file mode 100644
+index 0000000..868bee4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_umevents.h
+@@ -0,0 +1,154 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#ifndef _PSB_UMEVENT_H_
++#define _PSB_UMEVENT_H_
++/**
++ * required includes
++ *
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <drm/drmP.h>
++#include <drm/drm_core.h>
++#include <drm/drm_pciids.h>
++#include <linux/spinlock.h>
++/**
++ * event groups for routing to different user mode threads
++ *
++ */
++#define DRM_DPST_SOCKET_GROUP_ID 1
++#define DRM_HOTPLUG_SOCKET_GROUP_ID 2
++#define DRM_HDMI_AUDIO_SOCKET_GROUP 4
++#define DRM_HDMI_HDCP_SOCKET_GROUP 8
++#define DRM_GFX_SOCKET_GROUPS 15
++/**
++ * event structure managed by kobjects
++ *
++ */
++struct umevent_obj {
++ struct kobject kobj;
++ struct list_head head;
++ int data_0_val;
++ int data_1_val;
++ int data_2_val;
++ int data_3_val;
++ int data_4_val;
++ int data_5_val;
++ int data_6_val;
++ int data_7_val;
++};
++/**
++ * event tracking list element
++ *
++ */
++struct umevent_list{
++ struct list_head head;
++ struct kset *umevent_disp_pool;
++ spinlock_t list_lock;
++};
++/**
++ * to go back and forth between kobjects and their main container
++ *
++ */
++#define to_umevent_obj(x) \
++ container_of(x, struct umevent_obj, kobj)
++
++/**
++ * event attributes exposed via sysfs
++ *
++ */
++struct umevent_attribute {
++ struct attribute attr;
++ ssize_t (*show)(struct umevent_obj *any_umevent_obj,
++ struct umevent_attribute *attr, char *buf);
++ ssize_t (*store)(struct umevent_obj *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count);
++};
++/**
++ * to go back and forth between the attribute passed to us by the OS
++ * and the umevent_attribute
++ *
++ */
++#define to_umevent_attr(x) \
++ container_of(x, struct umevent_attribute, \
++ attr)
++
++/**
++ * umevent function prototypes
++ *
++ */
++extern struct umevent_obj *psb_create_umevent_obj(const char *name,
++ struct umevent_list
++ *list);
++extern ssize_t psb_umevent_attr_show(struct kobject *kobj,
++ struct attribute *attr, char *buf);
++extern ssize_t psb_umevent_attr_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buf, size_t len);
++extern ssize_t psb_umevent_attr_show_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ char *buf);
++extern ssize_t psb_umevent_attr_store_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count);
++extern void psb_umevent_cleanup(struct umevent_list *kill_list);
++extern int psb_umevent_init(struct kobject *parent_kobj,
++ struct umevent_list *new_umevent_list,
++ const char *name);
++extern void psb_umevent_init_list(struct umevent_list *list);
++extern void psb_umevent_debug_dump_list(struct umevent_list *list);
++extern void psb_umevent_add_to_list(struct umevent_list *list,
++ struct umevent_obj
++ *umevent_obj_to_add);
++extern void psb_umevent_destroy_list(struct umevent_list *list);
++extern struct umevent_list *psb_umevent_create_list(void);
++extern void psb_umevent_notify(struct umevent_obj *notify_disp_obj);
++extern void psb_umevent_obj_release(struct kobject *kobj);
++extern void psb_umevent_remove_from_list(struct umevent_list *list,
++ const char *disp_to_remove);
++extern void psb_umevent_workqueue_dispatch(int work_type, const char *name,
++ struct umevent_list *list);
++extern void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj);
++extern void psb_umevent_notify_change_gfxsock(struct umevent_obj
++ *notify_disp_obj,
++ int dst_group_id);
++extern struct umevent_obj *psb_umevent_find_obj(const char *name,
++ struct umevent_list
++ *list);
++/**
++ * socket function prototypes
++ *
++ */
++extern int psb_kobject_uevent(struct kobject *kobj,
++ enum kobject_action action, int dst_group_id);
++extern int psb_kobject_uevent_env(struct kobject *kobj,
++ enum kobject_action action,
++ char *envp[], int dst_group_id);
++int psb_add_uevent_var(struct kobj_uevent_env *env,
++ const char *format, ...)
++ __attribute__((format (printf, 2, 3)));
++int psb_kobject_action_type(const char *buf,
++ size_t count, enum kobject_action *type);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/topaz_power.c b/drivers/gpu/drm/mrst/drv/topaz_power.c
+new file mode 100644
+index 0000000..7481390
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/topaz_power.c
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Author:binglin.chen@intel.com
++ */
++
++#include "topaz_power.h"
++#include "lnc_topaz.h"
++#include "psb_drv.h"
++#include "sysirq.h"
++#include "ospm_power.h"
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++static PVRSRV_ERROR DevInitTOPAZPart1(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ /* register power operation function */
++ /* FIXME: this should be in part2 init function, but
++ * currently here only OSPM needs IMG device... */
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex,
++ TOPAZPrePowerState,
++ TOPAZPostPowerState,
++ TOPAZPreClockSpeedChange,
++ TOPAZPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_ON,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitTOPAZPart1: failed to "
++ "register device with power manager"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR DevDeInitTOPAZ(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++
++ /* should deinit all resource */
++
++ eError = PVRSRVRemovePowerDevice(psDeviceNode->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ /* version check */
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_TOPAZ;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_VIDEO;
++
++ psDeviceNode->pfnInitDevice = DevInitTOPAZPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitTOPAZ;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = TOPAZDevInitCompatCheck;
++
++ psDeviceNode->pfnDeviceISR = lnc_topaz_interrupt;
++ psDeviceNode->pvISRData = (IMG_VOID *)gpDrmDevice;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* ask for a change not power on*/
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ TOPAZ_NEW_PMSTATE(gpDrmDevice, topaz_priv, PSB_PMSTATE_POWERDOWN);
++
++ /* context save */
++ /* context save require irq disable first */
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_save_mtx_state(gpDrmDevice);
++
++ /* internally close the device */
++
++ /* ask for power off */
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will deinitialize the driver if needed */
++ lnc_unmap_topaz_reg(gpDrmDevice);
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* if ask for change & current status is not on */
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ /* internally open device */
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ TOPAZ_NEW_PMSTATE(gpDrmDevice, topaz_priv, PSB_PMSTATE_POWERUP);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will initialize the driver if needed */
++ lnc_map_topaz_reg(gpDrmDevice);
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++
++ /* context restore */
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_restore_mtx_state(gpDrmDevice);
++ sysirq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ sysirq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/topaz_power.h b/drivers/gpu/drm/mrst/drv/topaz_power.h
+new file mode 100644
+index 0000000..beb6114
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/topaz_power.h
+@@ -0,0 +1,53 @@
++/*
++** topaz_power.h
++** Login : <binglin.chen@intel.com>
++** Started on Mon Nov 16 13:31:42 2009 brady
++**
++** Copyright (C) 2009 brady
++** This program is free software; you can redistribute it and/or modify
++** it under the terms of the GNU General Public License as published by
++** the Free Software Foundation; either version 2 of the License, or
++** (at your option) any later version.
++**
++** This program is distributed in the hope that it will be useful,
++** but WITHOUT ANY WARRANTY; without even the implied warranty of
++** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++** GNU General Public License for more details.
++**
++** You should have received a copy of the GNU General Public License
++** along with this program; if not, write to the Free Software
++** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#ifndef TOPAZ_POWER_H_
++#define TOPAZ_POWER_H_
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++extern struct drm_device *gpDrmDevice;
++
++/* function define */
++PVRSRV_ERROR TOPAZRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++PVRSRV_ERROR TOPAZDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/* power function define */
++PVRSRV_ERROR TOPAZPrePowerState(
++ IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPostPowerState(
++ IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPreClockSpeedChange(
++ IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPostClockSpeedChange(
++ IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZInitOSPM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#endif /* !TOPAZ_POWER_H_ */
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c
+new file mode 100644
+index 0000000..8eb830a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c
+@@ -0,0 +1,144 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ * Keith Packard.
++ */
++
++#include "ttm_bo_driver.h"
++#ifdef TTM_HAS_AGP
++#include "ttm_placement_common.h"
++#include <linux/agp_backend.h>
++#include <asm/agp.h>
++#include <linux/io.h>
++
++struct ttm_agp_backend {
++ struct ttm_backend backend;
++ struct agp_memory *mem;
++ struct agp_bridge_data *bridge;
++};
++
++static int ttm_agp_populate(struct ttm_backend *backend,
++ unsigned long num_pages, struct page **pages,
++ struct page *dummy_read_page)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct page **cur_page, **last_page = pages + num_pages;
++ struct agp_memory *mem;
++
++ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
++ if (unlikely(mem == NULL))
++ return -ENOMEM;
++
++ mem->page_count = 0;
++ for (cur_page = pages; cur_page < last_page; ++cur_page) {
++ struct page *page = *cur_page;
++ if (!page)
++ page = dummy_read_page;
++
++ #if 0
++ mem->memory[mem->page_count++] =
++ phys_to_gart(page_to_phys(page));
++ #endif
++ }
++ agp_be->mem = mem;
++ return 0;
++}
++
++static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct agp_memory *mem = agp_be->mem;
++ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED);
++ int ret;
++
++ mem->is_flushed = 1;
++ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
++
++ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
++ if (ret)
++ printk(KERN_ERR "AGP Bind memory failed.\n");
++
++ return ret;
++}
++
++static int ttm_agp_unbind(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++
++ if (agp_be->mem->is_bound)
++ return agp_unbind_memory(agp_be->mem);
++ else
++ return 0;
++}
++
++static void ttm_agp_clear(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct agp_memory *mem = agp_be->mem;
++
++ if (mem) {
++ ttm_agp_unbind(backend);
++ agp_free_memory(mem);
++ }
++ agp_be->mem = NULL;
++}
++
++static void ttm_agp_destroy(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++
++ if (agp_be->mem)
++ ttm_agp_clear(backend);
++ kfree(agp_be);
++}
++
++static struct ttm_backend_func ttm_agp_func = {
++ .populate = ttm_agp_populate,
++ .clear = ttm_agp_clear,
++ .bind = ttm_agp_bind,
++ .unbind = ttm_agp_unbind,
++ .destroy = ttm_agp_destroy,
++};
++
++struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
++ struct agp_bridge_data *bridge)
++{
++ struct ttm_agp_backend *agp_be;
++
++ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
++ if (!agp_be)
++ return NULL;
++
++ agp_be->mem = NULL;
++ agp_be->bridge = bridge;
++ agp_be->backend.func = &ttm_agp_func;
++ agp_be->backend.bdev = bdev;
++ return &agp_be->backend;
++}
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c
+new file mode 100644
+index 0000000..2d738b6
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c
+@@ -0,0 +1,1729 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/jiffies.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++
++#define TTM_ASSERT_LOCKED(param)
++#define TTM_DEBUG(fmt, arg...)
++#define TTM_BO_HASH_ORDER 13
++
++static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
++static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
++static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
++
++static inline uint32_t ttm_bo_type_flags(unsigned type)
++{
++ uint32_t return_type = 1 << (type);
++ return return_type;
++}
++
++static void ttm_bo_release_list(struct kref *list_kref)
++{
++ struct ttm_buffer_object *bo =
++ container_of(list_kref, struct ttm_buffer_object, list_kref);
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ BUG_ON(atomic_read(&bo->list_kref.refcount));
++ BUG_ON(atomic_read(&bo->kref.refcount));
++ BUG_ON(atomic_read(&bo->cpu_writers));
++ BUG_ON(bo->sync_obj != NULL);
++ BUG_ON(bo->mem.mm_node != NULL);
++ BUG_ON(!list_empty(&bo->lru));
++ BUG_ON(!list_empty(&bo->ddestroy));
++
++ if (bo->ttm)
++ ttm_tt_destroy(bo->ttm);
++ if (bo->destroy)
++ bo->destroy(bo);
++ else {
++ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
++ kfree(bo);
++ }
++}
++
++int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
++{
++
++ if (interruptible) {
++ int ret = 0;
++
++ ret = wait_event_interruptible(bo->event_queue,
++ atomic_read(&bo->reserved) == 0);
++ if (unlikely(ret != 0))
++ return -ERESTART;
++ } else {
++ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
++ }
++ return 0;
++}
++
++static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
++
++ BUG_ON(!list_empty(&bo->lru));
++
++ man = &bdev->man[bo->mem.mem_type];
++ list_add_tail(&bo->lru, &man->lru);
++ kref_get(&bo->list_kref);
++
++ if (bo->ttm != NULL) {
++ list_add_tail(&bo->swap, &bdev->swap_lru);
++ kref_get(&bo->list_kref);
++ }
++ }
++}
++
++/*
++ * Call with bdev->lru_lock and bdev->global->swap_lock held..
++ */
++
++static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
++{
++ int put_count = 0;
++
++ if (!list_empty(&bo->swap)) {
++ list_del_init(&bo->swap);
++ ++put_count;
++ }
++ if (!list_empty(&bo->lru)) {
++ list_del_init(&bo->lru);
++ ++put_count;
++ }
++
++ /*
++ * TODO: Add a driver hook to delete from
++ * driver-specific LRU's here.
++ */
++
++ return put_count;
++}
++
++int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret;
++
++ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
++ if (use_sequence && bo->seq_valid &&
++ (sequence - bo->val_seq < (1 << 31))) {
++ return -EAGAIN;
++ }
++
++ if (no_wait)
++ return -EBUSY;
++
++ spin_unlock(&bdev->lru_lock);
++ ret = ttm_bo_wait_unreserved(bo, interruptible);
++ spin_lock(&bdev->lru_lock);
++
++ if (unlikely(ret))
++ return ret;
++ }
++
++ if (use_sequence) {
++ bo->val_seq = sequence;
++ bo->seq_valid = true;
++ } else {
++ bo->seq_valid = false;
++ }
++
++ return 0;
++}
++
++static void ttm_bo_ref_bug(struct kref *list_kref)
++{
++ BUG();
++}
++
++int ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int put_count = 0;
++ int ret;
++
++ spin_lock(&bdev->lru_lock);
++ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
++ sequence);
++ if (likely(ret == 0))
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++
++ return ret;
++}
++
++void ttm_bo_unreserve(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ spin_lock(&bdev->lru_lock);
++ ttm_bo_add_to_lru(bo);
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++ spin_unlock(&bdev->lru_lock);
++}
++
++/*
++ * Call bo->mutex locked.
++ */
++
++static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret = 0;
++ uint32_t page_flags = 0;
++
++ TTM_ASSERT_LOCKED(&bo->mutex);
++ bo->ttm = NULL;
++
++ switch (bo->type) {
++ case ttm_bo_type_device:
++ case ttm_bo_type_kernel:
++ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++ page_flags, bdev->dummy_read_page);
++ if (unlikely(bo->ttm == NULL))
++ ret = -ENOMEM;
++ break;
++ case ttm_bo_type_user:
++ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++ page_flags | TTM_PAGE_FLAG_USER,
++ bdev->dummy_read_page);
++ if (unlikely(bo->ttm == NULL))
++ ret = -ENOMEM;
++ break;
++
++ ret = ttm_tt_set_user(bo->ttm, current,
++ bo->buffer_start, bo->num_pages);
++ if (unlikely(ret != 0))
++ ttm_tt_destroy(bo->ttm);
++ break;
++ default:
++ printk(KERN_ERR "Illegal buffer object type\n");
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem,
++ bool evict, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
++ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
++ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
++ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
++ int ret = 0;
++
++ if (old_is_pci || new_is_pci ||
++ ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
++ ttm_bo_unmap_virtual(bo);
++
++ /*
++ * Create and bind a ttm if required.
++ */
++
++ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
++ ret = ttm_bo_add_ttm(bo);
++ if (ret)
++ goto out_err;
++
++ ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags);
++ if (ret)
++ return ret;
++
++ if (mem->mem_type != TTM_PL_SYSTEM) {
++ ret = ttm_tt_bind(bo->ttm, mem);
++ if (ret)
++ goto out_err;
++ }
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
++
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++
++ *old_mem = *mem;
++ mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, mem->flags,
++ TTM_PL_MASK_MEMTYPE);
++ goto moved;
++ }
++
++ }
++
++ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
++ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
++ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
++ else if (bdev->driver->move)
++ ret = bdev->driver->move(bo, evict, interruptible,
++ no_wait, mem);
++ else
++ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
++
++ if (ret)
++ goto out_err;
++
++moved:
++ if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) {
++ ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags);
++ if (ret)
++ printk(KERN_ERR "Can not flush read caches\n");
++ }
++
++ ttm_flag_masked(&bo->priv_flags,
++ (evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0,
++ TTM_BO_PRIV_FLAG_EVICTED);
++
++ if (bo->mem.mm_node)
++ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
++ bdev->man[bo->mem.mem_type].gpu_offset;
++
++ return 0;
++
++out_err:
++ new_man = &bdev->man[bo->mem.mem_type];
++ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
++ ttm_tt_unbind(bo->ttm);
++ ttm_tt_destroy(bo->ttm);
++ bo->ttm = NULL;
++ }
++
++ return ret;
++}
++
++static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
++ bool allow_errors)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ if (bo->sync_obj) {
++ if (bdev->nice_mode) {
++ unsigned long _end = jiffies + 3 * HZ;
++ int ret;
++ do {
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (ret && allow_errors)
++ return ret;
++
++ } while (ret && !time_after_eq(jiffies, _end));
++
++ if (bo->sync_obj) {
++ bdev->nice_mode = false;
++ printk(KERN_ERR "Detected probable GPU lockup. "
++ "Evicting buffer.\n");
++ }
++ }
++ if (bo->sync_obj) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++ }
++ return 0;
++}
++
++/**
++ * If bo idle, remove from delayed- and lru lists, and unref.
++ * If not idle, and already on delayed list, do nothing.
++ * If not idle, and not on delayed list, put on delayed list,
++ * up the list_kref and schedule a delayed list check.
++ */
++
++static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ mutex_lock(&bo->mutex);
++
++ if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
++ bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++
++ if (bo->sync_obj && remove_all)
++ (void)ttm_bo_expire_sync_obj(bo, false);
++
++ if (!bo->sync_obj) {
++ int put_count;
++
++ if (bo->ttm)
++ ttm_tt_unbind(bo->ttm);
++ spin_lock(&bdev->lru_lock);
++ if (!list_empty(&bo->ddestroy)) {
++ list_del_init(&bo->ddestroy);
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++ }
++ if (bo->mem.mm_node) {
++ drm_mm_put_block(bo->mem.mm_node);
++ bo->mem.mm_node = NULL;
++ }
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++ mutex_unlock(&bo->mutex);
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++
++ return;
++ }
++
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bo->ddestroy)) {
++ spin_unlock(&bdev->lru_lock);
++ driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bo->ddestroy)) {
++ kref_get(&bo->list_kref);
++ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
++ }
++ spin_unlock(&bdev->lru_lock);
++ schedule_delayed_work(&bdev->wq,
++ ((HZ / 100) < 1) ? 1 : HZ / 100);
++ } else
++ spin_unlock(&bdev->lru_lock);
++
++ mutex_unlock(&bo->mutex);
++ return;
++}
++
++/**
++ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
++ * encountered buffers.
++ */
++
++static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
++{
++ struct ttm_buffer_object *entry, *nentry;
++ struct list_head *list, *next;
++ int ret;
++
++ spin_lock(&bdev->lru_lock);
++ list_for_each_safe(list, next, &bdev->ddestroy) {
++ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
++ nentry = NULL;
++
++ /*
++ * Protect the next list entry from destruction while we
++ * unlock the lru_lock.
++ */
++
++ if (next != &bdev->ddestroy) {
++ nentry = list_entry(next, struct ttm_buffer_object,
++ ddestroy);
++ kref_get(&nentry->list_kref);
++ }
++ kref_get(&entry->list_kref);
++
++ spin_unlock(&bdev->lru_lock);
++ ttm_bo_cleanup_refs(entry, remove_all);
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++
++ if (nentry) {
++ bool next_onlist = !list_empty(next);
++ kref_put(&nentry->list_kref, ttm_bo_release_list);
++
++ /*
++ * Someone might have raced us and removed the
++ * next entry from the list. We don't bother restarting
++ * list traversal.
++ */
++
++ if (!next_onlist)
++ break;
++ }
++ }
++ ret = !list_empty(&bdev->ddestroy);
++ spin_unlock(&bdev->lru_lock);
++
++ return ret;
++}
++
++static void ttm_bo_delayed_workqueue(struct work_struct *work)
++{
++ struct ttm_bo_device *bdev =
++ container_of(work, struct ttm_bo_device, wq.work);
++
++ if (ttm_bo_delayed_delete(bdev, false)) {
++ schedule_delayed_work(&bdev->wq,
++ ((HZ / 100) < 1) ? 1 : HZ / 100);
++ }
++}
++
++static void ttm_bo_release(struct kref *kref)
++{
++ struct ttm_buffer_object *bo =
++ container_of(kref, struct ttm_buffer_object, kref);
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ if (likely(bo->vm_node != NULL)) {
++ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
++ drm_mm_put_block(bo->vm_node);
++ }
++ write_unlock(&bdev->vm_lock);
++ ttm_bo_cleanup_refs(bo, false);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ write_lock(&bdev->vm_lock);
++}
++
++void ttm_bo_unref(struct ttm_buffer_object **p_bo)
++{
++ struct ttm_buffer_object *bo = *p_bo;
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ *p_bo = NULL;
++ write_lock(&bdev->vm_lock);
++ kref_put(&bo->kref, ttm_bo_release);
++ write_unlock(&bdev->vm_lock);
++}
++
++static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
++ bool interruptible, bool no_wait)
++{
++ int ret = 0;
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg evict_mem;
++
++ if (bo->mem.mem_type != mem_type)
++ goto out;
++
++ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
++ if (ret && ret != -ERESTART) {
++ printk(KERN_ERR "Failed to expire sync object before "
++ "buffer eviction.\n");
++ goto out;
++ }
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ evict_mem = bo->mem;
++ evict_mem.mm_node = NULL;
++
++ evict_mem.proposed_flags = bdev->driver->evict_flags(bo);
++ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
++
++ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
++ if (unlikely(ret != 0 && ret != -ERESTART)) {
++ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM;
++ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
++ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
++ }
++
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Failed to find memory space for "
++ "buffer 0x%p eviction.\n", bo);
++ goto out;
++ }
++
++ ret = ttm_bo_handle_move_mem(bo,
++ &evict_mem,
++ true,
++ interruptible,
++ no_wait);
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Buffer eviction failed\n");
++ goto out;
++ }
++
++ spin_lock(&bdev->lru_lock);
++ if (evict_mem.mm_node) {
++ drm_mm_put_block(evict_mem.mm_node);
++ evict_mem.mm_node = NULL;
++ }
++ spin_unlock(&bdev->lru_lock);
++
++ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED,
++ TTM_BO_PRIV_FLAG_EVICTED);
++
++out:
++ return ret;
++}
++
++/**
++ * Repeatedly evict memory from the LRU for @mem_type until we create enough
++ * space, or we've evicted everything and there isn't enough space.
++ */
++static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ uint32_t mem_type,
++ bool interruptible, bool no_wait)
++{
++ struct drm_mm_node *node;
++ struct ttm_buffer_object *entry;
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++ struct list_head *lru;
++ unsigned long num_pages = mem->num_pages;
++ int put_count = 0;
++ int ret;
++
++retry_pre_get:
++ ret = drm_mm_pre_get(&man->manager);
++ if (unlikely(ret != 0))
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ do {
++ node = drm_mm_search_free(&man->manager, num_pages,
++ mem->page_alignment, 1);
++ if (node)
++ break;
++
++ lru = &man->lru;
++ if (list_empty(lru))
++ break;
++
++ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
++ kref_get(&entry->list_kref);
++
++ ret = ttm_bo_reserve_locked(entry,
++ interruptible,
++ no_wait,
++ false,
++ 0);
++
++ if (likely(ret == 0))
++ put_count = ttm_bo_del_from_lru(entry);
++
++ spin_unlock(&bdev->lru_lock);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ while (put_count--)
++ kref_put(&entry->list_kref, ttm_bo_ref_bug);
++
++ mutex_lock(&entry->mutex);
++ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
++ mutex_unlock(&entry->mutex);
++
++ ttm_bo_unreserve(entry);
++
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ if (ret)
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ } while (1);
++
++ if (!node) {
++ spin_unlock(&bdev->lru_lock);
++ return -ENOMEM;
++ }
++
++ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
++ if (unlikely(!node)) {
++ spin_unlock(&bdev->lru_lock);
++ goto retry_pre_get;
++ }
++
++ spin_unlock(&bdev->lru_lock);
++ mem->mm_node = node;
++ mem->mem_type = mem_type;
++ return 0;
++}
++
++static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
++ bool disallow_fixed,
++ uint32_t mem_type,
++ uint32_t mask, uint32_t *res_mask)
++{
++ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
++
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
++ return false;
++
++ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
++ return false;
++
++ if ((mask & man->available_caching) == 0)
++ return false;
++ if (mask & man->default_caching)
++ cur_flags |= man->default_caching;
++ else if (mask & TTM_PL_FLAG_CACHED)
++ cur_flags |= TTM_PL_FLAG_CACHED;
++ else if (mask & TTM_PL_FLAG_WC)
++ cur_flags |= TTM_PL_FLAG_WC;
++ else
++ cur_flags |= TTM_PL_FLAG_UNCACHED;
++
++ *res_mask = cur_flags;
++ return true;
++}
++
++/**
++ * Creates space for memory region @mem according to its type.
++ *
++ * This function first searches for free space in compatible memory types in
++ * the priority order defined by the driver. If free space isn't found, then
++ * ttm_bo_mem_force_space is attempted in priority order to evict and find
++ * space.
++ */
++int ttm_bo_mem_space(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man;
++
++ uint32_t num_prios = bdev->driver->num_mem_type_prio;
++ const uint32_t *prios = bdev->driver->mem_type_prio;
++ uint32_t i;
++ uint32_t mem_type = TTM_PL_SYSTEM;
++ uint32_t cur_flags = 0;
++ bool type_found = false;
++ bool type_ok = false;
++ bool has_eagain = false;
++ struct drm_mm_node *node = NULL;
++ int ret;
++
++ mem->mm_node = NULL;
++ for (i = 0; i < num_prios; ++i) {
++ mem_type = prios[i];
++ man = &bdev->man[mem_type];
++
++ type_ok = ttm_bo_mt_compatible(man,
++ bo->type == ttm_bo_type_user,
++ mem_type, mem->proposed_flags,
++ &cur_flags);
++
++ if (!type_ok)
++ continue;
++
++ if (mem_type == TTM_PL_SYSTEM)
++ break;
++
++ if (man->has_type && man->use_type) {
++ type_found = true;
++ do {
++ ret = drm_mm_pre_get(&man->manager);
++ if (unlikely(ret))
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ node = drm_mm_search_free(&man->manager,
++ mem->num_pages,
++ mem->page_alignment,
++ 1);
++ if (unlikely(!node)) {
++ spin_unlock(&bdev->lru_lock);
++ break;
++ }
++ node = drm_mm_get_block_atomic(node,
++ mem->num_pages,
++ mem->
++ page_alignment);
++ spin_unlock(&bdev->lru_lock);
++ } while (!node);
++ }
++ if (node)
++ break;
++ }
++
++ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
++ mem->mm_node = node;
++ mem->mem_type = mem_type;
++ mem->flags = cur_flags;
++ return 0;
++ }
++
++ if (!type_found)
++ return -EINVAL;
++
++ num_prios = bdev->driver->num_mem_busy_prio;
++ prios = bdev->driver->mem_busy_prio;
++
++ for (i = 0; i < num_prios; ++i) {
++ mem_type = prios[i];
++ man = &bdev->man[mem_type];
++
++ if (!man->has_type)
++ continue;
++
++ if (!ttm_bo_mt_compatible(man,
++ bo->type == ttm_bo_type_user,
++ mem_type,
++ mem->proposed_flags, &cur_flags))
++ continue;
++
++ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
++ interruptible, no_wait);
++
++ if (ret == 0 && mem->mm_node) {
++ mem->flags = cur_flags;
++ return 0;
++ }
++
++ if (ret == -ERESTART)
++ has_eagain = true;
++ }
++
++ ret = (has_eagain) ? -ERESTART : -ENOMEM;
++ return ret;
++}
++
++/*
++ * Call bo->mutex locked.
++ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
++ */
++
++static int ttm_bo_busy(struct ttm_buffer_object *bo)
++{
++ void *sync_obj = bo->sync_obj;
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++
++ if (sync_obj) {
++ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ return 0;
++ }
++ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
++ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ return 0;
++ }
++ return 1;
++ }
++ return 0;
++}
++
++int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
++{
++ int ret = 0;
++
++ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
++ return -EBUSY;
++
++ ret = wait_event_interruptible(bo->event_queue,
++ atomic_read(&bo->cpu_writers) == 0);
++
++ if (ret == -ERESTARTSYS)
++ ret = -ERESTART;
++
++ return ret;
++}
++
++/*
++ * bo->mutex locked.
++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
++ */
++
++int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
++ bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret = 0;
++ struct ttm_mem_reg mem;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ /*
++ * FIXME: It's possible to pipeline buffer moves.
++ * Have the driver move function wait for idle when necessary,
++ * instead of doing it here.
++ */
++
++ ttm_bo_busy(bo);
++ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
++ if (ret)
++ return ret;
++
++ mem.num_pages = bo->num_pages;
++ mem.size = mem.num_pages << PAGE_SHIFT;
++ mem.proposed_flags = new_mem_flags;
++ mem.page_alignment = bo->mem.page_alignment;
++
++ /*
++ * Determine where to move the buffer.
++ */
++
++ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
++ if (ret)
++ goto out_unlock;
++
++ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
++
++out_unlock:
++ if (ret && mem.mm_node) {
++ spin_lock(&bdev->lru_lock);
++ drm_mm_put_block(mem.mm_node);
++ spin_unlock(&bdev->lru_lock);
++ }
++ return ret;
++}
++
++static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
++{
++ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0)
++ return 0;
++ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0)
++ return 0;
++
++ return 1;
++}
++
++int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait)
++{
++ int ret;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++ bo->mem.proposed_flags = bo->proposed_flags;
++
++ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n",
++ (unsigned long)bo->mem.proposed_flags,
++ (unsigned long)bo->mem.flags);
++
++ /*
++ * Check whether we need to move buffer.
++ */
++
++ if (!ttm_bo_mem_compat(&bo->mem)) {
++ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags,
++ interruptible, no_wait);
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Failed moving buffer. "
++ "Proposed placement 0x%08x\n",
++ bo->mem.proposed_flags);
++ if (ret == -ENOMEM)
++ printk(KERN_ERR "Out of aperture space or "
++ "DRM memory quota.\n");
++ return ret;
++ }
++ }
++
++ /*
++ * We might need to add a TTM.
++ */
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
++ ret = ttm_bo_add_ttm(bo);
++ if (ret)
++ return ret;
++ }
++ /*
++ * Validation has succeeded, move the access and other
++ * non-mapping-related flag bits from the proposed flags to
++ * the active flags
++ */
++
++ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags,
++ ~TTM_PL_MASK_MEMTYPE);
++
++ return 0;
++}
++
++int
++ttm_bo_check_placement(struct ttm_buffer_object *bo,
++ uint32_t set_flags, uint32_t clr_flags)
++{
++ uint32_t new_mask = set_flags | clr_flags;
++
++ if ((bo->type == ttm_bo_type_user) &&
++ (clr_flags & TTM_PL_FLAG_CACHED)) {
++ printk(KERN_ERR
++ "User buffers require cache-coherent memory.\n");
++ return -EINVAL;
++ }
++
++ if (!capable(CAP_SYS_ADMIN)) {
++ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
++ printk(KERN_ERR "Need to be root to modify"
++ " NO_EVICT status.\n");
++ return -EINVAL;
++ }
++
++ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) &&
++ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
++ printk(KERN_ERR "Incompatible memory specification"
++ " for NO_EVICT buffer.\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++int ttm_buffer_object_init(struct ttm_bo_device *bdev,
++ struct ttm_buffer_object *bo,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ size_t acc_size,
++ void (*destroy) (struct ttm_buffer_object *))
++{
++ int ret = 0;
++ unsigned long num_pages;
++
++ size += buffer_start & ~PAGE_MASK;
++ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (num_pages == 0) {
++ printk(KERN_ERR "Illegal buffer object size.\n");
++ return -EINVAL;
++ }
++ bo->destroy = destroy;
++
++ mutex_init(&bo->mutex);
++ mutex_lock(&bo->mutex);
++ kref_init(&bo->kref);
++ kref_init(&bo->list_kref);
++ atomic_set(&bo->cpu_writers, 0);
++ atomic_set(&bo->reserved, 1);
++ init_waitqueue_head(&bo->event_queue);
++ INIT_LIST_HEAD(&bo->lru);
++ INIT_LIST_HEAD(&bo->ddestroy);
++ INIT_LIST_HEAD(&bo->swap);
++ bo->bdev = bdev;
++ bo->type = type;
++ bo->num_pages = num_pages;
++ bo->mem.mem_type = TTM_PL_SYSTEM;
++ bo->mem.num_pages = bo->num_pages;
++ bo->mem.mm_node = NULL;
++ bo->mem.page_alignment = page_alignment;
++ bo->buffer_start = buffer_start & PAGE_MASK;
++ bo->priv_flags = 0;
++ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
++ bo->seq_valid = false;
++ bo->persistant_swap_storage = persistant_swap_storage;
++ bo->acc_size = acc_size;
++
++ ret = ttm_bo_check_placement(bo, flags, 0ULL);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ /*
++ * If no caching attributes are set, accept any form of caching.
++ */
++
++ if ((flags & TTM_PL_MASK_CACHING) == 0)
++ flags |= TTM_PL_MASK_CACHING;
++
++ bo->proposed_flags = flags;
++ bo->mem.proposed_flags = flags;
++
++ /*
++ * For ttm_bo_type_device buffers, allocate
++ * address space from the device.
++ */
++
++ if (bo->type == ttm_bo_type_device) {
++ ret = ttm_bo_setup_vm(bo);
++ if (ret)
++ goto out_err;
++ }
++
++ ret = ttm_buffer_object_validate(bo, interruptible, false);
++ if (ret)
++ goto out_err;
++
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return 0;
++
++out_err:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ return ret;
++}
++
++static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
++ unsigned long num_pages)
++{
++ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
++ PAGE_MASK;
++
++ return bdev->ttm_bo_size + 2 * page_array_size;
++}
++
++int ttm_buffer_object_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ struct ttm_buffer_object **p_bo)
++{
++ struct ttm_buffer_object *bo;
++ int ret;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++
++ size_t acc_size =
++ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
++
++ if (unlikely(bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
++ page_alignment, buffer_start,
++ interruptible,
++ persistant_swap_storage, acc_size, NULL);
++ if (likely(ret == 0))
++ *p_bo = bo;
++
++ return ret;
++}
++
++static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
++ uint32_t mem_type, bool allow_errors)
++{
++ int ret;
++
++ mutex_lock(&bo->mutex);
++
++ ret = ttm_bo_expire_sync_obj(bo, allow_errors);
++ if (ret)
++ goto out;
++
++ if (bo->mem.mem_type == mem_type)
++ ret = ttm_bo_evict(bo, mem_type, false, false);
++
++ if (ret) {
++ if (allow_errors)
++ goto out;
++ else {
++ ret = 0;
++ printk(KERN_ERR "Cleanup eviction failed\n");
++ }
++ }
++
++out:
++ mutex_unlock(&bo->mutex);
++ return ret;
++}
++
++static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
++ struct list_head *head,
++ unsigned mem_type, bool allow_errors)
++{
++ struct ttm_buffer_object *entry;
++ int ret;
++ int put_count;
++
++ /*
++ * Can't use standard list traversal since we're unlocking.
++ */
++
++ spin_lock(&bdev->lru_lock);
++
++ while (!list_empty(head)) {
++ entry = list_first_entry(head, struct ttm_buffer_object, lru);
++ kref_get(&entry->list_kref);
++ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
++ put_count = ttm_bo_del_from_lru(entry);
++ spin_unlock(&bdev->lru_lock);
++ while (put_count--)
++ kref_put(&entry->list_kref, ttm_bo_ref_bug);
++ BUG_ON(ret);
++ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
++ ttm_bo_unreserve(entry);
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++ }
++
++ spin_unlock(&bdev->lru_lock);
++
++ return 0;
++}
++
++int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++ int ret = -EINVAL;
++
++ if (mem_type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory type %d\n", mem_type);
++ return ret;
++ }
++
++ if (!man->has_type) {
++ printk(KERN_ERR "Trying to take down uninitialized "
++ "memory manager type %u\n", mem_type);
++ return ret;
++ }
++
++ man->use_type = false;
++ man->has_type = false;
++
++ ret = 0;
++ if (mem_type > 0) {
++ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
++
++ spin_lock(&bdev->lru_lock);
++ if (drm_mm_clean(&man->manager))
++ drm_mm_takedown(&man->manager);
++ else
++ ret = -EBUSY;
++ spin_unlock(&bdev->lru_lock);
++ }
++
++ return ret;
++}
++
++int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++
++ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory manager memory type %u.\n",
++ mem_type);
++ return -EINVAL;
++ }
++
++ if (!man->has_type) {
++ printk(KERN_ERR "Memory type %u has not been initialized.\n",
++ mem_type);
++ return 0;
++ }
++
++ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
++}
++
++int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
++ unsigned long p_offset, unsigned long p_size)
++{
++ int ret = -EINVAL;
++ struct ttm_mem_type_manager *man;
++
++ if (type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory type %d\n", type);
++ return ret;
++ }
++
++ man = &bdev->man[type];
++ if (man->has_type) {
++ printk(KERN_ERR
++ "Memory manager already initialized for type %d\n",
++ type);
++ return ret;
++ }
++
++ ret = bdev->driver->init_mem_type(bdev, type, man);
++ if (ret)
++ return ret;
++
++ ret = 0;
++ if (type != TTM_PL_SYSTEM) {
++ if (!p_size) {
++ printk(KERN_ERR "Zero size memory manager type %d\n",
++ type);
++ return ret;
++ }
++ ret = drm_mm_init(&man->manager, p_offset, p_size);
++ if (ret)
++ return ret;
++ }
++ man->has_type = true;
++ man->use_type = true;
++ man->size = p_size;
++
++ INIT_LIST_HEAD(&man->lru);
++
++ return 0;
++}
++
++int ttm_bo_device_release(struct ttm_bo_device *bdev)
++{
++ int ret = 0;
++ unsigned i = TTM_NUM_MEM_TYPES;
++ struct ttm_mem_type_manager *man;
++
++ while (i--) {
++ man = &bdev->man[i];
++ if (man->has_type) {
++ man->use_type = false;
++ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
++ ret = -EBUSY;
++ printk(KERN_ERR "DRM memory manager type %d "
++ "is not clean.\n", i);
++ }
++ man->has_type = false;
++ }
++ }
++
++ if (!cancel_delayed_work(&bdev->wq))
++ flush_scheduled_work();
++
++ while (ttm_bo_delayed_delete(bdev, true)) {
++ /* Don't you know you have to do */
++ /* something here otherwise checkpatch will */
++ /* give you error */
++ }
++
++
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bdev->ddestroy))
++ TTM_DEBUG("Delayed destroy list was clean\n");
++
++ if (list_empty(&bdev->man[0].lru))
++ TTM_DEBUG("Swap list was clean\n");
++ spin_unlock(&bdev->lru_lock);
++
++ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
++ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
++ write_lock(&bdev->vm_lock);
++ drm_mm_takedown(&bdev->addr_space_mm);
++ write_unlock(&bdev->vm_lock);
++
++ __free_page(bdev->dummy_read_page);
++ return ret;
++}
++
++/*
++ * This function is intended to be called on drm driver load.
++ * If you decide to call it from firstopen, you must protect the call
++ * from a potentially racing ttm_bo_driver_finish in lastclose.
++ * (This may happen on X server restart).
++ */
++
++int ttm_bo_device_init(struct ttm_bo_device *bdev,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_bo_driver *driver, uint64_t file_page_offset)
++{
++ int ret = -EINVAL;
++
++ bdev->dummy_read_page = NULL;
++ rwlock_init(&bdev->vm_lock);
++ spin_lock_init(&bdev->lru_lock);
++
++ bdev->driver = driver;
++ bdev->mem_glob = mem_glob;
++
++ memset(bdev->man, 0, sizeof(bdev->man));
++
++ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
++ if (unlikely(bdev->dummy_read_page == NULL)) {
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++
++ /*
++ * Initialize the system memory buffer type.
++ * Other types need to be driver / IOCTL initialized.
++ */
++ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ bdev->addr_space_rb = RB_ROOT;
++ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
++ bdev->nice_mode = true;
++ INIT_LIST_HEAD(&bdev->ddestroy);
++ INIT_LIST_HEAD(&bdev->swap_lru);
++ bdev->dev_mapping = NULL;
++ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
++ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Could not register buffer object swapout.\n");
++ goto out_err2;
++ }
++ return 0;
++out_err2:
++ ttm_bo_clean_mm(bdev, 0);
++out_err1:
++ __free_page(bdev->dummy_read_page);
++out_err0:
++ return ret;
++}
++
++/*
++ * buffer object vm functions.
++ */
++
++bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
++ if (mem->mem_type == TTM_PL_SYSTEM)
++ return false;
++
++ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
++ return false;
++
++ if (mem->flags & TTM_PL_FLAG_CACHED)
++ return false;
++ }
++ return true;
++}
++
++int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ unsigned long *bus_base,
++ unsigned long *bus_offset, unsigned long *bus_size)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ *bus_size = 0;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
++ return -EINVAL;
++
++ if (ttm_mem_reg_is_pci(bdev, mem)) {
++ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
++ *bus_size = mem->num_pages << PAGE_SHIFT;
++ *bus_base = man->io_offset;
++ }
++
++ return 0;
++}
++
++/**
++ * \c Kill all user-space virtual mappings of this buffer object.
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
++ */
++
++void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ loff_t offset = (loff_t) bo->addr_space_offset;
++ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
++
++ if (!bdev->dev_mapping)
++ return;
++
++ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
++}
++
++static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
++ struct rb_node *parent = NULL;
++ struct ttm_buffer_object *cur_bo;
++ unsigned long offset = bo->vm_node->start;
++ unsigned long cur_offset;
++
++ while (*cur) {
++ parent = *cur;
++ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
++ cur_offset = cur_bo->vm_node->start;
++ if (offset < cur_offset)
++ cur = &parent->rb_left;
++ else if (offset > cur_offset)
++ cur = &parent->rb_right;
++ else
++ BUG();
++ }
++
++ rb_link_node(&bo->vm_rb, parent, cur);
++ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
++}
++
++/**
++ * ttm_bo_setup_vm:
++ *
++ * @bo: the buffer to allocate address space for
++ *
++ * Allocate address space in the drm device so that applications
++ * can mmap the buffer and access the contents. This only
++ * applies to ttm_bo_type_device objects as others are not
++ * placed in the drm device address space.
++ */
++
++static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret;
++
++retry_pre_get:
++ ret = drm_mm_pre_get(&bdev->addr_space_mm);
++ if (unlikely(ret != 0))
++ return ret;
++
++ write_lock(&bdev->vm_lock);
++ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
++ bo->mem.num_pages, 0, 0);
++
++ if (unlikely(bo->vm_node == NULL)) {
++ ret = -ENOMEM;
++ goto out_unlock;
++ }
++
++ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
++ bo->mem.num_pages, 0);
++
++ if (unlikely(bo->vm_node == NULL)) {
++ write_unlock(&bdev->vm_lock);
++ goto retry_pre_get;
++ }
++
++ ttm_bo_vm_insert_rb(bo);
++ write_unlock(&bdev->vm_lock);
++ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
++
++ return 0;
++out_unlock:
++ write_unlock(&bdev->vm_lock);
++ return ret;
++}
++
++int ttm_bo_wait(struct ttm_buffer_object *bo,
++ bool lazy, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++ void *sync_obj;
++ void *sync_obj_arg;
++ int ret = 0;
++
++ while (bo->sync_obj) {
++ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ goto out;
++ }
++ if (no_wait) {
++ ret = -EBUSY;
++ goto out;
++ }
++ sync_obj = driver->sync_obj_ref(bo->sync_obj);
++ sync_obj_arg = bo->sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
++ lazy, interruptible);
++
++ mutex_lock(&bo->mutex);
++ if (unlikely(ret != 0)) {
++ driver->sync_obj_unref(&sync_obj);
++ return ret;
++ }
++
++ if (bo->sync_obj == sync_obj) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++ driver->sync_obj_unref(&sync_obj);
++ }
++out:
++ return 0;
++}
++
++void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
++{
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++}
++
++int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
++ bool no_wait)
++{
++ int ret;
++
++ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
++ if (no_wait)
++ return -EBUSY;
++ else if (interruptible) {
++ ret = wait_event_interruptible
++ (bo->event_queue, atomic_read(&bo->reserved) == 0);
++ if (unlikely(ret != 0))
++ return -ERESTART;
++ } else {
++ wait_event(bo->event_queue,
++ atomic_read(&bo->reserved) == 0);
++ }
++ }
++ return 0;
++}
++
++int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
++{
++ int ret = 0;
++
++ /*
++ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
++ * makes sure the lru lists are updated.
++ */
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++ if (unlikely(ret != 0))
++ return ret;
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo, false, true, no_wait);
++ if (unlikely(ret != 0))
++ goto out_err0;
++ atomic_inc(&bo->cpu_writers);
++out_err0:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return ret;
++}
++
++void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
++{
++ if (atomic_dec_and_test(&bo->cpu_writers))
++ wake_up_all(&bo->event_queue);
++}
++
++/**
++ * A buffer object shrink method that tries to swap out the first
++ * buffer object on the bo_global::swap_lru list.
++ */
++
++static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
++{
++ struct ttm_bo_device *bdev =
++ container_of(shrink, struct ttm_bo_device, shrink);
++ struct ttm_buffer_object *bo;
++ int ret = -EBUSY;
++ int put_count;
++ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
++
++ spin_lock(&bdev->lru_lock);
++ while (ret == -EBUSY) {
++ if (unlikely(list_empty(&bdev->swap_lru))) {
++ spin_unlock(&bdev->lru_lock);
++ return -EBUSY;
++ }
++
++ bo = list_first_entry(&bdev->swap_lru,
++ struct ttm_buffer_object, swap);
++ kref_get(&bo->list_kref);
++
++ /**
++ * Reserve buffer. Since we unlock while sleeping, we need
++ * to re-check that nobody removed us from the swap-list while
++ * we slept.
++ */
++
++ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
++ if (unlikely(ret == -EBUSY)) {
++ spin_unlock(&bdev->lru_lock);
++ ttm_bo_wait_unreserved(bo, false);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++ }
++ }
++
++ BUG_ON(ret != 0);
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++
++ /**
++ * Wait for GPU, then move to system cached.
++ */
++
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (unlikely(ret != 0))
++ goto out;
++
++ if ((bo->mem.flags & swap_placement) != swap_placement) {
++ struct ttm_mem_reg evict_mem;
++
++ evict_mem = bo->mem;
++ evict_mem.mm_node = NULL;
++ evict_mem.proposed_flags =
++ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
++ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
++ evict_mem.mem_type = TTM_PL_SYSTEM;
++
++ ret = ttm_bo_handle_move_mem(bo,
++ &evict_mem,
++ true,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++
++ ttm_bo_unmap_virtual(bo);
++
++ /**
++ * Swap out. Buffer will be swapped in again as soon as
++ * anyone tries to access a ttm page.
++ */
++
++ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
++out:
++ mutex_unlock(&bo->mutex);
++
++ /**
++ *
++ * Unreserve without putting on LRU to avoid swapping out an
++ * already swapped buffer.
++ */
++
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ return ret;
++}
++
++void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
++{
++ while (ttm_bo_swapout(&bdev->shrink) == 0) {
++ /* Checkpatch doesn't like it */
++ /* adding something here */
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h
+new file mode 100644
+index 0000000..e336893
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h
+@@ -0,0 +1,573 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_BO_API_H_
++#define _TTM_BO_API_H_
++
++#include <drm/drm_hashtab.h>
++#include <linux/kref.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++#include <linux/mm.h>
++#include <linux/rbtree.h>
++
++struct ttm_bo_device;
++
++struct drm_mm_node;
++
++/**
++ * struct ttm_mem_reg
++ *
++ * @mm_node: Memory manager node.
++ * @size: Requested size of memory region.
++ * @num_pages: Actual size of memory region in pages.
++ * @page_alignment: Page alignment.
++ * @flags: Placement flags.
++ * @proposed_flags: Proposed placement flags.
++ *
++ * Structure indicating the placement and space resources used by a
++ * buffer object.
++ */
++
++struct ttm_mem_reg {
++ struct drm_mm_node *mm_node;
++ unsigned long size;
++ unsigned long num_pages;
++ uint32_t page_alignment;
++ uint32_t mem_type;
++ uint32_t flags;
++ uint32_t proposed_flags;
++};
++
++/**
++ * enum ttm_bo_type
++ *
++ * @ttm_bo_type_device: These are 'normal' buffers that can
++ * be mmapped by user space. Each of these bos occupy a slot in the
++ * device address space, that can be used for normal vm operations.
++ *
++ * @ttm_bo_type_user: These are user-space memory areas that are made
++ * available to the GPU by mapping the buffer pages into the GPU aperture
++ * space. These buffers cannot be mmaped from the device address space.
++ *
++ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
++ * but they cannot be accessed from user-space. For kernel-only use.
++ */
++
++enum ttm_bo_type {
++ ttm_bo_type_device,
++ ttm_bo_type_user,
++ ttm_bo_type_kernel
++};
++
++struct ttm_tt;
++
++/**
++ * struct ttm_buffer_object
++ *
++ * @bdev: Pointer to the buffer object device structure.
++ * @kref: Reference count of this buffer object. When this refcount reaches
++ * zero, the object is put on the delayed delete list.
++ * @list_kref: List reference count of this buffer object. This member is
++ * used to avoid destruction while the buffer object is still on a list.
++ * Lru lists may keep one refcount, the delayed delete list, and kref != 0
++ * keeps one refcount. When this refcount reaches zero,
++ * the object is destroyed.
++ * @proposed_flags: Proposed placement for the buffer. Changed only by the
++ * creator prior to validation as opposed to bo->mem.proposed_flags which is
++ * changed by the implementation prior to a buffer move if it wants to outsmart
++ * the buffer creator / user. This latter happens, for example, at eviction.
++ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
++ * buffers.
++ * @type: The bo type.
++ * @offset: The current GPU offset, which can have different meanings
++ * depending on the memory type. For SYSTEM type memory, it should be 0.
++ * @mem: structure describing current placement.
++ * @val_seq: Sequence of the validation holding the @reserved lock.
++ * Used to avoid starvation when many processes compete to validate the
++ * buffer. This member is protected by the bo_device::lru_lock.
++ * @seq_valid: The value of @val_seq is valid. This value is protected by
++ * the bo_device::lru_lock.
++ * @lru: List head for the lru list.
++ * @ddestroy: List head for the delayed destroy list.
++ * @swap: List head for swap LRU list.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistent shmem object.
++ * @destroy: Destruction function. If NULL, kfree is used.
++ * @sync_obj_arg: Opaque argument to synchronization object function.
++ * @sync_obj: Pointer to a synchronization object.
++ * @priv_flags: Flags describing buffer object internal state.
++ * @event_queue: Queue for processes waiting on buffer object status change.
++ * @mutex: Lock protecting all members with the exception of constant members
++ * and list heads. We should really use a spinlock here.
++ * @num_pages: Actual number of pages.
++ * @ttm: TTM structure holding system pages.
++ * @vm_hash: Hash item for fast address space lookup. Need to change to a
++ * rb-tree node.
++ * @vm_node: Address space manager node.
++ * @addr_space_offset: Address space offset.
++ * @cpu_writes: For synchronization. Number of cpu writers.
++ * @reserved: Deadlock-free lock used for synchronization state transitions.
++ * @acc_size: Accounted size for this object.
++ *
++ * Base class for TTM buffer object, that deals with data placement and CPU
++ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
++ * the driver can usually use the placement offset @offset directly as the
++ * GPU virtual address. For drivers implementing multiple
++ * GPU memory manager contexts, the driver should manage the address space
++ * in these contexts separately and use these objects to get the correct
++ * placement and caching for these GPU maps. This makes it possible to use
++ * these objects for even quite elaborate memory management schemes.
++ * The destroy member, the API visibility of this object makes it possible
++ * to derive driver specific types.
++ */
++
++struct ttm_buffer_object {
++ struct ttm_bo_device *bdev;
++ struct kref kref;
++ struct kref list_kref;
++
++ /*
++ * If there is a possibility that the usage variable is zero,
++ * then dev->struct_mutex should be locked before incrementing it.
++ */
++
++ uint32_t proposed_flags;
++ unsigned long buffer_start;
++ enum ttm_bo_type type;
++ unsigned long offset;
++ struct ttm_mem_reg mem;
++ uint32_t val_seq;
++ bool seq_valid;
++
++ struct list_head lru;
++ struct list_head ddestroy;
++ struct list_head swap;
++
++ struct file *persistant_swap_storage;
++
++ void (*destroy) (struct ttm_buffer_object *);
++
++ void *sync_obj_arg;
++ void *sync_obj;
++
++ uint32_t priv_flags;
++ wait_queue_head_t event_queue;
++ struct mutex mutex;
++ unsigned long num_pages;
++
++ struct ttm_tt *ttm;
++ struct rb_node vm_rb;
++ struct drm_mm_node *vm_node;
++ uint64_t addr_space_offset;
++
++ atomic_t cpu_writers;
++ atomic_t reserved;
++
++ size_t acc_size;
++};
++
++/**
++ * struct ttm_bo_kmap_obj
++ *
++ * @virtual: The current kernel virtual address.
++ * @page: The page when kmap'ing a single page.
++ * @bo_kmap_type: Type of bo_kmap.
++ *
++ * Object describing a kernel mapping. Since a TTM bo may be located
++ * in various memory types with various caching policies, the
++ * mapping can either be an ioremap, a vmap, a kmap or part of a
++ * premapped region.
++ */
++
++struct ttm_bo_kmap_obj {
++ void *virtual;
++ struct page *page;
++ enum {
++ ttm_bo_map_iomap,
++ ttm_bo_map_vmap,
++ ttm_bo_map_kmap,
++ ttm_bo_map_premapped,
++ } bo_kmap_type;
++};
++
++/**
++ * ttm_bo_reference - reference a struct ttm_buffer_object
++ *
++ * @bo: The buffer object.
++ *
++ * Returns a refcounted pointer to a buffer object.
++ */
++
++static inline struct ttm_buffer_object *ttm_bo_reference(
++ struct ttm_buffer_object *bo)
++{
++ kref_get(&bo->kref);
++ return bo;
++}
++
++/**
++ * ttm_bo_wait - wait for buffer idle.
++ *
++ * @bo: The buffer object.
++ * @interruptible: Use interruptible wait.
++ * @no_wait: Return immediately if buffer is busy.
++ *
++ * This function must be called with the bo::mutex held, and makes
++ * sure any previous rendering to the buffer is completed.
++ * Note: It might be necessary to block validations before the
++ * wait by reserving the buffer.
++ * Returns -EBUSY if no_wait is true and the buffer is busy.
++ * Returns -ERESTART if interrupted by a signal.
++ */
++extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_buffer_object_validate
++ *
++ * @bo: The buffer object.
++ * @interruptible: Sleep interruptible if sleeping.
++ * @no_wait: Return immediately if the buffer is busy.
++ *
++ * Changes placement and caching policy of the buffer object
++ * according to bo::proposed_flags.
++ * Returns
++ * -EINVAL on invalid proposed_flags.
++ * -ENOMEM on out-of-memory condition.
++ * -EBUSY if no_wait is true and buffer busy.
++ * -ERESTART if interrupted by a signal.
++ */
++extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_bo_unref
++ *
++ * @bo: The buffer object.
++ *
++ * Unreference and clear a pointer to a buffer object.
++ */
++extern void ttm_bo_unref(struct ttm_buffer_object **bo);
++
++/**
++ * ttm_bo_synccpu_write_grab
++ *
++ * @bo: The buffer object:
++ * @no_wait: Return immediately if buffer is busy.
++ *
++ * Synchronizes a buffer object for CPU RW access. This means
++ * blocking command submission that affects the buffer and
++ * waiting for buffer idle. This lock is recursive.
++ * Returns
++ * -EBUSY if the buffer is busy and no_wait is true.
++ * -ERESTART if interrupted by a signal.
++ */
++
++extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo,
++ bool no_wait);
++/**
++ * ttm_bo_synccpu_write_release:
++ *
++ * @bo : The buffer object.
++ *
++ * Releases a synccpu lock.
++ */
++extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_buffer_object_init
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo: Pointer to a ttm_buffer_object to be initialized.
++ * @size: Requested size of buffer object.
++ * @type: Requested type of buffer object.
++ * @flags: Initial placement flags.
++ * @page_alignment: Data alignment in pages.
++ * @buffer_start: Virtual address of user space data backing a
++ * user buffer object.
++ * @interruptible: If needing to sleep to wait for GPU resources,
++ * sleep interruptible.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistent shmem object. Typically, this would
++ * point to the shmem object backing a GEM object if TTM is used to back a
++ * GEM user interface.
++ * @acc_size: Accounted size for this object.
++ * @destroy: Destroy function. Use NULL for kfree().
++ *
++ * This function initializes a pre-allocated struct ttm_buffer_object.
++ * As this object may be part of a larger structure, this function,
++ * together with the @destroy function,
++ * enables driver-specific objects derived from a ttm_buffer_object.
++ * On successful return, the object kref and list_kref are set to 1.
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid placement flags.
++ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
++ */
++
++extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
++ struct ttm_buffer_object *bo,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++                                  bool interruptible,
++ struct file *persistant_swap_storage,
++ size_t acc_size,
++ void (*destroy) (struct ttm_buffer_object *));
++/**
++ * ttm_buffer_object_create
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo: Pointer to a ttm_buffer_object to be initialized.
++ * @size: Requested size of buffer object.
++ * @type: Requested type of buffer object.
++ * @flags: Initial placement flags.
++ * @page_alignment: Data alignment in pages.
++ * @buffer_start: Virtual address of user space data backing a
++ * user buffer object.
++ * @interruptible: If needing to sleep while waiting for GPU resources,
++ * sleep interruptible.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistent shmem object. Typically, this would
++ * point to the shmem object backing a GEM object if TTM is used to back a
++ * GEM user interface.
++ * @p_bo: On successful completion *p_bo points to the created object.
++ *
++ * This function allocates a ttm_buffer_object, and then calls
++ * ttm_buffer_object_init on that object.
++ * The destroy function is set to kfree().
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid placement flags.
++ * -ERESTART: Interrupted by signal while waiting for resources.
++ */
++
++extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ struct ttm_buffer_object **p_bo);
++
++/**
++ * ttm_bo_check_placement
++ *
++ * @bo: the buffer object.
++ * @set_flags: placement flags to set.
++ * @clr_flags: placement flags to clear.
++ *
++ * Performs minimal validity checking on an intended change of
++ * placement flags.
++ * Returns
++ * -EINVAL: Intended change is invalid or not allowed.
++ */
++
++extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
++ uint32_t set_flags, uint32_t clr_flags);
++
++/**
++ * ttm_bo_init_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ * @p_offset: offset for managed area in pages.
++ * @p_size: size of the managed area in pages.
++ *
++ * Initialize a manager for a given memory type.
++ * Note: if part of driver firstopen, it must be protected from a
++ * potentially racing lastclose.
++ * Returns:
++ * -EINVAL: invalid size or memory type.
++ * -ENOMEM: Not enough memory.
++ * May also return driver-specified errors.
++ */
++
++extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
++ unsigned long p_offset, unsigned long p_size);
++/**
++ * ttm_bo_clean_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ *
++ * Take down a manager for a given memory type after first walking
++ * the LRU list to evict any buffers left alive.
++ *
++ * Normally, this function is part of lastclose() or unload(), and at that
++ * point there shouldn't be any buffers left created by user-space, since
++ * they should've been removed by the file descriptor release() method.
++ * However, before this function is run, make sure to signal all sync objects,
++ * and verify that the delayed delete queue is empty. The driver must also
++ * make sure that there are no NO_EVICT buffers present in this memory type
++ * when the call is made.
++ *
++ * If this function is part of a VT switch, the caller must make sure that
++ * there are no applications currently validating buffers before this
++ * function is called. The caller can do that by first taking the
++ * struct ttm_bo_device::ttm_lock in write mode.
++ *
++ * Returns:
++ * -EINVAL: invalid or uninitialized memory type.
++ * -EBUSY: There are still buffers left in this memory type.
++ */
++
++extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
++
++/**
++ * ttm_bo_evict_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ *
++ * Evicts all buffers on the lru list of the memory type.
++ * This is normally part of a VT switch or an
++ * out-of-memory-space-due-to-fragmentation handler.
++ * The caller must make sure that there are no other processes
++ * currently validating buffers, and can do that by taking the
++ * struct ttm_bo_device::ttm_lock in write mode.
++ *
++ * Returns:
++ * -EINVAL: Invalid or uninitialized memory type.
++ * -ERESTART: The call was interrupted by a signal while waiting to
++ * evict a buffer.
++ */
++
++extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
++
++/**
++ * ttm_kmap_obj_virtual
++ *
++ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
++ * @is_iomem: Pointer to an integer that on return indicates 1 if the
++ * virtual map is io memory, 0 if normal memory.
++ *
++ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
++ * If *is_iomem is 1 on return, the virtual address points to an io memory area,
++ * that should strictly be accessed by the iowriteXX() and similar functions.
++ */
++
++static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
++ bool *is_iomem)
++{
++ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
++ map->bo_kmap_type == ttm_bo_map_premapped);
++ return map->virtual;
++}
++
++/**
++ * ttm_bo_kmap
++ *
++ * @bo: The buffer object.
++ * @start_page: The first page to map.
++ * @num_pages: Number of pages to map.
++ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
++ *
++ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
++ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
++ * used to obtain a virtual address to the data.
++ *
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid range.
++ */
++
++extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
++ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
++
++/**
++ * ttm_bo_kunmap
++ *
++ * @map: Object describing the map to unmap.
++ *
++ * Unmaps a kernel map set up by ttm_bo_kmap.
++ */
++
++extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
++
++#if 0
++#endif
++
++/**
++ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
++ *
++ * @vma: vma as input from the fbdev mmap method.
++ * @bo: The bo backing the address space. The address space will
++ * have the same size as the bo, and start at offset 0.
++ *
++ * This function is intended to be called by the fbdev mmap method
++ * if the fbdev address space is to be backed by a bo.
++ */
++
++extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
++ struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_mmap - mmap out of the ttm device address space.
++ *
++ * @filp: filp as input from the mmap method.
++ * @vma: vma as input from the mmap method.
++ * @bdev: Pointer to the ttm_bo_device with the address space manager.
++ *
++ * This function is intended to be called by the device mmap method.
++ * if the device address space is to be backed by the bo manager.
++ */
++
++extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct ttm_bo_device *bdev);
++
++/**
++ * ttm_bo_io
++ *
++ * @bdev: Pointer to the struct ttm_bo_device.
++ * @filp: Pointer to the struct file attempting to read / write.
++ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
++ * @rbuf: User-space pointer to address of buffer to read into.
++ * Null on write.
++ * @count: Number of bytes to read / write.
++ * @f_pos: Pointer to current file position.
++ * @write: 1 for write, 0 for read.
++ *
++ * This function implements read / write into ttm buffer objects, and is
++ * intended to be called from the fops::read and fops::write method.
++ * Returns:
++ * See man (2) write, man(2) read. In particular, the function may
++ * return -EINTR if interrupted by a signal.
++ */
++
++extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
++ const char __user *wbuf, char __user *rbuf,
++ size_t count, loff_t *f_pos, bool write);
++
++extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h
+new file mode 100644
+index 0000000..4991256
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h
+@@ -0,0 +1,862 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_BO_DRIVER_H_
++#define _TTM_BO_DRIVER_H_
++
++#include "ttm_bo_api.h"
++#include "ttm_memory.h"
++#include <drm/drm_mm.h>
++#include "linux/workqueue.h"
++#include "linux/fs.h"
++#include "linux/spinlock.h"
++
++struct ttm_backend;
++
++struct ttm_backend_func {
++ /**
++ * struct ttm_backend_func member populate
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ * @num_pages: Number of pages to populate.
++ * @pages: Array of pointers to ttm pages.
++ * @dummy_read_page: Page to be used instead of NULL pages in the
++ * array @pages.
++ *
++ * Populate the backend with ttm pages. Depending on the backend,
++ * it may or may not copy the @pages array.
++ */
++ int (*populate) (struct ttm_backend *backend,
++ unsigned long num_pages, struct page **pages,
++ struct page *dummy_read_page);
++ /**
++ * struct ttm_backend_func member clear
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * This is an "unpopulate" function. Release all resources
++ * allocated with populate.
++ */
++ void (*clear) (struct ttm_backend *backend);
++
++ /**
++ * struct ttm_backend_func member bind
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
++ * memory type and location for binding.
++ *
++ * Bind the backend pages into the aperture in the location
++ * indicated by @bo_mem. This function should be able to handle
++ * differences between aperture- and system page sizes.
++ */
++ int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
++
++ /**
++ * struct ttm_backend_func member unbind
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * Unbind previously bound backend pages. This function should be
++ * able to handle differences between aperture- and system page sizes.
++ */
++ int (*unbind) (struct ttm_backend *backend);
++
++ /**
++ * struct ttm_backend_func member destroy
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * Destroy the backend.
++ */
++ void (*destroy) (struct ttm_backend *backend);
++};
++
++/**
++ * struct ttm_backend
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @flags: For driver use.
++ * @func: Pointer to a struct ttm_backend_func that describes
++ * the backend methods.
++ *
++ */
++
++struct ttm_backend {
++ struct ttm_bo_device *bdev;
++ uint32_t flags;
++ struct ttm_backend_func *func;
++};
++
++#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
++#define TTM_PAGE_FLAG_USER (1 << 1)
++#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
++#define TTM_PAGE_FLAG_WRITE (1 << 3)
++#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
++#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
++
++enum ttm_caching_state {
++ tt_uncached,
++ tt_wc,
++ tt_cached
++};
++
++/**
++ * struct ttm_tt
++ *
++ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
++ * pointer.
++ * @pages: Array of pages backing the data.
++ * @first_himem_page: Himem pages are put last in the page array, which
++ * enables us to run caching attribute changes on only the first part
++ * of the page array containing lomem pages. This is the index of the
++ * first himem page.
++ * @last_lomem_page: Index of the last lomem page in the page array.
++ * @num_pages: Number of pages in the page array.
++ * @bdev: Pointer to the current struct ttm_bo_device.
++ * @be: Pointer to the ttm backend.
++ * @tsk: The task for user ttm.
++ * @start: virtual address for user ttm.
++ * @swap_storage: Pointer to shmem struct file for swap storage.
++ * @caching_state: The current caching state of the pages.
++ * @state: The current binding state of the pages.
++ *
++ * This is a structure holding the pages, caching- and aperture binding
++ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
++ * memory.
++ */
++
++struct ttm_tt {
++ struct page *dummy_read_page;
++ struct page **pages;
++ long first_himem_page;
++ long last_lomem_page;
++ uint32_t page_flags;
++ unsigned long num_pages;
++ struct ttm_bo_device *bdev;
++ struct ttm_backend *be;
++ struct task_struct *tsk;
++ unsigned long start;
++ struct file *swap_storage;
++ enum ttm_caching_state caching_state;
++ enum {
++ tt_bound,
++ tt_unbound,
++ tt_unpopulated,
++ } state;
++};
++
++#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
++#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
++#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
++ before kernel access. */
++#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
++
++/**
++ * struct ttm_mem_type_manager
++ *
++ * @has_type: The memory type has been initialized.
++ * @use_type: The memory type is enabled.
++ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
++ * managed by this memory type.
++ * @gpu_offset: If used, the GPU offset of the first managed page of
++ * fixed memory or the first managed location in an aperture.
++ * @io_offset: The io_offset of the first managed page of IO memory or
++ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
++ * memory, this should be set to NULL.
++ * @io_size: The size of a managed IO region (fixed memory or aperture).
++ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
++ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
++ * @io_addr should be set to NULL.
++ * @size: Size of the managed region.
++ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
++ * as defined in ttm_placement_common.h
++ * @default_caching: The default caching policy used for a buffer object
++ * placed in this memory type if the user doesn't provide one.
++ * @manager: The range manager used for this memory type. FIXME: If the aperture
++ * has a page size different from the underlying system, the granularity
++ * of this manager should take care of this. But the range allocating code
++ * in ttm_bo.c needs to be modified for this.
++ * @lru: The lru list for this memory type.
++ *
++ * This structure is used to identify and manage memory types for a device.
++ * It's set up by the ttm_bo_driver::init_mem_type method.
++ */
++
++struct ttm_mem_type_manager {
++
++ /*
++ * No protection. Constant from start.
++ */
++
++ bool has_type;
++ bool use_type;
++ uint32_t flags;
++ unsigned long gpu_offset;
++ unsigned long io_offset;
++ unsigned long io_size;
++ void *io_addr;
++ uint64_t size;
++ uint32_t available_caching;
++ uint32_t default_caching;
++
++ /*
++ * Protected by the bdev->lru_lock.
++ * TODO: Consider one lru_lock per ttm_mem_type_manager.
++ * Plays ill with list removal, though.
++ */
++
++ struct drm_mm manager;
++ struct list_head lru;
++};
++
++/**
++ * struct ttm_bo_driver
++ *
++ * @mem_type_prio: Priority array of memory types to place a buffer object in
++ * if it fits without evicting buffers from any of these memory types.
++ * @mem_busy_prio: Priority array of memory types to place a buffer object in
++ * if it needs to evict buffers to make room.
++ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
++ * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
++ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
++ * @invalidate_caches: Callback to invalidate read caches when a buffer object
++ * has been evicted.
++ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
++ * structure.
++ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
++ * @move: Callback for a driver to hook in accelerated functions to move
++ * a buffer.
++ * If set to NULL, a potentially slow memcpy() move is used.
++ * @sync_obj_signaled: See ttm_fence_api.h
++ * @sync_obj_wait: See ttm_fence_api.h
++ * @sync_obj_flush: See ttm_fence_api.h
++ * @sync_obj_unref: See ttm_fence_api.h
++ * @sync_obj_ref: See ttm_fence_api.h
++ */
++
++struct ttm_bo_driver {
++ const uint32_t *mem_type_prio;
++ const uint32_t *mem_busy_prio;
++ uint32_t num_mem_type_prio;
++ uint32_t num_mem_busy_prio;
++
++ /**
++ * struct ttm_bo_driver member create_ttm_backend_entry
++ *
++ * @bdev: The buffer object device.
++ *
++ * Create a driver specific struct ttm_backend.
++ */
++
++ struct ttm_backend *(*create_ttm_backend_entry)
++ (struct ttm_bo_device *bdev);
++
++ /**
++ * struct ttm_bo_driver member invalidate_caches
++ *
++ * @bdev: the buffer object device.
++ * @flags: new placement of the rebound buffer object.
++ *
++ * A previously evicted buffer has been rebound in a
++ * potentially new location. Tell the driver that it might
++ * consider invalidating read (texture) caches on the next command
++ * submission as a consequence.
++ */
++
++ int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
++ int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
++ struct ttm_mem_type_manager *man);
++ /**
++ * struct ttm_bo_driver member evict_flags:
++ *
++ * @bo: the buffer object to be evicted
++ *
++ * Return the bo flags for a buffer which is not mapped to the hardware.
++ * These will be placed in proposed_flags so that when the move is
++ * finished, they'll end up in bo->mem.flags
++ */
++
++ uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
++ /**
++ * struct ttm_bo_driver member move:
++ *
++ * @bo: the buffer to move
++ * @evict: whether this motion is evicting the buffer from
++ * the graphics address space
++ * @interruptible: Use interruptible sleeps if possible when sleeping.
++ * @no_wait: whether this should give up and return -EBUSY
++ * if this move would require sleeping
++ * @new_mem: the new memory region receiving the buffer
++ *
++ * Move a buffer between two memory regions.
++ */
++ int (*move) (struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait, struct ttm_mem_reg *new_mem);
++
++ /**
++ * struct ttm_bo_driver_member verify_access
++ *
++ * @bo: Pointer to a buffer object.
++ * @filp: Pointer to a struct file trying to access the object.
++ *
++ * Called from the map / write / read methods to verify that the
++ * caller is permitted to access the buffer object.
++ * This member may be set to NULL, which will refuse this kind of
++ * access for all buffer objects.
++ * This function should return 0 if access is granted, -EPERM otherwise.
++ */
++ int (*verify_access) (struct ttm_buffer_object *bo,
++ struct file *filp);
++
++ /**
++ * In case a driver writer dislikes the TTM fence objects,
++ * the driver writer can replace those with sync objects of
++ * his / her own. If it turns out that no driver writer is
++ * using these. I suggest we remove these hooks and plug in
++ * fences directly. The bo driver needs the following functionality:
++ * See the corresponding functions in the fence object API
++ * documentation.
++ */
++
++ bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
++ int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible);
++ int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
++ void (*sync_obj_unref) (void **sync_obj);
++ void *(*sync_obj_ref) (void *sync_obj);
++};
++
++#define TTM_NUM_MEM_TYPES 11
++
++#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0) /* Buffer object is evicted. */
++#define TTM_BO_PRIV_FLAG_MOVING (1 << 1) /* Buffer object is moving
++ and needs idling before
++ CPU mapping */
++/**
++ * struct ttm_bo_device - Buffer object driver device-specific data.
++ *
++ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
++ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
++ * @count: Current number of buffer object.
++ * @pages: Current number of pinned pages.
++ * @dummy_read_page: Pointer to a dummy page used for mapping requests
++ * of unpopulated pages.
++ * @shrink: A shrink callback object used for buffer object swap.
++ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
++ * used by a buffer object. This is excluding page arrays and backing pages.
++ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
++ * @man: An array of mem_type_managers.
++ * @addr_space_mm: Range manager for the device address space.
++ * lru_lock: Spinlock that protects the buffer+device lru lists and
++ * ddestroy lists.
++ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
++ * If a GPU lockup has been detected, this is forced to 0.
++ * @dev_mapping: A pointer to the struct address_space representing the
++ * device address space.
++ * @wq: Work queue structure for the delayed delete workqueue.
++ *
++ */
++
++struct ttm_bo_device {
++
++ /*
++ * Constant after bo device init / atomic.
++ */
++
++ struct ttm_mem_global *mem_glob;
++ struct ttm_bo_driver *driver;
++ struct page *dummy_read_page;
++ struct ttm_mem_shrink shrink;
++
++ size_t ttm_bo_extra_size;
++ size_t ttm_bo_size;
++
++ rwlock_t vm_lock;
++ /*
++ * Protected by the vm lock.
++ */
++ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
++ struct rb_root addr_space_rb;
++ struct drm_mm addr_space_mm;
++
++ /*
++ * Might want to change this to one lock per manager.
++ */
++ spinlock_t lru_lock;
++ /*
++ * Protected by the lru lock.
++ */
++ struct list_head ddestroy;
++ struct list_head swap_lru;
++
++ /*
++ * Protected by load / firstopen / lastclose /unload sync.
++ */
++
++ bool nice_mode;
++ struct address_space *dev_mapping;
++
++ /*
++ * Internal protection.
++ */
++
++ struct delayed_work wq;
++};
++
++/**
++ * ttm_flag_masked
++ *
++ * @old: Pointer to the result and original value.
++ * @new: New value of bits.
++ * @mask: Mask of bits to change.
++ *
++ * Convenience function to change a number of bits identified by a mask.
++ */
++
++static inline uint32_t
++ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
++{
++ *old ^= (*old ^ new) & mask;
++ return *old;
++}
++
++/**
++ * ttm_tt_create
++ *
++ * @bdev: pointer to a struct ttm_bo_device:
++ * @size: Size of the data needed backing.
++ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
++ * @dummy_read_page: See struct ttm_bo_device.
++ *
++ * Create a struct ttm_tt to back data with system memory pages.
++ * No pages are actually allocated.
++ * Returns:
++ * NULL: Out of memory.
++ */
++extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ uint32_t page_flags,
++ struct page *dummy_read_page);
++
++/**
++ * ttm_tt_set_user:
++ *
++ * @ttm: The struct ttm_tt to populate.
++ * @tsk: A struct task_struct for which @start is a valid user-space address.
++ * @start: A valid user-space address.
++ * @num_pages: Size in pages of the user memory area.
++ *
++ * Populate a struct ttm_tt with a user-space memory area after first pinning
++ * the pages backing it.
++ * Returns:
++ * !0: Error.
++ */
++
++extern int ttm_tt_set_user(struct ttm_tt *ttm,
++ struct task_struct *tsk,
++ unsigned long start, unsigned long num_pages);
++
++/**
++ * ttm_ttm_bind:
++ *
++ * @ttm: The struct ttm_tt containing backing pages.
++ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
++ *
++ * Bind the pages of @ttm to an aperture location identified by @bo_mem
++ */
++extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
++
++/**
++ * ttm_ttm_destroy:
++ *
++ * @ttm: The struct ttm_tt.
++ *
++ * Unbind, unpopulate and destroy a struct ttm_tt.
++ */
++extern void ttm_tt_destroy(struct ttm_tt *ttm);
++
++/**
++ * ttm_ttm_unbind:
++ *
++ * @ttm: The struct ttm_tt.
++ *
++ * Unbind a struct ttm_tt.
++ */
++extern void ttm_tt_unbind(struct ttm_tt *ttm);
++
++/**
++ * ttm_ttm_destroy:
++ *
++ * @ttm: The struct ttm_tt.
++ * @index: Index of the desired page.
++ *
++ * Return a pointer to the struct page backing @ttm at page
++ * index @index. If the page is unpopulated, one will be allocated to
++ * populate that index.
++ *
++ * Returns:
++ * NULL on OOM.
++ */
++extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
++
++/**
++ * ttm_tt_cache_flush:
++ *
++ * @pages: An array of pointers to struct page:s to flush.
++ * @num_pages: Number of pages to flush.
++ *
++ * Flush the data of the indicated pages from the cpu caches.
++ * This is used when changing caching attributes of the pages from
++ * cache-coherent.
++ */
++extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
++
++/**
++ * ttm_tt_set_placement_caching:
++ *
++ * @ttm A struct ttm_tt the backing pages of which will change caching policy.
++ * @placement: Flag indicating the desired caching policy.
++ *
++ * This function will change caching policy of any default kernel mappings of
++ * the pages backing @ttm. If changing from cached to uncached or
++ * write-combined, all CPU caches will first be flushed to make sure the
++ * data of the pages hit RAM. This function may be very costly as it involves
++ * global TLB and cache flushes and potential page splitting / combining.
++ */
++extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm,
++ uint32_t placement);
++extern int ttm_tt_swapout(struct ttm_tt *ttm,
++ struct file *persistant_swap_storage);
++
++/*
++ * ttm_bo.c
++ */
++
++/**
++ * ttm_mem_reg_is_pci
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @mem: A valid struct ttm_mem_reg.
++ *
++ * Returns true if the memory described by @mem is PCI memory,
++ * false otherwise.
++ */
++extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem);
++
++/**
++ * ttm_bo_mem_space
++ *
++ * @bo: Pointer to a struct ttm_buffer_object. the data of which
++ * we want to allocate space for.
++ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
++ * up.
++ * @interruptible: Sleep interruptible when sliping.
++ * @no_wait: Don't sleep waiting for space to become available.
++ *
++ * Allocate memory space for the buffer object pointed to by @bo, using
++ * the placement flags in @mem, potentially evicting other idle buffer objects.
++ * This function may sleep while waiting for space to become available.
++ * Returns:
++ * -EBUSY: No space available (only if no_wait == 1).
++ * -ENOMEM: Could not allocate memory for the buffer object, either due to
++ * fragmentation or concurrent allocators.
++ * -ERESTART: An interruptible sleep was interrupted by a signal.
++ */
++extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_bo_wait_for_cpu
++ *
++ * @bo: Pointer to a struct ttm_buffer_object.
++ * @no_wait: Don't sleep while waiting.
++ *
++ * Wait until a buffer object is no longer sync'ed for CPU access.
++ * Returns:
++ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
++ * -ERESTART: An interruptible sleep was interrupted by a signal.
++ */
++
++extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
++
++/**
++ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
++ *
++ * @bo Pointer to a struct ttm_buffer_object.
++ * @bus_base On return the base of the PCI region
++ * @bus_offset On return the byte offset into the PCI region
++ * @bus_size On return the byte size of the buffer object or zero if
++ * the buffer object memory is not accessible through a PCI region.
++ *
++ * Returns:
++ * -EINVAL if the buffer object is currently not mappable.
++ * 0 otherwise.
++ */
++
++extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ unsigned long *bus_base,
++ unsigned long *bus_offset,
++ unsigned long *bus_size);
++
++extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
++
++/**
++ * ttm_bo_device_init
++ *
++ * @bdev: A pointer to a struct ttm_bo_device to initialize.
++ * @mem_global: A pointer to an initialized struct ttm_mem_global.
++ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
++ * @file_page_offset: Offset into the device address space that is available
++ * for buffer data. This ensures compatibility with other users of the
++ * address space.
++ *
++ * Initializes a struct ttm_bo_device:
++ * Returns:
++ * !0: Failure.
++ */
++extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_bo_driver *driver,
++ uint64_t file_page_offset);
++
++/**
++ * ttm_bo_reserve:
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @interruptible: Sleep interruptible if waiting.
++ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
++ * @use_sequence: If @bo is already reserved, Only sleep waiting for
++ * it to become unreserved if @sequence < (@bo)->sequence.
++ *
++ * Locks a buffer object for validation. (Or prevents other processes from
++ * locking it for validation) and removes it from lru lists, while taking
++ * a number of measures to prevent deadlocks.
++ *
++ * Deadlocks may occur when two processes try to reserve multiple buffers in
++ * different order, either by will or as a result of a buffer being evicted
++ * to make room for a buffer already reserved. (Buffers are reserved before
++ * they are evicted). The following algorithm prevents such deadlocks from
++ * occuring:
++ * 1) Buffers are reserved with the lru spinlock held. Upon successful
++ * reservation they are removed from the lru list. This stops a reserved buffer
++ * from being evicted. However the lru spinlock is released between the time
++ * a buffer is selected for eviction and the time it is reserved.
++ * Therefore a check is made when a buffer is reserved for eviction, that it
++ * is still the first buffer in the lru list, before it is removed from the
++ * list. @check_lru == 1 forces this check. If it fails, the function returns
++ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
++ * the procedure.
++ * 2) Processes attempting to reserve multiple buffers other than for eviction,
++ * (typically execbuf), should first obtain a unique 32-bit
++ * validation sequence number,
++ * and call this function with @use_sequence == 1 and @sequence == the unique
++ * sequence number. If upon call of this function, the buffer object is already
++ * reserved, the validation sequence is checked against the validation
++ * sequence of the process currently reserving the buffer,
++ * and if the current validation sequence is greater than that of the process
++ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
++ * waiting for the buffer to become unreserved, after which it retries
++ * reserving. The caller should, when receiving an -EAGAIN error
++ * release all its buffer reservations, wait for @bo to become unreserved, and
++ * then rerun the validation with the same validation sequence. This procedure
++ * will always guarantee that the process with the lowest validation sequence
++ * will eventually succeed, preventing both deadlocks and starvation.
++ *
++ * Returns:
++ * -EAGAIN: The reservation may cause a deadlock. Release all buffer
++ * reservations, wait for @bo to become unreserved and try again.
++ * (only if use_sequence == 1).
++ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
++ * a signal. Release all buffer reservations and return to user-space.
++ */
++extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence);
++
++/**
++ * ttm_bo_unreserve
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Unreserve a previous reservation of @bo.
++ */
++extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_wait_unreserved
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Wait for a struct ttm_buffer_object to become unreserved.
++ * This is typically used in the execbuf code to relax cpu-usage when
++ * a potential deadlock condition backoff.
++ */
++extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
++ bool interruptible);
++
++/**
++ * ttm_bo_block_reservation
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @interruptible: Use interruptible sleep when waiting.
++ * @no_wait: Don't sleep, but rather return -EBUSY.
++ *
++ * Block reservation for validation by simply reserving the buffer.
++ * This is intended for single buffer use only without eviction,
++ * and thus needs no deadlock protection.
++ *
++ * Returns:
++ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
++ * -ERESTART: If interruptible == 1 and the process received a
++ * signal while sleeping.
++ */
++extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait);
++
++/**
++ * ttm_bo_unblock_reservation
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Unblocks reservation leaving lru lists untouched.
++ */
++extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
++
++/*
++ * ttm_bo_util.c
++ */
++
++/**
++ * ttm_bo_move_ttm
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @evict: 1: This is an eviction. Don't try to pipeline.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Optimized move function for a buffer object with both old and
++ * new placement backed by a TTM. The function will, if successful,
++ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
++ * and update the (@bo)->mem placement flags. If unsuccessful, the old
++ * data remains untouched, and it's up to the caller to free the
++ * memory space indicated by @new_mem.
++ * Returns:
++ * !0: Failure.
++ */
++
++extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
++ bool evict,
++ bool no_wait,
++ struct ttm_mem_reg *new_mem);
++
++/**
++ * ttm_bo_move_memcpy
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @evict: 1: This is an eviction. Don't try to pipeline.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Fallback move function for a mappable buffer object in mappable memory.
++ * The function will, if successful,
++ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
++ * and update the (@bo)->mem placement flags. If unsuccessful, the old
++ * data remains untouched, and it's up to the caller to free the
++ * memory space indicated by @new_mem.
++ * Returns:
++ * !0: Failure.
++ */
++
++extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
++ bool evict,
++ bool no_wait,
++ struct ttm_mem_reg *new_mem);
++
++/**
++ * ttm_bo_free_old_node
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Utility function to free an old placement after a successful move.
++ */
++extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_move_accel_cleanup.
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @sync_obj: A sync object that signals when moving is complete.
++ * @sync_obj_arg: An argument to pass to the sync object idle / wait
++ * functions.
++ * @evict: This is an evict move. Don't return until the buffer is idle.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Accelerated move function to be called when an accelerated move
++ * has been scheduled. The function will create a new temporary buffer object
++ * representing the old placement, and put the sync object on both buffer
++ * objects. After that the newly created buffer object is unref'd to be
++ * destroyed when the move is complete. This will help pipeline
++ * buffer moves.
++ */
++
++extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
++ void *sync_obj,
++ void *sync_obj_arg,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem);
++/**
++ * ttm_io_prot
++ *
++ * @c_state: Caching state.
++ * @tmp: Page protection flag for a normal, cached mapping.
++ *
++ * Utility function that returns the pgprot_t that should be used for
++ * setting up a PTE with the caching model indicated by @c_state.
++ */
++extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
++
++#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
++#define TTM_HAS_AGP
++#include <linux/agp_backend.h>
++
++/**
++ * ttm_agp_backend_init
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @bridge: The agp bridge this device is sitting on.
++ *
++ * Create a TTM backend that uses the indicated AGP bridge as an aperture
++ * for TT memory. This function uses the linux agpgart interface to
++ * bind and unbind memory backing a ttm_tt.
++ */
++extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
++ struct agp_bridge_data *bridge);
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c
+new file mode 100644
+index 0000000..ce8eaed
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c
+@@ -0,0 +1,546 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include "ttm_pat_compat.h"
++#include <linux/io.h>
++#include <linux/highmem.h>
++#include <linux/wait.h>
++#include <linux/version.h>
++
++void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ if (old_mem->mm_node) {
++ spin_lock(&bo->bdev->lru_lock);
++ drm_mm_put_block(old_mem->mm_node);
++ spin_unlock(&bo->bdev->lru_lock);
++ }
++ old_mem->mm_node = NULL;
++}
++
++int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ int ret;
++
++ if (old_mem->mem_type != TTM_PL_SYSTEM) {
++ ttm_tt_unbind(ttm);
++ ttm_bo_free_old_node(bo);
++ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
++ TTM_PL_MASK_MEM);
++ old_mem->mem_type = TTM_PL_SYSTEM;
++ save_flags = old_mem->flags;
++ }
++
++ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (new_mem->mem_type != TTM_PL_SYSTEM) {
++ ret = ttm_tt_bind(ttm, new_mem);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++ return 0;
++}
++
++int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
++ void **virtual)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ void *addr;
++
++ *virtual = NULL;
++ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
++ if (ret || bus_size == 0)
++ return ret;
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
++ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
++ else {
++ if (mem->flags & TTM_PL_FLAG_WC)
++ addr = ioremap_wc(bus_base + bus_offset, bus_size);
++ else
++ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
++ if (!addr)
++ return -ENOMEM;
++ }
++ *virtual = addr;
++ return 0;
++}
++
++void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
++ void *virtual)
++{
++ struct ttm_mem_type_manager *man;
++
++ man = &bdev->man[mem->mem_type];
++
++ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
++ iounmap(virtual);
++}
++
++static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
++{
++ uint32_t *dstP =
++ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
++ uint32_t *srcP =
++ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
++
++ int i;
++ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
++ iowrite32(ioread32(srcP++), dstP++);
++ return 0;
++}
++
++static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
++ unsigned long page)
++{
++ struct page *d = ttm_tt_get_page(ttm, page);
++ void *dst;
++
++ if (!d)
++ return -ENOMEM;
++
++ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
++ dst = kmap(d);
++ if (!dst)
++ return -ENOMEM;
++
++ memcpy_fromio(dst, src, PAGE_SIZE);
++ kunmap(d);
++ return 0;
++}
++
++static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
++ unsigned long page)
++{
++ struct page *s = ttm_tt_get_page(ttm, page);
++ void *src;
++
++ if (!s)
++ return -ENOMEM;
++
++ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
++ src = kmap(s);
++ if (!src)
++ return -ENOMEM;
++
++ memcpy_toio(dst, src, PAGE_SIZE);
++ kunmap(s);
++ return 0;
++}
++
++int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
++ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_mem_reg old_copy = *old_mem;
++ void *old_iomap;
++ void *new_iomap;
++ int ret;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ unsigned long i;
++ unsigned long page;
++ unsigned long add = 0;
++ int dir;
++
++ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
++ if (ret)
++ return ret;
++ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
++ if (ret)
++ goto out;
++
++ if (old_iomap == NULL && new_iomap == NULL)
++ goto out2;
++ if (old_iomap == NULL && ttm == NULL)
++ goto out2;
++
++ add = 0;
++ dir = 1;
++
++ if ((old_mem->mem_type == new_mem->mem_type) &&
++ (new_mem->mm_node->start <
++ old_mem->mm_node->start + old_mem->mm_node->size)) {
++ dir = -1;
++ add = new_mem->num_pages - 1;
++ }
++
++ for (i = 0; i < new_mem->num_pages; ++i) {
++ page = i * dir + add;
++ if (old_iomap == NULL)
++ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
++ else if (new_iomap == NULL)
++ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
++ else
++ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
++ if (ret)
++ goto out1;
++ }
++ mb();
++out2:
++ ttm_bo_free_old_node(bo);
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
++ ttm_tt_unbind(ttm);
++ ttm_tt_destroy(ttm);
++ bo->ttm = NULL;
++ }
++
++out1:
++ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
++out:
++ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
++ return ret;
++}
++
++/**
++ * ttm_buffer_object_transfer
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
++ * holding the data of @bo with the old placement.
++ *
++ * This is a utility function that may be called after an accelerated move
++ * has been scheduled. A new buffer object is created as a placeholder for
++ * the old data while it's being copied. When that buffer object is idle,
++ * it can be destroyed, releasing the space of the old placement.
++ * Returns:
++ * !0: Failure.
++ */
++
++static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
++ struct ttm_buffer_object **new_obj)
++{
++ struct ttm_buffer_object *fbo;
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
++ if (!fbo)
++ return -ENOMEM;
++
++ *fbo = *bo;
++ mutex_init(&fbo->mutex);
++ mutex_lock(&fbo->mutex);
++
++ init_waitqueue_head(&fbo->event_queue);
++ INIT_LIST_HEAD(&fbo->ddestroy);
++ INIT_LIST_HEAD(&fbo->lru);
++
++ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
++ if (fbo->mem.mm_node)
++ fbo->mem.mm_node->private = (void *)fbo;
++ kref_init(&fbo->list_kref);
++ kref_init(&fbo->kref);
++
++ mutex_unlock(&fbo->mutex);
++
++ *new_obj = fbo;
++ return 0;
++}
++
++pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
++{
++#if defined(__i386__) || defined(__x86_64__)
++ if (caching_flags & TTM_PL_FLAG_WC) {
++ tmp = pgprot_ttm_x86_wc(tmp);
++ } else if (boot_cpu_data.x86 > 3 &&
++ (caching_flags & TTM_PL_FLAG_UNCACHED)) {
++ tmp = pgprot_noncached(tmp);
++ }
++#elif defined(__powerpc__)
++ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
++ pgprot_val(tmp) |= _PAGE_NO_CACHE;
++ if (caching_flags & TTM_PL_FLAG_UNCACHED)
++ pgprot_val(tmp) |= _PAGE_GUARDED;
++ }
++#endif
++#if defined(__ia64__)
++ if (caching_flags & TTM_PL_FLAG_WC)
++ tmp = pgprot_writecombine(tmp);
++ else
++ tmp = pgprot_noncached(tmp);
++#endif
++#if defined(__sparc__)
++ if (!(caching_flags & TTM_PL_FLAG_CACHED))
++ tmp = pgprot_noncached(tmp);
++#endif
++ return tmp;
++}
++
++static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
++ unsigned long bus_base,
++ unsigned long bus_offset,
++ unsigned long bus_size,
++ struct ttm_bo_kmap_obj *map)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg *mem = &bo->mem;
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
++ map->bo_kmap_type = ttm_bo_map_premapped;
++ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
++ } else {
++ map->bo_kmap_type = ttm_bo_map_iomap;
++ if (mem->flags & TTM_PL_FLAG_WC)
++ map->virtual =
++ ioremap_wc(bus_base + bus_offset,
++ bus_size);
++ else
++ map->virtual =
++ ioremap_nocache(bus_base + bus_offset,
++ bus_size);
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
++ unsigned long start_page,
++ unsigned long num_pages,
++ struct ttm_bo_kmap_obj *map)
++{
++ struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
++ struct ttm_tt *ttm = bo->ttm;
++ struct page *d;
++ bool do_kmap = false;
++ int i;
++ BUG_ON(!ttm);
++ if (num_pages == 1) {
++ map->page = ttm_tt_get_page(ttm, start_page);
++ do_kmap = (!PageHighMem(map->page) ||
++ (mem->flags & TTM_PL_FLAG_CACHED));
++ }
++
++ if (do_kmap) {
++ /*
++ * We're mapping a single page, and the desired
++ * page protection is consistent with the bo.
++ */
++ map->bo_kmap_type = ttm_bo_map_kmap;
++ map->virtual = kmap(map->page);
++ } else {
++ /* Populate the part we're mapping; */
++ for (i = start_page; i < start_page + num_pages; ++i) {
++ d = ttm_tt_get_page(ttm, i);
++
++ if (!d)
++ return -ENOMEM;
++ }
++
++ /*
++ * We need to use vmap to get the desired page protection
++ * or to make the buffer object look contigous.
++ */
++ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
++ PAGE_KERNEL :
++ ttm_io_prot(mem->flags, PAGE_KERNEL);
++ map->bo_kmap_type = ttm_bo_map_vmap;
++ map->virtual = vmap(ttm->pages + start_page,
++ num_pages,
++ 0,
++ prot);
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++int ttm_bo_kmap(struct ttm_buffer_object *bo,
++ unsigned long start_page, unsigned long num_pages,
++ struct ttm_bo_kmap_obj *map)
++{
++ int ret;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ BUG_ON(!list_empty(&bo->swap));
++ map->virtual = NULL;
++
++ if (num_pages > bo->num_pages)
++ return -EINVAL;
++
++ if (start_page > bo->num_pages)
++ return -EINVAL;
++#if 0
++ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
++ return -EPERM;
++#endif
++ ret = ttm_bo_pci_offset(bo->bdev,
++ &bo->mem,
++ &bus_base,
++ &bus_offset,
++ &bus_size);
++ if (ret)
++ return ret;
++
++ if (bus_size == 0) {
++ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
++ } else {
++ bus_offset += start_page << PAGE_SHIFT;
++ bus_size = num_pages << PAGE_SHIFT;
++
++ return ttm_bo_ioremap(bo,
++ bus_base,
++ bus_offset,
++ bus_size, map);
++ }
++}
++
++void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
++{
++ if (!map->virtual)
++ return;
++ switch (map->bo_kmap_type) {
++ case ttm_bo_map_iomap:
++ iounmap(map->virtual);
++ break;
++ case ttm_bo_map_vmap:
++ vunmap(map->virtual);
++ break;
++ case ttm_bo_map_kmap:
++ kunmap(map->page);
++ break;
++ case ttm_bo_map_premapped:
++ break;
++ default:
++ BUG();
++ }
++ map->virtual = NULL;
++ map->page = NULL;
++}
++
++int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
++ unsigned long dst_offset,
++ unsigned long *pfn, pgprot_t *prot)
++{
++ struct ttm_mem_reg *mem = &bo->mem;
++ struct ttm_bo_device *bdev = bo->bdev;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ ret = ttm_bo_pci_offset(bdev,
++ mem,
++ &bus_base,
++ &bus_offset,
++ &bus_size);
++ if (ret)
++ return -EINVAL;
++ if (bus_size != 0)
++ *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
++ else
++ if (!bo->ttm)
++ return -EINVAL;
++ else
++ *pfn = page_to_pfn(ttm_tt_get_page(
++ bo->ttm,
++ dst_offset >> PAGE_SHIFT));
++
++ *prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
++ PAGE_KERNEL :
++ ttm_io_prot(mem->flags, PAGE_KERNEL);
++ return 0;
++}
++
++int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
++ void *sync_obj,
++ void *sync_obj_arg,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ int ret;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ struct ttm_buffer_object *old_obj;
++ if (bo->sync_obj)
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ bo->sync_obj_arg = sync_obj_arg;
++ if (evict) {
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (ret)
++ return ret;
++ ttm_bo_free_old_node(bo);
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
++ (bo->ttm != NULL)) {
++ ttm_tt_unbind(bo->ttm);
++ ttm_tt_destroy(bo->ttm);
++ bo->ttm = NULL;
++ }
++ } else {
++
++ /* This should help pipeline ordinary buffer moves.
++ *
++ * Hang old buffer memory on a new buffer object,
++ * and leave it to be released when the GPU
++ * operation has completed.
++ */
++ ret = ttm_buffer_object_transfer(bo, &old_obj);
++ if (ret)
++ return ret;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
++ old_obj->ttm = NULL;
++ else
++ bo->ttm = NULL;
++ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
++ ttm_bo_unreserve(old_obj);
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c
+new file mode 100644
+index 0000000..a8aae7e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c
+@@ -0,0 +1,429 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/mm.h>
++#include <linux/version.h>
++#include <linux/rbtree.h>
++#include <linux/uaccess.h>
++
++#define TTM_BO_VM_NUM_PREFAULT 16
++
++static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
++ unsigned long page_start,
++ unsigned long num_pages)
++{
++ struct rb_node *cur = bdev->addr_space_rb.rb_node;
++ unsigned long cur_offset;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *best_bo = NULL;
++
++ while (likely(cur != NULL)) {
++ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
++ cur_offset = bo->vm_node->start;
++ if (page_start >= cur_offset) {
++ cur = cur->rb_right;
++ best_bo = bo;
++ if (page_start == cur_offset)
++ break;
++ } else
++ cur = cur->rb_left;
++ }
++
++ if (unlikely(best_bo == NULL))
++ return NULL;
++
++ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
++ (page_start + num_pages)))
++ return NULL;
++
++ return best_bo;
++}
++
++static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct ttm_bo_device *bdev = bo->bdev;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long page_offset;
++ unsigned long page_last;
++ unsigned long pfn;
++ struct ttm_tt *ttm = NULL;
++ struct page *page;
++ int ret;
++ int i;
++ bool is_iomem;
++ unsigned long address = (unsigned long)vmf->virtual_address;
++ int retval = VM_FAULT_NOPAGE;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ return VM_FAULT_NOPAGE;
++
++ mutex_lock(&bo->mutex);
++
++ /*
++ * Wait for buffer data in transit, due to a pipelined
++ * move.
++ */
++
++ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
++ ret = ttm_bo_wait(bo, false, true, false);
++ if (unlikely(ret != 0)) {
++ retval = (ret != -ERESTART) ?
++ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
++ goto out_unlock;
++ }
++ }
++
++ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
++ &bus_size);
++ if (unlikely(ret != 0)) {
++ retval = VM_FAULT_SIGBUS;
++ goto out_unlock;
++ }
++
++ is_iomem = (bus_size != 0);
++
++ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++
++ if (unlikely(page_offset >= bo->num_pages)) {
++ retval = VM_FAULT_SIGBUS;
++ goto out_unlock;
++ }
++
++ /*
++ * Strictly, we're not allowed to modify vma->vm_page_prot here,
++ * since the mmap_sem is only held in read mode. However, we
++ * modify only the caching bits of vma->vm_page_prot and
++ * consider those bits protected by
++ * the bo->mutex, as we should be the only writers.
++ * There shouldn't really be any readers of these bits except
++ * within vm_insert_mixed()? fork?
++ *
++ * TODO: Add a list of vmas to the bo, and change the
++ * vma->vm_page_prot when the object changes caching policy, with
++ * the correct locks held.
++ */
++
++ if (is_iomem) {
++ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
++ vma->vm_page_prot);
++ } else {
++ ttm = bo->ttm;
++ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
++ vm_get_page_prot(vma->vm_flags) :
++ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
++ }
++
++ /*
++ * Speculatively prefault a number of pages. Only error on
++ * first page.
++ */
++
++ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
++
++ if (is_iomem)
++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
++ page_offset;
++ else {
++ page = ttm_tt_get_page(ttm, page_offset);
++ if (unlikely(!page && i == 0)) {
++ retval = VM_FAULT_OOM;
++ goto out_unlock;
++ } else if (unlikely(!page)) {
++ break;
++ }
++ pfn = page_to_pfn(page);
++ }
++
++ ret = vm_insert_mixed(vma, address, pfn);
++ /*
++ * Somebody beat us to this PTE or prefaulting to
++ * an already populated PTE, or prefaulting error.
++ */
++
++ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++ break;
++ else if (unlikely(ret != 0)) {
++ retval =
++ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
++ goto out_unlock;
++
++ }
++
++ address += PAGE_SIZE;
++ if (unlikely(++page_offset >= page_last))
++ break;
++ }
++
++out_unlock:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return retval;
++}
++
++static void ttm_bo_vm_open(struct vm_area_struct *vma)
++{
++ struct ttm_buffer_object *bo =
++ (struct ttm_buffer_object *)vma->vm_private_data;
++
++ (void)ttm_bo_reference(bo);
++}
++
++static void ttm_bo_vm_close(struct vm_area_struct *vma)
++{
++ struct ttm_buffer_object *bo =
++ (struct ttm_buffer_object *)vma->vm_private_data;
++
++ ttm_bo_unref(&bo);
++ vma->vm_private_data = NULL;
++}
++
++static struct vm_operations_struct ttm_bo_vm_ops = {
++ .fault = ttm_bo_vm_fault,
++ .open = ttm_bo_vm_open,
++ .close = ttm_bo_vm_close
++};
++
++int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct ttm_bo_device *bdev)
++{
++ struct ttm_bo_driver *driver;
++ struct ttm_buffer_object *bo;
++ int ret;
++
++ read_lock(&bdev->vm_lock);
++ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
++ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
++ if (likely(bo != NULL))
++ ttm_bo_reference(bo);
++ read_unlock(&bdev->vm_lock);
++
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR "Could not find buffer object to map.\n");
++ ret = -EINVAL;
++ goto out_unref;
++ }
++
++ driver = bo->bdev->driver;
++ if (unlikely(!driver->verify_access)) {
++ ret = -EPERM;
++ goto out_unref;
++ }
++ ret = driver->verify_access(bo, filp);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ vma->vm_ops = &ttm_bo_vm_ops;
++
++ /*
++ * Note: We're transferring the bo reference to
++ * vma->vm_private_data here.
++ */
++
++ vma->vm_private_data = bo;
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ return 0;
++out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
++{
++ if (vma->vm_pgoff != 0)
++ return -EACCES;
++
++ vma->vm_ops = &ttm_bo_vm_ops;
++ vma->vm_private_data = ttm_bo_reference(bo);
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ return 0;
++}
++
++ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
++ const char __user *wbuf, char __user *rbuf, size_t count,
++ loff_t *f_pos, bool write)
++{
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_driver *driver;
++ struct ttm_bo_kmap_obj map;
++ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
++ unsigned long kmap_offset;
++ unsigned long kmap_end;
++ unsigned long kmap_num;
++ size_t io_size;
++ unsigned int page_offset;
++ char *virtual;
++ int ret;
++ bool no_wait = false;
++ bool dummy;
++
++ read_lock(&bdev->vm_lock);
++ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
++ if (likely(bo != NULL))
++ ttm_bo_reference(bo);
++ read_unlock(&bdev->vm_lock);
++
++ if (unlikely(bo == NULL))
++ return -EFAULT;
++
++ driver = bo->bdev->driver;
++	ret = -EPERM;
++	if (unlikely(!driver->verify_access)) goto out_unref;
++
++ ret = driver->verify_access(bo, filp);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ kmap_offset = dev_offset - bo->vm_node->start;
++	if (unlikely(kmap_offset >= bo->num_pages)) {
++ ret = -EFBIG;
++ goto out_unref;
++ }
++
++ page_offset = *f_pos & ~PAGE_MASK;
++ io_size = bo->num_pages - kmap_offset;
++ io_size = (io_size << PAGE_SHIFT) - page_offset;
++ if (count < io_size)
++ io_size = count;
++
++ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
++ kmap_num = kmap_end - kmap_offset + 1;
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++
++ switch (ret) {
++ case 0:
++ break;
++ case -ERESTART:
++ ret = -EINTR;
++ goto out_unref;
++ case -EBUSY:
++ ret = -EAGAIN;
++ goto out_unref;
++ default:
++ goto out_unref;
++ }
++
++ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ virtual = ttm_kmap_obj_virtual(&map, &dummy);
++ virtual += page_offset;
++
++ if (write)
++ ret = copy_from_user(virtual, wbuf, io_size);
++ else
++ ret = copy_to_user(rbuf, virtual, io_size);
++
++ ttm_bo_kunmap(&map);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ if (unlikely(ret != 0))
++ return -EFBIG;
++
++ *f_pos += io_size;
++
++ return io_size;
++out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
++ char __user *rbuf, size_t count, loff_t *f_pos,
++ bool write)
++{
++ struct ttm_bo_kmap_obj map;
++ unsigned long kmap_offset;
++ unsigned long kmap_end;
++ unsigned long kmap_num;
++ size_t io_size;
++ unsigned int page_offset;
++ char *virtual;
++ int ret;
++ bool no_wait = false;
++ bool dummy;
++
++ kmap_offset = (*f_pos >> PAGE_SHIFT);
++	if (unlikely(kmap_offset >= bo->num_pages))
++ return -EFBIG;
++
++ page_offset = *f_pos & ~PAGE_MASK;
++ io_size = bo->num_pages - kmap_offset;
++ io_size = (io_size << PAGE_SHIFT) - page_offset;
++ if (count < io_size)
++ io_size = count;
++
++ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
++ kmap_num = kmap_end - kmap_offset + 1;
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++
++ switch (ret) {
++ case 0:
++ break;
++ case -ERESTART:
++ return -EINTR;
++ case -EBUSY:
++ return -EAGAIN;
++ default:
++ return ret;
++ }
++
++ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++ if (unlikely(ret != 0))
++ return ret;
++
++ virtual = ttm_kmap_obj_virtual(&map, &dummy);
++ virtual += page_offset;
++
++ if (write)
++ ret = copy_from_user(virtual, wbuf, io_size);
++ else
++ ret = copy_to_user(rbuf, virtual, io_size);
++
++ ttm_bo_kunmap(&map);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ *f_pos += io_size;
++
++ return io_size;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c
+new file mode 100644
+index 0000000..610e0e0
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c
+@@ -0,0 +1,108 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include "ttm_execbuf_util.h"
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++void ttm_eu_backoff_reservation(struct list_head *list)
++{
++ struct ttm_validate_buffer *entry;
++
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++ if (!entry->reserved)
++ continue;
++
++ entry->reserved = false;
++ ttm_bo_unreserve(bo);
++ }
++}
++
++/*
++ * Reserve buffers for validation.
++ *
++ * If a buffer in the list is marked for CPU access, we back off and
++ * wait for that buffer to become free for GPU access.
++ *
++ * If a buffer is reserved for another validation, the validator with
++ * the highest validation sequence backs off and waits for that buffer
++ * to become unreserved. This prevents deadlocks when validating multiple
++ * buffers in different orders.
++ */
++
++int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
++{
++ struct ttm_validate_buffer *entry;
++ int ret;
++
++retry:
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++
++ entry->reserved = false;
++ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
++ if (ret != 0) {
++ ttm_eu_backoff_reservation(list);
++ if (ret == -EAGAIN) {
++ ret = ttm_bo_wait_unreserved(bo, true);
++ if (unlikely(ret != 0))
++ return ret;
++ goto retry;
++ } else
++ return ret;
++ }
++
++ entry->reserved = true;
++ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
++ ttm_eu_backoff_reservation(list);
++ ret = ttm_bo_wait_cpu(bo, false);
++ if (ret)
++ return ret;
++ goto retry;
++ }
++ }
++ return 0;
++}
++
++void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
++{
++ struct ttm_validate_buffer *entry;
++
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++ void *old_sync_obj;
++
++ mutex_lock(&bo->mutex);
++ old_sync_obj = bo->sync_obj;
++ bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ bo->sync_obj_arg = entry->new_sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ entry->reserved = false;
++ if (old_sync_obj)
++ driver->sync_obj_unref(&old_sync_obj);
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h
+new file mode 100644
+index 0000000..0b88d08
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h
+@@ -0,0 +1,103 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_EXECBUF_UTIL_H_
++#define _TTM_EXECBUF_UTIL_H_
++
++#include "ttm_bo_api.h"
++#include "ttm_fence_api.h"
++#include <linux/list.h>
++
++/**
++ * struct ttm_validate_buffer
++ *
++ * @head: list head for thread-private list.
++ * @bo: refcounted buffer object pointer.
++ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
++ * adding a new sync object.
++ * @reserved: Indicates whether @bo has been reserved for validation.
++ */
++
++struct ttm_validate_buffer {
++ struct list_head head;
++ struct ttm_buffer_object *bo;
++ void *new_sync_obj_arg;
++ bool reserved;
++};
++
++/**
++ * function ttm_eu_backoff_reservation
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ *
++ * Undoes all buffer validation reservations for bos pointed to by
++ * the list entries.
++ */
++
++extern void ttm_eu_backoff_reservation(struct list_head *list);
++
++/**
++ * function ttm_eu_reserve_buffers
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ * @val_seq: A unique sequence number.
++ *
++ * Tries to reserve bos pointed to by the list entries for validation.
++ * If the function returns 0, all buffers are marked as "unfenced",
++ * taken off the lru lists and are not synced for write CPU usage.
++ *
++ * If the function detects a deadlock due to multiple threads trying to
++ * reserve the same buffers in reverse order, all threads except one will
++ * back off and retry. This function may sleep while waiting for
++ * CPU write reservations to be cleared, and for other threads to
++ * unreserve their buffers.
++ *
++ * This function may return -ERESTART or -EAGAIN if the calling process
++ * receives a signal while waiting. In that case, no buffers on the list
++ * will be reserved upon return.
++ *
++ * Buffers reserved by this function should be unreserved by
++ * a call to either ttm_eu_backoff_reservation() or
++ * ttm_eu_fence_buffer_objects() when command submission is complete or
++ * has failed.
++ */
++
++extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
++
++/**
++ * function ttm_eu_fence_buffer_objects.
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ * @sync_obj: The new sync object for the buffers.
++ *
++ * This function should be called when command submission is complete, and
++ * it will add a new sync object to bos pointed to by entries on @list.
++ * It also unreserves all buffers, putting them on lru lists.
++ *
++ */
++
++extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c
+new file mode 100644
+index 0000000..3f36ecc
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c
+@@ -0,0 +1,607 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_fence_api.h"
++#include "ttm_fence_driver.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++#include <drm/drmP.h>
++
++/*
++ * Simple implementation for now.
++ */
++
++static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++	printk(KERN_ERR "GPU lockup detected on engine %u "
++ "fence type 0x%08x\n",
++ (unsigned int)fence->fence_class, (unsigned int)mask);
++ /*
++ * Give engines some time to idle?
++ */
++
++ write_lock(&fc->lock);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, mask, -EBUSY);
++ write_unlock(&fc->lock);
++}
++
++/*
++ * Convenience function to be called by fence::wait methods that
++ * need polling.
++ */
++
++int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
++ bool interruptible, uint32_t mask)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ uint32_t count = 0;
++ int ret;
++ unsigned long end_jiffies = fence->timeout_jiffies;
++
++ DECLARE_WAITQUEUE(entry, current);
++ add_wait_queue(&fc->fence_queue, &entry);
++
++ ret = 0;
++
++ for (;;) {
++ __set_current_state((interruptible) ?
++ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
++ if (ttm_fence_object_signaled(fence, mask))
++ break;
++ if (time_after_eq(jiffies, end_jiffies)) {
++ if (driver->lockup)
++ driver->lockup(fence, mask);
++ else
++ ttm_fence_lockup(fence, mask);
++ continue;
++ }
++ if (lazy)
++ schedule_timeout(1);
++ else if ((++count & 0x0F) == 0) {
++ __set_current_state(TASK_RUNNING);
++ schedule();
++ __set_current_state((interruptible) ?
++ TASK_INTERRUPTIBLE :
++ TASK_UNINTERRUPTIBLE);
++ }
++ if (interruptible && signal_pending(current)) {
++ ret = -ERESTART;
++ break;
++ }
++ }
++ __set_current_state(TASK_RUNNING);
++ remove_wait_queue(&fc->fence_queue, &entry);
++ return ret;
++}
++
++/*
++ * Typically called by the IRQ handler.
++ */
++
++void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
++ uint32_t sequence, uint32_t type, uint32_t error)
++{
++ int wake = 0;
++ uint32_t diff;
++ uint32_t relevant_type;
++ uint32_t new_type;
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
++ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
++ struct list_head *head;
++ struct ttm_fence_object *fence, *next;
++ bool found = false;
++
++ if (list_empty(&fc->ring))
++ return;
++
++ list_for_each_entry(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & fc->sequence_mask;
++ if (diff > fc->wrap_diff) {
++ found = true;
++ break;
++ }
++ }
++
++ fc->waiting_types &= ~type;
++ head = (found) ? &fence->ring : &fc->ring;
++
++ list_for_each_entry_safe_reverse(fence, next, head, ring) {
++ if (&fence->ring == &fc->ring)
++ break;
++
++ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
++ (unsigned long)fence, fence->sequence,
++ fence->fence_type);
++
++ if (error) {
++ fence->info.error = error;
++ fence->info.signaled_types = fence->fence_type;
++ list_del_init(&fence->ring);
++ wake = 1;
++ break;
++ }
++
++ relevant_type = type & fence->fence_type;
++ new_type = (fence->info.signaled_types | relevant_type) ^
++ fence->info.signaled_types;
++
++ if (new_type) {
++ fence->info.signaled_types |= new_type;
++ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
++ (unsigned long)fence,
++ fence->info.signaled_types);
++
++ if (unlikely(driver->signaled))
++ driver->signaled(fence);
++
++ if (driver->needed_flush)
++ fc->pending_flush |=
++ driver->needed_flush(fence);
++
++ if (new_type & fence->waiting_types)
++ wake = 1;
++ }
++
++ fc->waiting_types |=
++ fence->waiting_types & ~fence->info.signaled_types;
++
++ if (!(fence->fence_type & ~fence->info.signaled_types)) {
++ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
++ (unsigned long)fence);
++ list_del_init(&fence->ring);
++ }
++ }
++
++ /*
++ * Reinstate lost waiting types.
++ */
++
++ if ((fc->waiting_types & type) != type) {
++ head = head->prev;
++ list_for_each_entry(fence, head, ring) {
++ if (&fence->ring == &fc->ring)
++ break;
++ diff =
++ (fc->highest_waiting_sequence -
++ fence->sequence) & fc->sequence_mask;
++ if (diff > fc->wrap_diff)
++ break;
++
++ fc->waiting_types |=
++ fence->waiting_types & ~fence->info.signaled_types;
++ }
++ }
++
++ if (wake)
++ wake_up_all(&fc->fence_queue);
++}
++
++static void ttm_fence_unring(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ list_del_init(&fence->ring);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
++{
++ unsigned long flags;
++ bool signaled;
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ mask &= fence->fence_type;
++ read_lock_irqsave(&fc->lock, flags);
++ signaled = (mask & fence->info.signaled_types) == mask;
++ read_unlock_irqrestore(&fc->lock, flags);
++ if (!signaled && driver->poll) {
++ write_lock_irqsave(&fc->lock, flags);
++ driver->poll(fence->fdev, fence->fence_class, mask);
++ signaled = (mask & fence->info.signaled_types) == mask;
++ write_unlock_irqrestore(&fc->lock, flags);
++ }
++ return signaled;
++}
++
++int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++ uint32_t saved_pending_flush;
++ uint32_t diff;
++ bool call_flush;
++
++ if (type & ~fence->fence_type) {
++ DRM_ERROR("Flush trying to extend fence type, "
++ "0x%x, 0x%x\n", type, fence->fence_type);
++ return -EINVAL;
++ }
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ fence->waiting_types |= type;
++ fc->waiting_types |= fence->waiting_types;
++ diff = (fence->sequence - fc->highest_waiting_sequence) &
++ fc->sequence_mask;
++
++ if (diff < fc->wrap_diff)
++ fc->highest_waiting_sequence = fence->sequence;
++
++ /*
++ * fence->waiting_types has changed. Determine whether
++ * we need to initiate some kind of flush as a result of this.
++ */
++
++ saved_pending_flush = fc->pending_flush;
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++
++ if (driver->poll)
++ driver->poll(fence->fdev, fence->fence_class,
++ fence->waiting_types);
++
++ call_flush = (fc->pending_flush != 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++ driver->flush(fence->fdev, fence->fence_class);
++
++ return 0;
++}
++
++/*
++ * Make sure old fence objects are signaled before their fence sequences are
++ * wrapped around and reused.
++ */
++
++void ttm_fence_flush_old(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t sequence)
++{
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
++ struct ttm_fence_object *fence;
++ unsigned long irq_flags;
++ const struct ttm_fence_driver *driver = fdev->driver;
++ bool call_flush;
++
++ uint32_t diff;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++
++ list_for_each_entry_reverse(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & fc->sequence_mask;
++ if (diff <= fc->flush_diff)
++ break;
++
++ fence->waiting_types = fence->fence_type;
++ fc->waiting_types |= fence->fence_type;
++
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++ }
++
++ if (driver->poll)
++ driver->poll(fdev, fence_class, fc->waiting_types);
++
++ call_flush = (fc->pending_flush != 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++		driver->flush(fdev, fence_class);
++
++ /*
++	 * FIXME: Should we implement a wait here for really old fences?
++ */
++
++}
++
++int ttm_fence_object_wait(struct ttm_fence_object *fence,
++ bool lazy, bool interruptible, uint32_t mask)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ int ret = 0;
++ unsigned long timeout;
++ unsigned long cur_jiffies;
++ unsigned long to_jiffies;
++
++ if (mask & ~fence->fence_type) {
++ DRM_ERROR("Wait trying to extend fence type"
++ " 0x%08x 0x%08x\n", mask, fence->fence_type);
++ BUG();
++ return -EINVAL;
++ }
++
++ if (driver->wait)
++ return driver->wait(fence, lazy, interruptible, mask);
++
++ ttm_fence_object_flush(fence, mask);
++retry:
++ if (!driver->has_irq ||
++ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
++
++ cur_jiffies = jiffies;
++ to_jiffies = fence->timeout_jiffies;
++
++ timeout = (time_after(to_jiffies, cur_jiffies)) ?
++ to_jiffies - cur_jiffies : 1;
++
++ if (interruptible)
++ ret = wait_event_interruptible_timeout
++ (fc->fence_queue,
++ ttm_fence_object_signaled(fence, mask), timeout);
++ else
++ ret = wait_event_timeout
++ (fc->fence_queue,
++ ttm_fence_object_signaled(fence, mask), timeout);
++
++ if (unlikely(ret == -ERESTARTSYS))
++ return -ERESTART;
++
++ if (unlikely(ret == 0)) {
++ if (driver->lockup)
++ driver->lockup(fence, mask);
++ else
++ ttm_fence_lockup(fence, mask);
++ goto retry;
++ }
++
++ return 0;
++ }
++
++ return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
++}
++
++int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
++ uint32_t fence_class, uint32_t type)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long flags;
++ uint32_t sequence;
++ unsigned long timeout;
++ int ret;
++
++ ttm_fence_unring(fence);
++ ret = driver->emit(fence->fdev,
++ fence_class, fence_flags, &sequence, &timeout);
++ if (ret)
++ return ret;
++
++ write_lock_irqsave(&fc->lock, flags);
++ fence->fence_class = fence_class;
++ fence->fence_type = type;
++ fence->waiting_types = 0;
++ fence->info.signaled_types = 0;
++ fence->info.error = 0;
++ fence->sequence = sequence;
++ fence->timeout_jiffies = timeout;
++ if (list_empty(&fc->ring))
++ fc->highest_waiting_sequence = sequence - 1;
++ list_add_tail(&fence->ring, &fc->ring);
++ fc->latest_queued_sequence = sequence;
++ write_unlock_irqrestore(&fc->lock, flags);
++ return 0;
++}
++
++int ttm_fence_object_init(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ void (*destroy) (struct ttm_fence_object *),
++ struct ttm_fence_object *fence)
++{
++ int ret = 0;
++
++ kref_init(&fence->kref);
++ fence->fence_class = fence_class;
++ fence->fence_type = type;
++ fence->info.signaled_types = 0;
++ fence->waiting_types = 0;
++ fence->sequence = 0;
++ fence->info.error = 0;
++ fence->fdev = fdev;
++ fence->destroy = destroy;
++ INIT_LIST_HEAD(&fence->ring);
++ atomic_inc(&fdev->count);
++
++ if (create_flags & TTM_FENCE_FLAG_EMIT) {
++ ret = ttm_fence_object_emit(fence, create_flags,
++ fence->fence_class, type);
++ }
++
++ return ret;
++}
++
++int ttm_fence_object_create(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ struct ttm_fence_object **c_fence)
++{
++ struct ttm_fence_object *fence;
++ int ret;
++
++ ret = ttm_mem_global_alloc(fdev->mem_glob,
++ sizeof(*fence),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Out of memory creating fence object\n");
++ return ret;
++ }
++
++ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
++ if (!fence) {
++ printk(KERN_ERR "Out of memory creating fence object\n");
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_fence_object_init(fdev, fence_class, type,
++ create_flags, NULL, fence);
++ if (ret) {
++ ttm_fence_object_unref(&fence);
++ return ret;
++ }
++ *c_fence = fence;
++
++ return 0;
++}
++
++static void ttm_fence_object_destroy(struct kref *kref)
++{
++ struct ttm_fence_object *fence =
++ container_of(kref, struct ttm_fence_object, kref);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ list_del_init(&fence->ring);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ atomic_dec(&fence->fdev->count);
++ if (fence->destroy)
++ fence->destroy(fence);
++ else {
++ ttm_mem_global_free(fence->fdev->mem_glob,
++ sizeof(*fence),
++ false);
++ kfree(fence);
++ }
++}
++
++void ttm_fence_device_release(struct ttm_fence_device *fdev)
++{
++ kfree(fdev->fence_class);
++}
++
++int
++ttm_fence_device_init(int num_classes,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_fence_device *fdev,
++ const struct ttm_fence_class_init *init,
++ bool replicate_init,
++ const struct ttm_fence_driver *driver)
++{
++ struct ttm_fence_class_manager *fc;
++ const struct ttm_fence_class_init *fci;
++ int i;
++
++ fdev->mem_glob = mem_glob;
++ fdev->fence_class = kzalloc(num_classes *
++ sizeof(*fdev->fence_class), GFP_KERNEL);
++
++ if (unlikely(!fdev->fence_class))
++ return -ENOMEM;
++
++ fdev->num_classes = num_classes;
++ atomic_set(&fdev->count, 0);
++ fdev->driver = driver;
++
++ for (i = 0; i < fdev->num_classes; ++i) {
++ fc = &fdev->fence_class[i];
++ fci = &init[(replicate_init) ? 0 : i];
++
++ fc->wrap_diff = fci->wrap_diff;
++ fc->flush_diff = fci->flush_diff;
++ fc->sequence_mask = fci->sequence_mask;
++
++ rwlock_init(&fc->lock);
++ INIT_LIST_HEAD(&fc->ring);
++ init_waitqueue_head(&fc->fence_queue);
++ }
++
++ return 0;
++}
++
++struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ struct ttm_fence_info tmp;
++ unsigned long irq_flags;
++
++ read_lock_irqsave(&fc->lock, irq_flags);
++ tmp = fence->info;
++ read_unlock_irqrestore(&fc->lock, irq_flags);
++
++ return tmp;
++}
++
++void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
++{
++ struct ttm_fence_object *fence = *p_fence;
++
++ *p_fence = NULL;
++ (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
++}
++
++/*
++ * Placement / BO sync object glue.
++ */
++
++bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_signaled(fence, fence_types);
++}
++
++int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
++}
++
++int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_flush(fence, fence_types);
++}
++
++void ttm_fence_sync_obj_unref(void **sync_obj)
++{
++ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
++}
++
++void *ttm_fence_sync_obj_ref(void *sync_obj)
++{
++ return (void *)
++ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h
+new file mode 100644
+index 0000000..d42904c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h
+@@ -0,0 +1,272 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_FENCE_API_H_
++#define _TTM_FENCE_API_H_
++
++#include <linux/list.h>
++#include <linux/kref.h>
++
++#define TTM_FENCE_FLAG_EMIT (1 << 0)
++#define TTM_FENCE_TYPE_EXE (1 << 0)
++
++struct ttm_fence_device;
++
++/**
++ * struct ttm_fence_info
++ *
++ * @fence_class: The fence class.
++ * @fence_type: Bitfield indicating types for this fence.
++ * @signaled_types: Bitfield indicating which types are signaled.
++ * @error: Last error reported from the device.
++ *
++ * Used as output from the ttm_fence_get_info
++ */
++
++struct ttm_fence_info {
++ uint32_t signaled_types;
++ uint32_t error;
++};
++
++/**
++ * struct ttm_fence_object
++ *
++ * @fdev: Pointer to the fence device struct.
++ * @kref: Holds the reference count of this fence object.
++ * @ring: List head used for the circular list of not-completely
++ * signaled fences.
++ * @info: Data for fast retrieval using the ttm_fence_get_info()
++ * function.
++ * @timeout_jiffies: Absolute jiffies value indicating when this fence
++ * object times out and, if waited on, calls ttm_fence_lockup
++ * to check for and resolve a GPU lockup.
++ * @sequence: Fence sequence number.
++ * @waiting_types: Types currently waited on.
++ * @destroy: Called to free the fence object, when its refcount has
++ * reached zero. If NULL, kfree is used.
++ *
++ * This struct is provided in the driver interface so that drivers can
++ * derive from it and create their own fence implementation. All members
++ * are private to the fence implementation and the fence driver callbacks.
++ * Otherwise a driver may access the derived object using container_of().
++ */
++
++struct ttm_fence_object {
++ struct ttm_fence_device *fdev;
++ struct kref kref;
++ uint32_t fence_class;
++ uint32_t fence_type;
++
++ /*
++ * The below fields are protected by the fence class
++ * manager spinlock.
++ */
++
++ struct list_head ring;
++ struct ttm_fence_info info;
++ unsigned long timeout_jiffies;
++ uint32_t sequence;
++ uint32_t waiting_types;
++ void (*destroy) (struct ttm_fence_object *);
++};
++
++/**
++ * ttm_fence_object_init
++ *
++ * @fdev: Pointer to a struct ttm_fence_device.
++ * @fence_class: Fence class for this fence.
++ * @type: Fence type for this fence.
++ * @create_flags: Flags indicating varios actions at init time. At this point
++ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
++ * the command stream.
++ * @destroy: Destroy function. If NULL, kfree() is used.
++ * @fence: The struct ttm_fence_object to initialize.
++ *
++ * Initialize a pre-allocated fence object. This function, together with the
++ * destroy function makes it possible to derive driver-specific fence objects.
++ */
++
++extern int
++ttm_fence_object_init(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ void (*destroy) (struct ttm_fence_object *fence),
++ struct ttm_fence_object *fence);
++
++/**
++ * ttm_fence_object_create
++ *
++ * @fdev: Pointer to a struct ttm_fence_device.
++ * @fence_class: Fence class for this fence.
++ * @type: Fence type for this fence.
++ * @create_flags: Flags indicating varios actions at init time. At this point
++ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
++ * the command stream.
++ * @c_fence: On successful termination, *(@c_fence) will point to the created
++ * fence object.
++ *
++ * Create and initialize a struct ttm_fence_object. The destroy function will
++ * be set to kfree().
++ */
++
++extern int
++ttm_fence_object_create(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ struct ttm_fence_object **c_fence);
++
++/**
++ * ttm_fence_object_wait
++ *
++ * @fence: The fence object to wait on.
++ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
++ * @interruptible: Sleep interruptible when waiting.
++ * @type_mask: Wait for the given type_mask to signal.
++ *
++ * Wait for a fence to signal the given type_mask. The function will
++ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
++ *
++ * Returns
++ * -ERESTART if interrupted by a signal.
++ * May return driver-specific error codes if timed-out.
++ */
++
++extern int
++ttm_fence_object_wait(struct ttm_fence_object *fence,
++ bool lazy, bool interruptible, uint32_t type_mask);
++
++/**
++ * ttm_fence_object_flush
++ *
++ * @fence: The fence object to flush.
++ * @flush_mask: Fence types to flush.
++ *
++ * Make sure that the given fence eventually signals the
++ * types indicated by @flush_mask. Note that this may or may not
++ * map to a CPU or GPU flush.
++ */
++
++extern int
++ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
++
++/**
++ * ttm_fence_get_info
++ *
++ * @fence: The fence object.
++ *
++ * Copy the info block from the fence while holding relevant locks.
++ */
++
++struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
++
++/**
++ * ttm_fence_object_ref
++ *
++ * @fence: The fence object.
++ *
++ * Return a ref-counted pointer to the fence object indicated by @fence.
++ */
++
++static inline struct ttm_fence_object *ttm_fence_object_ref(struct
++ ttm_fence_object
++ *fence)
++{
++ kref_get(&fence->kref);
++ return fence;
++}
++
++/**
++ * ttm_fence_object_unref
++ *
++ * @p_fence: Pointer to a ref-counted pinter to a struct ttm_fence_object.
++ *
++ * Unreference the fence object pointed to by *(@p_fence), clearing
++ * *(p_fence).
++ */
++
++extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
++
++/**
++ * ttm_fence_object_signaled
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ * @mask: Type mask to check whether signaled.
++ *
++ * This function checks (without waiting) whether the fence object
++ * pointed to by @fence has signaled the types indicated by @mask,
++ * and returns 1 if true, 0 if false. This function does NOT perform
++ * an implicit fence flush.
++ */
++
++extern bool
++ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
++
++/**
++ * ttm_fence_class
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ *
++ * Convenience function that returns the fence class of a
++ * struct ttm_fence_object.
++ */
++
++static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
++{
++ return fence->fence_class;
++}
++
++/**
++ * ttm_fence_types
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ *
++ * Convenience function that returns the fence types of a
++ * struct ttm_fence_object.
++ */
++
++static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
++{
++ return fence->fence_type;
++}
++
++/*
++ * The functions below are wrappers to the above functions, with
++ * similar names but with sync_obj omitted. These wrappers are intended
++ * to be plugged directly into the buffer object driver's sync object
++ * API, if the driver chooses to use ttm_fence_objects as buffer object
++ * sync objects. In the prototypes below, a sync_obj is cast to a
++ * struct ttm_fence_object, whereas a sync_arg is cast to an
++ * uint32_t representing a fence_type argument.
++ */
++
++extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
++extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible);
++extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
++extern void ttm_fence_sync_obj_unref(void **sync_obj);
++extern void *ttm_fence_sync_obj_ref(void *sync_obj);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h
+new file mode 100644
+index 0000000..1dbd817
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h
+@@ -0,0 +1,302 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_FENCE_DRIVER_H_
++#define _TTM_FENCE_DRIVER_H_
++
++#include <linux/kref.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include "ttm_fence_api.h"
++#include "ttm_memory.h"
++
++/** @file ttm_fence_driver.h
++ *
++ * Definitions needed for a driver implementing the
++ * ttm_fence subsystem.
++ */
++
++/**
++ * struct ttm_fence_class_manager:
++ *
++ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
++ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
++ * @flush_diff: Sequence difference to trigger fence flush.
++ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
++ * seqa as old an needing a flush.
++ * @sequence_mask: Mask of valid bits in a fence sequence.
++ * @lock: Lock protecting this struct as well as fence objects
++ * associated with this struct.
++ * @ring: Circular sequence-ordered list of fence objects.
++ * @pending_flush: Fence types currently needing a flush.
++ * @waiting_types: Fence types that are currently waited for.
++ * @fence_queue: Queue of waiters on fences belonging to this fence class.
++ * @highest_waiting_sequence: Sequence number of the fence with highest
++ * sequence number and that is waited for.
++ * @latest_queued_sequence: Sequence number of the fence latest queued
++ * on the ring.
++ */
++
++struct ttm_fence_class_manager {
++
++ /*
++ * Unprotected constant members.
++ */
++
++ uint32_t wrap_diff;
++ uint32_t flush_diff;
++ uint32_t sequence_mask;
++
++ /*
++ * The rwlock protects this structure as well as
++ * the data in all fence objects belonging to this
++ * class. This should be OK as most fence objects are
++ * only read from once they're created.
++ */
++
++ rwlock_t lock;
++ struct list_head ring;
++ uint32_t pending_flush;
++ uint32_t waiting_types;
++ wait_queue_head_t fence_queue;
++ uint32_t highest_waiting_sequence;
++ uint32_t latest_queued_sequence;
++};
++
++/**
++ * struct ttm_fence_device
++ *
++ * @fence_class: Array of fence class managers.
++ * @num_classes: Array dimension of @fence_class.
++ * @count: Current number of fence objects for statistics.
++ * @driver: Driver struct.
++ *
++ * Provided in the driver interface so that the driver can derive
++ * from this struct for its driver_private, and accordingly
++ * access the driver_private from the fence driver callbacks.
++ *
++ * All members except "count" are initialized at creation and
++ * never touched after that. No protection needed.
++ *
++ * This struct is private to the fence implementation and to the fence
++ * driver callbacks, and may otherwise be used by drivers only to
++ * obtain the derived device_private object using container_of().
++ */
++
++struct ttm_fence_device {
++ struct ttm_mem_global *mem_glob;
++ struct ttm_fence_class_manager *fence_class;
++ uint32_t num_classes;
++ atomic_t count;
++ const struct ttm_fence_driver *driver;
++};
++
++/**
++ * struct ttm_fence_class_init
++ *
++ * @wrap_diff: Fence sequence number wrap indicator. If
++ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
++ * considered to be older than sequence2.
++ * @flush_diff: Fence sequence number flush indicator.
++ * If a non-completely-signaled fence has a fence sequence number
++ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
++ * the fence is considered too old and it will be flushed upon the
++ * next call of ttm_fence_flush_old(), to make sure no fences with
++ * stale sequence numbers remains unsignaled. @flush_diff should
++ * be sufficiently less than @wrap_diff.
++ * @sequence_mask: Mask with valid bits of the fence sequence
++ * number set to 1.
++ *
++ * This struct is used as input to ttm_fence_device_init.
++ */
++
++struct ttm_fence_class_init {
++ uint32_t wrap_diff;
++ uint32_t flush_diff;
++ uint32_t sequence_mask;
++};
++
++/**
++ * struct ttm_fence_driver
++ *
++ * @has_irq: Called by a potential waiter. Should return 1 if a
++ * fence object with indicated parameters is expected to signal
++ * automatically, and 0 if the fence implementation needs to
++ * repeatedly call @poll to make it signal.
++ * @emit: Make sure a fence with the given parameters is
++ * present in the indicated command stream. Return its sequence number
++ * in "breadcrumb".
++ * @poll: Check and report sequences of the given "fence_class"
++ * that have signaled "types"
++ * @flush: Make sure that the types indicated by the bitfield
++ * ttm_fence_class_manager::pending_flush will eventually
++ * signal. These bits have been put together using the
++ * result from the needed_flush function described below.
++ * @needed_flush: Given the fence_class and fence_types indicated by
++ * "fence", and the last received fence sequence of this
++ * fence class, indicate what types need a fence flush to
++ * signal. Return as a bitfield.
++ * @wait: Set to non-NULL if the driver wants to override the fence
++ * wait implementation. Return 0 on success, -EBUSY on failure,
++ * and -ERESTART if interruptible and a signal is pending.
++ * @signaled: Driver callback that is called whenever a
++ * ttm_fence_object::signaled_types has changed status.
++ * This function is called from atomic context,
++ * with the ttm_fence_class_manager::lock held in write mode.
++ * @lockup: Driver callback that is called whenever a wait has exceeded
++ * the lifetime of a fence object.
++ * If there is a GPU lockup,
++ * this function should, if possible, reset the GPU,
++ * call the ttm_fence_handler with an error status, and
++ * return. If no lockup was detected, simply extend the
++ * fence timeout_jiffies and return. The driver might
++ * want to protect the lockup check with a mutex and cache a
++ * non-locked-up status for a while to avoid an excessive
++ * amount of lockup checks from every waiting thread.
++ */
++
++struct ttm_fence_driver {
++ bool (*has_irq) (struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t flags);
++ int (*emit) (struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags,
++ uint32_t *breadcrumb, unsigned long *timeout_jiffies);
++ void (*flush) (struct ttm_fence_device *fdev, uint32_t fence_class);
++ void (*poll) (struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t types);
++ uint32_t(*needed_flush)
++ (struct ttm_fence_object *fence);
++ int (*wait) (struct ttm_fence_object *fence, bool lazy,
++ bool interruptible, uint32_t mask);
++ void (*signaled) (struct ttm_fence_object *fence);
++ void (*lockup) (struct ttm_fence_object *fence, uint32_t fence_types);
++};
++
++/**
++ * function ttm_fence_device_init
++ *
++ * @num_classes: Number of fence classes for this fence implementation.
++ * @mem_global: Pointer to the global memory accounting info.
++ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
++ * @init: Array of initialization info for each fence class.
++ * @replicate_init: Use the first @init initialization info for all classes.
++ * @driver: Driver callbacks.
++ *
++ * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if
++ * out-of-memory. Otherwise returns 0.
++ */
++extern int
++ttm_fence_device_init(int num_classes,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_fence_device *fdev,
++ const struct ttm_fence_class_init *init,
++ bool replicate_init,
++ const struct ttm_fence_driver *driver);
++
++/**
++ * function ttm_fence_device_release
++ *
++ * @fdev: Pointer to the fence device.
++ *
++ * Release all resources held by a fence device. Note that before
++ * this function is called, the caller must have made sure all fence
++ * objects belonging to this fence device are completely signaled.
++ */
++
++extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
++
++/**
++ * ttm_fence_handler - the fence handler.
++ *
++ * @fdev: Pointer to the fence device.
++ * @fence_class: Fence class that signals.
++ * @sequence: Signaled sequence.
++ * @type: Types that signal.
++ * @error: Error from the engine.
++ *
++ * This function signals all fences with a sequence previous to the
++ * @sequence argument, and belonging to @fence_class. The signaled fence
++ * types are provided in @type. If error is non-zero, the error member
++ * of the fence with sequence = @sequence is set to @error. This value
++ * may be reported back to user-space, indicating, for example an illegal
++ * 3D command or illegal mpeg data.
++ *
++ * This function is typically called from the driver::poll method when the
++ * command sequence preceding the fence marker has executed. It should be
++ * called with the ttm_fence_class_manager::lock held in write mode and
++ * may be called from interrupt context.
++ */
++
++extern void
++ttm_fence_handler(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t sequence, uint32_t type, uint32_t error);
++
++/**
++ * ttm_fence_driver_from_dev
++ *
++ * @fdev: The ttm fence device.
++ *
++ * Returns a pointer to the fence driver struct.
++ */
++
++static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(
++ struct ttm_fence_device *fdev)
++{
++ return fdev->driver;
++}
++
++/**
++ * ttm_fence_driver
++ *
++ * @fence: Pointer to a ttm fence object.
++ *
++ * Returns a pointer to the fence driver struct.
++ */
++
++static inline const struct ttm_fence_driver *ttm_fence_driver(struct
++ ttm_fence_object
++ *fence)
++{
++ return ttm_fence_driver_from_dev(fence->fdev);
++}
++
++/**
++ * ttm_fence_fc
++ *
++ * @fence: Pointer to a ttm fence object.
++ *
++ * Returns a pointer to the struct ttm_fence_class_manager for the
++ * fence class of @fence.
++ */
++
++static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
++ ttm_fence_object
++ *fence)
++{
++ return &fence->fdev->fence_class[fence->fence_class];
++}
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c
+new file mode 100644
+index 0000000..878c9bd
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c
+@@ -0,0 +1,238 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "ttm_fence_user.h"
++#include "ttm_object.h"
++#include "ttm_fence_driver.h"
++#include "ttm_userobj_api.h"
++
++/**
++ * struct ttm_fence_user_object
++ *
++ * @base: The base object used for user-space visibility and refcounting.
++ *
++ * @fence: The fence object itself.
++ *
++ */
++
++struct ttm_fence_user_object {
++ struct ttm_base_object base;
++ struct ttm_fence_object fence;
++};
++
++static struct ttm_fence_user_object *ttm_fence_user_object_lookup(
++ struct ttm_object_file *tfile,
++ uint32_t handle)
++{
++ struct ttm_base_object *base;
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (unlikely(base == NULL)) {
++ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ if (unlikely(base->object_type != ttm_fence_type)) {
++ ttm_base_object_unref(&base);
++ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ return container_of(base, struct ttm_fence_user_object, base);
++}
++
++/*
++ * The fence object destructor.
++ */
++
++static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_user_object *ufence =
++ container_of(fence, struct ttm_fence_user_object, fence);
++
++ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
++ kfree(ufence);
++}
++
++/*
++ * The base object destructor. We basically unly unreference the
++ * attached fence object.
++ */
++
++static void ttm_fence_user_release(struct ttm_base_object **p_base)
++{
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base = *p_base;
++ struct ttm_fence_object *fence;
++
++ *p_base = NULL;
++
++ if (unlikely(base == NULL))
++ return;
++
++ ufence = container_of(base, struct ttm_fence_user_object, base);
++ fence = &ufence->fence;
++ ttm_fence_object_unref(&fence);
++}
++
++int
++ttm_fence_user_create(struct ttm_fence_device *fdev,
++ struct ttm_object_file *tfile,
++ uint32_t fence_class,
++ uint32_t fence_types,
++ uint32_t create_flags,
++ struct ttm_fence_object **fence,
++ uint32_t *user_handle)
++{
++ int ret;
++ struct ttm_fence_object *tmp;
++ struct ttm_fence_user_object *ufence;
++
++ ret = ttm_mem_global_alloc(fdev->mem_glob,
++ sizeof(*ufence),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return -ENOMEM;
++
++ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
++ if (unlikely(ufence == NULL)) {
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_fence_object_init(fdev,
++ fence_class,
++ fence_types, create_flags,
++ &ttm_fence_user_destroy, &ufence->fence);
++
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ /*
++ * One fence ref is held by the fence ptr we return.
++ * The other one by the base object. Need to up the
++ * fence refcount before we publish this object to
++ * user-space.
++ */
++
++ tmp = ttm_fence_object_ref(&ufence->fence);
++ ret = ttm_base_object_init(tfile, &ufence->base,
++ false, ttm_fence_type,
++ &ttm_fence_user_release, NULL);
++
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ *fence = &ufence->fence;
++ *user_handle = ufence->base.hash.key;
++
++ return 0;
++out_err1:
++ ttm_fence_object_unref(&tmp);
++ tmp = &ufence->fence;
++ ttm_fence_object_unref(&tmp);
++ return ret;
++out_err0:
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
++ kfree(ufence);
++ return ret;
++}
++
++int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ int ret;
++ union ttm_fence_signaled_arg *arg = data;
++ struct ttm_fence_object *fence;
++ struct ttm_fence_info info;
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base;
++ ret = 0;
++
++ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
++ if (unlikely(ufence == NULL))
++ return -EINVAL;
++
++ fence = &ufence->fence;
++
++ if (arg->req.flush) {
++ ret = ttm_fence_object_flush(fence, arg->req.fence_type);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++
++ info = ttm_fence_get_info(fence);
++ arg->rep.signaled_types = info.signaled_types;
++ arg->rep.fence_error = info.error;
++
++out:
++ base = &ufence->base;
++ ttm_base_object_unref(&base);
++ return ret;
++}
++
++int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ int ret;
++ union ttm_fence_finish_arg *arg = data;
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base;
++ struct ttm_fence_object *fence;
++ ret = 0;
++
++ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
++ if (unlikely(ufence == NULL))
++ return -EINVAL;
++
++ fence = &ufence->fence;
++
++ ret = ttm_fence_object_wait(fence,
++ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
++ true, arg->req.fence_type);
++ if (likely(ret == 0)) {
++ struct ttm_fence_info info = ttm_fence_get_info(fence);
++
++ arg->rep.signaled_types = info.signaled_types;
++ arg->rep.fence_error = info.error;
++ }
++
++ base = &ufence->base;
++ ttm_base_object_unref(&base);
++
++ return ret;
++}
++
++int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_fence_unref_arg *arg = data;
++ int ret = 0;
++
++ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
++ return ret;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h
+new file mode 100644
+index 0000000..ee95e6a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h
+@@ -0,0 +1,140 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef TTM_FENCE_USER_H
++#define TTM_FENCE_USER_H
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#endif
++
++#define TTM_FENCE_MAJOR 0
++#define TTM_FENCE_MINOR 1
++#define TTM_FENCE_PL 0
++#define TTM_FENCE_DATE "080819"
++
++/**
++ * struct ttm_fence_signaled_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to flush. Input.
++ *
++ * @flush: Boolean. Flush the indicated fence_types. Input.
++ *
++ * Argument to the TTM_FENCE_SIGNALED ioctl.
++ */
++
++struct ttm_fence_signaled_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ int32_t flush;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_fence_rep
++ *
++ * @signaled_types: Fence type that has signaled.
++ *
++ * @fence_error: Command execution error.
++ * Hardware errors that are consequences of the execution
++ * of the command stream preceding the fence are reported
++ * here.
++ *
++ * Output argument to the TTM_FENCE_SIGNALED and
++ * TTM_FENCE_FINISH ioctls.
++ */
++
++struct ttm_fence_rep {
++ uint32_t signaled_types;
++ uint32_t fence_error;
++};
++
++union ttm_fence_signaled_arg {
++ struct ttm_fence_signaled_req req;
++ struct ttm_fence_rep rep;
++};
++
++/*
++ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
++ *
++ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
++#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_fence_finish_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to finish.
++ *
++ * @mode: Wait mode.
++ *
++ * Input to the TTM_FENCE_FINISH ioctl.
++ */
++
++struct ttm_fence_finish_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ uint32_t mode;
++ uint32_t pad64;
++};
++
++union ttm_fence_finish_arg {
++ struct ttm_fence_finish_req req;
++ struct ttm_fence_rep rep;
++};
++
++/**
++ * struct ttm_fence_unref_arg
++ *
++ * @handle: Handle to the fence object.
++ *
++ * Argument to the TTM_FENCE_UNREF ioctl.
++ */
++
++struct ttm_fence_unref_arg {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * Ioctl offsets from extension start.
++ */
++
++#define TTM_FENCE_SIGNALED 0x01
++#define TTM_FENCE_FINISH 0x02
++#define TTM_FENCE_UNREF 0x03
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c
+new file mode 100644
+index 0000000..be7464c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c
+@@ -0,0 +1,155 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_lock.h"
++#include <asm/atomic.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++void ttm_lock_init(struct ttm_lock *lock)
++{
++ init_waitqueue_head(&lock->queue);
++ atomic_set(&lock->write_lock_pending, 0);
++ atomic_set(&lock->readers, 0);
++ lock->kill_takers = false;
++ lock->signal = SIGKILL;
++}
++
++void ttm_read_unlock(struct ttm_lock *lock)
++{
++ if (atomic_dec_and_test(&lock->readers))
++ wake_up_all(&lock->queue);
++}
++
++int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
++{
++ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
++ int ret;
++
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->write_lock_pending) == 0);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
++ if (ret)
++ return -ERESTART;
++ }
++
++ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
++ int ret;
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->readers) != -1);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->readers) != -1);
++ if (ret)
++ return -ERESTART;
++ }
++
++ if (unlikely(lock->kill_takers)) {
++ send_sig(lock->signal, current, 0);
++ ttm_read_unlock(lock);
++ return -ERESTART;
++ }
++
++ return 0;
++}
++
++static int __ttm_write_unlock(struct ttm_lock *lock)
++{
++ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
++ return -EINVAL;
++ wake_up_all(&lock->queue);
++ return 0;
++}
++
++static void ttm_write_lock_remove(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
++ int ret;
++
++ *p_base = NULL;
++ ret = __ttm_write_unlock(lock);
++ BUG_ON(ret != 0);
++}
++
++int ttm_write_lock(struct ttm_lock *lock,
++ bool interruptible,
++ struct ttm_object_file *tfile)
++{
++ int ret = 0;
++
++ atomic_inc(&lock->write_lock_pending);
++
++ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->readers) == 0);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->readers) == 0);
++
++ if (ret) {
++ if (atomic_dec_and_test(&lock->write_lock_pending))
++ wake_up_all(&lock->queue);
++ return -ERESTART;
++ }
++ }
++
++ if (atomic_dec_and_test(&lock->write_lock_pending))
++ wake_up_all(&lock->queue);
++
++ if (unlikely(lock->kill_takers)) {
++ send_sig(lock->signal, current, 0);
++ __ttm_write_unlock(lock);
++ return -ERESTART;
++ }
++
++ /*
++ * Add a base-object, the destructor of which will
++ * make sure the lock is released if the client dies
++ * while holding it.
++ */
++
++ ret = ttm_base_object_init(tfile, &lock->base, false,
++ ttm_lock_type, &ttm_write_lock_remove, NULL);
++ if (ret)
++ (void)__ttm_write_unlock(lock);
++
++ return ret;
++}
++
++int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile)
++{
++ return ttm_ref_object_base_unref(tfile,
++ lock->base.hash.key, TTM_REF_USAGE);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h
+new file mode 100644
+index 0000000..500b2c1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h
+@@ -0,0 +1,176 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++/** @file ttm_lock.h
++ * This file implements a simple replacement for the buffer manager use
++ * of the DRM heavyweight hardware lock.
++ * The lock is a read-write lock. Taking it in read mode is fast, and
++ * intended for in-kernel use only.
++ * Taking it in write mode is slow.
++ *
++ * The write mode is used only when there is a need to block all
++ * user-space processes from validating buffers.
++ * It's allowed to leave kernel space with the write lock held.
++ * If a user-space process dies while having the write-lock,
++ * it will be released during the file descriptor release.
++ *
++ * The read lock is typically placed at the start of an IOCTL- or
++ * user-space callable function that may end up allocating a memory area.
++ * This includes setstatus, super-ioctls and faults; the latter may move
++ * unmappable regions to mappable. It's a bug to leave kernel space with the
++ * read lock held.
++ *
++ * Both read- and write lock taking is interruptible for low signal-delivery
++ * latency. The locking functions will return -ERESTART if interrupted by a
++ * signal.
++ *
++ * Locking order: The lock should be taken BEFORE any TTM mutexes
++ * or spinlocks.
++ *
++ * Typical usages:
++ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
++ * stops it from being repopulated.
++ * b) out-of-VRAM or out-of-aperture space, in which case the process
++ * receiving the out-of-space notification may take the lock in write mode
++ * and evict all buffers prior to start validating its own buffers.
++ */
++
++#ifndef _TTM_LOCK_H_
++#define _TTM_LOCK_H_
++
++#include "ttm_object.h"
++#include <linux/wait.h>
++#include <asm/atomic.h>
++
++/**
++ * struct ttm_lock
++ *
++ * @base: ttm base object used solely to release the lock if the client
++ * holding the lock dies.
++ * @queue: Queue for processes waiting for lock change-of-status.
++ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
++ * write lock starvation.
++ * @readers: The lock status: A negative number indicates that a write lock is
++ * held. Positive values indicate number of concurrent readers.
++ */
++
++struct ttm_lock {
++ struct ttm_base_object base;
++ wait_queue_head_t queue;
++ atomic_t write_lock_pending;
++ atomic_t readers;
++ bool kill_takers;
++ int signal;
++};
++
++/**
++ * ttm_lock_init
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * Initializes the lock.
++ */
++extern void ttm_lock_init(struct ttm_lock *lock);
++
++/**
++ * ttm_read_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Releases a read lock.
++ */
++
++extern void ttm_read_unlock(struct ttm_lock *lock);
++
++/**
++ * ttm_read_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ *
++ * Takes the lock in read mode.
++ * Returns:
++ * -ERESTART If interrupted by a signal and interruptible is true.
++ */
++
++extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
++
++/**
++ * ttm_write_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
++ * application taking the lock.
++ *
++ * Takes the lock in write mode.
++ * Returns:
++ * -ERESTART If interrupted by a signal and interruptible is true.
++ * -ENOMEM: Out of memory when locking.
++ */
++extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
++ struct ttm_object_file *tfile);
++
++/**
++ * ttm_write_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
++ * application taking the lock.
++ *
++ * Releases a write lock.
++ * Returns:
++ * -EINVAL If the lock was not held.
++ */
++extern int ttm_write_unlock(struct ttm_lock *lock,
++ struct ttm_object_file *tfile);
++
++/**
++ * ttm_lock_set_kill
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @val: Boolean whether to kill processes taking the lock.
++ * @signal: Signal to send to the process taking the lock.
++ *
++ * The kill-when-taking-lock functionality is used to kill processes that keep
++ * on using the TTM functionality when its resources have been taken down, for
++ * example when the X server exits. A typical sequence would look like this:
++ * - X server takes lock in write mode.
++ * - ttm_lock_set_kill() is called with @val set to true.
++ * - As part of X server exit, TTM resources are taken down.
++ * - X server releases the lock on file release.
++ * - Another dri client wants to render, takes the lock and is killed.
++ *
++ */
++
++static inline void ttm_lock_set_kill(struct ttm_lock *lock,
++ bool val,
++ int signal)
++{
++ lock->kill_takers = val;
++ if (val)
++ lock->signal = signal;
++}
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c
+new file mode 100644
+index 0000000..363c1c3
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c
+@@ -0,0 +1,228 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include "ttm_memory.h"
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/mm.h>
++
++#define TTM_MEMORY_ALLOC_RETRIES 4
++
++/**
++ * At this point we only support a single shrink callback.
++ * Extend this if needed, perhaps using a linked list of callbacks.
++ * Note that this function is reentrant:
++ * many threads may try to swap out at any given time.
++ */
++
++static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
++ uint64_t extra)
++{
++ int ret;
++ struct ttm_mem_shrink *shrink;
++ uint64_t target;
++ uint64_t total_target;
++
++ spin_lock(&glob->lock);
++ if (glob->shrink == NULL)
++ goto out;
++
++ if (from_workqueue) {
++ target = glob->swap_limit;
++ total_target = glob->total_memory_swap_limit;
++ } else if (capable(CAP_SYS_ADMIN)) {
++ total_target = glob->emer_total_memory;
++ target = glob->emer_memory;
++ } else {
++ total_target = glob->max_total_memory;
++ target = glob->max_memory;
++ }
++
++ total_target = (extra >= total_target) ? 0 : total_target - extra;
++ target = (extra >= target) ? 0 : target - extra;
++
++ while (glob->used_memory > target ||
++ glob->used_total_memory > total_target) {
++ shrink = glob->shrink;
++ spin_unlock(&glob->lock);
++ ret = shrink->do_shrink(shrink);
++ spin_lock(&glob->lock);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++out:
++ spin_unlock(&glob->lock);
++}
++
++static void ttm_shrink_work(struct work_struct *work)
++{
++ struct ttm_mem_global *glob =
++ container_of(work, struct ttm_mem_global, work);
++
++ ttm_shrink(glob, true, 0ULL);
++}
++
++int ttm_mem_global_init(struct ttm_mem_global *glob)
++{
++ struct sysinfo si;
++ uint64_t mem;
++
++ spin_lock_init(&glob->lock);
++ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
++ INIT_WORK(&glob->work, ttm_shrink_work);
++ init_waitqueue_head(&glob->queue);
++
++ si_meminfo(&si);
++
++ mem = si.totalram - si.totalhigh;
++ mem *= si.mem_unit;
++
++ glob->max_memory = mem >> 1;
++ glob->emer_memory = glob->max_memory + (mem >> 2);
++ glob->swap_limit = glob->max_memory - (mem >> 5);
++ glob->used_memory = 0;
++ glob->used_total_memory = 0;
++ glob->shrink = NULL;
++
++ mem = si.totalram;
++ mem *= si.mem_unit;
++
++ glob->max_total_memory = mem >> 1;
++ glob->emer_total_memory = glob->max_total_memory + (mem >> 2);
++ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5);
++
++ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n",
++ glob->max_total_memory >> 20);
++ printk(KERN_INFO "TTM available object memory: %llu MiB\n",
++ glob->max_memory >> 20);
++ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n",
++ glob->swap_limit >> 20);
++
++ return 0;
++}
++
++void ttm_mem_global_release(struct ttm_mem_global *glob)
++{
++ printk(KERN_INFO "Used total memory is %llu bytes.\n",
++ (unsigned long long)glob->used_total_memory);
++ flush_workqueue(glob->swap_queue);
++ destroy_workqueue(glob->swap_queue);
++ glob->swap_queue = NULL;
++}
++
++static inline void ttm_check_swapping(struct ttm_mem_global *glob)
++{
++ bool needs_swapping;
++
++ spin_lock(&glob->lock);
++ needs_swapping = (glob->used_memory > glob->swap_limit ||
++ glob->used_total_memory >
++ glob->total_memory_swap_limit);
++ spin_unlock(&glob->lock);
++
++ if (unlikely(needs_swapping))
++ (void)queue_work(glob->swap_queue, &glob->work);
++
++}
++
++void ttm_mem_global_free(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem)
++{
++ spin_lock(&glob->lock);
++ glob->used_total_memory -= amount;
++ if (!himem)
++ glob->used_memory -= amount;
++ wake_up_all(&glob->queue);
++ spin_unlock(&glob->lock);
++}
++
++static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem, bool reserve)
++{
++ uint64_t limit;
++ uint64_t lomem_limit;
++ int ret = -ENOMEM;
++
++ spin_lock(&glob->lock);
++
++ if (capable(CAP_SYS_ADMIN)) {
++ limit = glob->emer_total_memory;
++ lomem_limit = glob->emer_memory;
++ } else {
++ limit = glob->max_total_memory;
++ lomem_limit = glob->max_memory;
++ }
++
++ if (unlikely(glob->used_total_memory + amount > limit))
++ goto out_unlock;
++ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
++ goto out_unlock;
++
++ if (reserve) {
++ glob->used_total_memory += amount;
++ if (!himem)
++ glob->used_memory += amount;
++ }
++ ret = 0;
++out_unlock:
++ spin_unlock(&glob->lock);
++ ttm_check_swapping(glob);
++
++ return ret;
++}
++
++int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
++ bool no_wait, bool interruptible, bool himem)
++{
++ int count = TTM_MEMORY_ALLOC_RETRIES;
++
++ while (unlikely(ttm_mem_global_reserve(glob,
++ memory,
++ himem,
++ true) != 0)) {
++ if (no_wait)
++ return -ENOMEM;
++ if (unlikely(count-- == 0))
++ return -ENOMEM;
++ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
++ }
++
++ return 0;
++}
++
++size_t ttm_round_pot(size_t size)
++{
++ if ((size & (size - 1)) == 0)
++ return size;
++ else if (size > PAGE_SIZE)
++ return PAGE_ALIGN(size);
++ else {
++ size_t tmp_size = 4;
++
++ while (tmp_size < size)
++ tmp_size <<= 1;
++
++ return tmp_size;
++ }
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h
+new file mode 100644
+index 0000000..2ceeb32
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h
+@@ -0,0 +1,147 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef TTM_MEMORY_H
++#define TTM_MEMORY_H
++
++#include <linux/workqueue.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++
++/**
++ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
++ *
++ * @do_shrink: The callback function.
++ *
++ * Arguments to the do_shrink functions are intended to be passed using
++ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
++ * and can be accessed using container_of().
++ */
++
++struct ttm_mem_shrink {
++ int (*do_shrink) (struct ttm_mem_shrink *);
++};
++
++/**
++ * struct ttm_mem_global - Global memory accounting structure.
++ *
++ * @shrink: A single callback to shrink TTM memory usage. Extend this
++ * to a linked list to be able to handle multiple callbacks when needed.
++ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
++ * need a separate workqueue since it will spend a lot of time waiting
++ * for the GPU, and this will otherwise block other workqueue tasks(?)
++ * At this point we use only a single-threaded workqueue.
++ * @work: The workqueue callback for the shrink queue.
++ * @queue: Wait queue for processes suspended waiting for memory.
++ * @lock: Lock to protect the @shrink - and the memory accounting members,
++ * that is, essentially the whole structure with some exceptions.
++ * @emer_memory: Lowmem memory limit available for root.
++ * @max_memory: Lowmem memory limit available for non-root.
++ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
++ * @used_memory: Currently used lowmem memory.
++ * @used_total_memory: Currently used total (lowmem + highmem) memory.
++ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
++ * kicks in.
++ * @max_total_memory: Total memory available to non-root processes.
++ * @emer_total_memory: Total memory available to root processes.
++ *
++ * Note that this structure is not per device. It should be global for all
++ * graphics devices.
++ */
++
++struct ttm_mem_global {
++ struct ttm_mem_shrink *shrink;
++ struct workqueue_struct *swap_queue;
++ struct work_struct work;
++ wait_queue_head_t queue;
++ spinlock_t lock;
++ uint64_t emer_memory;
++ uint64_t max_memory;
++ uint64_t swap_limit;
++ uint64_t used_memory;
++ uint64_t used_total_memory;
++ uint64_t total_memory_swap_limit;
++ uint64_t max_total_memory;
++ uint64_t emer_total_memory;
++};
++
++/**
++ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
++ *
++ * @shrink: The object to initialize.
++ * @func: The callback function.
++ */
++
++static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
++ int (*func) (struct ttm_mem_shrink *))
++{
++ shrink->do_shrink = func;
++}
++
++/**
++ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
++ *
++ * @glob: The struct ttm_mem_global object to register with.
++ * @shrink: An initialized struct ttm_mem_shrink object to register.
++ *
++ * Returns:
++ * -EBUSY: There's already a callback registered. (May change).
++ */
++
++static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
++ struct ttm_mem_shrink *shrink)
++{
++ spin_lock(&glob->lock);
++ if (glob->shrink != NULL) {
++ spin_unlock(&glob->lock);
++ return -EBUSY;
++ }
++ glob->shrink = shrink;
++ spin_unlock(&glob->lock);
++ return 0;
++}
++
++/**
++ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
++ *
++ * @glob: The struct ttm_mem_global object to unregister from.
++ * @shrink: A previously registered struct ttm_mem_shrink object.
++ *
++ */
++
++static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
++ struct ttm_mem_shrink *shrink)
++{
++ spin_lock(&glob->lock);
++ BUG_ON(glob->shrink != shrink);
++ glob->shrink = NULL;
++ spin_unlock(&glob->lock);
++}
++
++extern int ttm_mem_global_init(struct ttm_mem_global *glob);
++extern void ttm_mem_global_release(struct ttm_mem_global *glob);
++extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
++ bool no_wait, bool interruptible, bool himem);
++extern void ttm_mem_global_free(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem);
++extern size_t ttm_round_pot(size_t size);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_object.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_object.c
+new file mode 100644
+index 0000000..53ee1c9
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_object.c
+@@ -0,0 +1,440 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++/** @file ttm_ref_object.c
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++/**
++ * struct ttm_object_file
++ *
++ * @tdev: Pointer to the ttm_object_device.
++ *
++ * @lock: Lock that protects the ref_list list and the
++ * ref_hash hash tables.
++ *
++ * @ref_list: List of ttm_ref_objects to be destroyed at
++ * file release.
++ *
++ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
++ * for fast lookup of ref objects given a base object.
++ */
++
++#include "ttm_object.h"
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <asm/atomic.h>
++
++struct ttm_object_file {
++ struct ttm_object_device *tdev;
++ rwlock_t lock;
++ struct list_head ref_list;
++ struct drm_open_hash ref_hash[TTM_REF_NUM];
++ struct kref refcount;
++};
++
++/**
++ * struct ttm_object_device
++ *
++ * @object_lock: lock that protects the object_hash hash table.
++ *
++ * @object_hash: hash table for fast lookup of object global names.
++ *
++ * @object_count: Per device object count.
++ *
++ * This is the per-device data structure needed for ttm object management.
++ */
++
++struct ttm_object_device {
++ rwlock_t object_lock;
++ struct drm_open_hash object_hash;
++ atomic_t object_count;
++ struct ttm_mem_global *mem_glob;
++};
++
++/**
++ * struct ttm_ref_object
++ *
++ * @hash: Hash entry for the per-file object reference hash.
++ *
++ * @head: List entry for the per-file list of ref-objects.
++ *
++ * @kref: Ref count.
++ *
++ * @obj: Base object this ref object is referencing.
++ *
++ * @ref_type: Type of ref object.
++ *
++ * This is similar to an idr object, but it also has a hash table entry
++ * that allows lookup with a pointer to the referenced object as a key. In
++ * that way, one can easily detect whether a base object is referenced by
++ * a particular ttm_object_file. It also carries a ref count to avoid creating
++ * multiple ref objects if a ttm_object_file references the same base object
++ * more than once.
++ */
++
++struct ttm_ref_object {
++ struct drm_hash_item hash;
++ struct list_head head;
++ struct kref kref;
++ struct ttm_base_object *obj;
++ enum ttm_ref_type ref_type;
++ struct ttm_object_file *tfile;
++};
++
++static inline struct ttm_object_file *
++ttm_object_file_ref(struct ttm_object_file *tfile)
++{
++ kref_get(&tfile->refcount);
++ return tfile;
++}
++
++static void ttm_object_file_destroy(struct kref *kref)
++{
++ struct ttm_object_file *tfile =
++ container_of(kref, struct ttm_object_file, refcount);
++
++ /* printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile); */
++ kfree(tfile);
++}
++
++
++static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
++{
++ struct ttm_object_file *tfile = *p_tfile;
++
++ *p_tfile = NULL;
++ kref_put(&tfile->refcount, ttm_object_file_destroy);
++}
++
++
++int ttm_base_object_init(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ bool shareable,
++ enum ttm_object_type object_type,
++ void (*refcount_release) (struct ttm_base_object **),
++ void (*ref_obj_release) (struct ttm_base_object *,
++ enum ttm_ref_type ref_type))
++{
++ struct ttm_object_device *tdev = tfile->tdev;
++ int ret;
++
++ base->shareable = shareable;
++ base->tfile = ttm_object_file_ref(tfile);
++ base->refcount_release = refcount_release;
++ base->ref_obj_release = ref_obj_release;
++ base->object_type = object_type;
++ write_lock(&tdev->object_lock);
++ kref_init(&base->refcount);
++ ret = drm_ht_just_insert_please(&tdev->object_hash,
++ &base->hash,
++ (unsigned long)base, 31, 0, 0);
++ write_unlock(&tdev->object_lock);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ ttm_base_object_unref(&base);
++
++ return 0;
++out_err1:
++ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++out_err0:
++ return ret;
++}
++
++static void ttm_release_base(struct kref *kref)
++{
++ struct ttm_base_object *base =
++ container_of(kref, struct ttm_base_object, refcount);
++ struct ttm_object_device *tdev = base->tfile->tdev;
++
++ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++ write_unlock(&tdev->object_lock);
++ if (base->refcount_release) {
++ ttm_object_file_unref(&base->tfile);
++ base->refcount_release(&base);
++ }
++ write_lock(&tdev->object_lock);
++}
++
++void ttm_base_object_unref(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct ttm_object_device *tdev = base->tfile->tdev;
++
++ /* printk(KERN_INFO "TTM base object unref.\n"); */
++ *p_base = NULL;
++
++ /*
++ * Need to take the lock here to avoid racing with
++ * users trying to look up the object.
++ */
++
++ write_lock(&tdev->object_lock);
++ (void)kref_put(&base->refcount, &ttm_release_base);
++ write_unlock(&tdev->object_lock);
++}
++
++struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
++ uint32_t key)
++{
++ struct ttm_object_device *tdev = tfile->tdev;
++ struct ttm_base_object *base;
++ struct drm_hash_item *hash;
++ int ret;
++
++ read_lock(&tdev->object_lock);
++ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
++
++ if (likely(ret == 0)) {
++ base = drm_hash_entry(hash, struct ttm_base_object, hash);
++ kref_get(&base->refcount);
++ }
++ read_unlock(&tdev->object_lock);
++
++ if (unlikely(ret != 0))
++ return NULL;
++
++ if (tfile != base->tfile && !base->shareable) {
++ printk(KERN_ERR "Attempted access of non-shareable object.\n");
++ ttm_base_object_unref(&base);
++ return NULL;
++ }
++
++ return base;
++}
++
++int ttm_ref_object_add(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ enum ttm_ref_type ref_type, bool *existed)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++ struct ttm_ref_object *ref;
++ struct drm_hash_item *hash;
++ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++ int ret = -EINVAL;
++
++ if (existed != NULL)
++ *existed = true;
++
++ while (ret == -EINVAL) {
++ read_lock(&tfile->lock);
++ ret = drm_ht_find_item(ht, base->hash.key, &hash);
++
++ if (ret == 0) {
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ kref_get(&ref->kref);
++ read_unlock(&tfile->lock);
++ break;
++ }
++
++ read_unlock(&tfile->lock);
++ ret = ttm_mem_global_alloc(mem_glob,
++ sizeof(*ref),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return ret;
++ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
++ if (unlikely(ref == NULL)) {
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ return -ENOMEM;
++ }
++
++ ref->hash.key = base->hash.key;
++ ref->obj = base;
++ ref->tfile = tfile;
++ ref->ref_type = ref_type;
++ kref_init(&ref->kref);
++
++ write_lock(&tfile->lock);
++ ret = drm_ht_insert_item(ht, &ref->hash);
++
++ if (likely(ret == 0)) {
++ list_add_tail(&ref->head, &tfile->ref_list);
++ kref_get(&base->refcount);
++ write_unlock(&tfile->lock);
++ if (existed != NULL)
++ *existed = false;
++ break;
++ }
++
++ write_unlock(&tfile->lock);
++ BUG_ON(ret != -EINVAL);
++
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ kfree(ref);
++ }
++
++ return ret;
++}
++
++static void ttm_ref_object_release(struct kref *kref)
++{
++ struct ttm_ref_object *ref =
++ container_of(kref, struct ttm_ref_object, kref);
++ struct ttm_base_object *base = ref->obj;
++ struct ttm_object_file *tfile = ref->tfile;
++ struct drm_open_hash *ht;
++ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++
++ ht = &tfile->ref_hash[ref->ref_type];
++ (void)drm_ht_remove_item(ht, &ref->hash);
++ list_del(&ref->head);
++ write_unlock(&tfile->lock);
++
++ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
++ base->ref_obj_release(base, ref->ref_type);
++
++ ttm_base_object_unref(&ref->obj);
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ kfree(ref);
++ write_lock(&tfile->lock);
++}
++
++int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++ unsigned long key, enum ttm_ref_type ref_type)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++ struct ttm_ref_object *ref;
++ struct drm_hash_item *hash;
++ int ret;
++
++ write_lock(&tfile->lock);
++ ret = drm_ht_find_item(ht, key, &hash);
++ if (unlikely(ret != 0)) {
++ write_unlock(&tfile->lock);
++ return -EINVAL;
++ }
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ kref_put(&ref->kref, ttm_ref_object_release);
++ write_unlock(&tfile->lock);
++ return 0;
++}
++
++void ttm_object_file_release(struct ttm_object_file **p_tfile)
++{
++ struct ttm_ref_object *ref;
++ struct list_head *list;
++ unsigned int i;
++ struct ttm_object_file *tfile = *p_tfile;
++
++ *p_tfile = NULL;
++ write_lock(&tfile->lock);
++
++ /*
++ * Since we release the lock within the loop, we have to
++ * restart it from the beginning each time.
++ */
++
++ while (!list_empty(&tfile->ref_list)) {
++ list = tfile->ref_list.next;
++ ref = list_entry(list, struct ttm_ref_object, head);
++ ttm_ref_object_release(&ref->kref);
++ }
++
++ for (i = 0; i < TTM_REF_NUM; ++i)
++ drm_ht_remove(&tfile->ref_hash[i]);
++
++ write_unlock(&tfile->lock);
++ ttm_object_file_unref(&tfile);
++}
++
++struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
++ unsigned int hash_order)
++{
++ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
++ unsigned int i;
++ unsigned int j = 0;
++ int ret;
++
++ if (unlikely(tfile == NULL))
++ return NULL;
++
++ rwlock_init(&tfile->lock);
++ tfile->tdev = tdev;
++ kref_init(&tfile->refcount);
++ INIT_LIST_HEAD(&tfile->ref_list);
++
++ for (i = 0; i < TTM_REF_NUM; ++i) {
++ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
++ if (ret) {
++ j = i;
++ goto out_err;
++ }
++ }
++
++ return tfile;
++out_err:
++ for (i = 0; i < j; ++i)
++ drm_ht_remove(&tfile->ref_hash[i]);
++
++ kfree(tfile);
++
++ return NULL;
++}
++
++struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
++ *mem_glob,
++ unsigned int hash_order)
++{
++ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
++ int ret;
++
++ if (unlikely(tdev == NULL))
++ return NULL;
++
++ tdev->mem_glob = mem_glob;
++ rwlock_init(&tdev->object_lock);
++ atomic_set(&tdev->object_count, 0);
++ ret = drm_ht_create(&tdev->object_hash, hash_order);
++
++ if (likely(ret == 0))
++ return tdev;
++
++ kfree(tdev);
++ return NULL;
++}
++
++void ttm_object_device_release(struct ttm_object_device **p_tdev)
++{
++ struct ttm_object_device *tdev = *p_tdev;
++
++ *p_tdev = NULL;
++
++ write_lock(&tdev->object_lock);
++ drm_ht_remove(&tdev->object_hash);
++ write_unlock(&tdev->object_lock);
++
++ kfree(tdev);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_object.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_object.h
+new file mode 100644
+index 0000000..b04c714
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_object.h
+@@ -0,0 +1,262 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++/** @file ttm_ref_object.h
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++#ifndef _TTM_OBJECT_H_
++#define _TTM_OBJECT_H_
++
++#include <linux/list.h>
++#include <drm/drm_hashtab.h>
++#include <linux/kref.h>
++#include "ttm_memory.h"
++
++/**
++ * enum ttm_ref_type
++ *
++ * Describes what type of reference a ref object holds.
++ *
++ * TTM_REF_USAGE is a simple refcount on a base object.
++ *
++ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
++ * buffer object.
++ *
++ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
++ * buffer object.
++ *
++ */
++
++enum ttm_ref_type {
++ TTM_REF_USAGE,
++ TTM_REF_SYNCCPU_READ,
++ TTM_REF_SYNCCPU_WRITE,
++ TTM_REF_NUM
++};
++
++/**
++ * enum ttm_object_type
++ *
++ * One entry per ttm object type.
++ * Device-specific types should use the
++ * ttm_driver_typex types.
++ */
++
++enum ttm_object_type {
++ ttm_fence_type,
++ ttm_buffer_type,
++ ttm_lock_type,
++ ttm_driver_type0 = 256,
++ ttm_driver_type1
++};
++
++struct ttm_object_file;
++struct ttm_object_device;
++
++/**
++ * struct ttm_base_object
++ *
++ * @hash: hash entry for the per-device object hash.
++ * @type: derived type this object is base class for.
++ * @shareable: Other ttm_object_files can access this object.
++ *
++ * @tfile: Pointer to ttm_object_file of the creator.
++ * NULL if the object was not created by a user request.
++ * (kernel object).
++ *
++ * @refcount: Number of references to this object, not
++ * including the hash entry. A reference to a base object can
++ * only be held by a ref object.
++ *
++ * @refcount_release: A function to be called when there are
++ * no more references to this object. This function should
++ * destroy the object (or make sure destruction eventually happens),
++ * and when it is called, the object has
++ * already been taken out of the per-device hash. The parameter
++ * "base" should be set to NULL by the function.
++ *
++ * @ref_obj_release: A function to be called when a reference object
++ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
++ * this function may, for example, release a lock held by a user-space
++ * process.
++ *
++ * This struct is intended to be used as a base struct for objects that
++ * are visible to user-space. It provides a global name, race-safe
++ * access and refcounting, minimal access contol and hooks for unref actions.
++ */
++
++struct ttm_base_object {
++ struct drm_hash_item hash;
++ enum ttm_object_type object_type;
++ bool shareable;
++ struct ttm_object_file *tfile;
++ struct kref refcount;
++ void (*refcount_release) (struct ttm_base_object **base);
++ void (*ref_obj_release) (struct ttm_base_object *base,
++ enum ttm_ref_type ref_type);
++};
++
++/**
++ * ttm_base_object_init
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @base: The struct ttm_base_object to initialize.
++ * @shareable: This object is shareable with other applcations.
++ * (different @tfile pointers.)
++ * @type: The object type.
++ * @refcount_release: See the struct ttm_base_object description.
++ * @ref_obj_release: See the struct ttm_base_object description.
++ *
++ * Initializes a struct ttm_base_object.
++ */
++
++extern int ttm_base_object_init(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ bool shareable,
++ enum ttm_object_type type,
++ void (*refcount_release) (struct ttm_base_object
++ **),
++ void (*ref_obj_release) (struct ttm_base_object
++ *,
++ enum ttm_ref_type
++ ref_type));
++
++/**
++ * ttm_base_object_lookup
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @key: Hash key
++ *
++ * Looks up a struct ttm_base_object with the key @key.
++ * Also verifies that the object is visible to the application, by
++ * comparing the @tfile argument and checking the object shareable flag.
++ */
++
++extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
++ *tfile, uint32_t key);
++
++/**
++ * ttm_base_object_unref
++ *
++ * @p_base: Pointer to a pointer referncing a struct ttm_base_object.
++ *
++ * Decrements the base object refcount and clears the pointer pointed to by
++ * p_base.
++ */
++
++extern void ttm_base_object_unref(struct ttm_base_object **p_base);
++
++/**
++ * ttm_ref_object_add.
++ *
++ * @tfile: A struct ttm_object_file representing the application owning the
++ * ref_object.
++ * @base: The base object to reference.
++ * @ref_type: The type of reference.
++ * @existed: Upon completion, indicates that an identical reference object
++ * already existed, and the refcount was upped on that object instead.
++ *
++ * Adding a ref object to a base object is basically like referencing the
++ * base object, but a user-space application holds the reference. When the
++ * file corresponding to @tfile is closed, all its reference objects are
++ * deleted. A reference object can have different types depending on what
++ * it's intended for. It can be refcounting to prevent object destruction,
++ * When user-space takes a lock, it can add a ref object to that lock to
++ * make sure the lock is released if the application dies. A ref object
++ * will hold a single reference on a base object.
++ */
++extern int ttm_ref_object_add(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ enum ttm_ref_type ref_type, bool *existed);
++/**
++ * ttm_ref_object_base_unref
++ *
++ * @key: Key representing the base object.
++ * @ref_type: Ref type of the ref object to be dereferenced.
++ *
++ * Unreference a ref object with type @ref_type
++ * on the base object identified by @key. If there are no duplicate
++ * references, the ref object will be destroyed and the base object
++ * will be unreferenced.
++ */
++extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++ unsigned long key,
++ enum ttm_ref_type ref_type);
++
++/**
++ * ttm_object_file_init - initialize a struct ttm_object file
++ *
++ * @tdev: A struct ttm_object device this file is initialized on.
++ * @hash_order: Order of the hash table used to hold the reference objects.
++ *
++ * This is typically called by the file_ops::open function.
++ */
++
++extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
++ *tdev,
++ unsigned int hash_order);
++
++/**
++ * ttm_object_file_release - release data held by a ttm_object_file
++ *
++ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
++ * *p_tfile will be set to NULL by this function.
++ *
++ * Releases all data associated by a ttm_object_file.
++ * Typically called from file_ops::release. The caller must
++ * ensure that there are no concurrent users of tfile.
++ */
++
++extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
++
++/**
++ * ttm_object device init - initialize a struct ttm_object_device
++ *
++ * @hash_order: Order of hash table used to hash the base objects.
++ *
++ * This function is typically called on device initialization to prepare
++ * data structures needed for ttm base and ref objects.
++ */
++
++extern struct ttm_object_device *ttm_object_device_init
++ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
++
++/**
++ * ttm_object_device_release - release data held by a ttm_object_device
++ *
++ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
++ * *p_tdev will be set to NULL by this function.
++ *
++ * Releases all data associated by a ttm_object_device.
++ * Typically called from driver::unload before the destruction of the
++ * device private data structure.
++ */
++
++extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c
+new file mode 100644
+index 0000000..83f34c6
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c
+@@ -0,0 +1,164 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_pat_compat.h"
++#include <linux/version.h>
++#include <asm/page.h>
++#include <linux/spinlock.h>
++#include <asm/pgtable.h>
++
++#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
++#include <asm/tlbflush.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++
++#ifndef MSR_IA32_CR_PAT
++#define MSR_IA32_CR_PAT 0x0277
++#endif
++
++#ifndef _PAGE_PAT
++#define _PAGE_PAT 0x080
++#endif
++
++static int ttm_has_pat;
++
++/*
++ * Used at resume-time when CPU-s are fired up.
++ */
++
++static void ttm_pat_ipi_handler(void *notused)
++{
++ u32 v1, v2;
++
++ rdmsr(MSR_IA32_CR_PAT, v1, v2);
++ v2 &= 0xFFFFFFF8;
++ v2 |= 0x00000001;
++ wbinvd();
++ wrmsr(MSR_IA32_CR_PAT, v1, v2);
++ wbinvd();
++ __flush_tlb_all();
++}
++
++static void ttm_pat_enable(void)
++{
++ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0)
++ printk(KERN_ERR "Timed out setting up CPU PAT.\n");
++}
++
++void ttm_pat_resume(void)
++{
++ if (unlikely(!ttm_has_pat))
++ return;
++
++ ttm_pat_enable();
++}
++
++static int psb_cpu_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ if (action == CPU_ONLINE)
++ ttm_pat_resume();
++
++ return 0;
++}
++
++static struct notifier_block psb_nb = {
++ .notifier_call = psb_cpu_callback,
++ .priority = 1
++};
++
++/*
++ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
++ */
++
++void ttm_pat_init(void)
++{
++ if (likely(ttm_has_pat))
++ return;
++
++ if (!boot_cpu_has(X86_FEATURE_PAT))
++ return;
++
++ ttm_pat_enable();
++
++ if (num_present_cpus() > 1)
++ register_cpu_notifier(&psb_nb);
++
++ ttm_has_pat = 1;
++}
++
++void ttm_pat_takedown(void)
++{
++ if (unlikely(!ttm_has_pat))
++ return;
++
++ if (num_present_cpus() > 1)
++ unregister_cpu_notifier(&psb_nb);
++
++ ttm_has_pat = 0;
++}
++
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ if (likely(ttm_has_pat)) {
++ pgprot_val(prot) |= _PAGE_PAT;
++ return prot;
++ } else {
++ return pgprot_noncached(prot);
++ }
++}
++
++#else
++
++void ttm_pat_init(void)
++{
++}
++
++void ttm_pat_takedown(void)
++{
++}
++
++void ttm_pat_resume(void)
++{
++}
++
++#ifdef CONFIG_X86
++#include <asm/pat.h>
++
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ uint32_t cache_bits = ((1) ? _PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS);
++
++ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits);
++}
++#else
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ BUG();
++}
++#endif
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h
+new file mode 100644
+index 0000000..4702f1c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h
+@@ -0,0 +1,34 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PAT_COMPAT_
++#define _TTM_PAT_COMPAT_
++#include <asm/page.h>
++#include <asm/pgtable_types.h>
++extern void ttm_pat_init(void);
++extern void ttm_pat_takedown(void);
++extern void ttm_pat_resume(void);
++extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_common.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_common.h
+new file mode 100644
+index 0000000..067ce27
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_common.h
+@@ -0,0 +1,91 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PL_COMMON_H_
++#define _TTM_PL_COMMON_H_
++/*
++ * Memory regions for data placement.
++ */
++
++#define TTM_PL_SYSTEM 0
++#define TTM_PL_TT 1
++#define TTM_PL_VRAM 2
++#define TTM_PL_PRIV0 3
++#define TTM_PL_PRIV1 4
++#define TTM_PL_PRIV2 5
++#define TTM_PL_PRIV3 6
++#define TTM_PL_PRIV4 7
++#define TTM_PL_PRIV5 8
++#define TTM_PL_CI 9
++#define TTM_PL_RAR 10
++#define TTM_PL_SWAPPED 15
++
++#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
++#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
++#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
++#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
++#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
++#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
++#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
++#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
++#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
++#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
++#define TTM_PL_FLAG_RAR (1 << TTM_PL_RAR)
++#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
++#define TTM_PL_MASK_MEM 0x0000FFFF
++
++/*
++ * Other flags that affects data placement.
++ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
++ * if available.
++ * TTM_PL_FLAG_SHARED means that another application may
++ * reference the buffer.
++ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
++ * be evicted to make room for other buffers.
++ */
++
++#define TTM_PL_FLAG_CACHED (1 << 16)
++#define TTM_PL_FLAG_UNCACHED (1 << 17)
++#define TTM_PL_FLAG_WC (1 << 18)
++#define TTM_PL_FLAG_SHARED (1 << 20)
++#define TTM_PL_FLAG_NO_EVICT (1 << 21)
++
++#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
++ TTM_PL_FLAG_UNCACHED | \
++ TTM_PL_FLAG_WC)
++
++#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
++
++/*
++ * Access flags to be used for CPU- and GPU- mappings.
++ * The idea is that the TTM synchronization mechanism will
++ * allow concurrent READ access and exclusive write access.
++ * Currently GPU- and CPU accesses are exclusive.
++ */
++
++#define TTM_ACCESS_READ (1 << 0)
++#define TTM_ACCESS_WRITE (1 << 1)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c
+new file mode 100644
+index 0000000..e4d6964
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c
+@@ -0,0 +1,468 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_placement_user.h"
++#include "ttm_bo_driver.h"
++#include "ttm_object.h"
++#include "ttm_userobj_api.h"
++#include "ttm_lock.h"
++
++struct ttm_bo_user_object {
++ struct ttm_base_object base;
++ struct ttm_buffer_object bo;
++};
++
++static size_t pl_bo_size;
++
++static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
++{
++ size_t page_array_size =
++ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
++
++ if (unlikely(pl_bo_size == 0)) {
++ pl_bo_size = bdev->ttm_bo_extra_size +
++ ttm_round_pot(sizeof(struct ttm_bo_user_object));
++ }
++
++ return bdev->ttm_bo_size + 2 * page_array_size;
++}
++
++static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
++ *tfile, uint32_t handle)
++{
++ struct ttm_base_object *base;
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (unlikely(base == NULL)) {
++ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ if (unlikely(base->object_type != ttm_buffer_type)) {
++ ttm_base_object_unref(&base);
++ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ return container_of(base, struct ttm_bo_user_object, base);
++}
++
++struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
++ *tfile, uint32_t handle)
++{
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_base_object *base;
++
++ user_bo = ttm_bo_user_lookup(tfile, handle);
++ if (unlikely(user_bo == NULL))
++ return NULL;
++
++ (void)ttm_bo_reference(&user_bo->bo);
++ base = &user_bo->base;
++ ttm_base_object_unref(&base);
++ return &user_bo->bo;
++}
++
++static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(bo, struct ttm_bo_user_object, bo);
++
++ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
++ kfree(user_bo);
++}
++
++static void ttm_bo_user_release(struct ttm_base_object **p_base)
++{
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_base_object *base = *p_base;
++ struct ttm_buffer_object *bo;
++
++ *p_base = NULL;
++
++ if (unlikely(base == NULL))
++ return;
++
++ user_bo = container_of(base, struct ttm_bo_user_object, base);
++ bo = &user_bo->bo;
++ ttm_bo_unref(&bo);
++}
++
++static void ttm_bo_user_ref_release(struct ttm_base_object *base,
++ enum ttm_ref_type ref_type)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(base, struct ttm_bo_user_object, base);
++ struct ttm_buffer_object *bo = &user_bo->bo;
++
++ switch (ref_type) {
++ case TTM_REF_SYNCCPU_WRITE:
++ ttm_bo_synccpu_write_release(bo);
++ break;
++ default:
++ BUG();
++ }
++}
++
++static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
++ struct ttm_pl_rep *rep)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(bo, struct ttm_bo_user_object, bo);
++
++ rep->gpu_offset = bo->offset;
++ rep->bo_size = bo->num_pages << PAGE_SHIFT;
++ rep->map_handle = bo->addr_space_offset;
++ rep->placement = bo->mem.flags;
++ rep->handle = user_bo->base.hash.key;
++ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
++}
++
++int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_create_arg *arg = data;
++ struct ttm_pl_create_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *tmp;
++ struct ttm_bo_user_object *user_bo;
++ uint32_t flags;
++ int ret = 0;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ size_t acc_size =
++ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ flags = req->placement;
++ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
++ if (unlikely(user_bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++
++ bo = &user_bo->bo;
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ kfree(user_bo);
++ return ret;
++ }
++
++ ret = ttm_buffer_object_init(bdev, bo, req->size,
++ ttm_bo_type_device, flags,
++ req->page_alignment, 0, true,
++ NULL, acc_size, &ttm_bo_user_destroy);
++ ttm_read_unlock(lock);
++
++ /*
++ * Note that the ttm_buffer_object_init function
++ * would've called the destroy function on failure!!
++ */
++
++ if (unlikely(ret != 0))
++ goto out;
++
++ tmp = ttm_bo_reference(bo);
++ ret = ttm_base_object_init(tfile, &user_bo->base,
++ flags & TTM_PL_FLAG_SHARED,
++ ttm_buffer_type,
++ &ttm_bo_user_release,
++ &ttm_bo_user_ref_release);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unref(&bo);
++out:
++ return 0;
++out_err:
++ ttm_bo_unref(&tmp);
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_create_ub_arg *arg = data;
++ struct ttm_pl_create_ub_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *tmp;
++ struct ttm_bo_user_object *user_bo;
++ uint32_t flags;
++ int ret = 0;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ size_t acc_size =
++ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ flags = req->placement;
++ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
++ if (unlikely(user_bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ kfree(user_bo);
++ return ret;
++ }
++ bo = &user_bo->bo;
++ ret = ttm_buffer_object_init(bdev,
++ bo,
++ req->size,
++ ttm_bo_type_user,
++ flags,
++ req->page_alignment,
++ req->user_address,
++ true,
++ NULL,
++ acc_size,
++ &ttm_bo_user_destroy);
++
++ /*
++ * Note that the ttm_buffer_object_init function
++ * would've called the destroy function on failure!!
++ */
++ ttm_read_unlock(lock);
++ if (unlikely(ret != 0))
++ goto out;
++
++ tmp = ttm_bo_reference(bo);
++ ret = ttm_base_object_init(tfile, &user_bo->base,
++ flags & TTM_PL_FLAG_SHARED,
++ ttm_buffer_type,
++ &ttm_bo_user_release,
++ &ttm_bo_user_ref_release);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unref(&bo);
++out:
++ return 0;
++out_err:
++ ttm_bo_unref(&tmp);
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ union ttm_pl_reference_arg *arg = data;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_buffer_object *bo;
++ struct ttm_base_object *base;
++ int ret;
++
++ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
++ if (unlikely(user_bo == NULL)) {
++ printk(KERN_ERR "Could not reference buffer object.\n");
++ return -EINVAL;
++ }
++
++ bo = &user_bo->bo;
++ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR
++ "Could not add a reference to buffer object.\n");
++ goto out;
++ }
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++
++out:
++ base = &user_bo->base;
++ ttm_base_object_unref(&base);
++ return ret;
++}
++
++int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_reference_req *arg = data;
++
++ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
++}
++
++int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_synccpu_arg *arg = data;
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_buffer_object *bo;
++ struct ttm_base_object *base;
++ bool existed;
++ int ret;
++
++ switch (arg->op) {
++ case TTM_PL_SYNCCPU_OP_GRAB:
++ user_bo = ttm_bo_user_lookup(tfile, arg->handle);
++ if (unlikely(user_bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for synccpu.\n");
++ return -EINVAL;
++ }
++ bo = &user_bo->bo;
++ base = &user_bo->base;
++ ret = ttm_bo_synccpu_write_grab(bo,
++ arg->access_mode &
++ TTM_PL_SYNCCPU_MODE_NO_BLOCK);
++ if (unlikely(ret != 0)) {
++ ttm_base_object_unref(&base);
++ goto out;
++ }
++ ret = ttm_ref_object_add(tfile, &user_bo->base,
++ TTM_REF_SYNCCPU_WRITE, &existed);
++ if (existed || ret != 0)
++ ttm_bo_synccpu_write_release(bo);
++ ttm_base_object_unref(&base);
++ break;
++ case TTM_PL_SYNCCPU_OP_RELEASE:
++ ret = ttm_ref_object_base_unref(tfile, arg->handle,
++ TTM_REF_SYNCCPU_WRITE);
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++out:
++ return ret;
++}
++
++int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_setstatus_arg *arg = data;
++ struct ttm_pl_setstatus_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_device *bdev;
++ int ret;
++
++ bo = ttm_buffer_object_lookup(tfile, req->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for setstatus.\n");
++ return -EINVAL;
++ }
++
++ bdev = bo->bdev;
++
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ ret = ttm_bo_wait_cpu(bo, false);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_check_placement(bo, req->set_placement,
++ req->clr_placement);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ bo->proposed_flags = (bo->proposed_flags | req->set_placement)
++ & ~req->clr_placement;
++ ret = ttm_buffer_object_validate(bo, true, false);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ ttm_pl_fill_rep(bo, rep);
++out_err2:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++out_err1:
++ ttm_read_unlock(lock);
++out_err0:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_waitidle_arg *arg = data;
++ struct ttm_buffer_object *bo;
++ int ret;
++
++ bo = ttm_buffer_object_lookup(tfile, arg->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR "Could not find buffer object for waitidle.\n");
++ return -EINVAL;
++ }
++
++ ret =
++ ttm_bo_block_reservation(bo, true,
++ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
++ if (unlikely(ret != 0))
++ goto out;
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo,
++ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
++ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unblock_reservation(bo);
++out:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_verify_access(struct ttm_buffer_object *bo,
++ struct ttm_object_file *tfile)
++{
++ struct ttm_bo_user_object *ubo;
++
++ /*
++ * Check bo subclass.
++ */
++
++ if (unlikely(bo->destroy != &ttm_bo_user_destroy))
++ return -EPERM;
++
++ ubo = container_of(bo, struct ttm_bo_user_object, bo);
++ if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
++ return 0;
++
++ return -EPERM;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h
+new file mode 100644
+index 0000000..5d8100f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h
+@@ -0,0 +1,252 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PLACEMENT_USER_H_
++#define _TTM_PLACEMENT_USER_H_
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#else
++#include <linux/kernel.h>
++#endif
++
++#include "ttm_placement_common.h"
++
++#define TTM_PLACEMENT_MAJOR 0
++#define TTM_PLACEMENT_MINOR 1
++#define TTM_PLACEMENT_PL 0
++#define TTM_PLACEMENT_DATE "080819"
++
++/**
++ * struct ttm_pl_create_req
++ *
++ * @size: The buffer object size.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_BO_CREATE ioctl.
++ */
++
++struct ttm_pl_create_req {
++ uint64_t size;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_create_ub_req
++ *
++ * @size: The buffer object size.
++ * @user_address: User-space address of the memory area that
++ * should be used to back the buffer object cast to 64-bit.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_BO_CREATE_UB ioctl.
++ */
++
++struct ttm_pl_create_ub_req {
++ uint64_t size;
++ uint64_t user_address;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_rep
++ *
++ * @gpu_offset: The current offset into the memory region used.
++ * This can be used directly by the GPU if there are no
++ * additional GPU mapping procedures used by the driver.
++ *
++ * @bo_size: Actual buffer object size.
++ *
++ * @map_handle: Offset into the device address space.
++ * Used for map, seek, read, write. This will never change
++ * during the lifetime of an object.
++ *
++ * @placement: Flag indicating the placement status of
++ * the buffer object using the TTM_PL flags above.
++ *
++ * @sync_object_arg: Used for user-space synchronization and
++ * depends on the synchronization model used. If fences are
++ * used, this is the buffer_object::fence_type_mask
++ *
++ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and
++ * TTM_PL_SETSTATUS ioctls.
++ */
++
++struct ttm_pl_rep {
++ uint64_t gpu_offset;
++ uint64_t bo_size;
++ uint64_t map_handle;
++ uint32_t placement;
++ uint32_t handle;
++ uint32_t sync_object_arg;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_setstatus_req
++ *
++ * @set_placement: Placement flags to set.
++ *
++ * @clr_placement: Placement flags to clear.
++ *
++ * @handle: The object handle
++ *
++ * Input to the TTM_PL_SETSTATUS ioctl.
++ */
++
++struct ttm_pl_setstatus_req {
++ uint32_t set_placement;
++ uint32_t clr_placement;
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_reference_req
++ *
++ * @handle: The object to put a reference on.
++ *
++ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
++ */
++
++struct ttm_pl_reference_req {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * ACCESS mode flags for SYNCCPU.
++ *
++ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not
++ * writing to the buffer.
++ *
++ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
++ * accessing the buffer.
++ *
++ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
++ * for GPU accesses to finish but return -EBUSY.
++ *
++ * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable
++ * memory while synchronized for CPU.
++ */
++
++#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
++#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
++#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
++#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
++
++/**
++ * struct ttm_pl_synccpu_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @access_mode: access mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * @op: indicates whether to grab or release the
++ * buffer for cpu usage.
++ *
++ * Input to the TTM_PL_SYNCCPU ioctl.
++ */
++
++struct ttm_pl_synccpu_arg {
++ uint32_t handle;
++ uint32_t access_mode;
++ enum {
++ TTM_PL_SYNCCPU_OP_GRAB,
++ TTM_PL_SYNCCPU_OP_RELEASE
++ } op;
++ uint32_t pad64;
++};
++
++/*
++ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
++ *
++ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
++#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_waitidle_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @mode: wait mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * Argument to the TTM_BO_WAITIDLE ioctl.
++ */
++
++struct ttm_pl_waitidle_arg {
++ uint32_t handle;
++ uint32_t mode;
++};
++
++union ttm_pl_create_arg {
++ struct ttm_pl_create_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_reference_arg {
++ struct ttm_pl_reference_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_setstatus_arg {
++ struct ttm_pl_setstatus_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_create_ub_arg {
++ struct ttm_pl_create_ub_req req;
++ struct ttm_pl_rep rep;
++};
++
++/*
++ * Ioctl offsets.
++ */
++
++#define TTM_PL_CREATE 0x00
++#define TTM_PL_REFERENCE 0x01
++#define TTM_PL_UNREF 0x02
++#define TTM_PL_SYNCCPU 0x03
++#define TTM_PL_WAITIDLE 0x04
++#define TTM_PL_SETSTATUS 0x05
++#define TTM_PL_CREATE_UB 0x06
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h
+new file mode 100644
+index 0000000..ed73652
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h
+@@ -0,0 +1,67 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_REGMAN_H_
++#define _TTM_REGMAN_H_
++
++#include <linux/list.h>
++
++struct ttm_fence_object;
++
++struct ttm_reg {
++ struct list_head head;
++ struct ttm_fence_object *fence;
++ uint32_t fence_type;
++ uint32_t new_fence_type;
++};
++
++struct ttm_reg_manager {
++ struct list_head free;
++ struct list_head lru;
++ struct list_head unfenced;
++
++ int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
++ void (*reg_destroy)(struct ttm_reg *reg);
++};
++
++extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
++ const void *data,
++ uint32_t fence_class,
++ uint32_t fence_type,
++ int interruptible,
++ int no_wait,
++ struct ttm_reg **reg);
++
++extern void ttm_regs_fence(struct ttm_reg_manager *regs,
++ struct ttm_fence_object *fence);
++
++extern void ttm_regs_free(struct ttm_reg_manager *manager);
++extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
++extern void ttm_regs_init(struct ttm_reg_manager *manager,
++ int (*reg_reusable)(const struct ttm_reg *,
++ const void *),
++ void (*reg_destroy)(struct ttm_reg *));
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c
+new file mode 100644
+index 0000000..4c0e318
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c
+@@ -0,0 +1,653 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <linux/version.h>
++#include <linux/vmalloc.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/file.h>
++#include <linux/swap.h>
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++
++static int ttm_tt_swapin(struct ttm_tt *ttm);
++
++#if defined(CONFIG_X86)
++static void ttm_tt_clflush_page(struct page *page)
++{
++ uint8_t *page_virtual;
++ unsigned int i;
++
++ if (unlikely(page == NULL))
++ return;
++
++ page_virtual = kmap_atomic(page, KM_USER0);
++
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ clflush(page_virtual + i);
++
++ kunmap_atomic(page_virtual, KM_USER0);
++}
++
++static void ttm_tt_cache_flush_clflush(struct page *pages[],
++ unsigned long num_pages)
++{
++ unsigned long i;
++
++ mb();
++ for (i = 0; i < num_pages; ++i)
++ ttm_tt_clflush_page(*pages++);
++ mb();
++}
++#else
++static void ttm_tt_ipi_handler(void *null)
++{
++ ;
++}
++#endif
++
++void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
++{
++
++#if defined(CONFIG_X86)
++ if (cpu_has_clflush) {
++ ttm_tt_cache_flush_clflush(pages, num_pages);
++ return;
++ }
++#else
++ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0)
++ printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
++#endif
++}
++
++/**
++ * Allocates storage for pointers to the pages that back the ttm.
++ *
++ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
++ */
++static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
++{
++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
++ ttm->pages = NULL;
++
++ if (size <= PAGE_SIZE)
++ ttm->pages = kzalloc(size, GFP_KERNEL);
++
++ if (!ttm->pages) {
++ ttm->pages = vmalloc_user(size);
++ if (ttm->pages)
++ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
++ }
++}
++
++static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
++{
++ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
++ vfree(ttm->pages);
++ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
++ } else {
++ kfree(ttm->pages);
++ }
++ ttm->pages = NULL;
++}
++
++static struct page *ttm_tt_alloc_page(void)
++{
++ return alloc_page(GFP_KERNEL | __GFP_ZERO);
++}
++
++static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
++{
++ int write;
++ int dirty;
++ struct page *page;
++ int i;
++ struct ttm_backend *be = ttm->be;
++
++ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
++ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
++ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
++
++ if (be)
++ be->func->clear(be);
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ page = ttm->pages[i];
++ if (page == NULL)
++ continue;
++
++ if (page == ttm->dummy_read_page) {
++ BUG_ON(write);
++ continue;
++ }
++
++ if (write && dirty && !PageReserved(page))
++ set_page_dirty_lock(page);
++
++ ttm->pages[i] = NULL;
++ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
++ put_page(page);
++ }
++ ttm->state = tt_unpopulated;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++}
++
++static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
++{
++ struct page *p;
++ struct ttm_bo_device *bdev = ttm->bdev;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ int ret;
++
++ while (NULL == (p = ttm->pages[index])) {
++ p = ttm_tt_alloc_page();
++
++ if (!p)
++ return NULL;
++
++ if (PageHighMem(p)) {
++ ret = ttm_mem_global_alloc(mem_glob,
++ PAGE_SIZE,
++ false,
++ false,
++ true);
++ if (unlikely(ret != 0))
++ goto out_err;
++ ttm->pages[--ttm->first_himem_page] = p;
++ } else {
++ ret =
++ ttm_mem_global_alloc(mem_glob,
++ PAGE_SIZE,
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ goto out_err;
++ ttm->pages[++ttm->last_lomem_page] = p;
++ }
++ }
++ return p;
++out_err:
++ put_page(p);
++ return NULL;
++}
++
++struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
++{
++ int ret;
++
++ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++ ret = ttm_tt_swapin(ttm);
++ if (unlikely(ret != 0))
++ return NULL;
++ }
++ return __ttm_tt_get_page(ttm, index);
++}
++
++int ttm_tt_populate(struct ttm_tt *ttm)
++{
++ struct page *page;
++ unsigned long i;
++ struct ttm_backend *be;
++ int ret;
++
++ if (ttm->state != tt_unpopulated)
++ return 0;
++
++ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++ ret = ttm_tt_swapin(ttm);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ be = ttm->be;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ page = __ttm_tt_get_page(ttm, i);
++ if (!page)
++ return -ENOMEM;
++ }
++
++ be->func->populate(be, ttm->num_pages, ttm->pages,
++ ttm->dummy_read_page);
++ ttm->state = tt_unbound;
++ return 0;
++}
++
++#ifdef CONFIG_X86
++static inline int ttm_tt_set_page_caching(struct page *p,
++ enum ttm_caching_state c_state)
++{
++ if (PageHighMem(p))
++ return 0;
++
++ switch (c_state) {
++ case tt_cached:
++ return set_pages_wb(p, 1);
++ case tt_wc:
++ return set_memory_wc((unsigned long) page_address(p), 1);
++ default:
++ return set_pages_uc(p, 1);
++ }
++}
++#else /* CONFIG_X86 */
++static inline int ttm_tt_set_page_caching(struct page *p,
++ enum ttm_caching_state c_state)
++{
++ return 0;
++}
++#endif /* CONFIG_X86 */
++
++/*
++ * Change caching policy for the linear kernel map
++ * for range of pages in a ttm.
++ */
++
++static int ttm_tt_set_caching(struct ttm_tt *ttm,
++ enum ttm_caching_state c_state)
++{
++ int i, j;
++ struct page *cur_page;
++ int ret;
++
++ if (ttm->caching_state == c_state)
++ return 0;
++
++ if (c_state != tt_cached) {
++ ret = ttm_tt_populate(ttm);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ if (ttm->caching_state == tt_cached)
++ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ cur_page = ttm->pages[i];
++ if (likely(cur_page != NULL)) {
++ ret = ttm_tt_set_page_caching(cur_page, c_state);
++ if (unlikely(ret != 0))
++ goto out_err;
++ }
++ }
++
++ ttm->caching_state = c_state;
++
++ return 0;
++
++out_err:
++ for (j = 0; j < i; ++j) {
++ cur_page = ttm->pages[j];
++ if (likely(cur_page != NULL)) {
++ (void)ttm_tt_set_page_caching(cur_page,
++ ttm->caching_state);
++ }
++ }
++
++ return ret;
++}
++
++int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
++{
++ enum ttm_caching_state state;
++
++ if (placement & TTM_PL_FLAG_WC)
++ state = tt_wc;
++ else if (placement & TTM_PL_FLAG_UNCACHED)
++ state = tt_uncached;
++ else
++ state = tt_cached;
++
++ return ttm_tt_set_caching(ttm, state);
++}
++
++static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
++{
++ int i;
++ struct page *cur_page;
++ struct ttm_backend *be = ttm->be;
++
++ if (be)
++ be->func->clear(be);
++ (void)ttm_tt_set_caching(ttm, tt_cached);
++ for (i = 0; i < ttm->num_pages; ++i) {
++ cur_page = ttm->pages[i];
++ ttm->pages[i] = NULL;
++ if (cur_page) {
++ if (page_count(cur_page) != 1)
++ printk(KERN_ERR
++ "Erroneous page count. Leaking pages.\n");
++ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
++ PageHighMem(cur_page));
++ __free_page(cur_page);
++ }
++ }
++ ttm->state = tt_unpopulated;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++}
++
++void ttm_tt_destroy(struct ttm_tt *ttm)
++{
++ struct ttm_backend *be;
++
++ if (unlikely(ttm == NULL))
++ return;
++
++ be = ttm->be;
++ if (likely(be != NULL)) {
++ be->func->destroy(be);
++ ttm->be = NULL;
++ }
++
++ if (likely(ttm->pages != NULL)) {
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
++ ttm_tt_free_user_pages(ttm);
++ else
++ ttm_tt_free_alloced_pages(ttm);
++
++ ttm_tt_free_page_directory(ttm);
++ }
++
++ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
++ ttm->swap_storage)
++ fput(ttm->swap_storage);
++
++ kfree(ttm);
++}
++
++int ttm_tt_set_user(struct ttm_tt *ttm,
++ struct task_struct *tsk,
++ unsigned long start, unsigned long num_pages)
++{
++ struct mm_struct *mm = tsk->mm;
++ int ret;
++ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
++ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
++
++ BUG_ON(num_pages != ttm->num_pages);
++ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
++
++ /**
++ * Account user pages as lowmem pages for now.
++ */
++
++ ret = ttm_mem_global_alloc(mem_glob,
++ num_pages * PAGE_SIZE,
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ down_read(&mm->mmap_sem);
++ ret = get_user_pages(tsk, mm, start, num_pages,
++ write, 0, ttm->pages, NULL);
++ up_read(&mm->mmap_sem);
++
++ if (ret != num_pages && write) {
++ ttm_tt_free_user_pages(ttm);
++ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
++ return -ENOMEM;
++ }
++
++ ttm->tsk = tsk;
++ ttm->start = start;
++ ttm->state = tt_unbound;
++
++ return 0;
++}
++
++struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
++ uint32_t page_flags, struct page *dummy_read_page)
++{
++ struct ttm_bo_driver *bo_driver = bdev->driver;
++ struct ttm_tt *ttm;
++
++ if (!bo_driver)
++ return NULL;
++
++ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
++ if (!ttm)
++ return NULL;
++
++ ttm->bdev = bdev;
++
++ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++ ttm->caching_state = tt_cached;
++ ttm->page_flags = page_flags;
++
++ ttm->dummy_read_page = dummy_read_page;
++
++ ttm_tt_alloc_page_directory(ttm);
++ if (!ttm->pages) {
++ ttm_tt_destroy(ttm);
++ printk(KERN_ERR "Failed allocating page table\n");
++ return NULL;
++ }
++ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
++ if (!ttm->be) {
++ ttm_tt_destroy(ttm);
++ printk(KERN_ERR "Failed creating ttm backend entry\n");
++ return NULL;
++ }
++ ttm->state = tt_unpopulated;
++ return ttm;
++}
++
++/**
++ * ttm_tt_unbind:
++ *
++ * @ttm: the object to unbind from the graphics device
++ *
++ * Unbind an object from the aperture. This removes the mappings
++ * from the graphics device and flushes caches if necessary.
++ */
++void ttm_tt_unbind(struct ttm_tt *ttm)
++{
++ int ret;
++ struct ttm_backend *be = ttm->be;
++
++ if (ttm->state == tt_bound) {
++ ret = be->func->unbind(be);
++ BUG_ON(ret);
++ }
++ ttm->state = tt_unbound;
++}
++
++/**
++ * ttm_tt_bind:
++ *
++ * @ttm: the ttm object to bind to the graphics device
++ *
++ * @bo_mem: the aperture memory region which will hold the object
++ *
++ * Bind a ttm object to the aperture. This ensures that the necessary
++ * pages are allocated, flushes CPU caches as needed and marks the
++ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
++ * modified by the GPU
++ */
++
++int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
++{
++ int ret = 0;
++ struct ttm_backend *be;
++
++ if (!ttm)
++ return -EINVAL;
++
++ if (ttm->state == tt_bound)
++ return 0;
++
++ be = ttm->be;
++
++ ret = ttm_tt_populate(ttm);
++ if (ret)
++ return ret;
++
++ ret = be->func->bind(be, bo_mem);
++ if (ret) {
++ printk(KERN_ERR "Couldn't bind backend.\n");
++ return ret;
++ }
++
++ ttm->state = tt_bound;
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
++ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
++ return 0;
++}
++
++static int ttm_tt_swapin(struct ttm_tt *ttm)
++{
++ struct address_space *swap_space;
++ struct file *swap_storage;
++ struct page *from_page;
++ struct page *to_page;
++ void *from_virtual;
++ void *to_virtual;
++ int i;
++ int ret;
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
++ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
++ ttm->num_pages);
++ if (unlikely(ret != 0))
++ return ret;
++
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
++ return 0;
++ }
++
++ swap_storage = ttm->swap_storage;
++ BUG_ON(swap_storage == NULL);
++
++ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ from_page = read_mapping_page(swap_space, i, NULL);
++ if (IS_ERR(from_page))
++ goto out_err;
++ to_page = __ttm_tt_get_page(ttm, i);
++ if (unlikely(to_page == NULL))
++ goto out_err;
++
++ preempt_disable();
++ from_virtual = kmap_atomic(from_page, KM_USER0);
++ to_virtual = kmap_atomic(to_page, KM_USER1);
++ memcpy(to_virtual, from_virtual, PAGE_SIZE);
++ kunmap_atomic(to_virtual, KM_USER1);
++ kunmap_atomic(from_virtual, KM_USER0);
++ preempt_enable();
++ page_cache_release(from_page);
++ }
++
++ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
++ fput(swap_storage);
++ ttm->swap_storage = NULL;
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
++
++ return 0;
++out_err:
++ ttm_tt_free_alloced_pages(ttm);
++ return -ENOMEM;
++}
++
++int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
++{
++ struct address_space *swap_space;
++ struct file *swap_storage;
++ struct page *from_page;
++ struct page *to_page;
++ void *from_virtual;
++ void *to_virtual;
++ int i;
++
++ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
++ BUG_ON(ttm->caching_state != tt_cached);
++
++ /*
++ * For user buffers, just unpin the pages, as there should be
++ * vma references.
++ */
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
++ ttm_tt_free_user_pages(ttm);
++ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
++ ttm->swap_storage = NULL;
++ return 0;
++ }
++
++ if (!persistant_swap_storage) {
++ swap_storage = shmem_file_setup("ttm swap",
++ ttm->num_pages << PAGE_SHIFT,
++ 0);
++ if (unlikely(IS_ERR(swap_storage))) {
++ printk(KERN_ERR "Failed allocating swap storage.\n");
++ return -ENOMEM;
++ }
++ } else
++ swap_storage = persistant_swap_storage;
++
++ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ from_page = ttm->pages[i];
++ if (unlikely(from_page == NULL))
++ continue;
++ to_page = read_mapping_page(swap_space, i, NULL);
++ if (unlikely(to_page == NULL))
++ goto out_err;
++
++ preempt_disable();
++ from_virtual = kmap_atomic(from_page, KM_USER0);
++ to_virtual = kmap_atomic(to_page, KM_USER1);
++ memcpy(to_virtual, from_virtual, PAGE_SIZE);
++ kunmap_atomic(to_virtual, KM_USER1);
++ kunmap_atomic(from_virtual, KM_USER0);
++ preempt_enable();
++ set_page_dirty(to_page);
++ mark_page_accessed(to_page);
++ /* unlock_page(to_page); */
++ page_cache_release(to_page);
++ }
++
++ ttm_tt_free_alloced_pages(ttm);
++ ttm->swap_storage = swap_storage;
++ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
++ if (persistant_swap_storage)
++ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
++
++ return 0;
++out_err:
++ if (!persistant_swap_storage)
++ fput(swap_storage);
++
++ return -ENOMEM;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h
+new file mode 100644
+index 0000000..36df724
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h
+@@ -0,0 +1,72 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_USEROBJ_API_H_
++#define _TTM_USEROBJ_API_H_
++
++#include "ttm_placement_user.h"
++#include "ttm_fence_user.h"
++#include "ttm_object.h"
++#include "ttm_fence_api.h"
++#include "ttm_bo_api.h"
++
++struct ttm_lock;
++
++/*
++ * User ioctls.
++ */
++
++extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
++
++extern int
++ttm_fence_user_create(struct ttm_fence_device *fdev,
++ struct ttm_object_file *tfile,
++ uint32_t fence_class,
++ uint32_t fence_types,
++ uint32_t create_flags,
++ struct ttm_fence_object **fence, uint32_t * user_handle);
++
++extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
++ *tfile,
++ uint32_t handle);
++
++extern int
++ttm_pl_verify_access(struct ttm_buffer_object *bo,
++ struct ttm_object_file *tfile);
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/COPYING b/drivers/gpu/drm/mrst/pvr/COPYING
+new file mode 100644
+index 0000000..80dd76b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/COPYING
+@@ -0,0 +1,351 @@
++
++This software is Copyright (C) 2008 Imagination Technologies Ltd.
++ All rights reserved.
++
++You may use, distribute and copy this software under the terms of
++GNU General Public License version 2, which is displayed below.
++
++-------------------------------------------------------------------------
++
++ GNU GENERAL PUBLIC LICENSE
++ Version 2, June 1991
++
++ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
++ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++
++ Preamble
++
++ The licenses for most software are designed to take away your
++freedom to share and change it. By contrast, the GNU General Public
++License is intended to guarantee your freedom to share and change free
++software--to make sure the software is free for all its users. This
++General Public License applies to most of the Free Software
++Foundation's software and to any other program whose authors commit to
++using it. (Some other Free Software Foundation software is covered by
++the GNU Library General Public License instead.) You can apply it to
++your programs, too.
++
++ When we speak of free software, we are referring to freedom, not
++price. Our General Public Licenses are designed to make sure that you
++have the freedom to distribute copies of free software (and charge for
++this service if you wish), that you receive source code or can get it
++if you want it, that you can change the software or use pieces of it
++in new free programs; and that you know you can do these things.
++
++ To protect your rights, we need to make restrictions that forbid
++anyone to deny you these rights or to ask you to surrender the rights.
++These restrictions translate to certain responsibilities for you if you
++distribute copies of the software, or if you modify it.
++
++ For example, if you distribute copies of such a program, whether
++gratis or for a fee, you must give the recipients all the rights that
++you have. You must make sure that they, too, receive or can get the
++source code. And you must show them these terms so they know their
++rights.
++
++ We protect your rights with two steps: (1) copyright the software, and
++(2) offer you this license which gives you legal permission to copy,
++distribute and/or modify the software.
++
++ Also, for each author's protection and ours, we want to make certain
++that everyone understands that there is no warranty for this free
++software. If the software is modified by someone else and passed on, we
++want its recipients to know that what they have is not the original, so
++that any problems introduced by others will not reflect on the original
++authors' reputations.
++
++ Finally, any free program is threatened constantly by software
++patents. We wish to avoid the danger that redistributors of a free
++program will individually obtain patent licenses, in effect making the
++program proprietary. To prevent this, we have made it clear that any
++patent must be licensed for everyone's free use or not licensed at all.
++
++ The precise terms and conditions for copying, distribution and
++modification follow.
++
++ GNU GENERAL PUBLIC LICENSE
++ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
++
++ 0. This License applies to any program or other work which contains
++a notice placed by the copyright holder saying it may be distributed
++under the terms of this General Public License. The "Program", below,
++refers to any such program or work, and a "work based on the Program"
++means either the Program or any derivative work under copyright law:
++that is to say, a work containing the Program or a portion of it,
++either verbatim or with modifications and/or translated into another
++language. (Hereinafter, translation is included without limitation in
++the term "modification".) Each licensee is addressed as "you".
++
++Activities other than copying, distribution and modification are not
++covered by this License; they are outside its scope. The act of
++running the Program is not restricted, and the output from the Program
++is covered only if its contents constitute a work based on the
++Program (independent of having been made by running the Program).
++Whether that is true depends on what the Program does.
++
++ 1. You may copy and distribute verbatim copies of the Program's
++source code as you receive it, in any medium, provided that you
++conspicuously and appropriately publish on each copy an appropriate
++copyright notice and disclaimer of warranty; keep intact all the
++notices that refer to this License and to the absence of any warranty;
++and give any other recipients of the Program a copy of this License
++along with the Program.
++
++You may charge a fee for the physical act of transferring a copy, and
++you may at your option offer warranty protection in exchange for a fee.
++
++ 2. You may modify your copy or copies of the Program or any portion
++of it, thus forming a work based on the Program, and copy and
++distribute such modifications or work under the terms of Section 1
++above, provided that you also meet all of these conditions:
++
++ a) You must cause the modified files to carry prominent notices
++ stating that you changed the files and the date of any change.
++
++ b) You must cause any work that you distribute or publish, that in
++ whole or in part contains or is derived from the Program or any
++ part thereof, to be licensed as a whole at no charge to all third
++ parties under the terms of this License.
++
++ c) If the modified program normally reads commands interactively
++ when run, you must cause it, when started running for such
++ interactive use in the most ordinary way, to print or display an
++ announcement including an appropriate copyright notice and a
++ notice that there is no warranty (or else, saying that you provide
++ a warranty) and that users may redistribute the program under
++ these conditions, and telling the user how to view a copy of this
++ License. (Exception: if the Program itself is interactive but
++ does not normally print such an announcement, your work based on
++ the Program is not required to print an announcement.)
++
++These requirements apply to the modified work as a whole. If
++identifiable sections of that work are not derived from the Program,
++and can be reasonably considered independent and separate works in
++themselves, then this License, and its terms, do not apply to those
++sections when you distribute them as separate works. But when you
++distribute the same sections as part of a whole which is a work based
++on the Program, the distribution of the whole must be on the terms of
++this License, whose permissions for other licensees extend to the
++entire whole, and thus to each and every part regardless of who wrote it.
++
++Thus, it is not the intent of this section to claim rights or contest
++your rights to work written entirely by you; rather, the intent is to
++exercise the right to control the distribution of derivative or
++collective works based on the Program.
++
++In addition, mere aggregation of another work not based on the Program
++with the Program (or with a work based on the Program) on a volume of
++a storage or distribution medium does not bring the other work under
++the scope of this License.
++
++ 3. You may copy and distribute the Program (or a work based on it,
++under Section 2) in object code or executable form under the terms of
++Sections 1 and 2 above provided that you also do one of the following:
++
++ a) Accompany it with the complete corresponding machine-readable
++ source code, which must be distributed under the terms of Sections
++ 1 and 2 above on a medium customarily used for software interchange; or,
++
++ b) Accompany it with a written offer, valid for at least three
++ years, to give any third party, for a charge no more than your
++ cost of physically performing source distribution, a complete
++ machine-readable copy of the corresponding source code, to be
++ distributed under the terms of Sections 1 and 2 above on a medium
++ customarily used for software interchange; or,
++
++ c) Accompany it with the information you received as to the offer
++ to distribute corresponding source code. (This alternative is
++ allowed only for noncommercial distribution and only if you
++ received the program in object code or executable form with such
++ an offer, in accord with Subsection b above.)
++
++The source code for a work means the preferred form of the work for
++making modifications to it. For an executable work, complete source
++code means all the source code for all modules it contains, plus any
++associated interface definition files, plus the scripts used to
++control compilation and installation of the executable. However, as a
++special exception, the source code distributed need not include
++anything that is normally distributed (in either source or binary
++form) with the major components (compiler, kernel, and so on) of the
++operating system on which the executable runs, unless that component
++itself accompanies the executable.
++
++If distribution of executable or object code is made by offering
++access to copy from a designated place, then offering equivalent
++access to copy the source code from the same place counts as
++distribution of the source code, even though third parties are not
++compelled to copy the source along with the object code.
++
++ 4. You may not copy, modify, sublicense, or distribute the Program
++except as expressly provided under this License. Any attempt
++otherwise to copy, modify, sublicense or distribute the Program is
++void, and will automatically terminate your rights under this License.
++However, parties who have received copies, or rights, from you under
++this License will not have their licenses terminated so long as such
++parties remain in full compliance.
++
++ 5. You are not required to accept this License, since you have not
++signed it. However, nothing else grants you permission to modify or
++distribute the Program or its derivative works. These actions are
++prohibited by law if you do not accept this License. Therefore, by
++modifying or distributing the Program (or any work based on the
++Program), you indicate your acceptance of this License to do so, and
++all its terms and conditions for copying, distributing or modifying
++the Program or works based on it.
++
++ 6. Each time you redistribute the Program (or any work based on the
++Program), the recipient automatically receives a license from the
++original licensor to copy, distribute or modify the Program subject to
++these terms and conditions. You may not impose any further
++restrictions on the recipients' exercise of the rights granted herein.
++You are not responsible for enforcing compliance by third parties to
++this License.
++
++ 7. If, as a consequence of a court judgment or allegation of patent
++infringement or for any other reason (not limited to patent issues),
++conditions are imposed on you (whether by court order, agreement or
++otherwise) that contradict the conditions of this License, they do not
++excuse you from the conditions of this License. If you cannot
++distribute so as to satisfy simultaneously your obligations under this
++License and any other pertinent obligations, then as a consequence you
++may not distribute the Program at all. For example, if a patent
++license would not permit royalty-free redistribution of the Program by
++all those who receive copies directly or indirectly through you, then
++the only way you could satisfy both it and this License would be to
++refrain entirely from distribution of the Program.
++
++If any portion of this section is held invalid or unenforceable under
++any particular circumstance, the balance of the section is intended to
++apply and the section as a whole is intended to apply in other
++circumstances.
++
++It is not the purpose of this section to induce you to infringe any
++patents or other property right claims or to contest validity of any
++such claims; this section has the sole purpose of protecting the
++integrity of the free software distribution system, which is
++implemented by public license practices. Many people have made
++generous contributions to the wide range of software distributed
++through that system in reliance on consistent application of that
++system; it is up to the author/donor to decide if he or she is willing
++to distribute software through any other system and a licensee cannot
++impose that choice.
++
++This section is intended to make thoroughly clear what is believed to
++be a consequence of the rest of this License.
++
++ 8. If the distribution and/or use of the Program is restricted in
++certain countries either by patents or by copyrighted interfaces, the
++original copyright holder who places the Program under this License
++may add an explicit geographical distribution limitation excluding
++those countries, so that distribution is permitted only in or among
++countries not thus excluded. In such case, this License incorporates
++the limitation as if written in the body of this License.
++
++ 9. The Free Software Foundation may publish revised and/or new versions
++of the General Public License from time to time. Such new versions will
++be similar in spirit to the present version, but may differ in detail to
++address new problems or concerns.
++
++Each version is given a distinguishing version number. If the Program
++specifies a version number of this License which applies to it and "any
++later version", you have the option of following the terms and conditions
++either of that version or of any later version published by the Free
++Software Foundation. If the Program does not specify a version number of
++this License, you may choose any version ever published by the Free Software
++Foundation.
++
++ 10. If you wish to incorporate parts of the Program into other free
++programs whose distribution conditions are different, write to the author
++to ask for permission. For software which is copyrighted by the Free
++Software Foundation, write to the Free Software Foundation; we sometimes
++make exceptions for this. Our decision will be guided by the two goals
++of preserving the free status of all derivatives of our free software and
++of promoting the sharing and reuse of software generally.
++
++ NO WARRANTY
++
++ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
++FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
++OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
++PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
++OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
++TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
++PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
++REPAIR OR CORRECTION.
++
++ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
++WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
++REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
++INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
++OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
++TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
++YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
++PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGES.
++
++ END OF TERMS AND CONDITIONS
++
++ Appendix: How to Apply These Terms to Your New Programs
++
++ If you develop a new program, and you want it to be of the greatest
++possible use to the public, the best way to achieve this is to make it
++free software which everyone can redistribute and change under these terms.
++
++ To do so, attach the following notices to the program. It is safest
++to attach them to the start of each source file to most effectively
++convey the exclusion of warranty; and each file should have at least
++the "copyright" line and a pointer to where the full notice is found.
++
++ <one line to give the program's name and a brief idea of what it does.>
++ Copyright (C) 19yy <name of author>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++
++Also add information on how to contact you by electronic and paper mail.
++
++If the program is interactive, make it output a short notice like this
++when it starts in an interactive mode:
++
++ Gnomovision version 69, Copyright (C) 19yy name of author
++ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
++ This is free software, and you are welcome to redistribute it
++ under certain conditions; type `show c' for details.
++
++The hypothetical commands `show w' and `show c' should show the appropriate
++parts of the General Public License. Of course, the commands you use may
++be called something other than `show w' and `show c'; they could even be
++mouse-clicks or menu items--whatever suits your program.
++
++You should also get your employer (if you work as a programmer) or your
++school, if any, to sign a "copyright disclaimer" for the program, if
++necessary. Here is a sample; alter the names:
++
++ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
++ `Gnomovision' (which makes passes at compilers) written by James Hacker.
++
++ <signature of Ty Coon>, 1 April 1989
++ Ty Coon, President of Vice
++
++This General Public License does not permit incorporating your program into
++proprietary programs. If your program is a subroutine library, you may
++consider it more useful to permit linking proprietary applications with the
++library. If this is what you want to do, use the GNU Library General
++Public License instead of this License.
++
++-------------------------------------------------------------------------
++
+diff --git a/drivers/gpu/drm/mrst/pvr/INSTALL b/drivers/gpu/drm/mrst/pvr/INSTALL
+new file mode 100644
+index 0000000..e4c1069
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/INSTALL
+@@ -0,0 +1,76 @@
++
++SGX Embedded Systems DDK for the Linux kernel.
++Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
++======================================================================
++
++This file covers how to build and install the Imagination Technologies
++SGX DDK for the Linux kernel.
++
++
++Build System Environment Variables
++-------------------------------------------
++
++The SGX DDK Build scripts depend on a number of environment variables
++being setup before compilation or installation of DDK software can
++commence:
++
++$DISCIMAGE
++The DDK Build scripts install files to the location specified by the
++DISCIMAGE environment variable, when the make install target is used.
++This should point to the target filesystem.
++$ export DISCIMAGE=/path/to/filesystem
++
++$KERNELDIR
++When building the SGX DDK kernel module, the build needs access
++to the headers of the Linux kernel
++$ export KERNELDIR=/path/to/kernel
++
++$PATH
++If a cross compiler is being used make sure the PATH environment variable
++includes the path to the toolchain
++$ export PATH=$PATH:/path/to/toolchain
++
++$CROSS_COMPILE
++Since the SGX DDK Build scripts are geared toward a cross-compilation
++workflow, the CROSS_COMPILE environment variable needs to be set
++$ export CROSS_COMPILE=toolchain-prefix-
++
++
++Build and Install Instructions
++-------------------------------------------
++
++The SGX DDK configures different target builds within directories under
++eurasiacon/build/linux/.
++
++The supported build targets are:
++
++ all Makes everything
++ clean Removes all intermediate files created by a build.
++ clobber Removes all binaries for all builds as well.
++ install Runs the install script generated by the build.
++
++The following variables may be set on the command line to influence a build.
++
++ BUILD The type of build being performed.
++ Alternatives are release, timing or debug.
++ CFLAGS Build dependent optimisations and debug information flags.
++ SILENT Determines whether text of commands is produced during build.
++
++To build for a target platform, change to the appropriate target directory, e.g.:
++$ cd eurasiacon/build/linux/platform/kbuild
++
++Issue the make command:
++$ make BUILD=debug all
++
++The DDK software must be installed by the root user. Become the root user:
++$ su
++
++Install the DDK software:
++$ make install
++
++Become an ordinary user again:
++$ exit
++
++
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/README b/drivers/gpu/drm/mrst/pvr/README
+new file mode 100644
+index 0000000..8039c39
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/README
+@@ -0,0 +1,48 @@
++
++SGX Embedded Systems DDK for Linux kernel.
++Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
++======================================================================
++
++
++About
++-------------------------------------------
++
++This is the Imagination Technologies SGX DDK for the Linux kernel.
++
++
++License
++-------------------------------------------
++
++You may use, distribute and copy this software under the terms of
++GNU General Public License version 2.
++
++The full GNU General Public License version 2 is included in this
++distribution in the file called "COPYING".
++
++
++Build and Install Instructions
++-------------------------------------------
++
++For details see the "INSTALL" file.
++
++To build for a target platform, change to the appropriate target directory, e.g.:
++$ cd eurasiacon/build/linux/platform/kbuild
++
++Issue the make command:
++$ make BUILD=debug all
++
++The DDK software must be installed by the root user. Become the root user:
++$ su
++
++Install the DDK software:
++$ make install
++
++Become an ordinary user again:
++$ exit
++
++
++Contact information:
++-------------------------------------------
++
++Imagination Technologies Ltd. <gpl-support@imgtec.com>
++Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+diff --git a/drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore b/drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore
+new file mode 100644
+index 0000000..f558f8b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore
+@@ -0,0 +1,6 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++binary_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h b/drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h
+new file mode 100644
+index 0000000..e65e551
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h
+@@ -0,0 +1,298 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRVIF_
++#define _DBGDRVIF_
++
++
++#include "ioctldef.h"
++
++#define DEBUG_CAPMODE_FRAMED 0x00000001UL
++#define DEBUG_CAPMODE_CONTINUOUS 0x00000002UL
++#define DEBUG_CAPMODE_HOTKEY 0x00000004UL
++
++#define DEBUG_OUTMODE_STANDARDDBG 0x00000001UL
++#define DEBUG_OUTMODE_MONO 0x00000002UL
++#define DEBUG_OUTMODE_STREAMENABLE 0x00000004UL
++#define DEBUG_OUTMODE_ASYNC 0x00000008UL
++#define DEBUG_OUTMODE_SGXVGA 0x00000010UL
++
++#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001UL
++#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002UL
++#define DEBUG_FLAGS_ENABLESAMPLE 0x00000004UL
++
++#define DEBUG_FLAGS_TEXTSTREAM 0x80000000UL
++
++#define DEBUG_LEVEL_0 0x00000001UL
++#define DEBUG_LEVEL_1 0x00000003UL
++#define DEBUG_LEVEL_2 0x00000007UL
++#define DEBUG_LEVEL_3 0x0000000FUL
++#define DEBUG_LEVEL_4 0x0000001FUL
++#define DEBUG_LEVEL_5 0x0000003FUL
++#define DEBUG_LEVEL_6 0x0000007FUL
++#define DEBUG_LEVEL_7 0x000000FFUL
++#define DEBUG_LEVEL_8 0x000001FFUL
++#define DEBUG_LEVEL_9 0x000003FFUL
++#define DEBUG_LEVEL_10 0x000007FFUL
++#define DEBUG_LEVEL_11 0x00000FFFUL
++
++#define DEBUG_LEVEL_SEL0 0x00000001UL
++#define DEBUG_LEVEL_SEL1 0x00000002UL
++#define DEBUG_LEVEL_SEL2 0x00000004UL
++#define DEBUG_LEVEL_SEL3 0x00000008UL
++#define DEBUG_LEVEL_SEL4 0x00000010UL
++#define DEBUG_LEVEL_SEL5 0x00000020UL
++#define DEBUG_LEVEL_SEL6 0x00000040UL
++#define DEBUG_LEVEL_SEL7 0x00000080UL
++#define DEBUG_LEVEL_SEL8 0x00000100UL
++#define DEBUG_LEVEL_SEL9 0x00000200UL
++#define DEBUG_LEVEL_SEL10 0x00000400UL
++#define DEBUG_LEVEL_SEL11 0x00000800UL
++
++#define DEBUG_SERVICE_IOCTL_BASE 0x800UL
++#define DEBUG_SERVICE_CREATESTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DESTROYSTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRING \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READSTRING \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READ \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGOUTMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGLEVEL \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_OVERRIDEMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DEFAULTMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSERVICETABLE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE2 \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRINGCM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITECM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETMARKER \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETMARKER \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_ISCAPTUREFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITELF \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READLF \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WAITFOREVENT \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x18, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++
++
++typedef enum _DBG_EVENT_ {
++ DBG_EVENT_STREAM_DATA = 1
++} DBG_EVENT;
++
++typedef struct _DBG_IN_CREATESTREAM_ {
++ IMG_UINT32 ui32Pages;
++ IMG_UINT32 ui32CapMode;
++ IMG_UINT32 ui32OutMode;
++ IMG_CHAR *pszName;
++}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
++
++typedef struct _DBG_IN_FINDSTREAM_ {
++ IMG_BOOL bResetStream;
++ IMG_CHAR *pszName;
++}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
++
++typedef struct _DBG_IN_WRITESTRING_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_CHAR *pszString;
++}DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING;
++
++typedef struct _DBG_IN_READSTRING_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32StringLen;
++ IMG_CHAR *pszString;
++} DBG_IN_READSTRING, *PDBG_IN_READSTRING;
++
++typedef struct _DBG_IN_SETDEBUGMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32SampleRate;
++} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE;
++
++typedef struct _DBG_IN_SETDEBUGOUTMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE;
++
++typedef struct _DBG_IN_SETDEBUGLEVEL_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL;
++
++typedef struct _DBG_IN_SETFRAME_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Frame;
++} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME;
++
++typedef struct _DBG_IN_WRITE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_UINT32 ui32TransferSize;
++ IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE, *PDBG_IN_WRITE;
++
++typedef struct _DBG_IN_READ_ {
++ IMG_VOID *pvStream;
++ IMG_BOOL bReadInitBuffer;
++ IMG_UINT32 ui32OutBufferSize;
++ IMG_UINT8 *pui8OutBuffer;
++} DBG_IN_READ, *PDBG_IN_READ;
++
++typedef struct _DBG_IN_OVERRIDEMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE;
++
++typedef struct _DBG_IN_ISCAPTUREFRAME_ {
++ IMG_VOID *pvStream;
++ IMG_BOOL bCheckPreviousFrame;
++} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME;
++
++typedef struct _DBG_IN_SETMARKER_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Marker;
++} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
++
++typedef struct _DBG_IN_WRITE_LF_ {
++ IMG_UINT32 ui32Flags;
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_UINT32 ui32BufferSize;
++ IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF;
++
++#define WRITELF_FLAGS_RESETBUF 0x00000001UL
++
++typedef struct _DBG_STREAM_ {
++ struct _DBG_STREAM_ *psNext;
++ struct _DBG_STREAM_ *psInitStream;
++ IMG_BOOL bInitPhaseComplete;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32Base;
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32RPtr;
++ IMG_UINT32 ui32WPtr;
++ IMG_UINT32 ui32DataWritten;
++ IMG_UINT32 ui32CapMode;
++ IMG_UINT32 ui32OutMode;
++ IMG_UINT32 ui32DebugLevel;
++ IMG_UINT32 ui32DefaultMode;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32Current;
++ IMG_UINT32 ui32Access;
++ IMG_UINT32 ui32SampleRate;
++ IMG_UINT32 ui32Reserved;
++ IMG_UINT32 ui32Timeout;
++ IMG_UINT32 ui32Marker;
++ IMG_CHAR szName[30];
++} DBG_STREAM,*PDBG_STREAM;
++
++typedef struct _DBGKM_SERVICE_TABLE_ {
++ IMG_UINT32 ui32Size;
++ IMG_VOID * (IMG_CALLCONV *pfnCreateStream) (IMG_CHAR * pszName,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32OutMode,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages);
++ IMG_VOID (IMG_CALLCONV *pfnDestroyStream) (PDBG_STREAM psStream);
++ IMG_VOID * (IMG_CALLCONV *pfnFindStream) (IMG_CHAR * pszName, IMG_BOOL bResetInitBuffer);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteBIN) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadBIN) (PDBG_STREAM psStream,IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
++ IMG_VOID (IMG_CALLCONV *pfnSetCaptureMode) (PDBG_STREAM psStream,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
++ IMG_VOID (IMG_CALLCONV *pfnSetOutputMode) (PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
++ IMG_VOID (IMG_CALLCONV *pfnSetDebugLevel) (PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
++ IMG_VOID (IMG_CALLCONV *pfnSetFrame) (PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetFrame) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnOverrideMode) (PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
++ IMG_VOID (IMG_CALLCONV *pfnDefaultMode) (PDBG_STREAM psStream);
++ IMG_UINT32 (IMG_CALLCONV *pfnDBGDrivWrite2) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteStringCM) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteBINCM) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_VOID (IMG_CALLCONV *pfnSetMarker) (PDBG_STREAM psStream,IMG_UINT32 ui32Marker);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetMarker) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnStartInitPhase) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnStopInitPhase) (PDBG_STREAM psStream);
++ IMG_BOOL (IMG_CALLCONV *pfnIsCaptureFrame) (PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteLF) (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadLF) (PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetStreamOffset) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnSetStreamOffset) (PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++ IMG_BOOL (IMG_CALLCONV *pfnIsLastCaptureFrame) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnWaitForEvent) (DBG_EVENT eEvent);
++} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
++
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/img_defs.h b/drivers/gpu/drm/mrst/pvr/include4/img_defs.h
+new file mode 100644
+index 0000000..370300a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/img_defs.h
+@@ -0,0 +1,108 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__IMG_DEFS_H__)
++#define __IMG_DEFS_H__
++
++#include "img_types.h"
++
++typedef enum img_tag_TriStateSwitch
++{
++ IMG_ON = 0x00,
++ IMG_OFF,
++ IMG_IGNORE
++
++} img_TriStateSwitch, * img_pTriStateSwitch;
++
++#define IMG_SUCCESS 0
++
++#define IMG_NO_REG 1
++
++#if defined (NO_INLINE_FUNCS)
++ #define INLINE
++ #define FORCE_INLINE
++#else
++#if defined (__cplusplus)
++ #define INLINE inline
++ #define FORCE_INLINE inline
++#else
++#if !defined(INLINE)
++ #define INLINE __inline
++#endif
++ #define FORCE_INLINE static __inline
++#endif
++#endif
++
++
++#ifndef PVR_UNREFERENCED_PARAMETER
++#define PVR_UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++#ifdef __GNUC__
++#define unref__ __attribute__ ((unused))
++#else
++#define unref__
++#endif
++
++#ifndef _TCHAR_DEFINED
++#if defined(UNICODE)
++typedef unsigned short TCHAR, *PTCHAR, *PTSTR;
++#else
++typedef char TCHAR, *PTCHAR, *PTSTR;
++#endif
++#define _TCHAR_DEFINED
++#endif
++
++
++ #if defined(__linux__) || defined(__METAG)
++
++ #define IMG_CALLCONV
++ #define IMG_INTERNAL __attribute__ ((visibility ("hidden")))
++ #define IMG_EXPORT
++ #define IMG_IMPORT
++ #define IMG_RESTRICT __restrict__
++
++ #else
++ #error("define an OS")
++ #endif
++
++#ifndef IMG_ABORT
++ #define IMG_ABORT() abort()
++#endif
++
++#ifndef IMG_MALLOC
++ #define IMG_MALLOC(A) malloc (A)
++#endif
++
++#ifndef IMG_FREE
++ #define IMG_FREE(A) free (A)
++#endif
++
++#define IMG_CONST const
++
++#define IMG_FORMAT_PRINTF(x,y)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/img_types.h b/drivers/gpu/drm/mrst/pvr/include4/img_types.h
+new file mode 100644
+index 0000000..1b55521
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/img_types.h
+@@ -0,0 +1,128 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_TYPES_H__
++#define __IMG_TYPES_H__
++
++#if !defined(IMG_ADDRSPACE_CPUVADDR_BITS)
++#define IMG_ADDRSPACE_CPUVADDR_BITS 32
++#endif
++
++#if !defined(IMG_ADDRSPACE_PHYSADDR_BITS)
++#define IMG_ADDRSPACE_PHYSADDR_BITS 32
++#endif
++
++typedef unsigned int IMG_UINT, *IMG_PUINT;
++typedef signed int IMG_INT, *IMG_PINT;
++
++typedef unsigned char IMG_UINT8, *IMG_PUINT8;
++typedef unsigned char IMG_BYTE, *IMG_PBYTE;
++typedef signed char IMG_INT8, *IMG_PINT8;
++typedef char IMG_CHAR, *IMG_PCHAR;
++
++typedef unsigned short IMG_UINT16, *IMG_PUINT16;
++typedef signed short IMG_INT16, *IMG_PINT16;
++typedef unsigned long IMG_UINT32, *IMG_PUINT32;
++typedef signed long IMG_INT32, *IMG_PINT32;
++
++#if !defined(IMG_UINT32_MAX)
++ #define IMG_UINT32_MAX 0xFFFFFFFFUL
++#endif
++
++ #if (defined(LINUX) || defined(__METAG))
++#if !defined(USE_CODE)
++ typedef unsigned long long IMG_UINT64, *IMG_PUINT64;
++ typedef long long IMG_INT64, *IMG_PINT64;
++#endif
++ #else
++
++ #error("define an OS")
++
++ #endif
++
++#if !(defined(LINUX) && defined (__KERNEL__))
++typedef float IMG_FLOAT, *IMG_PFLOAT;
++typedef double IMG_DOUBLE, *IMG_PDOUBLE;
++#endif
++
++typedef enum tag_img_bool
++{
++ IMG_FALSE = 0,
++ IMG_TRUE = 1,
++ IMG_FORCE_ALIGN = 0x7FFFFFFF
++} IMG_BOOL, *IMG_PBOOL;
++
++typedef void IMG_VOID, *IMG_PVOID;
++
++typedef IMG_INT32 IMG_RESULT;
++
++typedef IMG_UINT32 IMG_UINTPTR_T;
++
++typedef IMG_PVOID IMG_HANDLE;
++
++typedef void** IMG_HVOID, * IMG_PHVOID;
++
++typedef IMG_UINT32 IMG_SIZE_T;
++
++#define IMG_NULL 0
++
++
++typedef IMG_PVOID IMG_CPU_VIRTADDR;
++
++typedef struct
++{
++
++ IMG_UINT32 uiAddr;
++#define IMG_CAST_TO_DEVVADDR_UINT(var) (IMG_UINT32)(var)
++
++} IMG_DEV_VIRTADDR;
++
++typedef struct _IMG_CPU_PHYADDR
++{
++
++ IMG_UINTPTR_T uiAddr;
++} IMG_CPU_PHYADDR;
++
++typedef struct _IMG_DEV_PHYADDR
++{
++#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
++
++ IMG_UINTPTR_T uiAddr;
++#else
++ IMG_UINT32 uiAddr;
++ IMG_UINT32 uiHighAddr;
++#endif
++} IMG_DEV_PHYADDR;
++
++typedef struct _IMG_SYS_PHYADDR
++{
++
++ IMG_UINTPTR_T uiAddr;
++} IMG_SYS_PHYADDR;
++
++#include "img_defs.h"
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/ioctldef.h b/drivers/gpu/drm/mrst/pvr/include4/ioctldef.h
+new file mode 100644
+index 0000000..cc69629
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/ioctldef.h
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IOCTLDEF_H__
++#define __IOCTLDEF_H__
++
++#define MAKEIOCTLINDEX(i) (((i) >> 2) & 0xFFF)
++
++#ifndef CTL_CODE
++
++#define DEVICE_TYPE ULONG
++
++#define FILE_DEVICE_BEEP 0x00000001
++#define FILE_DEVICE_CD_ROM 0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
++#define FILE_DEVICE_CONTROLLER 0x00000004
++#define FILE_DEVICE_DATALINK 0x00000005
++#define FILE_DEVICE_DFS 0x00000006
++#define FILE_DEVICE_DISK 0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
++#define FILE_DEVICE_FILE_SYSTEM 0x00000009
++#define FILE_DEVICE_INPORT_PORT 0x0000000a
++#define FILE_DEVICE_KEYBOARD 0x0000000b
++#define FILE_DEVICE_MAILSLOT 0x0000000c
++#define FILE_DEVICE_MIDI_IN 0x0000000d
++#define FILE_DEVICE_MIDI_OUT 0x0000000e
++#define FILE_DEVICE_MOUSE 0x0000000f
++#define FILE_DEVICE_MULTI_UNC_PROVIDER 0x00000010
++#define FILE_DEVICE_NAMED_PIPE 0x00000011
++#define FILE_DEVICE_NETWORK 0x00000012
++#define FILE_DEVICE_NETWORK_BROWSER 0x00000013
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL 0x00000015
++#define FILE_DEVICE_PARALLEL_PORT 0x00000016
++#define FILE_DEVICE_PHYSICAL_NETCARD 0x00000017
++#define FILE_DEVICE_PRINTER 0x00000018
++#define FILE_DEVICE_SCANNER 0x00000019
++#define FILE_DEVICE_SERIAL_MOUSE_PORT 0x0000001a
++#define FILE_DEVICE_SERIAL_PORT 0x0000001b
++#define FILE_DEVICE_SCREEN 0x0000001c
++#define FILE_DEVICE_SOUND 0x0000001d
++#define FILE_DEVICE_STREAMS 0x0000001e
++#define FILE_DEVICE_TAPE 0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
++#define FILE_DEVICE_TRANSPORT 0x00000021
++#define FILE_DEVICE_UNKNOWN 0x00000022
++#define FILE_DEVICE_VIDEO 0x00000023
++#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
++#define FILE_DEVICE_WAVE_IN 0x00000025
++#define FILE_DEVICE_WAVE_OUT 0x00000026
++#define FILE_DEVICE_8042_PORT 0x00000027
++#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
++#define FILE_DEVICE_BATTERY 0x00000029
++#define FILE_DEVICE_BUS_EXTENDER 0x0000002a
++#define FILE_DEVICE_MODEM 0x0000002b
++#define FILE_DEVICE_VDM 0x0000002c
++#define FILE_DEVICE_MASS_STORAGE 0x0000002d
++
++#define CTL_CODE( DeviceType, Function, Method, Access ) ( \
++ ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \
++)
++
++#define METHOD_BUFFERED 0
++#define METHOD_IN_DIRECT 1
++#define METHOD_OUT_DIRECT 2
++#define METHOD_NEITHER 3
++
++#define FILE_ANY_ACCESS 0
++#define FILE_READ_ACCESS ( 0x0001 )
++#define FILE_WRITE_ACCESS ( 0x0002 )
++
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h b/drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h
+new file mode 100644
+index 0000000..3a2e4c1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__PDUMPDEFS_H__)
++#define __PDUMPDEFS_H__
++
++typedef enum _PDUMP_PIXEL_FORMAT_
++{
++ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
++ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
++
++ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
++
++} PDUMP_PIXEL_FORMAT;
++
++typedef enum _PDUMP_MEM_FORMAT_
++{
++ PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0,
++ PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
++ PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
++ PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++ PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10,
++
++ PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
++} PDUMP_MEM_FORMAT;
++
++typedef enum _PDUMP_POLL_OPERATOR
++{
++ PDUMP_POLL_OPERATOR_EQUAL = 0,
++ PDUMP_POLL_OPERATOR_LESS = 1,
++ PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++ PDUMP_POLL_OPERATOR_GREATER = 3,
++ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++ PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++} PDUMP_POLL_OPERATOR;
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h b/drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h
+new file mode 100644
+index 0000000..fe99f45
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h
+@@ -0,0 +1,127 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_DEBUG_H__
++#define __PVR_DEBUG_H__
++
++
++#include "img_types.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define PVR_MAX_DEBUG_MESSAGE_LEN (512)
++
++#define DBGPRIV_FATAL 0x01UL
++#define DBGPRIV_ERROR 0x02UL
++#define DBGPRIV_WARNING 0x04UL
++#define DBGPRIV_MESSAGE 0x08UL
++#define DBGPRIV_VERBOSE 0x10UL
++#define DBGPRIV_CALLTRACE 0x20UL
++#define DBGPRIV_ALLOC 0x40UL
++#define DBGPRIV_ALLLEVELS (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_MESSAGE | DBGPRIV_VERBOSE)
++
++
++
++#define PVR_DBG_FATAL DBGPRIV_FATAL,__FILE__, __LINE__
++#define PVR_DBG_ERROR DBGPRIV_ERROR,__FILE__, __LINE__
++#define PVR_DBG_WARNING DBGPRIV_WARNING,__FILE__, __LINE__
++#define PVR_DBG_MESSAGE DBGPRIV_MESSAGE,__FILE__, __LINE__
++#define PVR_DBG_VERBOSE DBGPRIV_VERBOSE,__FILE__, __LINE__
++#define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE,__FILE__, __LINE__
++#define PVR_DBG_ALLOC DBGPRIV_ALLOC,__FILE__, __LINE__
++
++#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
++#define PVRSRV_NEED_PVR_ASSERT
++#endif
++
++#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
++#define PVRSRV_NEED_PVR_DPF
++#endif
++
++#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
++#define PVRSRV_NEED_PVR_TRACE
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++ #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__);
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
++ IMG_UINT32 ui32Line);
++
++ #if defined(PVR_DBG_BREAK_ASSERT_FAIL)
++ #define PVR_DBG_BREAK PVRSRVDebugAssertFail("PVR_DBG_BREAK", 0)
++ #else
++ #define PVR_DBG_BREAK
++ #endif
++
++#else
++
++ #define PVR_ASSERT(EXPR)
++ #define PVR_DBG_BREAK
++
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++ #define PVR_DPF(X) PVRSRVDebugPrintf X
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR *pszFormat,
++ ...);
++
++#else
++
++ #define PVR_DPF(X)
++
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++ #define PVR_TRACE(X) PVRSRVTrace X
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... );
++
++#else
++
++ #define PVR_TRACE(X)
++
++#endif
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h b/drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h
+new file mode 100644
+index 0000000..5f77d1c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h
+@@ -0,0 +1,31 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRMODULE_H_
++#define _PVRMODULE_H_
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/pvrversion.h b/drivers/gpu/drm/mrst/pvr/include4/pvrversion.h
+new file mode 100644
+index 0000000..585e49b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/pvrversion.h
+@@ -0,0 +1,38 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRVERSION_H_
++#define _PVRVERSION_H_
++
++#define PVRVERSION_MAJ 1
++#define PVRVERSION_MIN 5
++#define PVRVERSION_BRANCH 15
++#define PVRVERSION_BUILD 3014
++#define PVRVERSION_STRING "1.5.15.3014"
++#define PVRVERSION_FILE "eurasiacon.pj"
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/regpaths.h b/drivers/gpu/drm/mrst/pvr/include4/regpaths.h
+new file mode 100644
+index 0000000..8dac213
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/regpaths.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __REGPATHS_H__
++#define __REGPATHS_H__
++
++#define POWERVR_REG_ROOT "Drivers\\Display\\PowerVR"
++#define POWERVR_CHIP_KEY "\\SGX1\\"
++
++#define POWERVR_EURASIA_KEY "PowerVREurasia\\"
++
++#define POWERVR_SERVICES_KEY "\\Registry\\Machine\\System\\CurrentControlSet\\Services\\PowerVR\\"
++
++#define PVRSRV_REGISTRY_ROOT POWERVR_EURASIA_KEY "HWSettings\\PVRSRVKM"
++
++
++#define MAX_REG_STRING_SIZE 128
++
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/services.h b/drivers/gpu/drm/mrst/pvr/include4/services.h
+new file mode 100644
+index 0000000..7b8159d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/services.h
+@@ -0,0 +1,872 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_H__
++#define __SERVICES_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "pdumpdefs.h"
++
++
++#define PVRSRV_4K_PAGE_SIZE 4096UL
++
++#define PVRSRV_MAX_CMD_SIZE 1024
++
++#define PVRSRV_MAX_DEVICES 16
++
++#define EVENTOBJNAME_MAXLENGTH (50)
++
++#define PVRSRV_MEM_READ (1UL<<0)
++#define PVRSRV_MEM_WRITE (1UL<<1)
++#define PVRSRV_MEM_CACHE_CONSISTENT (1UL<<2)
++#define PVRSRV_MEM_NO_SYNCOBJ (1UL<<3)
++#define PVRSRV_MEM_INTERLEAVED (1UL<<4)
++#define PVRSRV_MEM_DUMMY (1UL<<5)
++#define PVRSRV_MEM_EDM_PROTECT (1UL<<6)
++#define PVRSRV_MEM_ZERO (1UL<<7)
++#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1UL<<8)
++#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1UL<<9)
++#define PVRSRV_MEM_NO_RESMAN (1UL<<10)
++#define PVRSRV_MEM_EXPORTED (1UL<<11)
++
++
++#define PVRSRV_HAP_CACHED (1UL<<12)
++#define PVRSRV_HAP_UNCACHED (1UL<<13)
++#define PVRSRV_HAP_WRITECOMBINE (1UL<<14)
++#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE)
++#define PVRSRV_HAP_KERNEL_ONLY (1UL<<15)
++#define PVRSRV_HAP_SINGLE_PROCESS (1UL<<16)
++#define PVRSRV_HAP_MULTI_PROCESS (1UL<<17)
++#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1UL<<18)
++#define PVRSRV_HAP_NO_CPU_VIRTUAL (1UL<<19)
++#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY \
++ |PVRSRV_HAP_SINGLE_PROCESS \
++ |PVRSRV_HAP_MULTI_PROCESS \
++ |PVRSRV_HAP_FROM_EXISTING_PROCESS \
++ |PVRSRV_HAP_NO_CPU_VIRTUAL)
++
++#define PVRSRV_MEM_CACHED PVRSRV_HAP_CACHED
++#define PVRSRV_MEM_UNCACHED PVRSRV_HAP_UNCACHED
++#define PVRSRV_MEM_WRITECOMBINE PVRSRV_HAP_WRITECOMBINE
++
++#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT (24)
++
++#define PVRSRV_MAP_NOUSERVIRTUAL (1UL<<27)
++
++#define PVRSRV_NO_CONTEXT_LOSS 0
++#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1
++#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80
++
++
++#define PVRSRV_DEFAULT_DEV_COOKIE (1)
++
++
++#define PVRSRV_MISC_INFO_TIMER_PRESENT (1UL<<0)
++#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1UL<<1)
++#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1UL<<2)
++#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1UL<<3)
++#define PVRSRV_MISC_INFO_DDKVERSION_PRESENT (1UL<<4)
++#define PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT (1UL<<5)
++
++#define PVRSRV_MISC_INFO_RESET_PRESENT (1UL<<31)
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200
++
++
++#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001
++
++#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001
++#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002
++
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC 0x00000001
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC 0x00000002
++
++typedef enum _PVRSRV_DEVICE_TYPE_
++{
++ PVRSRV_DEVICE_TYPE_UNKNOWN = 0 ,
++ PVRSRV_DEVICE_TYPE_MBX1 = 1 ,
++ PVRSRV_DEVICE_TYPE_MBX1_LITE = 2 ,
++
++ PVRSRV_DEVICE_TYPE_M24VA = 3,
++ PVRSRV_DEVICE_TYPE_MVDA2 = 4,
++ PVRSRV_DEVICE_TYPE_MVED1 = 5,
++ PVRSRV_DEVICE_TYPE_MSVDX = 6,
++
++ PVRSRV_DEVICE_TYPE_SGX = 7,
++
++ PVRSRV_DEVICE_TYPE_VGX = 8,
++
++ PVRSRV_DEVICE_TYPE_TOPAZ = 9,
++
++ PVRSRV_DEVICE_TYPE_EXT = 10,
++
++ PVRSRV_DEVICE_TYPE_LAST = 10,
++
++ PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEVICE_TYPE;
++
++#define HEAP_ID( _dev_ , _dev_heap_idx_ ) ( ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1)) )
++#define HEAP_IDX( _heap_id_ ) ( (_heap_id_)&((1<<24) - 1 ) )
++#define HEAP_DEV( _heap_id_ ) ( (_heap_id_)>>24 )
++
++#define PVRSRV_UNDEFINED_HEAP_ID (~0LU)
++
++typedef enum
++{
++ IMG_EGL = 0x00000001,
++ IMG_OPENGLES1 = 0x00000002,
++ IMG_OPENGLES2 = 0x00000003,
++ IMG_D3DM = 0x00000004,
++ IMG_SRV_UM = 0x00000005,
++ IMG_OPENVG = 0x00000006,
++ IMG_SRVCLIENT = 0x00000007,
++ IMG_VISTAKMD = 0x00000008,
++ IMG_VISTA3DNODE = 0x00000009,
++ IMG_VISTAMVIDEONODE = 0x0000000A,
++ IMG_VISTAVPBNODE = 0x0000000B,
++ IMG_OPENGL = 0x0000000C,
++ IMG_D3D = 0x0000000D,
++#if defined(SUPPORT_GRAPHICS_HAL)
++ IMG_GRAPHICS_HAL = 0x0000000E
++#endif
++
++} IMG_MODULE_ID;
++
++
++#define APPHINT_MAX_STRING_SIZE 256
++
++typedef enum
++{
++ IMG_STRING_TYPE = 1,
++ IMG_FLOAT_TYPE ,
++ IMG_UINT_TYPE ,
++ IMG_INT_TYPE ,
++ IMG_FLAG_TYPE
++}IMG_DATA_TYPE;
++
++
++typedef struct _PVRSRV_DEV_DATA_ *PPVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_DEVICE_IDENTIFIER_
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ PVRSRV_DEVICE_CLASS eDeviceClass;
++ IMG_UINT32 ui32DeviceIndex;
++
++} PVRSRV_DEVICE_IDENTIFIER;
++
++
++typedef struct _PVRSRV_CLIENT_DEV_DATA_
++{
++ IMG_UINT32 ui32NumDevices;
++ PVRSRV_DEVICE_IDENTIFIER asDevID[PVRSRV_MAX_DEVICES];
++ PVRSRV_ERROR (*apfnDevConnect[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
++ PVRSRV_ERROR (*apfnDumpTrace[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
++
++} PVRSRV_CLIENT_DEV_DATA;
++
++
++typedef struct _PVRSRV_CONNECTION_
++{
++ IMG_HANDLE hServices;
++ IMG_UINT32 ui32ProcessID;
++ PVRSRV_CLIENT_DEV_DATA sClientDevData;
++}PVRSRV_CONNECTION;
++
++
++typedef struct _PVRSRV_DEV_DATA_
++{
++ PVRSRV_CONNECTION sConnection;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_MEMUPDATE_
++{
++ IMG_UINT32 ui32UpdateAddr;
++ IMG_UINT32 ui32UpdateVal;
++} PVRSRV_MEMUPDATE;
++
++typedef struct _PVRSRV_HWREG_
++{
++ IMG_UINT32 ui32RegAddr;
++ IMG_UINT32 ui32RegVal;
++} PVRSRV_HWREG;
++
++typedef struct _PVRSRV_MEMBLK_
++{
++ IMG_DEV_VIRTADDR sDevVirtAddr;
++ IMG_HANDLE hOSMemHandle;
++ IMG_HANDLE hOSWrapMem;
++ IMG_HANDLE hBuffer;
++ IMG_HANDLE hResItem;
++ IMG_SYS_PHYADDR *psIntSysPAddr;
++
++} PVRSRV_MEMBLK;
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO;
++
++typedef struct _PVRSRV_CLIENT_MEM_INFO_
++{
++
++ IMG_PVOID pvLinAddr;
++
++
++ IMG_PVOID pvLinAddrKM;
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++
++
++
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ IMG_UINT32 ui32Flags;
++
++
++
++
++ IMG_UINT32 ui32ClientFlags;
++
++
++ IMG_SIZE_T ui32AllocSize;
++
++
++
++ struct _PVRSRV_CLIENT_SYNC_INFO_ *psClientSyncInfo;
++
++
++ IMG_HANDLE hMappingInfo;
++
++
++ IMG_HANDLE hKernelMemInfo;
++
++
++ IMG_HANDLE hResItem;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ #if !defined(USE_CODE)
++
++ IMG_UINT64 ui64Stamp;
++ #else
++ IMG_UINT32 dummy1;
++ IMG_UINT32 dummy2;
++ #endif
++#endif
++
++
++
++
++ struct _PVRSRV_CLIENT_MEM_INFO_ *psNext;
++
++} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO;
++
++
++#define PVRSRV_MAX_CLIENT_HEAPS (32)
++typedef struct _PVRSRV_HEAP_INFO_
++{
++ IMG_UINT32 ui32HeapID;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR sDevVAddrBase;
++ IMG_UINT32 ui32HeapByteSize;
++ IMG_UINT32 ui32Attribs;
++}PVRSRV_HEAP_INFO;
++
++
++
++
++typedef struct _PVRSRV_EVENTOBJECT_
++{
++
++ IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH];
++
++ IMG_HANDLE hOSEventKM;
++
++} PVRSRV_EVENTOBJECT;
++
++typedef struct _PVRSRV_MISC_INFO_
++{
++ IMG_UINT32 ui32StateRequest;
++ IMG_UINT32 ui32StatePresent;
++
++
++ IMG_VOID *pvSOCTimerRegisterKM;
++ IMG_VOID *pvSOCTimerRegisterUM;
++ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
++ IMG_HANDLE hSOCTimerRegisterMappingInfo;
++
++
++ IMG_VOID *pvSOCClockGateRegs;
++ IMG_UINT32 ui32SOCClockGateRegsSize;
++
++
++ IMG_CHAR *pszMemoryStr;
++ IMG_UINT32 ui32MemoryStrLen;
++
++
++ PVRSRV_EVENTOBJECT sGlobalEventObject;
++ IMG_HANDLE hOSGlobalEvent;
++
++
++ IMG_UINT32 aui32DDKVersion[4];
++
++
++
++ IMG_BOOL bCPUCacheFlushAll;
++
++ IMG_BOOL bDeferCPUCacheFlush;
++
++ IMG_PVOID pvRangeAddrStart;
++
++ IMG_PVOID pvRangeAddrEnd;
++
++} PVRSRV_MISC_INFO;
++
++
++typedef enum _PVRSRV_CLIENT_EVENT_
++{
++ PVRSRV_CLIENT_EVENT_HWTIMEOUT = 0,
++} PVRSRV_CLIENT_EVENT;
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVClientEvent(IMG_CONST PVRSRV_CLIENT_EVENT eEvent,
++ PVRSRV_DEV_DATA *psDevData,
++ IMG_PVOID pvData);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 *puiNumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *puiDevIDs);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 uiDevIndex,
++ PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_DEVICE_TYPE eDeviceType);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++#if 1
++IMG_IMPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++
++IMG_IMPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++
++IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPollForValue ( const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hOSEvent,
++ volatile IMG_UINT32 *pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32SharedHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32SharedHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
++ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
++ (PVR_TRACE(("PVRSRVAllocDeviceMem(" #psDevData "," #hDevMemHeap "," #ui32Attribs "," #ui32Size "," #ui32Alignment "," #ppsMemInfo ")" \
++ ": " logStr " (size = 0x%lx)", ui32Size)), \
++ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo))
++#else
++ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
++ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo)
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Attribs,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_HANDLE *phMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hKernelMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_UINT32 ui32Flags);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory2(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_VOID *pvLinAddr,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo,
++ IMG_UINT32 ui32Attribs);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_SYS_PHYADDR sSysPhysAddr,
++ IMG_UINT32 uiSizeInBytes,
++ IMG_PVOID *ppvUserAddr,
++ IMG_UINT32 *puiActualSize,
++ IMG_PVOID *ppvProcess);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_PVOID pvUserAddr,
++ IMG_PVOID pvProcess);
++
++typedef enum _PVRSRV_SYNCVAL_MODE_
++{
++ PVRSRV_SYNCVAL_READ = IMG_TRUE,
++ PVRSRV_SYNCVAL_WRITE = IMG_FALSE,
++
++} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE;
++
++typedef IMG_UINT32 PVRSRV_SYNCVAL;
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID);
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats (IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims (IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat,
++ DISPLAY_DIMS *psDims);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE hDevice,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice,
++ DISPLAY_INFO* psDisplayInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain (IMG_HANDLE hDevice,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_UINT32 *pui32SwapChainID,
++ IMG_HANDLE *phSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psDstRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psSrcRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer (IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain);
++
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice,
++ BUFFER_INFO *psBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStartInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStopInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++ IMG_BOOL bIsRead,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_PVOID pvAltLinAddr,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_PVOID pvAltLinAddr,
++ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelMemInfo,
++ IMG_DEV_PHYADDR *pPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_DEV_VIRTADDR sDevAddr,
++ IMG_UINT32 ui32Start,
++ IMG_UINT32 ui32Length,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Frame);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CONST IMG_CHAR *pszComment,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_BOOL bContinuous,
++ IMG_CONST IMG_CHAR *pszFormat, ...);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Flags,
++ IMG_CONST IMG_CHAR *pszFormat, ...);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CHAR *pszString,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_BOOL *pbIsCapturing);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpBitmap(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CONST IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++
++IMG_IMPORT
++IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegOffset,
++ IMG_BOOL bLastFrame);
++
++IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(const IMG_CHAR *pszLibraryName);
++IMG_IMPORT PVRSRV_ERROR PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
++IMG_IMPORT PVRSRV_ERROR PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, const IMG_CHAR *pszFunctionName, IMG_VOID **ppvFuncAddr);
++
++IMG_IMPORT IMG_UINT32 PVRSRVClockus (void);
++IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus);
++IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void);
++IMG_IMPORT IMG_UINT32 IMG_CALLCONV PVRSRVGetCurrentProcessID(void);
++IMG_IMPORT IMG_CHAR * IMG_CALLCONV PVRSRVSetLocale(const IMG_CHAR *pszLocale);
++
++
++
++
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID,
++ const IMG_CHAR *pszAppName,
++ IMG_VOID **ppvState);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID,
++ IMG_VOID *pvHintState);
++
++IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID *pvHintState,
++ const IMG_CHAR *pszHintName,
++ IMG_DATA_TYPE eDataType,
++ const IMG_VOID *pvDefault,
++ IMG_VOID *pvReturn);
++
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uNewSize);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem);
++IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
++
++struct _PVRSRV_MUTEX_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_MUTEX_OPAQUE_STRUCT_ *PVRSRV_MUTEX_HANDLE;
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex);
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex);
++
++#if (defined(DEBUG) && defined(__linux__))
++IMG_PVOID PVRSRVAllocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_PVOID PVRSRVCallocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_VOID PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem);
++IMG_PVOID PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_SIZE_T ui32NewSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++#endif
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVEventObjectWait(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hOSEvent);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyPendingSyncOps(PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelSyncInfo,
++ IMG_UINT32 ui32ModifyFlags,
++ IMG_UINT32 *pui32ReadOpsPending,
++ IMG_UINT32 *pui32WriteOpsPending);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyCompleteSyncOps(PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelSyncInfo,
++ IMG_UINT32 ui32ModifyFlags);
++
++
++#define TIME_NOT_PASSED_UINT32(a,b,c) ((a - b) < c)
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/servicesext.h b/drivers/gpu/drm/mrst/pvr/include4/servicesext.h
+new file mode 100644
+index 0000000..4bfb75c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/servicesext.h
+@@ -0,0 +1,648 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESEXT_H__)
++#define __SERVICESEXT_H__
++
++#define PVRSRV_LOCKFLG_READONLY (1)
++
++typedef enum _PVRSRV_ERROR_
++{
++ PVRSRV_OK = 0,
++ PVRSRV_ERROR_GENERIC = 1,
++ PVRSRV_ERROR_OUT_OF_MEMORY = 2,
++ PVRSRV_ERROR_TOO_FEW_BUFFERS = 3,
++ PVRSRV_ERROR_SYMBOL_NOT_FOUND = 4,
++ PVRSRV_ERROR_OUT_OF_HSPACE = 5,
++ PVRSRV_ERROR_INVALID_PARAMS = 6,
++ PVRSRV_ERROR_TILE_MAP_FAILED = 7,
++ PVRSRV_ERROR_INIT_FAILURE = 8,
++ PVRSRV_ERROR_CANT_REGISTER_CALLBACK = 9,
++ PVRSRV_ERROR_INVALID_DEVICE = 10,
++ PVRSRV_ERROR_NOT_OWNER = 11,
++ PVRSRV_ERROR_BAD_MAPPING = 12,
++ PVRSRV_ERROR_TIMEOUT = 13,
++ PVRSRV_ERROR_NO_PRIMARY = 14,
++ PVRSRV_ERROR_FLIP_CHAIN_EXISTS = 15,
++ PVRSRV_ERROR_CANNOT_ACQUIRE_SYSDATA = 16,
++ PVRSRV_ERROR_SCENE_INVALID = 17,
++ PVRSRV_ERROR_STREAM_ERROR = 18,
++ PVRSRV_ERROR_INVALID_INTERRUPT = 19,
++ PVRSRV_ERROR_FAILED_DEPENDENCIES = 20,
++ PVRSRV_ERROR_CMD_NOT_PROCESSED = 21,
++ PVRSRV_ERROR_CMD_TOO_BIG = 22,
++ PVRSRV_ERROR_DEVICE_REGISTER_FAILED = 23,
++ PVRSRV_ERROR_FIFO_SPACE = 24,
++ PVRSRV_ERROR_TA_RECOVERY = 25,
++ PVRSRV_ERROR_INDOSORLOWPOWER = 26,
++ PVRSRV_ERROR_TOOMANYBUFFERS = 27,
++ PVRSRV_ERROR_NOT_SUPPORTED = 28,
++ PVRSRV_ERROR_PROCESSING_BLOCKED = 29,
++
++
++ PVRSRV_ERROR_CANNOT_FLUSH_QUEUE = 31,
++ PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE = 32,
++ PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS = 33,
++ PVRSRV_ERROR_RETRY = 34,
++
++ PVRSRV_ERROR_DDK_VERSION_MISMATCH = 35,
++ PVRSRV_ERROR_BUILD_MISMATCH = 36,
++ PVRSRV_ERROR_PDUMP_BUF_OVERFLOW,
++
++ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_ERROR;
++
++
++typedef enum _PVRSRV_DEVICE_CLASS_
++{
++ PVRSRV_DEVICE_CLASS_3D = 0 ,
++ PVRSRV_DEVICE_CLASS_DISPLAY = 1 ,
++ PVRSRV_DEVICE_CLASS_BUFFER = 2 ,
++ PVRSRV_DEVICE_CLASS_VIDEO = 3 ,
++
++ PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEVICE_CLASS;
++
++
++
++typedef enum _PVRSRV_SYS_POWER_STATE_
++{
++ PVRSRV_SYS_POWER_STATE_Unspecified = -1,
++ PVRSRV_SYS_POWER_STATE_D0 = 0,
++ PVRSRV_SYS_POWER_STATE_D1 = 1,
++ PVRSRV_SYS_POWER_STATE_D2 = 2,
++ PVRSRV_SYS_POWER_STATE_D3 = 3,
++ PVRSRV_SYS_POWER_STATE_D4 = 4,
++
++ PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE;
++
++
++typedef enum _PVRSRV_DEV_POWER_STATE_
++{
++ PVRSRV_DEV_POWER_STATE_DEFAULT = -1,
++ PVRSRV_DEV_POWER_STATE_ON = 0,
++ PVRSRV_DEV_POWER_STATE_IDLE = 1,
++ PVRSRV_DEV_POWER_STATE_OFF = 2,
++
++ PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE;
++
++
++typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++
++typedef enum _PVRSRV_PIXEL_FORMAT_ {
++
++ PVRSRV_PIXEL_FORMAT_UNKNOWN = 0,
++ PVRSRV_PIXEL_FORMAT_RGB565 = 1,
++ PVRSRV_PIXEL_FORMAT_RGB555 = 2,
++ PVRSRV_PIXEL_FORMAT_RGB888 = 3,
++ PVRSRV_PIXEL_FORMAT_BGR888 = 4,
++ PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8,
++ PVRSRV_PIXEL_FORMAT_PAL12 = 13,
++ PVRSRV_PIXEL_FORMAT_PAL8 = 14,
++ PVRSRV_PIXEL_FORMAT_PAL4 = 15,
++ PVRSRV_PIXEL_FORMAT_PAL2 = 16,
++ PVRSRV_PIXEL_FORMAT_PAL1 = 17,
++ PVRSRV_PIXEL_FORMAT_ARGB1555 = 18,
++ PVRSRV_PIXEL_FORMAT_ARGB4444 = 19,
++ PVRSRV_PIXEL_FORMAT_ARGB8888 = 20,
++ PVRSRV_PIXEL_FORMAT_ABGR8888 = 21,
++ PVRSRV_PIXEL_FORMAT_YV12 = 22,
++ PVRSRV_PIXEL_FORMAT_I420 = 23,
++ PVRSRV_PIXEL_FORMAT_IMC2 = 25,
++ PVRSRV_PIXEL_FORMAT_XRGB8888,
++ PVRSRV_PIXEL_FORMAT_XBGR8888,
++ PVRSRV_PIXEL_FORMAT_BGRA8888,
++ PVRSRV_PIXEL_FORMAT_XRGB4444,
++ PVRSRV_PIXEL_FORMAT_ARGB8332,
++ PVRSRV_PIXEL_FORMAT_A2RGB10,
++ PVRSRV_PIXEL_FORMAT_A2BGR10,
++ PVRSRV_PIXEL_FORMAT_P8,
++ PVRSRV_PIXEL_FORMAT_L8,
++ PVRSRV_PIXEL_FORMAT_A8L8,
++ PVRSRV_PIXEL_FORMAT_A4L4,
++ PVRSRV_PIXEL_FORMAT_L16,
++ PVRSRV_PIXEL_FORMAT_L6V5U5,
++ PVRSRV_PIXEL_FORMAT_V8U8,
++ PVRSRV_PIXEL_FORMAT_V16U16,
++ PVRSRV_PIXEL_FORMAT_QWVU8888,
++ PVRSRV_PIXEL_FORMAT_XLVU8888,
++ PVRSRV_PIXEL_FORMAT_QWVU16,
++ PVRSRV_PIXEL_FORMAT_D16,
++ PVRSRV_PIXEL_FORMAT_D24S8,
++ PVRSRV_PIXEL_FORMAT_D24X8,
++
++
++ PVRSRV_PIXEL_FORMAT_ABGR16,
++ PVRSRV_PIXEL_FORMAT_ABGR16F,
++ PVRSRV_PIXEL_FORMAT_ABGR32,
++ PVRSRV_PIXEL_FORMAT_ABGR32F,
++ PVRSRV_PIXEL_FORMAT_B10GR11,
++ PVRSRV_PIXEL_FORMAT_GR88,
++ PVRSRV_PIXEL_FORMAT_BGR32,
++ PVRSRV_PIXEL_FORMAT_GR32,
++ PVRSRV_PIXEL_FORMAT_E5BGR9,
++
++
++ PVRSRV_PIXEL_FORMAT_DXT1,
++ PVRSRV_PIXEL_FORMAT_DXT2,
++ PVRSRV_PIXEL_FORMAT_DXT3,
++ PVRSRV_PIXEL_FORMAT_DXT4,
++ PVRSRV_PIXEL_FORMAT_DXT5,
++
++
++ PVRSRV_PIXEL_FORMAT_R8G8_B8G8,
++ PVRSRV_PIXEL_FORMAT_G8R8_G8B8,
++
++
++ PVRSRV_PIXEL_FORMAT_NV11,
++ PVRSRV_PIXEL_FORMAT_NV12,
++
++
++ PVRSRV_PIXEL_FORMAT_YUY2,
++ PVRSRV_PIXEL_FORMAT_YUV420,
++ PVRSRV_PIXEL_FORMAT_YUV444,
++ PVRSRV_PIXEL_FORMAT_VUY444,
++ PVRSRV_PIXEL_FORMAT_YUYV,
++ PVRSRV_PIXEL_FORMAT_YVYU,
++ PVRSRV_PIXEL_FORMAT_UYVY,
++ PVRSRV_PIXEL_FORMAT_VYUY,
++
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_AYUV,
++
++
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_B32G32R32,
++ PVRSRV_PIXEL_FORMAT_B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_G32R32,
++ PVRSRV_PIXEL_FORMAT_G32R32F,
++ PVRSRV_PIXEL_FORMAT_G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_D32F,
++ PVRSRV_PIXEL_FORMAT_R32,
++ PVRSRV_PIXEL_FORMAT_R32F,
++ PVRSRV_PIXEL_FORMAT_R32_UINT,
++ PVRSRV_PIXEL_FORMAT_R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16F,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SNORM,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_G16R16,
++ PVRSRV_PIXEL_FORMAT_G16R16F,
++ PVRSRV_PIXEL_FORMAT_G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_R16,
++ PVRSRV_PIXEL_FORMAT_R16F,
++ PVRSRV_PIXEL_FORMAT_R16_UINT,
++ PVRSRV_PIXEL_FORMAT_R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_R16_SINT,
++ PVRSRV_PIXEL_FORMAT_R16_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8,
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM,
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM_SRGB,
++
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8,
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM_SRGB,
++
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM_SRGB,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_G8R8,
++ PVRSRV_PIXEL_FORMAT_G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_A8,
++ PVRSRV_PIXEL_FORMAT_R8,
++ PVRSRV_PIXEL_FORMAT_R8_UINT,
++ PVRSRV_PIXEL_FORMAT_R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_R8_SINT,
++ PVRSRV_PIXEL_FORMAT_R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UNORM,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UINT,
++
++
++ PVRSRV_PIXEL_FORMAT_B10G11R11,
++ PVRSRV_PIXEL_FORMAT_B10G11R11F,
++
++
++ PVRSRV_PIXEL_FORMAT_X24G8R32,
++ PVRSRV_PIXEL_FORMAT_G8R24,
++ PVRSRV_PIXEL_FORMAT_X8R24,
++ PVRSRV_PIXEL_FORMAT_E5B9G9R9,
++ PVRSRV_PIXEL_FORMAT_R1,
++
++ PVRSRV_PIXEL_FORMAT_BC1,
++ PVRSRV_PIXEL_FORMAT_BC1_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC1_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC2,
++ PVRSRV_PIXEL_FORMAT_BC2_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC2_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC3,
++ PVRSRV_PIXEL_FORMAT_BC3_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC3_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC4,
++ PVRSRV_PIXEL_FORMAT_BC4_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC4_SNORM,
++ PVRSRV_PIXEL_FORMAT_BC5,
++ PVRSRV_PIXEL_FORMAT_BC5_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC5_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_L_F16,
++ PVRSRV_PIXEL_FORMAT_L_F16_REP,
++ PVRSRV_PIXEL_FORMAT_L_F16_A_F16,
++ PVRSRV_PIXEL_FORMAT_A_F16,
++ PVRSRV_PIXEL_FORMAT_B16G16R16F,
++
++ PVRSRV_PIXEL_FORMAT_L_F32,
++ PVRSRV_PIXEL_FORMAT_A_F32,
++ PVRSRV_PIXEL_FORMAT_L_F32_A_F32,
++
++
++ PVRSRV_PIXEL_FORMAT_PVRTC2,
++ PVRSRV_PIXEL_FORMAT_PVRTC4,
++ PVRSRV_PIXEL_FORMAT_PVRTCII2,
++ PVRSRV_PIXEL_FORMAT_PVRTCII4,
++ PVRSRV_PIXEL_FORMAT_PVRTCIII,
++ PVRSRV_PIXEL_FORMAT_PVRO8,
++ PVRSRV_PIXEL_FORMAT_PVRO88,
++ PVRSRV_PIXEL_FORMAT_PT1,
++ PVRSRV_PIXEL_FORMAT_PT2,
++ PVRSRV_PIXEL_FORMAT_PT4,
++ PVRSRV_PIXEL_FORMAT_PT8,
++ PVRSRV_PIXEL_FORMAT_PTW,
++ PVRSRV_PIXEL_FORMAT_PTB,
++ PVRSRV_PIXEL_FORMAT_MONO8,
++ PVRSRV_PIXEL_FORMAT_MONO16,
++
++
++ PVRSRV_PIXEL_FORMAT_C0_YUYV,
++ PVRSRV_PIXEL_FORMAT_C0_UYVY,
++ PVRSRV_PIXEL_FORMAT_C0_YVYU,
++ PVRSRV_PIXEL_FORMAT_C0_VYUY,
++ PVRSRV_PIXEL_FORMAT_C1_YUYV,
++ PVRSRV_PIXEL_FORMAT_C1_UYVY,
++ PVRSRV_PIXEL_FORMAT_C1_YVYU,
++ PVRSRV_PIXEL_FORMAT_C1_VYUY,
++
++
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_UV,
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_VU,
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_3P,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_UV,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_VU,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_3P,
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10F,
++ PVRSRV_PIXEL_FORMAT_B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_PVRF32SIGNMASK,
++
++ PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
++} PVRSRV_PIXEL_FORMAT;
++
++typedef enum _PVRSRV_ALPHA_FORMAT_ {
++ PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_ALPHA_FORMAT_PRE = 0x00000001,
++ PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002,
++ PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F,
++} PVRSRV_ALPHA_FORMAT;
++
++typedef enum _PVRSRV_COLOURSPACE_FORMAT_ {
++ PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000,
++ PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000,
++ PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000,
++} PVRSRV_COLOURSPACE_FORMAT;
++
++
++typedef enum _PVRSRV_ROTATION_ {
++ PVRSRV_ROTATE_0 = 0,
++ PVRSRV_ROTATE_90 = 1,
++ PVRSRV_ROTATE_180 = 2,
++ PVRSRV_ROTATE_270 = 3,
++ PVRSRV_FLIP_Y
++
++} PVRSRV_ROTATION;
++
++#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0)
++#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1)
++#define PVRSRV_CREATE_SWAPCHAIN_OEMOVERLAY (1<<2)
++
++typedef struct _PVRSRV_SYNC_DATA_
++{
++
++ IMG_UINT32 ui32WriteOpsPending;
++ volatile IMG_UINT32 ui32WriteOpsComplete;
++
++
++ IMG_UINT32 ui32ReadOpsPending;
++ volatile IMG_UINT32 ui32ReadOpsComplete;
++
++
++ IMG_UINT32 ui32LastOpDumpVal;
++ IMG_UINT32 ui32LastReadOpDumpVal;
++
++} PVRSRV_SYNC_DATA;
++
++typedef struct _PVRSRV_CLIENT_SYNC_INFO_
++{
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++
++
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++
++
++ IMG_HANDLE hMappingInfo;
++
++
++ IMG_HANDLE hKernelSyncInfo;
++
++} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
++
++
++typedef struct PVRSRV_RESOURCE_TAG
++{
++ volatile IMG_UINT32 ui32Lock;
++ IMG_UINT32 ui32ID;
++}PVRSRV_RESOURCE;
++typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE;
++
++
++typedef IMG_VOID (*PFN_CMD_COMPLETE) (IMG_HANDLE);
++typedef IMG_VOID (**PPFN_CMD_COMPLETE) (IMG_HANDLE);
++
++typedef IMG_BOOL (*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*);
++typedef IMG_BOOL (**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*);
++
++
++typedef struct _IMG_RECT_
++{
++ IMG_INT32 x0;
++ IMG_INT32 y0;
++ IMG_INT32 x1;
++ IMG_INT32 y1;
++}IMG_RECT;
++
++typedef struct _IMG_RECT_16_
++{
++ IMG_INT16 x0;
++ IMG_INT16 y0;
++ IMG_INT16 x1;
++ IMG_INT16 y1;
++}IMG_RECT_16;
++
++
++typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_SYS_PHYADDR**,
++ IMG_SIZE_T*,
++ IMG_VOID**,
++ IMG_HANDLE*,
++ IMG_BOOL*);
++
++
++typedef struct DISPLAY_DIMS_TAG
++{
++ IMG_UINT32 ui32ByteStride;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++} DISPLAY_DIMS;
++
++
++typedef struct DISPLAY_FORMAT_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++} DISPLAY_FORMAT;
++
++typedef struct DISPLAY_SURF_ATTRIBUTES_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++
++ DISPLAY_DIMS sDims;
++} DISPLAY_SURF_ATTRIBUTES;
++
++
++typedef struct DISPLAY_MODE_INFO_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++
++ DISPLAY_DIMS sDims;
++
++ IMG_UINT32 ui32RefreshHZ;
++
++ IMG_UINT32 ui32OEMFlags;
++} DISPLAY_MODE_INFO;
++
++
++
++#define MAX_DISPLAY_NAME_SIZE (50)
++
++typedef struct DISPLAY_INFO_TAG
++{
++ IMG_UINT32 ui32MaxSwapChains;
++
++ IMG_UINT32 ui32MaxSwapChainBuffers;
++
++ IMG_UINT32 ui32MinSwapInterval;
++
++ IMG_UINT32 ui32MaxSwapInterval;
++
++ IMG_UINT32 ui32PhysicalWidthmm;
++ IMG_UINT32 ui32PhysicalHeightmm;
++
++ IMG_CHAR szDisplayName[MAX_DISPLAY_NAME_SIZE];
++
++#if defined(SUPPORT_HW_CURSOR)
++ IMG_UINT16 ui32CursorWidth;
++ IMG_UINT16 ui32CursorHeight;
++#endif
++
++} DISPLAY_INFO;
++
++typedef struct ACCESS_INFO_TAG
++{
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32FBPhysBaseAddress;
++ IMG_UINT32 ui32FBMemAvailable;
++ IMG_UINT32 ui32SysPhysBaseAddress;
++ IMG_UINT32 ui32SysSize;
++ IMG_UINT32 ui32DevIRQ;
++}ACCESS_INFO;
++
++
++typedef struct PVRSRV_CURSOR_SHAPE_TAG
++{
++ IMG_UINT16 ui16Width;
++ IMG_UINT16 ui16Height;
++ IMG_INT16 i16XHot;
++ IMG_INT16 i16YHot;
++
++
++ IMG_VOID* pvMask;
++ IMG_INT16 i16MaskByteStride;
++
++
++ IMG_VOID* pvColour;
++ IMG_INT16 i16ColourByteStride;
++ PVRSRV_PIXEL_FORMAT eColourPixelFormat;
++} PVRSRV_CURSOR_SHAPE;
++
++#define PVRSRV_SET_CURSOR_VISIBILITY (1<<0)
++#define PVRSRV_SET_CURSOR_POSITION (1<<1)
++#define PVRSRV_SET_CURSOR_SHAPE (1<<2)
++#define PVRSRV_SET_CURSOR_ROTATION (1<<3)
++
++typedef struct PVRSRV_CURSOR_INFO_TAG
++{
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_BOOL bVisible;
++
++
++ IMG_INT16 i16XPos;
++ IMG_INT16 i16YPos;
++
++
++ PVRSRV_CURSOR_SHAPE sCursorShape;
++
++
++ IMG_UINT32 ui32Rotation;
++
++} PVRSRV_CURSOR_INFO;
++
++
++typedef struct _PVRSRV_REGISTRY_INFO_
++{
++ IMG_UINT32 ui32DevCookie;
++ IMG_PCHAR pszKey;
++ IMG_PCHAR pszValue;
++ IMG_PCHAR pszBuf;
++ IMG_UINT32 ui32BufSize;
++} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << 0)
++#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << 0)
++
++#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << 1)
++#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << 1)
++
++#define MAX_BUFFER_DEVICE_NAME_SIZE (50)
++
++typedef struct BUFFER_INFO_TAG
++{
++ IMG_UINT32 ui32BufferCount;
++ IMG_UINT32 ui32BufferDeviceID;
++ PVRSRV_PIXEL_FORMAT pixelformat;
++ IMG_UINT32 ui32ByteStride;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32Flags;
++ IMG_CHAR szDeviceName[MAX_BUFFER_DEVICE_NAME_SIZE];
++} BUFFER_INFO;
++
++typedef enum _OVERLAY_DEINTERLACE_MODE_
++{
++ WEAVE=0x0,
++ BOB_ODD,
++ BOB_EVEN,
++ BOB_EVEN_NONINTERLEAVED
++} OVERLAY_DEINTERLACE_MODE;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/sgx_options.h b/drivers/gpu/drm/mrst/pvr/include4/sgx_options.h
+new file mode 100644
+index 0000000..69dd25a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/sgx_options.h
+@@ -0,0 +1,224 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(DEBUG) || defined (INTERNAL_TEST)
++#define DEBUG_SET_OFFSET OPTIONS_BIT0
++#define OPTIONS_BIT0 0x1
++#else
++#define OPTIONS_BIT0 0x0
++#endif
++
++#if defined(PDUMP) || defined (INTERNAL_TEST)
++#define PDUMP_SET_OFFSET OPTIONS_BIT1
++#define OPTIONS_BIT1 (0x1 << 1)
++#else
++#define OPTIONS_BIT1 0x0
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) || defined (INTERNAL_TEST)
++#define PVRSRV_USSE_EDM_STATUS_DEBUG_SET_OFFSET OPTIONS_BIT2
++#define OPTIONS_BIT2 (0x1 << 2)
++#else
++#define OPTIONS_BIT2 0x0
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY) || defined (INTERNAL_TEST)
++#define SUPPORT_HW_RECOVERY_SET_OFFSET OPTIONS_BIT3
++#define OPTIONS_BIT3 (0x1 << 3)
++#else
++#define OPTIONS_BIT3 0x0
++#endif
++
++
++
++#if defined(PVR_SECURE_HANDLES) || defined (INTERNAL_TEST)
++#define PVR_SECURE_HANDLES_SET_OFFSET OPTIONS_BIT4
++#define OPTIONS_BIT4 (0x1 << 4)
++#else
++#define OPTIONS_BIT4 0x0
++#endif
++
++#if defined(SGX_BYPASS_SYSTEM_CACHE) || defined (INTERNAL_TEST)
++#define SGX_BYPASS_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT5
++#define OPTIONS_BIT5 (0x1 << 5)
++#else
++#define OPTIONS_BIT5 0x0
++#endif
++
++#if defined(SGX_DMS_AGE_ENABLE) || defined (INTERNAL_TEST)
++#define SGX_DMS_AGE_ENABLE_SET_OFFSET OPTIONS_BIT6
++#define OPTIONS_BIT6 (0x1 << 6)
++#else
++#define OPTIONS_BIT6 0x0
++#endif
++
++#if defined(SGX_FAST_DPM_INIT) || defined (INTERNAL_TEST)
++#define SGX_FAST_DPM_INIT_SET_OFFSET OPTIONS_BIT8
++#define OPTIONS_BIT8 (0x1 << 8)
++#else
++#define OPTIONS_BIT8 0x0
++#endif
++
++#if defined(SGX_FEATURE_DCU) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_DCU_SET_OFFSET OPTIONS_BIT9
++#define OPTIONS_BIT9 (0x1 << 9)
++#else
++#define OPTIONS_BIT9 0x0
++#endif
++
++#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_MP_SET_OFFSET OPTIONS_BIT10
++#define OPTIONS_BIT10 (0x1 << 10)
++#else
++#define OPTIONS_BIT10 0x0
++#endif
++
++#if defined(SGX_FEATURE_MULTITHREADED_UKERNEL) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_MULTITHREADED_UKERNEL_SET_OFFSET OPTIONS_BIT11
++#define OPTIONS_BIT11 (0x1 << 11)
++#else
++#define OPTIONS_BIT11 0x0
++#endif
++
++
++
++#if defined(SGX_FEATURE_OVERLAPPED_SPM) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_OVERLAPPED_SPM_SET_OFFSET OPTIONS_BIT12
++#define OPTIONS_BIT12 (0x1 << 12)
++#else
++#define OPTIONS_BIT12 0x0
++#endif
++
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT13
++#define OPTIONS_BIT13 (0x1 << 13)
++#else
++#define OPTIONS_BIT13 0x0
++#endif
++
++#if defined(SGX_SUPPORT_HWPROFILING) || defined (INTERNAL_TEST)
++#define SGX_SUPPORT_HWPROFILING_SET_OFFSET OPTIONS_BIT14
++#define OPTIONS_BIT14 (0x1 << 14)
++#else
++#define OPTIONS_BIT14 0x0
++#endif
++
++
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) || defined (INTERNAL_TEST)
++#define SUPPORT_ACTIVE_POWER_MANAGEMENT_SET_OFFSET OPTIONS_BIT15
++#define OPTIONS_BIT15 (0x1 << 15)
++#else
++#define OPTIONS_BIT15 0x0
++#endif
++
++#if defined(SUPPORT_DISPLAYCONTROLLER_TILING) || defined (INTERNAL_TEST)
++#define SUPPORT_DISPLAYCONTROLLER_TILING_SET_OFFSET OPTIONS_BIT16
++#define OPTIONS_BIT16 (0x1 << 16)
++#else
++#define OPTIONS_BIT16 0x0
++#endif
++
++#if defined(SUPPORT_PERCONTEXT_PB) || defined (INTERNAL_TEST)
++#define SUPPORT_PERCONTEXT_PB_SET_OFFSET OPTIONS_BIT17
++#define OPTIONS_BIT17 (0x1 << 17)
++#else
++#define OPTIONS_BIT17 0x0
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_HWPERF_SET_OFFSET OPTIONS_BIT18
++#define OPTIONS_BIT18 (0x1 << 18)
++#else
++#define OPTIONS_BIT18 0x0
++#endif
++
++
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_MMU_DUMMY_PAGE_SET_OFFSET OPTIONS_BIT19
++#define OPTIONS_BIT19 (0x1 << 19)
++#else
++#define OPTIONS_BIT19 0x0
++#endif
++
++#if defined(SUPPORT_SGX_PRIORITY_SCHEDULING) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_PRIORITY_SCHEDULING_SET_OFFSET OPTIONS_BIT20
++#define OPTIONS_BIT20 (0x1 << 20)
++#else
++#define OPTIONS_BIT20 0x0
++#endif
++
++#if defined(SGX_LOW_LATENCY_SCHEDULING) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_LOW_LATENCY_SCHEDULING_SET_OFFSET OPTIONS_BIT21
++#define OPTIONS_BIT21 (0x1 << 21)
++#else
++#define OPTIONS_BIT21 0x0
++#endif
++
++#if defined(USE_SUPPORT_NO_TA3D_OVERLAP) || defined (INTERNAL_TEST)
++#define USE_SUPPORT_NO_TA3D_OVERLAP_SET_OFFSET OPTIONS_BIT22
++#define OPTIONS_BIT22 (0x1 << 22)
++#else
++#define OPTIONS_BIT22 0x0
++#endif
++
++
++#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
++#define OPTIONS_HIGHBYTE ((SGX_FEATURE_MP_CORE_COUNT-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET)
++#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 28UL
++#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF
++#else
++#define OPTIONS_HIGHBYTE 0x0
++#endif
++
++
++
++#define SGX_BUILD_OPTIONS \
++ OPTIONS_BIT0 |\
++ OPTIONS_BIT1 |\
++ OPTIONS_BIT2 |\
++ OPTIONS_BIT3 |\
++ OPTIONS_BIT4 |\
++ OPTIONS_BIT5 |\
++ OPTIONS_BIT6 |\
++ OPTIONS_BIT8 |\
++ OPTIONS_BIT9 |\
++ OPTIONS_BIT10 |\
++ OPTIONS_BIT11 |\
++ OPTIONS_BIT12 |\
++ OPTIONS_BIT13 |\
++ OPTIONS_BIT14 |\
++ OPTIONS_BIT15 |\
++ OPTIONS_BIT16 |\
++ OPTIONS_BIT17 |\
++ OPTIONS_BIT18 |\
++ OPTIONS_BIT19 |\
++ OPTIONS_BIT20 |\
++ OPTIONS_BIT21 |\
++ OPTIONS_HIGHBYTE
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h b/drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h
+new file mode 100644
+index 0000000..6cdbc1a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h
+@@ -0,0 +1,323 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXAPI_KM_H__
++#define __SGXAPI_KM_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "sgxdefs.h"
++
++#if defined(__linux__) && !defined(USE_CODE)
++ #if defined(__KERNEL__)
++ #include <asm/unistd.h>
++ #else
++ #include <unistd.h>
++ #endif
++#endif
++
++#define SGX_UNDEFINED_HEAP_ID (~0LU)
++#define SGX_GENERAL_HEAP_ID 0
++#define SGX_TADATA_HEAP_ID 1
++#define SGX_KERNEL_CODE_HEAP_ID 2
++#define SGX_KERNEL_DATA_HEAP_ID 3
++#define SGX_PIXELSHADER_HEAP_ID 4
++#define SGX_VERTEXSHADER_HEAP_ID 5
++#define SGX_PDSPIXEL_CODEDATA_HEAP_ID 6
++#define SGX_PDSVERTEX_CODEDATA_HEAP_ID 7
++#define SGX_SYNCINFO_HEAP_ID 8
++#define SGX_3DPARAMETERS_HEAP_ID 9
++#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++#define SGX_GENERAL_MAPPING_HEAP_ID 10
++#endif
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_2D_HEAP_ID 11
++#else
++#if defined(FIX_HW_BRN_26915)
++#define SGX_CGBUFFER_HEAP_ID 12
++#endif
++#endif
++#define SGX_MAX_HEAP_ID 13
++
++
++#define SGX_MAX_TA_STATUS_VALS 32
++#define SGX_MAX_3D_STATUS_VALS 3
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++#define SGX_MAX_TA_DST_SYNCS 1
++#define SGX_MAX_TA_SRC_SYNCS 1
++#define SGX_MAX_3D_SRC_SYNCS 4
++#else
++#define SGX_MAX_SRC_SYNCS 4
++#endif
++
++#ifdef SUPPORT_SGX_HWPERF
++
++#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 9
++
++#define PVRSRV_SGX_HWPERF_INVALID 0x1
++
++#define PVRSRV_SGX_HWPERF_TRANSFER 0x2
++#define PVRSRV_SGX_HWPERF_TA 0x3
++#define PVRSRV_SGX_HWPERF_3D 0x4
++#define PVRSRV_SGX_HWPERF_2D 0x5
++
++#define PVRSRV_SGX_HWPERF_MK_EVENT 0x101
++#define PVRSRV_SGX_HWPERF_MK_TA 0x102
++#define PVRSRV_SGX_HWPERF_MK_3D 0x103
++#define PVRSRV_SGX_HWPERF_MK_2D 0x104
++
++#define PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT 28
++#define PVRSRV_SGX_HWPERF_TYPE_OP_MASK ((1UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) - 1)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_START (0UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_END (1Ul << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_START (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_END (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_START (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_END (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_START (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_END (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_START (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_END (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_START (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_END (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_START (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_END (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_START (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_END (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_START (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_END (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_OFF (0x0)
++#define PVRSRV_SGX_HWPERF_GRAPHICS_ON (1UL << 0)
++#define PVRSRV_SGX_HWPERF_MK_EXECUTION_ON (1UL << 1)
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CB_ENTRY_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32Ordinal;
++ IMG_UINT32 ui32Clocksx16;
++ IMG_UINT32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++} PVRSRV_SGX_HWPERF_CB_ENTRY;
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CBDATA_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32StartTimeWraps;
++ IMG_UINT32 ui32StartTime;
++ IMG_UINT32 ui32EndTimeWraps;
++ IMG_UINT32 ui32EndTime;
++ IMG_UINT32 ui32ClockSpeed;
++ IMG_UINT32 ui32TimeMax;
++} PVRSRV_SGX_HWPERF_CBDATA;
++
++
++typedef struct _SGX_MISC_INFO_HWPERF_RETRIEVE_CB
++{
++ PVRSRV_SGX_HWPERF_CBDATA* psHWPerfData;
++ IMG_UINT32 ui32ArraySize;
++ IMG_UINT32 ui32DataCount;
++ IMG_UINT32 ui32Time;
++} SGX_MISC_INFO_HWPERF_RETRIEVE_CB;
++#endif
++
++
++typedef struct _CTL_STATUS_
++{
++ IMG_DEV_VIRTADDR sStatusDevAddr;
++ IMG_UINT32 ui32StatusValue;
++} CTL_STATUS;
++
++
++typedef enum _SGX_MISC_INFO_REQUEST_
++{
++ SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0,
++ SGX_MISC_INFO_REQUEST_SGXREV,
++ SGX_MISC_INFO_REQUEST_DRIVER_SGXREV,
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ SGX_MISC_INFO_REQUEST_MEMREAD,
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_ON,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF,
++ SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB,
++#endif
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ SGX_MISC_INFO_REQUEST_SET_BREAKPOINT,
++#endif
++ SGX_MISC_INFO_DUMP_DEBUG_INFO,
++ SGX_MISC_INFO_PANIC,
++ SGX_MISC_INFO_REQUEST_FORCE_I16 = 0x7fff
++} SGX_MISC_INFO_REQUEST;
++
++
++typedef struct _PVRSRV_SGX_MISCINFO_FEATURES
++{
++ IMG_UINT32 ui32CoreRev;
++ IMG_UINT32 ui32CoreID;
++ IMG_UINT32 ui32DDKVersion;
++ IMG_UINT32 ui32DDKBuild;
++ IMG_UINT32 ui32CoreIdSW;
++ IMG_UINT32 ui32CoreRevSW;
++ IMG_UINT32 ui32BuildOptions;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ IMG_UINT32 ui32DeviceMemValue;
++#endif
++} PVRSRV_SGX_MISCINFO_FEATURES;
++
++
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++typedef struct _SGX_BREAKPOINT_INFO
++{
++
++ IMG_BOOL bBPEnable;
++
++
++
++ IMG_UINT32 ui32BPIndex;
++
++ IMG_DEV_VIRTADDR sBPDevVAddr;
++} SGX_BREAKPOINT_INFO;
++#endif
++
++typedef struct _SGX_MISC_INFO_
++{
++ SGX_MISC_INFO_REQUEST eRequest;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_HANDLE hDevMemContext;
++#endif
++ union
++ {
++ IMG_UINT32 reserved;
++ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ IMG_UINT32 ui32SGXClockSpeed;
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ SGX_BREAKPOINT_INFO sSGXBreakpointInfo;
++#endif
++#ifdef SUPPORT_SGX_HWPERF
++ IMG_UINT32 ui32NewHWPerfStatus;
++ SGX_MISC_INFO_HWPERF_RETRIEVE_CB sRetrieveCB;
++#endif
++ } uData;
++} SGX_MISC_INFO;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_MAX_BLT_SRC_SYNCS 3
++#endif
++
++
++#define SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH 256
++
++typedef struct _SGX_KICKTA_DUMPBITMAP_
++{
++ IMG_DEV_VIRTADDR sDevBaseAddr;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32Stride;
++ IMG_UINT32 ui32PDUMPFormat;
++ IMG_UINT32 ui32BytesPP;
++ IMG_CHAR pszName[SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH];
++} SGX_KICKTA_DUMPBITMAP, *PSGX_KICKTA_DUMPBITMAP;
++
++#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE (16)
++
++typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_
++{
++
++ IMG_UINT32 ui32CacheControl;
++
++} PVRSRV_SGX_PDUMP_CONTEXT;
++
++
++typedef struct _SGX_KICKTA_DUMP_ROFF_
++{
++ IMG_HANDLE hKernelMemInfo;
++ IMG_UINT32 uiAllocIndex;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ IMG_PCHAR pszName;
++} SGX_KICKTA_DUMP_ROFF, *PSGX_KICKTA_DUMP_ROFF;
++
++typedef struct _SGX_KICKTA_DUMP_BUFFER_
++{
++ IMG_UINT32 ui32SpaceUsed;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32BufferSize;
++ IMG_UINT32 ui32BackEndLength;
++ IMG_UINT32 uiAllocIndex;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_PVOID pvLinAddr;
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ IMG_HANDLE hCtrlKernelMemInfo;
++ IMG_DEV_VIRTADDR sCtrlDevVAddr;
++#endif
++ IMG_PCHAR pszName;
++} SGX_KICKTA_DUMP_BUFFER, *PSGX_KICKTA_DUMP_BUFFER;
++
++#ifdef PDUMP
++typedef struct _SGX_KICKTA_PDUMP_
++{
++
++ PSGX_KICKTA_DUMPBITMAP psPDumpBitmapArray;
++ IMG_UINT32 ui32PDumpBitmapSize;
++
++
++ PSGX_KICKTA_DUMP_BUFFER psBufferArray;
++ IMG_UINT32 ui32BufferArraySize;
++
++
++ PSGX_KICKTA_DUMP_ROFF psROffArray;
++ IMG_UINT32 ui32ROffArraySize;
++} SGX_KICKTA_PDUMP, *PSGX_KICKTA_PDUMP;
++#endif
++
++#if defined(TRANSFER_QUEUE)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_MAX_2D_BLIT_CMD_SIZE 26
++#define SGX_MAX_2D_SRC_SYNC_OPS 3
++#endif
++#define SGX_MAX_TRANSFER_STATUS_VALS 2
++#define SGX_MAX_TRANSFER_SYNC_OPS 5
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/sgxscript.h b/drivers/gpu/drm/mrst/pvr/include4/sgxscript.h
+new file mode 100644
+index 0000000..fb5efbb
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/sgxscript.h
+@@ -0,0 +1,81 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXSCRIPT_H__
++#define __SGXSCRIPT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define SGX_MAX_INIT_COMMANDS 64
++#define SGX_MAX_DEINIT_COMMANDS 16
++
++typedef enum _SGX_INIT_OPERATION
++{
++ SGX_INIT_OP_ILLEGAL = 0,
++ SGX_INIT_OP_WRITE_HW_REG,
++#if defined(PDUMP)
++ SGX_INIT_OP_PDUMP_HW_REG,
++#endif
++ SGX_INIT_OP_HALT
++} SGX_INIT_OPERATION;
++
++typedef union _SGX_INIT_COMMAND
++{
++ SGX_INIT_OPERATION eOp;
++ struct {
++ SGX_INIT_OPERATION eOp;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ } sWriteHWReg;
++#if defined(PDUMP)
++ struct {
++ SGX_INIT_OPERATION eOp;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ } sPDumpHWReg;
++#endif
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ struct {
++ SGX_INIT_OPERATION eOp;
++ } sWorkaroundBRN22997;
++#endif
++} SGX_INIT_COMMAND;
++
++typedef struct _SGX_INIT_SCRIPTS_
++{
++ SGX_INIT_COMMAND asInitCommandsPart1[SGX_MAX_INIT_COMMANDS];
++ SGX_INIT_COMMAND asInitCommandsPart2[SGX_MAX_INIT_COMMANDS];
++ SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS];
++} SGX_INIT_SCRIPTS;
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore
+new file mode 100644
+index 0000000..f558f8b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore
+@@ -0,0 +1,6 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++binary_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common
+new file mode 100644
+index 0000000..c3ab6f4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common
+@@ -0,0 +1,41 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++ifeq ($(SUPPORT_DRI_DRM),1)
++DISPLAY_CONTROLLER_SOURCES_ROOT = $(KBUILDROOT)/$(DISPLAY_CONTROLLER_DIR)
++else
++DISPLAY_CONTROLLER_SOURCES_ROOT = ..
++endif
++
++INCLUDES += -I$(EURASIAROOT)/include4 \
++ -I$(EURASIAROOT)/services4/include \
++ -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++ -I$(EURASIAROOT)/services4/system/include \
++ -I$(EURASIAROOT)/services4/srvkm/env/linux/mrst
++
++SOURCES += $(DISPLAY_CONTROLLER_SOURCES_ROOT)/mrstlfb_displayclass.c \
++ $(DISPLAY_CONTROLLER_SOURCES_ROOT)/mrstlfb_linux.c
++MODULE_CFLAGS += -DPVR_MRST_FB_SET_PAR_ON_INIT
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h
+new file mode 100644
+index 0000000..9f4a116
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h
+@@ -0,0 +1,295 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __MRSTLFB_H__
++#define __MRSTLFB_H__
++
++#include <drm/drmP.h>
++#include "psb_intel_reg.h"
++
++#define MRST_USING_INTERRUPTS
++
++#define PSB_HWSTAM 0x2098
++#define PSB_INSTPM 0x20C0
++#define PSB_INT_IDENTITY_R 0x20A4
++#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
++#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
++#define _PSB_IRQ_SGX_FLAG (1<<18)
++#define _PSB_IRQ_MSVDX_FLAG (1<<19)
++#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
++#define PSB_INT_MASK_R 0x20A8
++#define PSB_INT_ENABLE_R 0x20A0
++
++/* IPC message and command defines used to enable/disable mipi panel voltages */
++#define IPC_MSG_PANEL_ON_OFF 0xE9
++#define IPC_CMD_PANEL_ON 1
++#define IPC_CMD_PANEL_OFF 0
++
++typedef void * MRST_HANDLE;
++
++typedef enum tag_mrst_bool
++{
++ MRST_FALSE = 0,
++ MRST_TRUE = 1,
++} MRST_BOOL, *MRST_PBOOL;
++
++typedef IMG_INT (* MRSTLFB_VSYNC_ISR_PFN)(struct drm_device* psDrmDevice, int iPipe);
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++
++
++typedef struct MRSTLFB_BUFFER_TAG
++{
++
++ IMG_UINT32 ui32BufferSize;
++ union {
++
++ IMG_SYS_PHYADDR *psNonCont;
++
++ IMG_SYS_PHYADDR sCont;
++ } uSysAddr;
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++ IMG_CPU_VIRTADDR sCPUVAddr;
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++ IMG_BOOL bIsContiguous;
++
++ IMG_BOOL bIsAllocated;
++
++ IMG_UINT32 ui32OwnerTaskID;
++
++ struct MRSTLFB_BUFFER_TAG *psNext;
++} MRSTLFB_BUFFER;
++
++typedef struct MRSTLFB_VSYNC_FLIP_ITEM_TAG
++{
++
++
++
++ MRST_HANDLE hCmdComplete;
++
++ unsigned long ulSwapInterval;
++
++ MRST_BOOL bValid;
++
++ MRST_BOOL bFlipped;
++
++ MRST_BOOL bCmdCompleted;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++} MRSTLFB_VSYNC_FLIP_ITEM;
++
++typedef struct MRSTLFB_SWAPCHAIN_TAG
++{
++
++ unsigned long ulBufferCount;
++
++ MRSTLFB_BUFFER **ppsBuffer;
++
++ MRSTLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++
++
++ unsigned long ulInsertIndex;
++
++
++ unsigned long ulRemoveIndex;
++
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable;
++
++
++ MRST_BOOL bFlushCommands;
++
++
++ unsigned long ulSetFlushStateRefCount;
++
++
++ MRST_BOOL bBlanked;
++
++
++ spinlock_t *psSwapChainLock;
++
++
++ struct drm_driver *psDrmDriver;
++
++
++ struct drm_device *psDrmDev;
++
++ struct MRSTLFB_SWAPCHAIN_TAG *psNext;
++
++ struct MRSTLFB_DEVINFO_TAG *psDevInfo;
++
++} MRSTLFB_SWAPCHAIN;
++
++typedef struct MRSTLFB_FBINFO_TAG
++{
++ unsigned long ulFBSize;
++ unsigned long ulBufferSize;
++ unsigned long ulRoundedBufferSize;
++ unsigned long ulWidth;
++ unsigned long ulHeight;
++ unsigned long ulByteStride;
++
++
++
++ IMG_SYS_PHYADDR sSysAddr;
++ IMG_CPU_VIRTADDR sCPUVAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++ PVRSRV_PIXEL_FORMAT ePixelFormat;
++}MRSTLFB_FBINFO;
++
++/**
++ * If DRI is enabled then extend drm_device
++ */
++typedef struct MRSTLFB_DEVINFO_TAG
++{
++ unsigned long ulDeviceID;
++
++ struct drm_device *psDrmDevice;
++
++ MRSTLFB_BUFFER sSystemBuffer;
++
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE sPVRJTable;
++
++
++ PVRSRV_DC_SRV2DISP_KMJTABLE sDCJTable;
++
++
++ unsigned long ulRefCount;
++
++
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++
++ IMG_UINT32 ui32SwapChainNum;
++
++ IMG_UINT32 ui32SwapChainIdCounter;
++
++
++ void *pvRegs;
++
++
++ MRST_BOOL bFlushCommands;
++
++
++ struct fb_info *psLINFBInfo;
++
++
++ struct notifier_block sLINNotifBlock;
++
++
++ MRST_BOOL bDeviceSuspended;
++
++
++ spinlock_t sSwapChainLock;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sDisplayDevVAddr;
++
++ DISPLAY_INFO sDisplayInfo;
++
++
++ DISPLAY_FORMAT sDisplayFormat;
++
++
++ DISPLAY_DIMS sDisplayDim;
++
++ IMG_UINT32 ui32MainPipe;
++
++} MRSTLFB_DEVINFO;
++
++#if 0
++#define MRSTLFB_PAGE_SIZE 4096
++#define MRSTLFB_PAGE_MASK (MRSTLFB_PAGE_SIZE - 1)
++#define MRSTLFB_PAGE_TRUNC (~MRSTLFB_PAGE_MASK)
++
++#define MRSTLFB_PAGE_ROUNDUP(x) (((x) + MRSTLFB_PAGE_MASK) & MRSTLFB_PAGE_TRUNC)
++#endif
++
++#ifdef DEBUG
++#define DEBUG_PRINTK(x) printk x
++#else
++#define DEBUG_PRINTK(x)
++#endif
++
++#define DISPLAY_DEVICE_NAME "PowerVR Moorestown Linux Display Driver"
++#define DRVNAME "mrstlfb"
++#define DEVNAME DRVNAME
++#define DRIVER_PREFIX DRVNAME
++
++typedef enum _MRST_ERROR_
++{
++ MRST_OK = 0,
++ MRST_ERROR_GENERIC = 1,
++ MRST_ERROR_OUT_OF_MEMORY = 2,
++ MRST_ERROR_TOO_FEW_BUFFERS = 3,
++ MRST_ERROR_INVALID_PARAMS = 4,
++ MRST_ERROR_INIT_FAILURE = 5,
++ MRST_ERROR_CANT_REGISTER_CALLBACK = 6,
++ MRST_ERROR_INVALID_DEVICE = 7,
++ MRST_ERROR_DEVICE_REGISTER_FAILED = 8
++} MRST_ERROR;
++
++
++#ifndef UNREFERENCED_PARAMETER
++#define UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++MRST_ERROR MRSTLFBInit(struct drm_device * dev);
++MRST_ERROR MRSTLFBDeinit(void);
++
++MRST_ERROR MRSTLFBAllocBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, IMG_UINT32 ui32Size, MRSTLFB_BUFFER **ppBuffer);
++MRST_ERROR MRSTLFBFreeBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, MRSTLFB_BUFFER **ppBuffer);
++
++void *MRSTLFBAllocKernelMem(unsigned long ulSize);
++void MRSTLFBFreeKernelMem(void *pvMem);
++MRST_ERROR MRSTLFBGetLibFuncAddr(char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
++MRST_ERROR MRSTLFBInstallVSyncISR (MRSTLFB_DEVINFO *psDevInfo, MRSTLFB_VSYNC_ISR_PFN pVsyncHandler);
++MRST_ERROR MRSTLFBUninstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo);
++MRST_BOOL MRSTLFBVSyncIHandler(MRSTLFB_SWAPCHAIN *psSwapChain);
++
++void MRSTLFBEnableVSyncInterrupt(MRSTLFB_DEVINFO *psDevInfo);
++void MRSTLFBDisableVSyncInterrupt(MRSTLFB_DEVINFO *psDevInfo);
++
++void MRSTLFBEnableDisplayRegisterAccess(void);
++void MRSTLFBDisableDisplayRegisterAccess(void);
++
++void MRSTLFBFlip(MRSTLFB_DEVINFO *psDevInfo, unsigned long uiAddr);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+new file mode 100644
+index 0000000..adca7e2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+@@ -0,0 +1,2056 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/console.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++#include <asm/ipc_defs.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "mrstlfb.h"
++
++#include "psb_fb.h"
++#include "psb_drv.h"
++#include "ospm_power.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++#error "SUPPORT_DRI_DRM must be set"
++#endif
++
++IMG_UINT32 gui32MRSTDisplayDeviceID;
++
++extern void MRSTLFBVSyncWriteReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset, unsigned long ulValue);
++extern unsigned long MRSTLFBVSyncReadReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset);
++
++PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++#ifdef MODESET_640x480
++extern int psb_to_640 (struct fb_info* info);
++#endif
++
++extern void mrst_init_LGE_MIPI(struct drm_device *dev);
++extern void mrst_init_NSC_MIPI_bridge(struct drm_device *dev);
++
++struct psbfb_par {
++ struct drm_device *dev;
++ void *psbfb;
++
++ int dpms_state;
++
++ int crtc_count;
++
++ uint32_t crtc_ids[2];
++};
++
++extern void* psbfb_vdc_reg(struct drm_device* dev);
++
++static void *gpvAnchor;
++
++
++#define MRSTLFB_COMMAND_COUNT 1
++
++static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = 0;
++
++static MRSTLFB_DEVINFO * GetAnchorPtr(void)
++{
++ return (MRSTLFB_DEVINFO *)gpvAnchor;
++}
++
++static void SetAnchorPtr(MRSTLFB_DEVINFO *psDevInfo)
++{
++ gpvAnchor = (void*)psDevInfo;
++}
++
++
++static void FlushInternalVSyncQueue(MRSTLFB_SWAPCHAIN *psSwapChain)
++{
++ MRSTLFB_VSYNC_FLIP_ITEM *psFlipItem;
++ unsigned long ulMaxIndex;
++ unsigned long i;
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ for(i = 0; i < psSwapChain->ulBufferCount; i++)
++ {
++ if (psFlipItem->bValid == MRST_FALSE)
++ {
++ continue;
++ }
++
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Flushing swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
++
++ if(psFlipItem->bFlipped == MRST_FALSE)
++ {
++
++ MRSTLFBFlip(psSwapChain->psDevInfo, (unsigned long)psFlipItem->sDevVAddr.uiAddr);
++ }
++
++ if(psFlipItem->bCmdCompleted == MRST_FALSE)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Calling command complete for swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, IMG_TRUE);
++ }
++
++
++ psSwapChain->ulRemoveIndex++;
++
++ if(psSwapChain->ulRemoveIndex > ulMaxIndex)
++ {
++ psSwapChain->ulRemoveIndex = 0;
++ }
++
++
++ psFlipItem->bFlipped = MRST_FALSE;
++ psFlipItem->bCmdCompleted = MRST_FALSE;
++ psFlipItem->bValid = MRST_FALSE;
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ }
++
++ psSwapChain->ulInsertIndex = 0;
++ psSwapChain->ulRemoveIndex = 0;
++}
++
++static void SetFlushStateInternalNoLock(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ MRSTLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
++
++ if (psSwapChain == NULL)
++ {
++ return;
++ }
++
++ if (bFlushState)
++ {
++ if (psSwapChain->ulSetFlushStateRefCount == 0)
++ {
++ MRSTLFBDisableVSyncInterrupt(psDevInfo);
++ psSwapChain->bFlushCommands = MRST_TRUE;
++ FlushInternalVSyncQueue(psSwapChain);
++ }
++ psSwapChain->ulSetFlushStateRefCount++;
++ }
++ else
++ {
++ if (psSwapChain->ulSetFlushStateRefCount != 0)
++ {
++ psSwapChain->ulSetFlushStateRefCount--;
++ if (psSwapChain->ulSetFlushStateRefCount == 0)
++ {
++ psSwapChain->bFlushCommands = MRST_FALSE;
++ MRSTLFBEnableVSyncInterrupt(psDevInfo);
++ }
++ }
++ }
++}
++
++static IMG_VOID SetFlushStateInternal(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ SetFlushStateInternalNoLock(psDevInfo, bFlushState);
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++}
++
++static void SetFlushStateExternal(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ if (psDevInfo->bFlushCommands != bFlushState)
++ {
++ psDevInfo->bFlushCommands = bFlushState;
++ SetFlushStateInternalNoLock(psDevInfo, bFlushState);
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++}
++
++static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
++{
++ MRSTLFB_DEVINFO *psDevInfo = (MRSTLFB_DEVINFO *)hDevice;
++
++ switch (ui32State)
++ {
++ case DC_STATE_FLUSH_COMMANDS:
++ SetFlushStateExternal(psDevInfo, MRST_TRUE);
++ break;
++ case DC_STATE_NO_FLUSH_COMMANDS:
++ SetFlushStateExternal(psDevInfo, MRST_FALSE);
++ break;
++ default:
++ break;
++ }
++
++ return;
++}
++
++static int FrameBufferEvents(struct notifier_block *psNotif,
++ unsigned long event, void *data)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ struct fb_event *psFBEvent = (struct fb_event *)data;
++ MRST_BOOL bBlanked;
++
++
++ if (event != FB_EVENT_BLANK)
++ {
++ return 0;
++ }
++
++ psDevInfo = GetAnchorPtr();
++ psSwapChain = psDevInfo->psSwapChain;
++
++ bBlanked = (*(IMG_INT *)psFBEvent->data != 0) ? MRST_TRUE: MRST_FALSE;
++
++ if (bBlanked != psSwapChain->bBlanked)
++ {
++ psSwapChain->bBlanked = bBlanked;
++
++ if (bBlanked)
++ {
++
++ SetFlushStateInternal(psDevInfo, MRST_TRUE);
++ }
++ else
++ {
++
++ SetFlushStateInternal(psDevInfo, MRST_FALSE);
++ }
++ }
++
++ return 0;
++}
++
++
++static MRST_ERROR UnblankDisplay(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++
++ acquire_console_sem();
++ res = fb_blank(psDevInfo->psLINFBInfo, 0);
++ release_console_sem();
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_blank failed (%d)", res);
++ return (MRST_ERROR_GENERIC);
++ }
++
++ return (MRST_OK);
++}
++
++static MRST_ERROR EnableLFBEventNotification(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++ MRST_ERROR eError;
++
++
++ memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
++
++ psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
++
++ res = fb_register_client(&psDevInfo->sLINNotifBlock);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_register_client failed (%d)", res);
++
++ return (MRST_ERROR_GENERIC);
++ }
++
++ eError = UnblankDisplay(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
++ ": UnblankDisplay failed (%d)", eError));
++ return eError;
++ }
++
++ return (MRST_OK);
++}
++
++static MRST_ERROR DisableLFBEventNotification(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++
++
++ res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_unregister_client failed (%d)", res);
++ return (MRST_ERROR_GENERIC);
++ }
++
++ return (MRST_OK);
++}
++
++static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE *phDevice,
++ PVRSRV_SYNC_DATA* psSystemBufferSyncData)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRST_ERROR eError;
++
++ UNREFERENCED_PARAMETER(ui32DeviceID);
++
++ psDevInfo = GetAnchorPtr();
++
++
++ psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
++
++ eError = UnblankDisplay(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
++ ": UnblankDisplay failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++
++
++ *phDevice = (IMG_HANDLE)psDevInfo;
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32NumFormats,
++ DISPLAY_FORMAT *psFormat)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !pui32NumFormats)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *pui32NumFormats = 1;
++
++ if(psFormat)
++ {
++ psFormat[0] = psDevInfo->sDisplayFormat;
++ }
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice,
++ DISPLAY_FORMAT *psFormat,
++ IMG_UINT32 *pui32NumDims,
++ DISPLAY_DIMS *psDim)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !psFormat || !pui32NumDims)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *pui32NumDims = 1;
++
++
++ if(psDim)
++ {
++ psDim[0] = psDevInfo->sDisplayDim;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !phBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++
++ *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !psDCInfo)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *psDCInfo = psDevInfo->sDisplayInfo;
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_SYS_PHYADDR **ppsSysAddr,
++ IMG_UINT32 *pui32ByteSize,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMapInfo,
++ IMG_BOOL *pbIsContiguous)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_BUFFER *psSystemBuffer;
++
++ if(!hDevice)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ if(!hBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ psSystemBuffer = (MRSTLFB_BUFFER *)hBuffer;
++
++ if (!ppsSysAddr)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ if( psSystemBuffer->bIsContiguous )
++ *ppsSysAddr = &psSystemBuffer->uSysAddr.sCont;
++ else
++ *ppsSysAddr = psSystemBuffer->uSysAddr.psNonCont;
++
++ if (!pui32ByteSize)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ *pui32ByteSize = psSystemBuffer->ui32BufferSize;
++
++ if (ppvCpuVAddr)
++ {
++ *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
++ }
++
++ if (phOSMapInfo)
++ {
++ *phOSMapInfo = (IMG_HANDLE)0;
++ }
++
++ if (pbIsContiguous)
++ {
++ *pbIsContiguous = psSystemBuffer->bIsContiguous;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++static MRST_ERROR MRSTLFBEnableSwapChains(MRSTLFB_DEVINFO *psDevInfo)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ if(!psDevInfo->bFlushCommands)
++ MRSTLFBEnableVSyncInterrupt(psDevInfo);
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ if (EnableLFBEventNotification(psDevInfo)!= MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't enable framebuffer event notification\n");
++ }
++
++ return MRST_OK;
++}
++
++static MRST_ERROR MRSTLFBDisableSwapChains(MRSTLFB_DEVINFO *psDevInfo)
++{
++ MRST_ERROR eError;
++ unsigned long ulLockFlags;
++
++ eError = DisableLFBEventNotification(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't disable framebuffer event notification\n");
++ }
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ MRSTLFBDisableVSyncInterrupt(psDevInfo);
++
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psDevInfo->sSystemBuffer.sDevVAddr.uiAddr);
++
++ psDevInfo->psSwapChain = NULL;
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return MRST_OK;
++}
++
++
++static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ PVRSRV_SYNC_DATA **ppsSyncData,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_HANDLE *phSwapChain,
++ IMG_UINT32 *pui32SwapChainID)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ MRSTLFB_BUFFER **ppsBuffer;
++ MRSTLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED;
++ unsigned long ulLockFlags;
++ struct drm_device* psDrmDev;
++
++ UNREFERENCED_PARAMETER(ui32OEMFlags);
++
++
++ if(!hDevice
++ || !psDstSurfAttrib
++ || !psSrcSurfAttrib
++ || !ppsSyncData
++ || !phSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++ if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
++ {
++ return (PVRSRV_ERROR_TOOMANYBUFFERS);
++ }
++
++
++
++
++
++ if(psDstSurfAttrib->pixelformat != psDevInfo->sDisplayFormat.pixelformat
++ || psDstSurfAttrib->sDims.ui32ByteStride != psDevInfo->sDisplayDim.ui32ByteStride
++ || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
++ || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
++ {
++
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
++ || psDstSurfAttrib->sDims.ui32ByteStride != psSrcSurfAttrib->sDims.ui32ByteStride
++ || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
++ || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
++ {
++
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++
++ UNREFERENCED_PARAMETER(ui32Flags);
++
++
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_SWAPCHAIN));
++ if(!psSwapChain)
++ {
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ ppsBuffer = (MRSTLFB_BUFFER**)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_BUFFER*) * ui32BufferCount);
++ if(!ppsBuffer)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorFreeSwapChain;
++ }
++
++ psVSyncFlips = (MRSTLFB_VSYNC_FLIP_ITEM *)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_VSYNC_FLIP_ITEM) * ui32BufferCount);
++ if (!psVSyncFlips)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorFreeBuffers;
++ }
++
++ psSwapChain->ulBufferCount = (unsigned long)ui32BufferCount;
++ psSwapChain->ppsBuffer = ppsBuffer;
++ psSwapChain->psVSyncFlips = psVSyncFlips;
++ psSwapChain->ulInsertIndex = 0;
++ psSwapChain->ulRemoveIndex = 0;
++ psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
++ psSwapChain->psSwapChainLock = &psDevInfo->sSwapChainLock;
++
++
++
++ for(i=0; i<ui32BufferCount; i++)
++ {
++ MRSTLFBAllocBuffer(psDevInfo, psDevInfo->sSystemBuffer.ui32BufferSize, &ppsBuffer[i] );
++ ppsBuffer[i]->psSyncData = ppsSyncData[i];
++ }
++
++
++ for(i=0; i<ui32BufferCount-1; i++)
++ {
++ ppsBuffer[i]->psNext = ppsBuffer[i+1];
++ }
++
++ ppsBuffer[i]->psNext = ppsBuffer[0];
++
++
++ for(i=0; i<ui32BufferCount; i++)
++ {
++ psVSyncFlips[i].bValid = MRST_FALSE;
++ psVSyncFlips[i].bFlipped = MRST_FALSE;
++ psVSyncFlips[i].bCmdCompleted = MRST_FALSE;
++ }
++
++
++ psDrmDev = psDevInfo->psDrmDevice;
++
++ psSwapChain->psDevInfo = psDevInfo;
++ psSwapChain->psDrmDev = psDrmDev;
++ psSwapChain->psDrmDriver = psDrmDev->driver;
++ psSwapChain->bBlanked = MRST_FALSE;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ psSwapChain->bFlushCommands = psDevInfo->bFlushCommands;
++
++ if (psSwapChain->bFlushCommands)
++ {
++ psSwapChain->ulSetFlushStateRefCount = 1;
++ }
++ else
++ {
++ psSwapChain->ulSetFlushStateRefCount = 0;
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++
++
++
++
++
++ *phSwapChain = (IMG_HANDLE)psSwapChain;
++ *pui32SwapChainID = ++psDevInfo->ui32SwapChainIdCounter;
++ psDevInfo->psSwapChain = psSwapChain;
++
++ if( psDevInfo->ui32SwapChainNum++ == 0)
++ {
++ MRSTLFBEnableSwapChains( psDevInfo );
++ }
++
++ return (PVRSRV_OK);
++
++
++ MRSTLFBFreeKernelMem(psVSyncFlips);
++ErrorFreeBuffers:
++ MRSTLFBFreeKernelMem(ppsBuffer);
++ErrorFreeSwapChain:
++ MRSTLFBFreeKernelMem(psSwapChain);
++
++ return eError;
++}
++
++static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ int i;
++
++
++ if(!hDevice || !hSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++
++
++ FlushInternalVSyncQueue(psSwapChain);
++
++
++ if(--psDevInfo->ui32SwapChainNum == 0)
++ {
++ MRSTLFBDisableSwapChains(psDevInfo);
++ }
++
++ if( psDevInfo->psSwapChain == psSwapChain )
++ psDevInfo->psSwapChain = IMG_NULL;
++
++
++
++ for(i=0; i< psSwapChain->ulBufferCount; i++)
++ {
++ MRSTLFBFreeBuffer(psDevInfo, &psSwapChain->ppsBuffer[i] );
++ }
++ MRSTLFBFreeKernelMem(psSwapChain->psVSyncFlips);
++ MRSTLFBFreeKernelMem(psSwapChain->ppsBuffer);
++ MRSTLFBFreeKernelMem(psSwapChain);
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(psRect);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(psRect);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(ui32CKColour);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(ui32CKColour);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 *pui32BufferCount,
++ IMG_HANDLE *phBuffer)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ unsigned long i;
++
++
++ if(!hDevice
++ || !hSwapChain
++ || !pui32BufferCount
++ || !phBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++ if (psSwapChain != psDevInfo->psSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++
++ *pui32BufferCount = (IMG_UINT32)psSwapChain->ulBufferCount;
++
++
++ for(i=0; i<psSwapChain->ulBufferCount; i++)
++ {
++ phBuffer[i] = (IMG_HANDLE)psSwapChain->ppsBuffer[i];
++ }
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ UNREFERENCED_PARAMETER(ui32SwapInterval);
++ UNREFERENCED_PARAMETER(hPrivateTag);
++ UNREFERENCED_PARAMETER(psClipRect);
++
++ if(!hDevice
++ || !hBuffer
++ || (ui32ClipRectCount != 0))
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ unsigned long ulLockFlags;
++
++ if(!hDevice || !hSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++ if (psSwapChain != psDevInfo->psSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ FlushInternalVSyncQueue(psSwapChain);
++
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)(psDevInfo->sSystemBuffer.sDevVAddr.uiAddr));
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ return (PVRSRV_OK);
++}
++
++MRST_BOOL MRSTLFBVSyncIHandler(MRSTLFB_SWAPCHAIN *psSwapChain)
++{
++ IMG_BOOL bStatus = IMG_TRUE;
++ MRSTLFB_VSYNC_FLIP_ITEM *psFlipItem;
++ unsigned long ulMaxIndex;
++ unsigned long ulLockFlags;
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ spin_lock_irqsave(psSwapChain->psSwapChainLock, ulLockFlags);
++
++
++ if (psSwapChain->bFlushCommands)
++ {
++ goto ExitUnlock;
++ }
++
++ while(psFlipItem->bValid)
++ {
++
++ if(psFlipItem->bFlipped)
++ {
++
++ if(!psFlipItem->bCmdCompleted)
++ {
++
++ IMG_BOOL bScheduleMISR;
++
++ bScheduleMISR = IMG_TRUE;
++
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, bScheduleMISR);
++
++
++ psFlipItem->bCmdCompleted = MRST_TRUE;
++ }
++
++
++ psFlipItem->ulSwapInterval--;
++
++
++ if(psFlipItem->ulSwapInterval == 0)
++ {
++
++ psSwapChain->ulRemoveIndex++;
++
++ if(psSwapChain->ulRemoveIndex > ulMaxIndex)
++ {
++ psSwapChain->ulRemoveIndex = 0;
++ }
++
++
++ psFlipItem->bCmdCompleted = MRST_FALSE;
++ psFlipItem->bFlipped = MRST_FALSE;
++
++
++ psFlipItem->bValid = MRST_FALSE;
++ }
++ else
++ {
++
++ break;
++ }
++ }
++ else
++ {
++
++ MRSTLFBFlip(psSwapChain->psDevInfo, (unsigned long)psFlipItem->sDevVAddr.uiAddr);
++
++
++ psFlipItem->bFlipped = MRST_TRUE;
++
++
++ break;
++ }
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ }
++
++ExitUnlock:
++ spin_unlock_irqrestore(psSwapChain->psSwapChainLock, ulLockFlags);
++
++ return bStatus;
++}
++
++#if defined(MRST_USING_INTERRUPTS)
++static int
++MRSTLFBVSyncISR(struct drm_device *psDrmDevice, int iPipe)
++{
++ MRSTLFB_DEVINFO *psDevInfo = GetAnchorPtr();
++
++
++ if(!psDevInfo->psSwapChain)
++ {
++ return (IMG_TRUE);
++ }
++
++ (void) MRSTLFBVSyncIHandler(psDevInfo->psSwapChain);
++ return 0;
++}
++#endif
++
++#if defined(MRST_USING_INTERRUPTS)
++static IMG_BOOL
++MRSTLFBISRHandler(IMG_VOID* pvDevInfo)
++{
++ MRSTLFB_DEVINFO *psDevInfo = (MRSTLFB_DEVINFO *)pvDevInfo;
++#if 0
++#ifdef MRST_USING_INTERRUPTS
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++#endif
++#endif
++ unsigned long vdc_stat;
++ struct drm_psb_private *dev_priv;
++#if defined(SUPPORT_DRI_DRM)
++ uint32_t pipea_stat = 0;
++#endif
++
++ if (!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but Display HW is power off\n");
++ return IMG_FALSE;
++ }
++
++#if defined(SUPPORT_DRI_DRM)
++ dev_priv = (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ //write back to clear all interrupt status bits and reset interrupts.
++ PSB_WVDC32(pipea_stat, PIPEASTAT);
++
++ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++ vdc_stat &= dev_priv->vdc_irq_mask;
++ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
++ {
++ drm_handle_vblank(psDevInfo->psDrmDevice, 0);
++ }
++#endif
++
++/* Use drm_handle_vblank() as the VSync handler, otherwise kernel would panic if handle
++ * the VSync event again. */
++#if 0
++#ifdef MRST_USING_INTERRUPTS
++
++ psSwapChain = psDevInfo->psSwapChain;
++ vdc_stat = MRSTLFBVSyncReadReg(psDevInfo, PSB_INT_IDENTITY_R);
++
++ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
++ {
++ if(!psDevInfo->psSwapChain)
++ {
++ psSwapChain = psDevInfo->psSwapChain;
++ (void) MRSTLFBVSyncIHandler(psSwapChain);
++ }
++ }
++#endif
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++ vdc_stat &= dev_priv->vdc_irq_mask;
++ if (vdc_stat & _PSB_DPST_PIPEA_FLAG) {
++
++ /* Check for DPST related interrupts */
++ if((pipea_stat & PIPE_DPST_EVENT_STATUS) &&
++ (dev_priv->psb_dpst_state != NULL)) {
++ uint32_t pwm_reg = 0;
++ uint32_t hist_reg = 0;
++ u32 irqCtrl = 0;
++ struct dpst_guardband guardband_reg;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ /* Determine if this is histogram or pwm interrupt */
++ if(hist_reg & HISTOGRAM_INT_CTRL_CLEAR) {
++ /* Notify UM of histogram interrupt */
++ psb_dpst_notify_change_um(DPST_EVENT_HIST_INTERRUPT,
++ dev_priv->psb_dpst_state);
++
++ /* disable dpst interrupts */
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 0;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_histogram_enable = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ irqCtrl &= ~PIPE_DPST_EVENT_ENABLE;
++ PSB_WVDC32(irqCtrl, PIPEASTAT);
++ }
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ if((pwm_reg & PWM_PHASEIN_INT_ENABLE) &&
++ !(pwm_reg & PWM_PHASEIN_ENABLE)) {
++ /* Notify UM of the phase complete */
++ psb_dpst_notify_change_um(DPST_EVENT_PHASE_COMPLETE,
++ dev_priv->psb_dpst_state);
++
++ /* Temporarily get phase mngr ready to generate
++ * another interrupt until this can be moved to
++ * user mode */
++ /* PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
++ PWM_CONTROL_LOGIC); */
++ }
++ }
++ }
++#endif
++ return IMG_TRUE;
++}
++#endif
++
++static IMG_BOOL ProcessFlip(IMG_HANDLE hCmdCookie,
++ IMG_UINT32 ui32DataSize,
++ IMG_VOID *pvData)
++{
++ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_BUFFER *psBuffer;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++#if 0//defined(MRST_USING_INTERRUPTS)
++ MRSTLFB_VSYNC_FLIP_ITEM* psFlipItem;
++#endif
++ unsigned long ulLockFlags;
++
++
++ if(!hCmdCookie || !pvData)
++ {
++ return IMG_FALSE;
++ }
++
++
++ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;
++
++ if (psFlipCmd == IMG_NULL || sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
++ {
++ return IMG_FALSE;
++ }
++
++
++ psDevInfo = (MRSTLFB_DEVINFO*)psFlipCmd->hExtDevice;
++
++ psBuffer = (MRSTLFB_BUFFER*)psFlipCmd->hExtBuffer;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*) psFlipCmd->hExtSwapChain;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++
++ if (psDevInfo->bDeviceSuspended)
++ {
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++ goto ExitTrueUnlock;
++ }
++
++#if 0 //defined(MRST_USING_INTERRUPTS)
++
++ if(psFlipCmd->ui32SwapInterval == 0 || psSwapChain->bFlushCommands == MRST_TRUE || psBuffer == &psDevInfo->sSystemBuffer)
++ {
++#endif
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psBuffer->sDevVAddr.uiAddr);
++
++
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++
++#if 0 //defined(MRST_USING_INTERRUPTS)
++ goto ExitTrueUnlock;
++ }
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulInsertIndex];
++
++
++ if(psFlipItem->bValid == MRST_FALSE)
++ {
++ unsigned long ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ if(psSwapChain->ulInsertIndex == psSwapChain->ulRemoveIndex)
++ {
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psBuffer->sDevVAddr.uiAddr);
++
++ psFlipItem->bFlipped = MRST_TRUE;
++ }
++ else
++ {
++ psFlipItem->bFlipped = MRST_FALSE;
++ }
++
++ psFlipItem->hCmdComplete = (MRST_HANDLE)hCmdCookie;
++ psFlipItem->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
++ psFlipItem->sDevVAddr = psBuffer->sDevVAddr;
++ psFlipItem->bValid = MRST_TRUE;
++
++ psSwapChain->ulInsertIndex++;
++ if(psSwapChain->ulInsertIndex > ulMaxIndex)
++ {
++ psSwapChain->ulInsertIndex = 0;
++ }
++
++ goto ExitTrueUnlock;
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return IMG_FALSE;
++#endif
++
++ExitTrueUnlock:
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return IMG_TRUE;
++}
++
++
++#if defined(PVR_MRST_FB_SET_PAR_ON_INIT)
++static void MRSTFBSetPar(struct fb_info *psLINFBInfo)
++{
++ acquire_console_sem();
++
++ if (psLINFBInfo->fbops->fb_set_par != NULL)
++ {
++ int res;
++
++ res = psLINFBInfo->fbops->fb_set_par(psLINFBInfo);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_set_par failed: %d\n", res);
++
++ }
++ }
++ else
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_set_par not set - HW cursor may not work\n");
++ }
++
++ release_console_sem();
++}
++#endif
++
++
++static int MRSTLFBHandleChangeFB(struct drm_device* dev, struct psb_framebuffer *psbfb)
++{
++ MRSTLFB_DEVINFO *psDevInfo = GetAnchorPtr();
++ int i;
++ struct drm_psb_private * dev_priv;
++ struct psb_gtt * pg;
++
++ if( !psDevInfo->sSystemBuffer.bIsContiguous )
++ MRSTLFBFreeKernelMem( psDevInfo->sSystemBuffer.uSysAddr.psNonCont );
++
++ dev_priv = (struct drm_psb_private *)dev->dev_private;
++ pg = dev_priv->pg;
++
++
++ psDevInfo->sDisplayDim.ui32ByteStride = psbfb->base.pitch;
++ psDevInfo->sDisplayDim.ui32Width = psbfb->base.width;
++ psDevInfo->sDisplayDim.ui32Height = psbfb->base.height;
++
++ psDevInfo->sSystemBuffer.ui32BufferSize = psbfb->size;
++ //psDevInfo->sSystemBuffer.sCPUVAddr = psbfb->pvKMAddr;
++ psDevInfo->sSystemBuffer.sCPUVAddr = pg->vram_addr;
++ //psDevInfo->sSystemBuffer.sDevVAddr.uiAddr = psbfb->offsetGTT;
++ psDevInfo->sSystemBuffer.sDevVAddr.uiAddr = 0;
++ psDevInfo->sSystemBuffer.bIsAllocated = IMG_FALSE;
++
++ if(psbfb->bo )
++ {
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_FALSE;
++ psDevInfo->sSystemBuffer.uSysAddr.psNonCont = MRSTLFBAllocKernelMem( sizeof( IMG_SYS_PHYADDR ) * psbfb->bo->ttm->num_pages);
++ for(i = 0;i < psbfb->bo->ttm->num_pages;++i)
++ {
++ struct page *p = ttm_tt_get_page( psbfb->bo->ttm, i);
++ psDevInfo->sSystemBuffer.uSysAddr.psNonCont[i].uiAddr = page_to_pfn(p) << PAGE_SHIFT;
++
++ }
++ }
++ else
++ {
++
++ //struct drm_device * psDrmDevice = psDevInfo->psDrmDevice;
++ //struct drm_psb_private * dev_priv = (struct drm_psb_private *)psDrmDevice->dev_private;
++ //struct psb_gtt * pg = dev_priv->pg;
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_TRUE;
++ psDevInfo->sSystemBuffer.uSysAddr.sCont.uiAddr = pg->stolen_base;
++ }
++
++ return 0;
++}
++
++static int MRSTLFBFindMainPipe(struct drm_device *dev) {
++ struct drm_crtc *crtc;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++ {
++ if ( drm_helper_crtc_in_use(crtc) )
++ {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ return psb_intel_crtc->pipe;
++ }
++ }
++
++ return 0;
++}
++
++static MRST_ERROR InitDev(MRSTLFB_DEVINFO *psDevInfo)
++{
++ MRST_ERROR eError = MRST_ERROR_GENERIC;
++ struct fb_info *psLINFBInfo;
++ struct drm_device * psDrmDevice = psDevInfo->psDrmDevice;
++ struct drm_framebuffer * psDrmFB;
++ struct psb_framebuffer *psbfb;
++
++
++ int hdisplay;
++ int vdisplay;
++
++ unsigned long FBSize;
++
++ psDrmFB = list_first_entry(&psDrmDevice->mode_config.fb_kernel_list,
++ struct drm_framebuffer,
++ filp_head);
++ if(!psDrmFB) {
++ printk(KERN_INFO"%s:Cannot find drm FB", __FUNCTION__);
++ return eError;
++ }
++ psbfb = to_psb_fb(psDrmFB);
++
++ hdisplay = psDrmFB->width;
++ vdisplay = psDrmFB->height;
++ FBSize = psDrmFB->pitch * psDrmFB->height;
++
++ psLINFBInfo = (struct fb_info*)psDrmFB->fbdev;
++
++#if defined(PVR_MRST_FB_SET_PAR_ON_INIT)
++ MRSTFBSetPar(psLINFBInfo);
++#endif
++
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_TRUE;
++ psDevInfo->sSystemBuffer.bIsAllocated = IMG_FALSE;
++
++ MRSTLFBHandleChangeFB(psDrmDevice, psbfb);
++
++
++ psDevInfo->sDisplayFormat.pixelformat = PVRSRV_PIXEL_FORMAT_ARGB8888;
++ psDevInfo->psLINFBInfo = psLINFBInfo;
++
++
++ psDevInfo->ui32MainPipe = MRSTLFBFindMainPipe(psDevInfo->psDrmDevice);
++
++
++
++
++ psDevInfo->pvRegs = psbfb_vdc_reg(psDevInfo->psDrmDevice);
++
++ if (psDevInfo->pvRegs == NULL)
++ {
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't map registers needed for flipping\n");
++ return eError;
++ }
++
++ return MRST_OK;
++}
++
++static void DeInitDev(MRSTLFB_DEVINFO *psDevInfo)
++{
++
++}
++
++MRST_ERROR MRSTLFBInit(struct drm_device * dev)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ //struct drm_psb_private *psDrmPriv = (struct drm_psb_private *)dev->dev_private;
++
++ psDevInfo = GetAnchorPtr();
++
++ if (psDevInfo == NULL)
++ {
++ PFN_CMD_PROC pfnCmdProcList[MRSTLFB_COMMAND_COUNT];
++ IMG_UINT32 aui32SyncCountList[MRSTLFB_COMMAND_COUNT][2];
++
++ psDevInfo = (MRSTLFB_DEVINFO *)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_DEVINFO));
++
++ if(!psDevInfo)
++ {
++ return (MRST_ERROR_OUT_OF_MEMORY);
++ }
++
++
++ memset(psDevInfo, 0, sizeof(MRSTLFB_DEVINFO));
++
++
++ SetAnchorPtr((void*)psDevInfo);
++
++ psDevInfo->psDrmDevice = dev;
++ psDevInfo->ulRefCount = 0;
++
++
++ if(InitDev(psDevInfo) != MRST_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++ if(MRSTLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &pfnGetPVRJTable) != MRST_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++
++ if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++
++ spin_lock_init(&psDevInfo->sSwapChainLock);
++
++ psDevInfo->psSwapChain = 0;
++ psDevInfo->bFlushCommands = MRST_FALSE;
++ psDevInfo->bDeviceSuspended = MRST_FALSE;
++
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = 3;
++ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 2;
++ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
++ psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
++
++ strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
++
++
++
++
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Maximum number of swap chain buffers: %lu\n",
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
++
++
++
++
++ psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
++ psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
++ psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
++ psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
++ psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
++ psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
++ psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
++ psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
++ psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
++ psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
++ psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
++ psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
++ psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
++ psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
++ psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
++ psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
++ psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
++ psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
++
++
++ if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice (
++ &psDevInfo->sDCJTable,
++ &psDevInfo->ulDeviceID ) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_DEVICE_REGISTER_FAILED);
++ }
++
++ printk("Device ID: %lu\n", psDevInfo->ulDeviceID);
++
++#if defined (SYS_USING_INTERRUPTS)
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterSystemISRHandler(MRSTLFBISRHandler,
++ psDevInfo,
++ 0,
++ (IMG_UINT32)psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX "ISR Installation failed\n"));
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++#endif
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice((IMG_UINT32)psDevInfo->ulDeviceID,
++ MRSTLFBPrePowerState, MRSTLFBPostPowerState,
++ IMG_NULL, IMG_NULL,
++ psDevInfo,
++ PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_ON) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++
++
++
++
++
++
++
++
++
++
++#if defined (MRST_USING_INTERRUPTS)
++
++ if(MRSTLFBInstallVSyncISR(psDevInfo,MRSTLFBVSyncISR) != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX "ISR Installation failed\n"));
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++#endif
++
++
++ pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
++
++
++ aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
++ aui32SyncCountList[DC_FLIP_COMMAND][1] = 2;
++
++
++
++
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList (psDevInfo->ulDeviceID,
++ &pfnCmdProcList[0],
++ aui32SyncCountList,
++ MRSTLFB_COMMAND_COUNT) != PVRSRV_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Can't register callback\n");
++ return (MRST_ERROR_CANT_REGISTER_CALLBACK);
++ }
++
++
++ }
++
++
++ //psDrmPriv->psb_change_fb_handler = MRSTLFBHandleChangeFB;
++
++
++ psDevInfo->ulRefCount++;
++
++
++ return (MRST_OK);
++}
++
++MRST_ERROR MRSTLFBDeinit(void)
++{
++ MRSTLFB_DEVINFO *psDevInfo, *psDevFirst;
++
++ psDevFirst = GetAnchorPtr();
++ psDevInfo = psDevFirst;
++
++
++ if (psDevInfo == NULL)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++
++ psDevInfo->ulRefCount--;
++
++ psDevInfo->psDrmDevice = NULL;
++ if (psDevInfo->ulRefCount == 0)
++ {
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable = &psDevInfo->sPVRJTable;
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRemoveCmdProcList (psDevInfo->ulDeviceID, MRSTLFB_COMMAND_COUNT) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice((IMG_UINT32)psDevInfo->ulDeviceID,
++ IMG_NULL, IMG_NULL,
++ IMG_NULL, IMG_NULL, IMG_NULL,
++ PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_ON) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++#if defined (SYS_USING_INTERRUPTS)
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterSystemISRHandler(IMG_NULL, IMG_NULL, 0,
++ (IMG_UINT32)psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++#endif
++
++#if defined (MRST_USING_INTERRUPTS)
++
++ if(MRSTLFBUninstallVSyncISR(psDevInfo) != MRST_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++#endif
++
++ if (psJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++ DeInitDev(psDevInfo);
++
++
++ MRSTLFBFreeKernelMem(psDevInfo);
++ }
++
++
++ SetAnchorPtr(NULL);
++
++
++ return (MRST_OK);
++}
++
++
++/*
++ * save_display_registers
++ *
++ * Description: We are going to suspend so save current display
++ * register state.
++ */
++static void save_display_registers(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int i;
++
++ /* Display arbitration control + watermarks */
++ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
++ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
++ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
++ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
++ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
++ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
++ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
++ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
++
++ /* Pipe & plane A info */
++ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
++ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
++ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
++ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
++ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
++ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
++ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
++ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
++ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
++ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
++ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
++ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
++ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
++ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
++ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
++ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
++ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
++ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
++
++ /*save cursor regs*/
++ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
++ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
++ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
++
++ /*save palette (gamma) */
++ for (i = 0; i < 256; i++)
++ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i<<2));
++
++ /*save performance state*/
++ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
++
++ /* LVDS state */
++ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
++ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
++ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
++ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
++ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
++ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
++ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
++ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
++ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
++ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
++
++ /* HW overlay */
++ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
++ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
++ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
++ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
++ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
++ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
++ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
++
++ /* MIPI DSI */
++ dev_priv->saveMIPI = PSB_RVDC32(MIPI);
++ dev_priv->saveDEVICE_READY_REG = PSB_RVDC32(DEVICE_READY_REG);
++ dev_priv->saveINTR_EN_REG = PSB_RVDC32(INTR_EN_REG);
++ dev_priv->saveDSI_FUNC_PRG_REG = PSB_RVDC32(DSI_FUNC_PRG_REG);
++ dev_priv->saveHS_TX_TIMEOUT_REG = PSB_RVDC32(HS_TX_TIMEOUT_REG);
++ dev_priv->saveLP_RX_TIMEOUT_REG = PSB_RVDC32(LP_RX_TIMEOUT_REG);
++ dev_priv->saveTURN_AROUND_TIMEOUT_REG =
++ PSB_RVDC32(TURN_AROUND_TIMEOUT_REG);
++ dev_priv->saveDEVICE_RESET_REG = PSB_RVDC32(DEVICE_RESET_REG);
++ dev_priv->saveDPI_RESOLUTION_REG =
++ PSB_RVDC32(DPI_RESOLUTION_REG);
++ dev_priv->saveHORIZ_SYNC_PAD_COUNT_REG =
++ PSB_RVDC32(HORIZ_SYNC_PAD_COUNT_REG);
++ dev_priv->saveHORIZ_BACK_PORCH_COUNT_REG =
++ PSB_RVDC32(HORIZ_BACK_PORCH_COUNT_REG);
++ dev_priv->saveHORIZ_FRONT_PORCH_COUNT_REG =
++ PSB_RVDC32(HORIZ_FRONT_PORCH_COUNT_REG);
++ dev_priv->saveHORIZ_ACTIVE_AREA_COUNT_REG =
++ PSB_RVDC32(HORIZ_ACTIVE_AREA_COUNT_REG);
++ dev_priv->saveVERT_SYNC_PAD_COUNT_REG =
++ PSB_RVDC32(VERT_SYNC_PAD_COUNT_REG);
++ dev_priv->saveVERT_BACK_PORCH_COUNT_REG =
++ PSB_RVDC32(VERT_BACK_PORCH_COUNT_REG);
++ dev_priv->saveVERT_FRONT_PORCH_COUNT_REG =
++ PSB_RVDC32(VERT_FRONT_PORCH_COUNT_REG);
++ dev_priv->saveHIGH_LOW_SWITCH_COUNT_REG =
++ PSB_RVDC32(HIGH_LOW_SWITCH_COUNT_REG);
++ dev_priv->saveINIT_COUNT_REG = PSB_RVDC32(INIT_COUNT_REG);
++ dev_priv->saveMAX_RET_PAK_REG = PSB_RVDC32(MAX_RET_PAK_REG);
++ dev_priv->saveVIDEO_FMT_REG = PSB_RVDC32(VIDEO_FMT_REG);
++ dev_priv->saveEOT_DISABLE_REG = PSB_RVDC32(EOT_DISABLE_REG);
++ dev_priv->saveLP_BYTECLK_REG = PSB_RVDC32(LP_BYTECLK_REG);
++ dev_priv->saveHS_LS_DBI_ENABLE_REG =
++ PSB_RVDC32(HS_LS_DBI_ENABLE_REG);
++ dev_priv->saveTXCLKESC_REG = PSB_RVDC32(TXCLKESC_REG);
++ dev_priv->saveDPHY_PARAM_REG = PSB_RVDC32(DPHY_PARAM_REG);
++ dev_priv->saveMIPI_CONTROL_REG = PSB_RVDC32(MIPI_CONTROL_REG);
++
++ /* DPST registers */
++ dev_priv->saveHISTOGRAM_INT_CONTROL_REG = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++}
++
++
++/*
++ * restore_display_registers
++ *
++ * Description: We are going to resume so restore display register state.
++ */
++static void restore_display_registers(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long i, pp_stat;
++
++ /* Display arbitration + watermarks */
++ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
++ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
++ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
++ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
++ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
++ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
++ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
++ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
++
++ /*make sure VGA plane is off. it initializes to on after reset!*/
++ PSB_WVDC32(0x80000000, VGACNTRL);
++
++ /* set the plls */
++ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
++ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
++ /* Actually enable it */
++ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
++ DRM_UDELAY(150);
++
++ /* Restore mode */
++ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
++ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
++ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
++ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
++ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
++ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
++ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
++ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
++
++ /*restore performance mode*/
++ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
++
++ /*enable the pipe*/
++ if (dev_priv->iLVDS_enable)
++ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
++
++ /* set up MIPI */
++ PSB_WVDC32(dev_priv->saveINTR_EN_REG, INTR_EN_REG);
++ PSB_WVDC32(dev_priv->saveDSI_FUNC_PRG_REG, DSI_FUNC_PRG_REG);
++ PSB_WVDC32(dev_priv->saveHS_TX_TIMEOUT_REG, HS_TX_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveLP_RX_TIMEOUT_REG, LP_RX_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveTURN_AROUND_TIMEOUT_REG,
++ TURN_AROUND_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveDEVICE_RESET_REG, DEVICE_RESET_REG);
++ PSB_WVDC32(dev_priv->saveDPI_RESOLUTION_REG,
++ DPI_RESOLUTION_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_SYNC_PAD_COUNT_REG,
++ HORIZ_SYNC_PAD_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_BACK_PORCH_COUNT_REG,
++ HORIZ_BACK_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_FRONT_PORCH_COUNT_REG,
++ HORIZ_FRONT_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_ACTIVE_AREA_COUNT_REG,
++ HORIZ_ACTIVE_AREA_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_SYNC_PAD_COUNT_REG,
++ VERT_SYNC_PAD_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_BACK_PORCH_COUNT_REG,
++ VERT_BACK_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_FRONT_PORCH_COUNT_REG,
++ VERT_FRONT_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHIGH_LOW_SWITCH_COUNT_REG,
++ HIGH_LOW_SWITCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveINIT_COUNT_REG, INIT_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveMAX_RET_PAK_REG, MAX_RET_PAK_REG);
++ PSB_WVDC32(dev_priv->saveVIDEO_FMT_REG, VIDEO_FMT_REG);
++ PSB_WVDC32(dev_priv->saveEOT_DISABLE_REG, EOT_DISABLE_REG);
++ PSB_WVDC32(dev_priv->saveLP_BYTECLK_REG, LP_BYTECLK_REG);
++ PSB_WVDC32(dev_priv->saveHS_LS_DBI_ENABLE_REG,
++ HS_LS_DBI_ENABLE_REG);
++ PSB_WVDC32(dev_priv->saveTXCLKESC_REG, TXCLKESC_REG);
++ PSB_WVDC32(dev_priv->saveDPHY_PARAM_REG, DPHY_PARAM_REG);
++ PSB_WVDC32(dev_priv->saveMIPI_CONTROL_REG, MIPI_CONTROL_REG);
++
++ /*set up the plane*/
++ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
++ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
++ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
++
++ /* Enable the plane */
++ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
++ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
++
++ /*Enable Cursor A*/
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
++
++ /* restore palette (gamma) */
++ /*DRM_UDELAY(50000); */
++ for (i = 0; i < 256; i++)
++ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i<<2));
++
++ if (dev_priv->iLVDS_enable) {
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
++ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
++ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
++ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
++ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
++ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
++ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
++ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
++ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
++ } else { /* enable MIPI */
++ PSB_WVDC32(MIPI_PORT_EN | MIPI_BORDER_EN, MIPI); /*force on port*/
++ PSB_WVDC32(1, DEVICE_READY_REG);/* force on to re-program */
++ dev_priv->init_drvIC(dev);
++ PSB_WVDC32(dev_priv->saveMIPI, MIPI); /*port 61190h*/
++ PSB_WVDC32(dev_priv->saveDEVICE_READY_REG, DEVICE_READY_REG);
++ if (dev_priv->saveDEVICE_READY_REG)
++ PSB_WVDC32(DPI_TURN_ON, DPI_CONTROL_REG);
++ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
++ }
++
++ /*wait for cycle delay*/
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x08000000);
++
++ DRM_UDELAY(999);
++ /*wait for panel power up*/
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x10000000);
++
++ /* restore HW overlay */
++ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
++
++ /* DPST registers */
++ PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG, HISTOGRAM_INT_CONTROL);
++ PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG, HISTOGRAM_LOGIC_CONTROL);
++}
++
++MRST_ERROR MRSTLFBAllocBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, IMG_UINT32 ui32Size, MRSTLFB_BUFFER **ppBuffer)
++{
++ IMG_VOID *pvBuf;
++ IMG_UINT32 ulPagesNumber;
++ IMG_UINT32 ulCounter;
++ int i;
++
++ pvBuf = __vmalloc( ui32Size, GFP_KERNEL | __GFP_HIGHMEM, __pgprot((pgprot_val(PAGE_KERNEL ) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) );
++ if( pvBuf == NULL )
++ {
++ return MRST_ERROR_OUT_OF_MEMORY;
++ }
++
++ ulPagesNumber = (ui32Size + PAGE_SIZE -1) / PAGE_SIZE;
++
++ *ppBuffer = MRSTLFBAllocKernelMem( sizeof( MRSTLFB_BUFFER ) );
++ (*ppBuffer)->sCPUVAddr = pvBuf;
++ (*ppBuffer)->ui32BufferSize = ui32Size;
++ (*ppBuffer)->uSysAddr.psNonCont = MRSTLFBAllocKernelMem( sizeof( IMG_SYS_PHYADDR ) * ulPagesNumber);
++ (*ppBuffer)->bIsAllocated = IMG_TRUE;
++ (*ppBuffer)->bIsContiguous = IMG_FALSE;
++ (*ppBuffer)->ui32OwnerTaskID = task_tgid_nr(current);
++
++ i = 0;
++ for(ulCounter = 0; ulCounter < ui32Size; ulCounter += PAGE_SIZE)
++ {
++ (*ppBuffer)->uSysAddr.psNonCont[i++].uiAddr = vmalloc_to_pfn( pvBuf + ulCounter ) << PAGE_SHIFT;
++ }
++
++ psb_gtt_map_pvr_memory( psDevInfo->psDrmDevice,
++ (unsigned int)*ppBuffer,
++ (*ppBuffer)->ui32OwnerTaskID,
++ (IMG_CPU_PHYADDR*) (*ppBuffer)->uSysAddr.psNonCont,
++ ulPagesNumber,
++ (unsigned int *)&(*ppBuffer)->sDevVAddr.uiAddr );
++
++ (*ppBuffer)->sDevVAddr.uiAddr <<= PAGE_SHIFT;
++
++ return MRST_OK;
++}
++
++MRST_ERROR MRSTLFBFreeBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, MRSTLFB_BUFFER **ppBuffer)
++{
++ if( !(*ppBuffer)->bIsAllocated )
++ return MRST_ERROR_INVALID_PARAMS;
++
++ psb_gtt_unmap_pvr_memory( psDevInfo->psDrmDevice,
++ (unsigned int)*ppBuffer,
++ (*ppBuffer)->ui32OwnerTaskID);
++
++ vfree( (*ppBuffer)->sCPUVAddr );
++
++ MRSTLFBFreeKernelMem( (*ppBuffer)->uSysAddr.psNonCont );
++
++ MRSTLFBFreeKernelMem( *ppBuffer);
++
++ *ppBuffer = NULL;
++
++ return MRST_OK;
++}
++
++
++
++PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ MRSTLFB_DEVINFO* psDevInfo = (MRSTLFB_DEVINFO *)hDevHandle;
++ struct drm_device* dev = psDevInfo->psDrmDevice;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int pp_stat, ret;
++
++ if ((eNewPowerState == eCurrentPowerState) ||
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON))
++ return PVRSRV_OK;
++
++ save_display_registers(dev);
++
++ if (dev_priv->iLVDS_enable) {
++ /*shutdown the panel*/
++ PSB_WVDC32(0, PP_CONTROL);
++
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x80000000);
++
++ /*turn off the plane*/
++ PSB_WVDC32(0x58000000, DSPACNTR);
++ PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
++ msleep(4);
++
++ /*turn off pipe*/
++ PSB_WVDC32(0x0, PIPEACONF);
++ msleep(8);
++
++ /*turn off PLLs*/
++ PSB_WVDC32(0, MRST_DPLL_A);
++ } else {
++ PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
++ PSB_WVDC32(0x0, PIPEACONF);
++ PSB_WVDC32(0x2faf0000, BLC_PWM_CTL);
++ while (REG_READ(0x70008) & 0x40000000);
++ while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY);
++ PSB_WVDC32(0, DEVICE_READY_REG);
++
++ /* turn off mipi panel power */
++ ret = lnw_ipc_single_cmd(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_OFF, 0, 0);
++ if (ret)
++ printk(KERN_WARNING "IPC 0xE9 failed to turn off pnl pwr. Error is: %x\n", ret);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ MRSTLFB_DEVINFO* psDevInfo = (MRSTLFB_DEVINFO *)hDevHandle;
++ struct drm_device* dev = psDevInfo->psDrmDevice;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int ret;
++
++ if ((eNewPowerState == eCurrentPowerState) ||
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ return PVRSRV_OK;
++
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ /* Don't reinitialize the GTT as it is unnecessary. The gtt is
++ * stored in memory so it will automatically be restored. All
++ * we need to do is restore the PGETBL_CTL which we already do
++ * above.
++ */
++ /*psb_gtt_init(dev_priv->pg, 1);*/
++
++ if (!dev_priv->iLVDS_enable) {
++ /* turn on mipi panel power */
++ ret = lnw_ipc_single_cmd(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_ON, 0, 0);
++ if (ret)
++ printk(KERN_WARNING "IPC 0xE9 failed to turn on pnl pwr. Error is: %x\n", ret);
++ msleep(2000); /* wait 2 seconds */
++ }
++
++ restore_display_registers(dev);
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c
+new file mode 100644
+index 0000000..6001a9c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c
+@@ -0,0 +1,206 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++
++#include <linux/pci.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++
++#include <drm/drmP.h>
++
++#include <asm/io.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "pvrmodule.h"
++#include "pvr_drm.h"
++#include "mrstlfb.h"
++#include "kerneldisplay.h"
++#include "sysirq.h"
++
++#include "psb_drv.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++#error "SUPPORT_DRI_DRM must be set"
++#endif
++
++#define MAKESTRING(x) # x
++
++#if !defined(DISPLAY_CONTROLLER)
++#define DISPLAY_CONTROLLER pvrlfb
++#endif
++
++//#define MAKENAME_HELPER(x, y) x ## y
++//#define MAKENAME2(x, y) MAKENAME_HELPER(x, y)
++//#define MAKENAME(x) MAKENAME2(DISPLAY_CONTROLLER, x)
++
++#define unref__ __attribute__ ((unused))
++
++
++extern int fb_idx;
++
++void *MRSTLFBAllocKernelMem(unsigned long ulSize)
++{
++ return kmalloc(ulSize, GFP_KERNEL);
++}
++
++void MRSTLFBFreeKernelMem(void *pvMem)
++{
++ kfree(pvMem);
++}
++
++
++MRST_ERROR MRSTLFBGetLibFuncAddr (char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
++{
++ if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
++ {
++ return (MRST_ERROR_INVALID_PARAMS);
++ }
++
++
++ *ppfnFuncTable = PVRGetDisplayClassJTable;
++
++ return (MRST_OK);
++}
++
++static void MRSTLFBVSyncWriteReg(MRSTLFB_DEVINFO *psDevInfo, unsigned long ulOffset, unsigned long ulValue)
++{
++
++ void *pvRegAddr = (void *)(psDevInfo->pvRegs + ulOffset);
++ mb();
++ iowrite32(ulValue, pvRegAddr);
++}
++
++unsigned long MRSTLFBVSyncReadReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset)
++{
++ mb();
++ return ioread32((char *)psDevinfo->pvRegs + ulOffset);
++}
++
++void MRSTLFBEnableVSyncInterrupt(MRSTLFB_DEVINFO * psDevinfo)
++{
++#if defined(MRST_USING_INTERRUPTS)
++
++#if defined(SUPPORT_DRI_DRM)
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) psDevinfo->psDrmDevice->dev_private;
++ dev_priv->vblanksEnabledForFlips = true;
++ sysirq_enable_vblank(psDevinfo->psDrmDevice, 0);
++
++#else
++
++ unsigned long vdc_irq_mask;
++
++ vdc_irq_mask = ~MRSTLFBVSyncReadReg( psDevinfo, PSB_INT_MASK_R);
++ vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++
++ MRSTLFBVSyncWriteReg(psDevinfo, PSB_INT_MASK_R, ~vdc_irq_mask);
++ MRSTLFBVSyncWriteReg(psDevinfo, PSB_INT_ENABLE_R, vdc_irq_mask);
++
++ {
++ unsigned int writeVal = MRSTLFBVSyncReadReg(psDevinfo, PIPEASTAT);
++ unsigned int mask = PIPE_START_VBLANK_INTERRUPT_ENABLE | PIPE_VBLANK_INTERRUPT_ENABLE;
++
++ writeVal |= (mask | (mask >> 16));
++ MRSTLFBVSyncWriteReg(psDevinfo, PIPEASTAT, writeVal);
++ MRSTLFBVSyncReadReg(psDevinfo, PIPEASTAT);
++ }
++#endif
++#endif
++}
++
++void MRSTLFBDisableVSyncInterrupt(MRSTLFB_DEVINFO * psDevinfo)
++{
++#if defined(MRST_USING_INTERRUPTS)
++ struct drm_device * dev = psDevinfo->psDrmDevice;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) psDevinfo->psDrmDevice->dev_private;
++ dev_priv->vblanksEnabledForFlips = false;
++ //Only turn off if DRM isn't currently using vblanks, otherwise, leave on.
++ if (!dev->vblank_enabled[0])
++ sysirq_disable_vblank(psDevinfo->psDrmDevice, 0);
++#endif
++}
++
++#if defined(MRST_USING_INTERRUPTS)
++MRST_ERROR MRSTLFBInstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo, MRSTLFB_VSYNC_ISR_PFN pVsyncHandler)
++{
++ //struct drm_psb_private *dev_priv =
++ // (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++ //dev_priv->psb_vsync_handler = pVsyncHandler;
++ return (MRST_OK);
++}
++
++
++MRST_ERROR MRSTLFBUninstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo)
++{
++ //struct drm_psb_private *dev_priv =
++ // (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++ //dev_priv->psb_vsync_handler = NULL;
++ return (MRST_OK);
++}
++#endif
++
++
++void MRSTLFBFlip(MRSTLFB_DEVINFO *psDevInfo, unsigned long uiAddr)
++{
++ int dspbase = (psDevInfo->ui32MainPipe == 0 ? DSPABASE : DSPBBASE);
++ int dspsurf = (psDevInfo->ui32MainPipe == 0 ? DSPASURF : DSPBSURF);
++
++ if (IS_MRST(psDevInfo->psDrmDevice)) {
++ MRSTLFBVSyncWriteReg(psDevInfo, dspsurf, uiAddr);
++ } else {
++ MRSTLFBVSyncWriteReg(psDevInfo, dspbase, uiAddr);
++ }
++}
++
++
++int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device unref__ *dev)
++{
++ if(MRSTLFBInit(dev) != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": MRSTLFB_Init: MRSTLFBInit failed\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device unref__ *dev)
++{
++ if(MRSTLFBDeinit() != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX "%s: can't deinit device\n", __FUNCTION__);
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h b/drivers/gpu/drm/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h
+new file mode 100644
+index 0000000..573d9b9
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h
+@@ -0,0 +1,54 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_SHARED_H__)
++#define __PVR_DRM_SHARED_H__
++
++#if defined(SUPPORT_DRI_DRM)
++
++#define PVR_DRM_SRVKM_CMD 0x12
++#define PVR_DRM_DISP_CMD 0x13
++#define PVR_DRM_BC_CMD 0x14
++#define PVR_DRM_IS_MASTER_CMD 0x15
++#define PVR_DRM_UNPRIV_CMD 0x16
++#define PVR_DRM_DBGDRV_CMD 0x1E
++
++#define PVR_DRM_UNPRIV_INIT_SUCCESFUL 0
++#define PVR_DRM_UNPRIV_BUSID_TYPE 1
++#define PVR_DRM_UNPRIV_BUSID_FIELD 2
++
++#define PVR_DRM_BUS_TYPE_PCI 0
++
++#define PVR_DRM_PCI_DOMAIN 0
++#define PVR_DRM_PCI_BUS 1
++#define PVR_DRM_PCI_DEV 2
++#define PVR_DRM_PCI_FUNC 3
++
++#endif
++
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/kernelbuffer.h b/drivers/gpu/drm/mrst/pvr/services4/include/kernelbuffer.h
+new file mode 100644
+index 0000000..33aa49c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/kernelbuffer.h
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELBUFFER_H__)
++#define __KERNELBUFFER_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_BC_DEVICE)(IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_BC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_GET_BC_INFO)(IMG_HANDLE, BUFFER_INFO*);
++typedef PVRSRV_ERROR (*PFN_GET_BC_BUFFER)(IMG_HANDLE, IMG_UINT32, PVRSRV_SYNC_DATA*, IMG_HANDLE*);
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_OPEN_BC_DEVICE pfnOpenBCDevice;
++ PFN_CLOSE_BC_DEVICE pfnCloseBCDevice;
++ PFN_GET_BC_INFO pfnGetBCInfo;
++ PFN_GET_BC_BUFFER pfnGetBCBuffer;
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++
++} PVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++
++typedef PVRSRV_ERROR (*PFN_BC_REGISTER_BUFFER_DEV)(PVRSRV_BC_SRV2BUFFER_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_BC_REMOVE_BUFFER_DEV)(IMG_UINT32);
++
++typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_BC_REGISTER_BUFFER_DEV pfnPVRSRVRegisterBCDevice;
++ PFN_BC_REMOVE_BUFFER_DEV pfnPVRSRVRemoveBCDevice;
++
++} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/kerneldisplay.h b/drivers/gpu/drm/mrst/pvr/services4/include/kerneldisplay.h
+new file mode 100644
+index 0000000..f735503
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/kerneldisplay.h
+@@ -0,0 +1,153 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELDISPLAY_H__)
++#define __KERNELDISPLAY_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_DC_DEVICE)(IMG_UINT32, IMG_HANDLE*, PVRSRV_SYNC_DATA*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_DC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_FORMATS)(IMG_HANDLE, IMG_UINT32*, DISPLAY_FORMAT*);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_DIMS)(IMG_HANDLE,
++ DISPLAY_FORMAT*,
++ IMG_UINT32*,
++ DISPLAY_DIMS*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_SYSTEMBUFFER)(IMG_HANDLE, IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_INFO)(IMG_HANDLE, DISPLAY_INFO*);
++typedef PVRSRV_ERROR (*PFN_CREATE_DC_SWAPCHAIN)(IMG_HANDLE,
++ IMG_UINT32,
++ DISPLAY_SURF_ATTRIBUTES*,
++ DISPLAY_SURF_ATTRIBUTES*,
++ IMG_UINT32,
++ PVRSRV_SYNC_DATA**,
++ IMG_UINT32,
++ IMG_HANDLE*,
++ IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DESTROY_DC_SWAPCHAIN)(IMG_HANDLE,
++ IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_GET_DC_BUFFERS)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_UINT32*,
++ IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_BUFFER)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_UINT32,
++ IMG_HANDLE,
++ IMG_UINT32,
++ IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_SYSTEM)(IMG_HANDLE, IMG_HANDLE);
++typedef IMG_VOID (*PFN_SET_DC_STATE)(IMG_HANDLE, IMG_UINT32);
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_OPEN_DC_DEVICE pfnOpenDCDevice;
++ PFN_CLOSE_DC_DEVICE pfnCloseDCDevice;
++ PFN_ENUM_DC_FORMATS pfnEnumDCFormats;
++ PFN_ENUM_DC_DIMS pfnEnumDCDims;
++ PFN_GET_DC_SYSTEMBUFFER pfnGetDCSystemBuffer;
++ PFN_GET_DC_INFO pfnGetDCInfo;
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++ PFN_CREATE_DC_SWAPCHAIN pfnCreateDCSwapChain;
++ PFN_DESTROY_DC_SWAPCHAIN pfnDestroyDCSwapChain;
++ PFN_SET_DC_DSTRECT pfnSetDCDstRect;
++ PFN_SET_DC_SRCRECT pfnSetDCSrcRect;
++ PFN_SET_DC_DSTCK pfnSetDCDstColourKey;
++ PFN_SET_DC_SRCCK pfnSetDCSrcColourKey;
++ PFN_GET_DC_BUFFERS pfnGetDCBuffers;
++ PFN_SWAP_TO_DC_BUFFER pfnSwapToDCBuffer;
++ PFN_SWAP_TO_DC_SYSTEM pfnSwapToDCSystem;
++ PFN_SET_DC_STATE pfnSetDCState;
++
++} PVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_ISR_HANDLER)(IMG_VOID*);
++
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_DISPLAY_DEV)(PVRSRV_DC_SRV2DISP_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_DISPLAY_DEV)(IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_OEM_FUNCTION)(IMG_UINT32, IMG_VOID*, IMG_UINT32, IMG_VOID*, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_COMMANDPROCLIST)(IMG_UINT32, PPFN_CMD_PROC,IMG_UINT32[][2], IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_COMMANDPROCLIST)(IMG_UINT32, IMG_UINT32);
++typedef IMG_VOID (*PFN_DC_CMD_COMPLETE)(IMG_HANDLE, IMG_BOOL);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_SYS_ISR)(PFN_ISR_HANDLER, IMG_VOID*, IMG_UINT32, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_POWER)(IMG_UINT32, PFN_PRE_POWER, PFN_POST_POWER,
++ PFN_PRE_CLOCKSPEED_CHANGE, PFN_POST_CLOCKSPEED_CHANGE,
++ IMG_HANDLE, PVRSRV_DEV_POWER_STATE, PVRSRV_DEV_POWER_STATE);
++
++typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_DC_REGISTER_DISPLAY_DEV pfnPVRSRVRegisterDCDevice;
++ PFN_DC_REMOVE_DISPLAY_DEV pfnPVRSRVRemoveDCDevice;
++ PFN_DC_OEM_FUNCTION pfnPVRSRVOEMFunction;
++ PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList;
++ PFN_DC_REMOVE_COMMANDPROCLIST pfnPVRSRVRemoveCmdProcList;
++ PFN_DC_CMD_COMPLETE pfnPVRSRVCmdComplete;
++ PFN_DC_REGISTER_SYS_ISR pfnPVRSRVRegisterSystemISRHandler;
++ PFN_DC_REGISTER_POWER pfnPVRSRVRegisterPowerDevice;
++} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE;
++
++
++typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG
++{
++
++ IMG_HANDLE hExtDevice;
++
++
++ IMG_HANDLE hExtSwapChain;
++
++
++ IMG_HANDLE hExtBuffer;
++
++
++ IMG_HANDLE hPrivateTag;
++
++
++ IMG_UINT32 ui32ClipRectCount;
++
++
++ IMG_RECT *psClipRect;
++
++
++ IMG_UINT32 ui32SwapInterval;
++
++} DISPLAYCLASS_FLIP_COMMAND;
++
++#define DC_FLIP_COMMAND 0
++
++#define DC_STATE_NO_FLUSH_COMMANDS 0
++#define DC_STATE_FLUSH_COMMANDS 1
++
++
++typedef IMG_BOOL (*PFN_DC_GET_PVRJTABLE)(PPVRSRV_DC_DISP2SRV_KMJTABLE);
++
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge.h b/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge.h
+new file mode 100644
+index 0000000..3893db7
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge.h
+@@ -0,0 +1,1383 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_H__
++#define __PVR_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "servicesint.h"
++
++#ifdef __linux__
++
++ #include <linux/ioctl.h>
++
++ #define PVRSRV_IOC_GID 'g'
++ #define PVRSRV_IO(INDEX) _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOW(INDEX) _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOR(INDEX) _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOWR(INDEX) _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++
++#else
++
++ #error Unknown platform: Cannot define ioctls
++
++ #define PVRSRV_IO(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOW(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOR(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOWR(INDEX) (PVRSRV_IOC_GID + INDEX)
++
++ #define PVRSRV_BRIDGE_BASE PVRSRV_IOC_GID
++#endif
++
++
++#define PVRSRV_BRIDGE_CORE_CMD_FIRST 0UL
++#define PVRSRV_BRIDGE_ENUM_DEVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_FREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_CONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_DISCONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_MAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
++#define PVRSRV_BRIDGE_MAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
++#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
++#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
++#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
++#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+25)
++#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+26)
++#define PVRSRV_BRIDGE_RELEASE_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++#define PVRSRV_BRIDGE_CORE_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++
++#define PVRSRV_BRIDGE_SIM_CMD_FIRST (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
++#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_SIM_CMD_LAST (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_MAPPING_CMD_LAST (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_STATS_CMD_FIRST (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_FB_STATS PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_STATS_CMD_LAST (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_MISC_CMD_FIRST (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_RELEASE_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MISC_CMD_LAST (PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#endif
++#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_INIT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_PDUMP_MEMPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_DUMPMEM PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_PDUMP_REG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_PDUMP_REGPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_PDUMP_COMMENT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_PDUMP_SETFRAME PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_PDUMP_SYNCPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_PDUMP_MEMPAGES PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_PDUMP_PDREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_PDUMP_STARTINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_PDUMP_STOPINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#else
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST PVRSRV_BRIDGE_OVERLAY_CMD_LAST
++#endif
++
++#define PVRSRV_BRIDGE_OEM_CMD_FIRST (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_OEMJTABLE PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_OEM_CMD_LAST (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ENUM_CLASS PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++
++
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_WRAP_CMD_FIRST (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_WRAP_CMD_LAST (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_INITSRV_CONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_INITSRV_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_INITSRV_CMD_LAST (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST (PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD (PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST+1)
++
++
++#define PVRSRV_KERNEL_MODE_CLIENT 1
++
++typedef struct PVRSRV_BRIDGE_RETURN_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_VOID *pvData;
++
++}PVRSRV_BRIDGE_RETURN;
++
++
++typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
++{
++ IMG_UINT32 ui32BridgeID;
++ IMG_UINT32 ui32Size;
++ IMG_VOID *pvParamIn;
++ IMG_UINT32 ui32InBufferSize;
++ IMG_VOID *pvParamOut;
++ IMG_UINT32 ui32OutBufferSize;
++
++ IMG_HANDLE hKernelServices;
++}PVRSRV_BRIDGE_PACKAGE;
++
++
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 uiDevIndex;
++ PVRSRV_DEVICE_TYPE eDeviceType;
++
++} PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUMCLASS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_DEVICE_CLASS sDeviceClass;
++} PVRSRV_BRIDGE_IN_ENUMCLASS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_DEVICE_CLASS DeviceClass;
++ IMG_VOID* pvDevInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++
++}PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemHeap;
++ IMG_UINT32 ui32Attribs;
++ IMG_SIZE_T ui32Size;
++ IMG_SIZE_T ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_PVOID pvLinAddr;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++
++}PVRSRV_BRIDGE_IN_FREEDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Flags;
++
++} PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_SIZE_T ui32QueueSize;
++
++}PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_QUEUE_INFO *psQueueInfo;
++
++}PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hMHandle;
++} PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hMHandle;
++} PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR *psDevVAddr;
++ IMG_SIZE_T ui32Size;
++ IMG_SIZE_T ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hKernelServices;
++}PVRSRV_BRIDGE_OUT_CONNECT_SERVICES;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_HANDLE hDstDevMemHeap;
++
++}PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psDstKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sDstClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceClassBuffer;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ IMG_UINT32 ui32Mask;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPOL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_BOOL bIsRead;
++ IMG_UINT32 ui32Value;
++ IMG_UINT32 ui32Mask;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_PVOID pvLinAddr;
++ IMG_PVOID pvAltLinAddr;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Bytes;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_PVOID pvAltLinAddr;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Bytes;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Mask;
++ IMG_UINT32 ui32Flags;
++}PVRSRV_BRIDGE_IN_PDUMP_REGPOL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_DEV_PHYADDR *pPages;
++ IMG_UINT32 ui32NumPages;
++ IMG_DEV_VIRTADDR sDevAddr;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32Length;
++ IMG_BOOL bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_COMMENT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Frame;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SETFRAME;
++
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32StrideInBytes;
++ IMG_DEV_VIRTADDR sDevBaseAddr;
++ IMG_UINT32 ui32Size;
++ PDUMP_PIXEL_FORMAT ePixelFormat;
++ PDUMP_MEM_FORMAT eMemFormat;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_BITMAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32Address;
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_READREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ IMG_BOOL bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
++
++
++typedef struct PVRSRV_BRIDGE_PDUM_IN_CYCLE_COUNT_REG_READ_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32RegOffset;
++ IMG_BOOL bLastFrame;
++}PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32NumDevices;
++ PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMDEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG
++{
++
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32NumDevices;
++ IMG_UINT32 ui32DevID[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMCLASS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hDevCookie;
++
++}PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDeviceKM;
++
++}PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++ IMG_VOID *pvLinAddr;
++ IMG_SIZE_T ui32ByteSize;
++ IMG_SIZE_T ui32PageOffset;
++ IMG_BOOL bPhysContig;
++ IMG_UINT32 ui32NumPageTableEntries;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY;
++
++
++#define PVRSRV_MAX_DC_DISPLAY_FORMATS 10
++#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS 10
++#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS 4
++#define PVRSRV_MAX_DC_CLIP_RECTS 32
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Count;
++ DISPLAY_FORMAT asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ DISPLAY_FORMAT sFormat;
++
++}PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Count;
++ DISPLAY_DIMS asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ DISPLAY_INFO sDisplayInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBuffer;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_UINT32 ui32Flags;
++ DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib;
++ DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib;
++ IMG_UINT32 ui32BufferCount;
++ IMG_UINT32 ui32OEMFlags;
++ IMG_UINT32 ui32SwapChainID;
++
++} PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSwapChain;
++ IMG_UINT32 ui32SwapChainID;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++ IMG_RECT sRect;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++ IMG_UINT32 ui32CKColour;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32BufferCount;
++ IMG_HANDLE ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++
++} PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hBuffer;
++ IMG_UINT32 ui32SwapInterval;
++ IMG_HANDLE hPrivateTag;
++ IMG_UINT32 ui32ClipRectCount;
++ IMG_RECT sClipRect[PVRSRV_MAX_DC_CLIP_RECTS];
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDeviceKM;
++
++} PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ BUFFER_INFO sBufferInfo;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_UINT32 ui32BufferIndex;
++
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBuffer;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32ClientHeapCount;
++ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevMemContext;
++ IMG_UINT32 ui32ClientHeapCount;
++ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevMemHeap;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++} PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ IMG_UINT64 ui64Stamp;
++#endif
++
++} PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_PVOID pvLinAddr;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_SIZE_T ui32Total;
++ IMG_SIZE_T ui32Free;
++ IMG_SIZE_T ui32LargestBlock;
++
++} PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM;
++
++
++#include "pvrmmap.h"
++typedef struct PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_UINT32 ui32MMapOffset;
++
++
++ IMG_UINT32 ui32ByteOffset;
++
++
++ IMG_UINT32 ui32RealByteSize;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++} PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA;
++
++typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bMUnmap;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++
++ IMG_UINT32 ui32RealByteSize;
++} PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA;
++
++typedef struct PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_GET_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO;
++
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_BOOL bIsCapturing;
++
++} PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_SIZE_T ui32Total;
++ IMG_SIZE_T ui32Available;
++
++} PVRSRV_BRIDGE_IN_GET_FB_STATS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_SYS_PHYADDR sSysPhysAddr;
++ IMG_UINT32 uiSizeInBytes;
++
++} PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG
++{
++ IMG_PVOID pvUserAddr;
++ IMG_UINT32 uiActualSize;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvUserAddr;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG
++{
++ IMG_PVOID *ppvTbl;
++ IMG_UINT32 uiTblSize;
++
++} PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG
++{
++ IMG_SYS_PHYADDR sRegsPhysBase;
++ IMG_VOID *pvRegsBase;
++ IMG_PVOID pvProcess;
++ IMG_UINT32 ulNoOfEntries;
++ IMG_PVOID pvTblLinAddr;
++
++} PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvProcess;
++ IMG_VOID *pvRegsBase;
++
++} PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS;
++
++typedef struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32StatusAndMask;
++ PVRSRV_ERROR eError;
++
++} PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT;
++
++typedef struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_BOOL bInitSuccesful;
++} PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Flags;
++ IMG_SIZE_T ui32Size;
++}PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++}PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG
++{
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemContext;
++}PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR_TAG
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAI_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG
++{
++ PVRSRV_EVENTOBJECT sEventObject;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG
++{
++ IMG_HANDLE hOSEvent;
++ PVRSRV_ERROR eError;
++} PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG
++{
++ PVRSRV_EVENTOBJECT sEventObject;
++ IMG_HANDLE hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE;
++
++typedef struct PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++
++} PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS;
++
++typedef struct PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++
++} PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS;
++
++typedef struct PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_UINT32 ui32ReadOpsPending;
++ IMG_UINT32 ui32WriteOpsPending;
++
++} PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS;
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge_km.h b/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge_km.h
+new file mode 100644
+index 0000000..9c4b054
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge_km.h
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_KM_H_
++#define __PVR_BRIDGE_KM_H_
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined(__linux__)
++PVRSRV_ERROR LinuxBridgeInit(IMG_VOID);
++IMG_VOID LinuxBridgeDeInit(IMG_VOID);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *psDevIdList);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32 uiDevIndex,
++ PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE *phDevCookie);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbCreated,
++ IMG_BOOL *pbShared);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_BOOL *pbDestroyed);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbShared
++ );
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++
++#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
++ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo, logStr) \
++ (PVR_TRACE(("PVRSRVAllocDeviceMemKM(" #devCookie ", " #perProc ", " #devMemHeap ", " #flags ", " #size \
++ ", " #alignment "," #memInfo "): " logStr " (size = 0x%x)", size)),\
++ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo))
++#else
++ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo, logStr) \
++ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo)
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMemKM(IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID );
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM,
++ DISPLAY_FORMAT *psFormat,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_DIMS *psDim);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM,
++ DISPLAY_INFO *psDisplayInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_HANDLE *phSwapChain,
++ IMG_UINT32 *pui32SwapChainID);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChain);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 *pui32BufferCount,
++ IMG_HANDLE *phBuffer);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE hDeviceKM,
++ BUFFER_INFO *psBufferInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++ IMG_HANDLE *phOSMapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++ IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Free,
++ IMG_SIZE_T *pui32LargestBlock);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo);
++
++PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Available);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/pvrmmap.h b/drivers/gpu/drm/mrst/pvr/services4/include/pvrmmap.h
+new file mode 100644
+index 0000000..7270f54
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/pvrmmap.h
+@@ -0,0 +1,36 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVRMMAP_H__
++#define __PVRMMAP_H__
++
++PVRSRV_ERROR PVRPMapKMem(IMG_HANDLE hModule, IMG_VOID **ppvLinAddr, IMG_VOID *pvLinAddrKM, IMG_HANDLE *phMappingInfo, IMG_HANDLE hMHandle);
++
++
++IMG_BOOL PVRUnMapKMem(IMG_HANDLE hModule, IMG_HANDLE hMappingInfo, IMG_HANDLE hMHandle);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/servicesint.h b/drivers/gpu/drm/mrst/pvr/services4/include/servicesint.h
+new file mode 100644
+index 0000000..a024fd5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/servicesint.h
+@@ -0,0 +1,266 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESINT_H__)
++#define __SERVICESINT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "services.h"
++#include "sysinfo.h"
++
++#define HWREC_DEFAULT_TIMEOUT (500)
++
++#define DRIVERNAME_MAXLENGTH (100)
++
++
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_
++{
++
++ IMG_PVOID pvLinAddrKM;
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_SIZE_T ui32AllocSize;
++
++
++ PVRSRV_MEMBLK sMemBlk;
++
++
++ IMG_PVOID pvSysBackupBuffer;
++
++
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_BOOL bPendingFree;
++
++
++ #if defined(SUPPORT_MEMINFO_IDS)
++ #if !defined(USE_CODE)
++
++ IMG_UINT64 ui64Stamp;
++ #else
++ IMG_UINT32 dummy1;
++ IMG_UINT32 dummy2;
++ #endif
++ #endif
++
++
++ struct _PVRSRV_KERNEL_SYNC_INFO_ *psKernelSyncInfo;
++
++} PVRSRV_KERNEL_MEM_INFO;
++
++
++typedef struct _PVRSRV_KERNEL_SYNC_INFO_
++{
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++
++
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psSyncDataMemInfoKM;
++
++
++ IMG_HANDLE hResItem;
++
++
++
++ IMG_UINT32 ui32RefCount;
++
++} PVRSRV_KERNEL_SYNC_INFO;
++
++typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_
++{
++
++ IMG_UINT32 ui32ReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++ IMG_UINT32 ui32WriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++} PVRSRV_DEVICE_SYNC_OBJECT;
++
++typedef struct _PVRSRV_SYNC_OBJECT
++{
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM;
++ IMG_UINT32 ui32WriteOpsPending;
++ IMG_UINT32 ui32ReadOpsPending;
++
++}PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT;
++
++typedef struct _PVRSRV_COMMAND
++{
++ IMG_SIZE_T ui32CmdSize;
++ IMG_UINT32 ui32DevIndex;
++ IMG_UINT32 CommandType;
++ IMG_UINT32 ui32DstSyncCount;
++ IMG_UINT32 ui32SrcSyncCount;
++ PVRSRV_SYNC_OBJECT *psDstSync;
++ PVRSRV_SYNC_OBJECT *psSrcSync;
++ IMG_SIZE_T ui32DataSize;
++ IMG_UINT32 ui32ProcessID;
++ IMG_VOID *pvData;
++}PVRSRV_COMMAND, *PPVRSRV_COMMAND;
++
++
++typedef struct _PVRSRV_QUEUE_INFO_
++{
++ IMG_VOID *pvLinQueueKM;
++ IMG_VOID *pvLinQueueUM;
++ volatile IMG_SIZE_T ui32ReadOffset;
++ volatile IMG_SIZE_T ui32WriteOffset;
++ IMG_UINT32 *pui32KickerAddrKM;
++ IMG_UINT32 *pui32KickerAddrUM;
++ IMG_SIZE_T ui32QueueSize;
++
++ IMG_UINT32 ui32ProcessID;
++
++ IMG_HANDLE hMemBlock[2];
++
++ struct _PVRSRV_QUEUE_INFO_ *psNextKM;
++}PVRSRV_QUEUE_INFO;
++
++typedef PVRSRV_ERROR (*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO*,
++ PVRSRV_COMMAND**,
++ IMG_UINT32,
++ IMG_UINT16,
++ IMG_UINT32,
++ PVRSRV_KERNEL_SYNC_INFO*[],
++ IMG_UINT32,
++ PVRSRV_KERNEL_SYNC_INFO*[],
++ IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO*, PVRSRV_COMMAND*, IMG_BOOL);
++
++
++typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG
++{
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++ IMG_HANDLE hDevMemContext;
++ IMG_HANDLE hExtDevice;
++ IMG_HANDLE hExtBuffer;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++} PVRSRV_DEVICECLASS_BUFFER;
++
++
++typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG
++{
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hServices;
++} PVRSRV_CLIENT_DEVICECLASS_INFO;
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetWriteOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++ IMG_UINT32 ui32WriteOpsPending;
++
++ if(bIsReadOp)
++ {
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ else
++ {
++
++
++
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ return ui32WriteOpsPending;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetReadOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++ IMG_UINT32 ui32ReadOpsPending;
++
++ if(bIsReadOp)
++ {
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++ else
++ {
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ return ui32ReadOpsPending;
++}
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVQueueCommand(IMG_HANDLE hQueueInfo,
++ PVRSRV_COMMAND *psCommand);
++
++
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVGetMMUContextPDDevPAddr(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *sPDDevPAddr);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelMemInfo,
++ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/sgx_bridge.h b/drivers/gpu/drm/mrst/pvr/services4/include/sgx_bridge.h
+new file mode 100644
+index 0000000..b2bfc0f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/sgx_bridge.h
+@@ -0,0 +1,477 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_H__)
++#define __SGX_BRIDGE_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "pvr_bridge.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0)
++#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1)
++#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2)
++#define PVRSRV_BRIDGE_SGX_DOKICK PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3)
++#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4)
++#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5)
++
++#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
++
++#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10)
++
++#if defined(TRANSFER_QUEUE)
++#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
++#endif
++#define PVRSRV_BRIDGE_SGX_GETMISCINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14)
++#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15)
++#define PVRSRV_BRIDGE_SGX_DEVINITPART2 PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16)
++
++#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17)
++#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18)
++#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
++#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_BRIDGE_SGX_SUBMIT2D PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+23)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+24)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+25)
++#endif
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27)
++
++#define PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
++
++#if defined(SUPPORT_SGX_HWPERF)
++#define PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+29)
++#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30)
++#endif
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31)
++#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32)
++#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33)
++#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34)
++#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++#endif
++
++
++
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR sDevVAddr;
++}PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR
++{
++ PVRSRV_ERROR eError;
++ IMG_DEV_PHYADDR DevPAddr;
++ IMG_CPU_PHYADDR CpuPAddr;
++}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++}PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR_TAG
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_GETCLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG
++{
++ SGX_INTERNAL_DEVINFO sSGXInternalDevInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG
++{
++ SGX_CLIENT_INFO sClientInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETCLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_CLIENT_INFO sClientInfo;
++}PVRSRV_BRIDGE_IN_RELEASECLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_ISPBREAKPOLL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_CCB_KICK sCCBKick;
++}PVRSRV_BRIDGE_IN_DOKICK;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES;
++
++
++#if defined(TRANSFER_QUEUE)
++
++typedef struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_TRANSFER_SGX_KICK sKick;
++}PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++typedef struct PVRSRV_BRIDGE_IN_SUBMIT2D_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_2D_SGX_KICK sKick;
++} PVRSRV_BRIDGE_IN_SUBMIT2D;
++#endif
++#endif
++
++
++typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PCHAR pszKey;
++ IMG_PCHAR pszValue;
++}PVRSRV_BRIDGE_IN_READREGDWORD;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Data;
++}PVRSRV_BRIDGE_OUT_READREGDWORD;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_MISC_INFO *psMiscInfo;
++}PVRSRV_BRIDGE_IN_SGXGETMISCINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG
++{
++ PVRSRV_ERROR eError;
++ SGX_BRIDGE_INFO_FOR_SRVINIT sInitInfo;
++}PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_BRIDGE_INIT_INFO sInitInfo;
++}PVRSRV_BRIDGE_IN_SGXDEVINITPART2;
++
++
++typedef struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hKernSyncInfo;
++ IMG_BOOL bWaitForComplete;
++}PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE;
++
++
++#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10
++
++typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_BOOL bLockOnFailure;
++ IMG_UINT32 ui32TotalPBSize;
++}PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG
++{
++ IMG_HANDLE hKernelMemInfo;
++ IMG_HANDLE hSharedPBDesc;
++ IMG_HANDLE hSharedPBDescKernelMemInfoHandle;
++ IMG_HANDLE hHWPBDescKernelMemInfoHandle;
++ IMG_HANDLE hBlockKernelMemInfoHandle;
++ IMG_HANDLE hHWBlockKernelMemInfoHandle;
++ IMG_HANDLE ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfoHandlesCount;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hSharedPBDescKernelMemInfo;
++ IMG_HANDLE hHWPBDescKernelMemInfo;
++ IMG_HANDLE hBlockKernelMemInfo;
++ IMG_HANDLE hHWBlockKernelMemInfo;
++ IMG_UINT32 ui32TotalPBSize;
++ IMG_HANDLE *phKernelMemInfoHandles;
++ IMG_UINT32 ui32KernelMemInfoHandlesCount;
++}PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC;
++
++
++#ifdef PDUMP
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ SGX_KICKTA_DUMP_BUFFER *psBufferArray;
++ IMG_UINT32 ui32BufferArrayLength;
++ IMG_BOOL bDumpPolls;
++} PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMPCOUNTER_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_UINT32 ui32TAKickCount;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32PDumpFlags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB;
++
++#endif
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT;
++
++#define SGX2D_MAX_BLT_CMD_SIZ 256
++#endif
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32Reg;
++ IMG_BOOL bNew;
++ IMG_UINT32 ui32New;
++ IMG_UINT32 ui32NewReset;
++ IMG_UINT32 ui32CountersReg;
++ IMG_UINT32 ui32Reg2;
++} PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Old;
++ IMG_BOOL bActive;
++ PVRSRV_SGXDEV_DIFF_INFO sDiffs;
++} PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS;
++
++
++#if defined(SUPPORT_SGX_HWPERF)
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32ArraySize;
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData;
++} PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32DataCount;
++ IMG_UINT32 ui32ClockSpeed;
++ IMG_UINT32 ui32HostTimeStamp;
++} PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB;
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/sgx_mkif_km.h b/drivers/gpu/drm/mrst/pvr/services4/include/sgx_mkif_km.h
+new file mode 100644
+index 0000000..99f29db
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/sgx_mkif_km.h
+@@ -0,0 +1,334 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SGX_MKIF_KM_H__)
++#define __SGX_MKIF_KM_H__
++
++#include "img_types.h"
++#include "servicesint.h"
++#include "sgxapi_km.h"
++
++
++#if defined(SGX_FEATURE_MP)
++ #define SGX_REG_BANK_SHIFT (12)
++ #define SGX_REG_BANK_SIZE (0x4000)
++ #if defined(SGX541)
++ #define SGX_REG_BANK_BASE_INDEX (1)
++ #define SGX_REG_BANK_MASTER_INDEX (SGX_REG_BANK_BASE_INDEX + SGX_FEATURE_MP_CORE_COUNT)
++ #else
++ #define SGX_REG_BANK_BASE_INDEX (2)
++ #define SGX_REG_BANK_MASTER_INDEX (1)
++ #endif
++ #define SGX_MP_CORE_SELECT(x,i) (x + ((i + SGX_REG_BANK_BASE_INDEX) * SGX_REG_BANK_SIZE))
++ #define SGX_MP_MASTER_SELECT(x) (x + (SGX_REG_BANK_MASTER_INDEX * SGX_REG_BANK_SIZE))
++#else
++ #define SGX_MP_CORE_SELECT(x,i) (x)
++#endif
++
++
++typedef struct _SGXMKIF_COMMAND_
++{
++ IMG_UINT32 ui32ServiceAddress;
++ IMG_UINT32 ui32CacheControl;
++ IMG_UINT32 ui32Data[2];
++} SGXMKIF_COMMAND;
++
++
++typedef struct _PVRSRV_SGX_KERNEL_CCB_
++{
++ SGXMKIF_COMMAND asCommands[256];
++} PVRSRV_SGX_KERNEL_CCB;
++
++
++typedef struct _PVRSRV_SGX_CCB_CTL_
++{
++ IMG_UINT32 ui32WriteOffset;
++ IMG_UINT32 ui32ReadOffset;
++} PVRSRV_SGX_CCB_CTL;
++
++
++typedef struct _SGXMKIF_HOST_CTL_
++{
++#if defined(PVRSRV_USSE_EDM_BREAKPOINTS)
++ IMG_UINT32 ui32BreakpointDisable;
++ IMG_UINT32 ui32Continue;
++#endif
++
++ volatile IMG_UINT32 ui32InitStatus;
++ volatile IMG_UINT32 ui32PowerStatus;
++ volatile IMG_UINT32 ui32CleanupStatus;
++#if defined(SUPPORT_HW_RECOVERY)
++ IMG_UINT32 ui32uKernelDetectedLockups;
++ IMG_UINT32 ui32HostDetectedLockups;
++ IMG_UINT32 ui32HWRecoverySampleRate;
++#endif
++ IMG_UINT32 ui32uKernelTimerClock;
++ IMG_UINT32 ui32ActivePowManSampleRate;
++ IMG_UINT32 ui32InterruptFlags;
++ IMG_UINT32 ui32InterruptClearFlags;
++
++
++ IMG_UINT32 ui32NumActivePowerEvents;
++
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_UINT32 ui32HWPerfFlags;
++#endif
++
++
++ IMG_UINT32 ui32TimeWraps;
++} SGXMKIF_HOST_CTL;
++
++#define SGXMKIF_CMDTA_CTRLFLAGS_READY 0x00000001
++typedef struct _SGXMKIF_CMDTA_SHARED_
++{
++ IMG_UINT32 ui32CtrlFlags;
++
++ IMG_UINT32 ui32NumTAStatusVals;
++ IMG_UINT32 ui32Num3DStatusVals;
++
++
++ IMG_UINT32 ui32TATQSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sTATQSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui32TATQSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sTATQSyncReadOpsCompleteDevVAddr;
++
++
++ IMG_UINT32 ui323DTQSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DTQSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui323DTQSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DTQSyncReadOpsCompleteDevVAddr;
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ IMG_UINT32 ui32NumTASrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asTASrcSyncs[SGX_MAX_TA_SRC_SYNCS];
++ IMG_UINT32 ui32NumTADstSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asTADstSyncs[SGX_MAX_TA_DST_SYNCS];
++ IMG_UINT32 ui32Num3DSrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT as3DSrcSyncs[SGX_MAX_3D_SRC_SYNCS];
++#else
++
++ IMG_UINT32 ui32NumSrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS];
++#endif
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sTA3DDependency;
++
++ CTL_STATUS sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS];
++ CTL_STATUS sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS];
++
++} SGXMKIF_CMDTA_SHARED;
++
++#define SGXTQ_MAX_STATUS SGX_MAX_TRANSFER_STATUS_VALS + 2
++
++#define SGXMKIF_TQFLAGS_NOSYNCUPDATE 0x00000001
++#define SGXMKIF_TQFLAGS_KEEPPENDING 0x00000002
++#define SGXMKIF_TQFLAGS_TATQ_SYNC 0x00000004
++#define SGXMKIF_TQFLAGS_3DTQ_SYNC 0x00000008
++#if defined(SGX_FEATURE_FAST_RENDER_CONTEXT_SWITCH)
++#define SGXMKIF_TQFLAGS_CTXSWITCH 0x00000010
++#endif
++#define SGXMKIF_TQFLAGS_DUMMYTRANSFER 0x00000020
++
++typedef struct _SGXMKIF_TRANSFERCMD_SHARED_
++{
++
++
++ IMG_UINT32 ui32SrcReadOpPendingVal;
++ IMG_DEV_VIRTADDR sSrcReadOpsCompleteDevAddr;
++
++ IMG_UINT32 ui32SrcWriteOpPendingVal;
++ IMG_DEV_VIRTADDR sSrcWriteOpsCompleteDevAddr;
++
++
++
++ IMG_UINT32 ui32DstReadOpPendingVal;
++ IMG_DEV_VIRTADDR sDstReadOpsCompleteDevAddr;
++
++ IMG_UINT32 ui32DstWriteOpPendingVal;
++ IMG_DEV_VIRTADDR sDstWriteOpsCompleteDevAddr;
++
++
++ IMG_UINT32 ui32TASyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sTASyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui32TASyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sTASyncReadOpsCompleteDevVAddr;
++
++
++ IMG_UINT32 ui323DSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui323DSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DSyncReadOpsCompleteDevVAddr;
++
++ IMG_UINT32 ui32NumStatusVals;
++ CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS];
++} SGXMKIF_TRANSFERCMD_SHARED, *PSGXMKIF_TRANSFERCMD_SHARED;
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGXMKIF_2DCMD_SHARED_ {
++
++ IMG_UINT32 ui32NumSrcSync;
++ PVRSRV_DEVICE_SYNC_OBJECT sSrcSyncData[SGX_MAX_2D_SRC_SYNC_OPS];
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sDstSyncData;
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sTASyncData;
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT s3DSyncData;
++} SGXMKIF_2DCMD_SHARED, *PSGXMKIF_2DCMD_SHARED;
++#endif
++
++
++typedef struct _SGXMKIF_HWDEVICE_SYNC_LIST_
++{
++ IMG_DEV_VIRTADDR sAccessDevAddr;
++ IMG_UINT32 ui32NumSyncObjects;
++
++ PVRSRV_DEVICE_SYNC_OBJECT asSyncData[1];
++} SGXMKIF_HWDEVICE_SYNC_LIST, *PSGXMKIF_HWDEVICE_SYNC_LIST;
++
++
++#define PVRSRV_USSE_EDM_INIT_COMPLETE (1UL << 0)
++
++#define PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE (1UL << 2)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE (1UL << 3)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE (1UL << 4)
++#define PVRSRV_USSE_EDM_POWMAN_NO_WORK (1UL << 5)
++
++#define PVRSRV_USSE_EDM_INTERRUPT_HWR (1UL << 0)
++#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER (1UL << 1)
++
++#define PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE (1UL << 0)
++
++#define PVRSRV_USSE_MISCINFO_READY 0x1UL
++#define PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES 0x2UL
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++#define PVRSRV_USSE_MISCINFO_MEMREAD 0x4UL
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++#define PVRSRV_USSE_MISCINFO_MEMREAD_FAIL 0x1UL << 31;
++#endif
++#endif
++
++
++#define PVRSRV_CLEANUPCMD_RT 0x1
++#define PVRSRV_CLEANUPCMD_RC 0x2
++#define PVRSRV_CLEANUPCMD_TC 0x3
++#define PVRSRV_CLEANUPCMD_2DC 0x4
++#define PVRSRV_CLEANUPCMD_PB 0x5
++
++#define PVRSRV_POWERCMD_POWEROFF 0x1
++#define PVRSRV_POWERCMD_IDLE 0x2
++#define PVRSRV_POWERCMD_RESUME 0x3
++
++
++#if defined(SGX_FEATURE_BIF_NUM_DIRLISTS)
++#define SGX_BIF_DIR_LIST_INDEX_EDM (SGX_FEATURE_BIF_NUM_DIRLISTS - 1)
++#else
++#define SGX_BIF_DIR_LIST_INDEX_EDM (0)
++#endif
++
++#define SGX_BIF_INVALIDATE_PTCACHE 0x1
++#define SGX_BIF_INVALIDATE_PDCACHE 0x2
++#define SGX_BIF_INVALIDATE_SLCACHE 0x4
++
++
++typedef struct _SGX_MISCINFO_STRUCT_SIZES_
++{
++#if defined (SGX_FEATURE_2D_HARDWARE)
++ IMG_UINT32 ui32Sizeof_2DCMD;
++ IMG_UINT32 ui32Sizeof_2DCMD_SHARED;
++#endif
++ IMG_UINT32 ui32Sizeof_CMDTA;
++ IMG_UINT32 ui32Sizeof_CMDTA_SHARED;
++ IMG_UINT32 ui32Sizeof_TRANSFERCMD;
++ IMG_UINT32 ui32Sizeof_TRANSFERCMD_SHARED;
++ IMG_UINT32 ui32Sizeof_3DREGISTERS;
++ IMG_UINT32 ui32Sizeof_HWPBDESC;
++ IMG_UINT32 ui32Sizeof_HWRENDERCONTEXT;
++ IMG_UINT32 ui32Sizeof_HWRENDERDETAILS;
++ IMG_UINT32 ui32Sizeof_HWRTDATA;
++ IMG_UINT32 ui32Sizeof_HWRTDATASET;
++ IMG_UINT32 ui32Sizeof_HWTRANSFERCONTEXT;
++ IMG_UINT32 ui32Sizeof_HOST_CTL;
++ IMG_UINT32 ui32Sizeof_COMMAND;
++} SGX_MISCINFO_STRUCT_SIZES;
++
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++typedef struct _PVRSRV_SGX_MISCINFO_MEMREAD
++{
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++} PVRSRV_SGX_MISCINFO_MEMREAD;
++#endif
++
++typedef struct _PVRSRV_SGX_MISCINFO_INFO
++{
++ IMG_UINT32 ui32MiscInfoFlags;
++ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ PVRSRV_SGX_MISCINFO_MEMREAD sSGXMemReadData;
++#endif
++} PVRSRV_SGX_MISCINFO_INFO;
++
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++#define SGXMK_TRACE_BUFFER_SIZE 512
++#endif
++
++#define SGXMKIF_HWPERF_CB_SIZE 0x100
++
++#if defined(SUPPORT_SGX_HWPERF)
++typedef struct _SGXMKIF_HWPERF_CB_ENTRY_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32Ordinal;
++ IMG_UINT32 ui32TimeWraps;
++ IMG_UINT32 ui32Time;
++ IMG_UINT32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++} SGXMKIF_HWPERF_CB_ENTRY;
++
++typedef struct _SGXMKIF_HWPERF_CB_
++{
++ IMG_UINT32 ui32Woff;
++ IMG_UINT32 ui32Roff;
++ IMG_UINT32 ui32OrdinalGRAPHICS;
++ IMG_UINT32 ui32OrdinalMK_EXECUTION;
++ SGXMKIF_HWPERF_CB_ENTRY psHWPerfCBData[SGXMKIF_HWPERF_CB_SIZE];
++} SGXMKIF_HWPERF_CB;
++#endif
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/sgxinfo.h b/drivers/gpu/drm/mrst/pvr/services4/include/sgxinfo.h
+new file mode 100644
+index 0000000..8caa7af
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/sgxinfo.h
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SGXINFO_H__)
++#define __SGXINFO_H__
++
++#include "sgxscript.h"
++#include "servicesint.h"
++#include "services.h"
++#include "sgxapi_km.h"
++#include "sgx_mkif_km.h"
++
++
++#define SGX_MAX_DEV_DATA 24
++#define SGX_MAX_INIT_MEM_HANDLES 16
++
++
++typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++} SGX_BRIDGE_INFO_FOR_SRVINIT;
++
++
++typedef enum _SGXMKIF_CMD_TYPE_
++{
++ SGXMKIF_CMD_TA = 0,
++ SGXMKIF_CMD_TRANSFER = 1,
++ SGXMKIF_CMD_2D = 2,
++ SGXMKIF_CMD_POWER = 3,
++ SGXMKIF_CMD_CLEANUP = 4,
++ SGXMKIF_CMD_GETMISCINFO = 5,
++ SGXMKIF_CMD_PROCESS_QUEUES = 6,
++ SGXMKIF_CMD_MAX = 7,
++
++ SGXMKIF_CMD_FORCE_I32 = -1,
++
++} SGXMKIF_CMD_TYPE;
++
++
++typedef struct _SGX_BRIDGE_INIT_INFO_
++{
++ IMG_HANDLE hKernelCCBMemInfo;
++ IMG_HANDLE hKernelCCBCtlMemInfo;
++ IMG_HANDLE hKernelCCBEventKickerMemInfo;
++ IMG_HANDLE hKernelSGXHostCtlMemInfo;
++ IMG_HANDLE hKernelSGXTA3DCtlMemInfo;
++ IMG_HANDLE hKernelSGXMiscMemInfo;
++
++ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
++
++ SGX_INIT_SCRIPTS sScripts;
++
++ IMG_UINT32 ui32ClientBuildOptions;
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ IMG_HANDLE hKernelHWProfilingMemInfo;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_HANDLE hKernelHWPerfCBMemInfo;
++#endif
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ IMG_HANDLE hKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ IMG_HANDLE hKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ IMG_HANDLE hKernelTmpDPMStateMemInfo;
++#endif
++
++ IMG_UINT32 ui32EDMTaskReg0;
++ IMG_UINT32 ui32EDMTaskReg1;
++
++ IMG_UINT32 ui32ClkGateStatusReg;
++ IMG_UINT32 ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ IMG_UINT32 ui32MasterClkGateStatusReg;
++ IMG_UINT32 ui32MasterClkGateStatusMask;
++#endif
++
++ IMG_UINT32 ui32CacheControl;
++
++ IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA];
++ IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
++
++} SGX_BRIDGE_INIT_INFO;
++
++
++typedef struct _SGX_DEVICE_SYNC_LIST_
++{
++ PSGXMKIF_HWDEVICE_SYNC_LIST psHWDeviceSyncList;
++
++ IMG_HANDLE hKernelHWSyncListMemInfo;
++ PVRSRV_CLIENT_MEM_INFO *psHWDeviceSyncListClientMemInfo;
++ PVRSRV_CLIENT_MEM_INFO *psAccessResourceClientMemInfo;
++
++ volatile IMG_UINT32 *pui32Lock;
++
++ struct _SGX_DEVICE_SYNC_LIST_ *psNext;
++
++
++ IMG_UINT32 ui32NumSyncObjects;
++ IMG_HANDLE ahSyncHandles[1];
++} SGX_DEVICE_SYNC_LIST, *PSGX_DEVICE_SYNC_LIST;
++
++
++typedef struct _SGX_INTERNEL_STATUS_UPDATE_
++{
++ CTL_STATUS sCtlStatus;
++ IMG_HANDLE hKernelMemInfo;
++
++ IMG_UINT32 ui32LastStatusUpdateDumpVal;
++} SGX_INTERNEL_STATUS_UPDATE;
++
++
++typedef struct _SGX_CCB_KICK_
++{
++ SGXMKIF_COMMAND sCommand;
++ IMG_HANDLE hCCBKernelMemInfo;
++
++ IMG_UINT32 ui32NumDstSyncObjects;
++ IMG_HANDLE hKernelHWSyncListMemInfo;
++
++
++ IMG_HANDLE *pahDstSyncHandles;
++
++ IMG_UINT32 ui32NumTAStatusVals;
++ IMG_UINT32 ui32Num3DStatusVals;
++
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ SGX_INTERNEL_STATUS_UPDATE asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS];
++ SGX_INTERNEL_STATUS_UPDATE as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS];
++#else
++ IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
++ IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
++#endif
++
++ IMG_BOOL bFirstKickOrResume;
++#if (defined(NO_HARDWARE) || defined(PDUMP))
++ IMG_BOOL bTerminateOrAbort;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_BOOL bKickRender;
++#endif
++
++
++ IMG_UINT32 ui32CCBOffset;
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ IMG_UINT32 ui32NumTASrcSyncs;
++ IMG_HANDLE ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS];
++ IMG_UINT32 ui32NumTADstSyncs;
++ IMG_HANDLE ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS];
++ IMG_UINT32 ui32Num3DSrcSyncs;
++ IMG_HANDLE ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS];
++#else
++
++ IMG_UINT32 ui32NumSrcSyncs;
++ IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS];
++#endif
++
++
++ IMG_BOOL bTADependency;
++ IMG_HANDLE hTA3DSyncInfo;
++
++ IMG_HANDLE hTASyncInfo;
++ IMG_HANDLE h3DSyncInfo;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++#if defined(NO_HARDWARE)
++ IMG_UINT32 ui32WriteOpsPendingVal;
++#endif
++} SGX_CCB_KICK;
++
++
++#define SGX_KERNEL_USE_CODE_BASE_INDEX 15
++
++
++typedef struct _SGX_CLIENT_INFO_
++{
++ IMG_UINT32 ui32ProcessID;
++ IMG_VOID *pvProcess;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++ IMG_UINT32 asDevData[SGX_MAX_DEV_DATA];
++
++} SGX_CLIENT_INFO;
++
++typedef struct _SGX_INTERNAL_DEVINFO_
++{
++ IMG_UINT32 ui32Flags;
++ IMG_HANDLE hHostCtlKernelMemInfoHandle;
++ IMG_BOOL bForcePTOff;
++} SGX_INTERNAL_DEVINFO;
++
++
++#if defined(TRANSFER_QUEUE)
++typedef struct _PVRSRV_TRANSFER_SGX_KICK_
++{
++ IMG_HANDLE hCCBMemInfo;
++ IMG_UINT32 ui32SharedCmdCCBOffset;
++
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++
++ IMG_HANDLE hTASyncInfo;
++ IMG_HANDLE h3DSyncInfo;
++
++ IMG_UINT32 ui32NumSrcSync;
++ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ IMG_UINT32 ui32NumDstSync;
++ IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ IMG_UINT32 ui32Flags;
++
++ IMG_UINT32 ui32PDumpFlags;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _PVRSRV_2D_SGX_KICK_
++{
++ IMG_HANDLE hCCBMemInfo;
++ IMG_UINT32 ui32SharedCmdCCBOffset;
++
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++
++ IMG_UINT32 ui32NumSrcSync;
++ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
++
++
++ IMG_HANDLE hDstSyncInfo;
++
++
++ IMG_HANDLE hTASyncInfo;
++
++
++ IMG_HANDLE h3DSyncInfo;
++
++ IMG_UINT32 ui32PDumpFlags;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_2D_SGX_KICK, *PPVRSRV_2D_SGX_KICK;
++#endif
++#endif
++
++#define PVRSRV_SGX_DIFF_NUM_COUNTERS 9
++
++typedef struct _PVRSRV_SGXDEV_DIFF_INFO_
++{
++ IMG_UINT32 aui32Counters[PVRSRV_SGX_DIFF_NUM_COUNTERS];
++ IMG_UINT32 ui32Time[3];
++ IMG_UINT32 ui32Marker[2];
++} PVRSRV_SGXDEV_DIFF_INFO, *PPVRSRV_SGXDEV_DIFF_INFO;
++
++
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+new file mode 100644
+index 0000000..118c1d2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+@@ -0,0 +1,3426 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge_km.h"
++#include "pvr_debug.h"
++#include "ra.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++#if defined(SUPPORT_MSVDX)
++#include "msvdx_bridge.h"
++#endif
++#include "perproc.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#include "pdump_km.h"
++#include "syscommon.h"
++
++#include "bridged_pvr_bridge.h"
++#if defined(SUPPORT_SGX)
++#include "bridged_sgx_bridge.h"
++#endif
++#if defined(SUPPORT_VGX)
++#include "bridged_vgx_bridge.h"
++#endif
++#if defined(SUPPORT_MSVDX)
++#include "bridged_msvdx_bridge.h"
++#endif
++
++#include "env_data.h"
++
++#if defined (__linux__)
++#include "mmap.h"
++#endif
++
++#include "srvkm.h"
++
++PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++#if defined(PVR_SECURE_HANDLES)
++static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS];
++static IMG_BOOL *pbSharedDeviceMemHeap = abSharedDeviceMemHeap;
++#else
++static IMG_BOOL *pbSharedDeviceMemHeap = (IMG_BOOL*)IMG_NULL;
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
++ return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
++ return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++#endif
++
++
++static IMG_INT
++PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ psEnumDeviceOUT->eError =
++ PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
++ psEnumDeviceOUT->asDeviceIdentifier);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
++ PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex,
++ psAcquireDevInfoIN->eDeviceType,
++ &hDevCookieInt);
++ if(psAcquireDevInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psAcquireDevInfoOUT->hDevCookie,
++ hDevCookieInt,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
++ PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_UINT32 i;
++ IMG_BOOL bCreated;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
++
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS + 1);
++
++ psCreateDevMemContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psCreateDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psCreateDevMemContextOUT->eError =
++ PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
++ psPerProc,
++ &hDevMemContextInt,
++ &psCreateDevMemContextOUT->ui32ClientHeapCount,
++ &psCreateDevMemContextOUT->sHeapInfo[0],
++ &bCreated,
++ pbSharedDeviceMemHeap);
++
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ if(bCreated)
++ {
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ }
++ else
++ {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++ if(abSharedDeviceMemHeap[i])
++#endif
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ }
++#if defined(PVR_SECURE_HANDLES)
++ else
++ {
++
++ if(bCreated)
++ {
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDevMemContextOUT->hDevMemContext);
++ }
++ else
++ {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++ }
++#endif
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_BOOL bDestroyed;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psDestroyDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psDestroyDevMemContextIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(bDestroyed)
++ {
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDevMemContextIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ }
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVGetDeviceMemHeapInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS);
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetDevMemHeapInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetDevMemHeapInfoIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt,
++ hDevMemContextInt,
++ &psGetDevMemHeapInfoOUT->ui32ClientHeapCount,
++ &psGetDevMemHeapInfoOUT->sHeapInfo[0],
++ pbSharedDeviceMemHeap);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ for(i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++ if(abSharedDeviceMemHeap[i])
++#endif
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ }
++#if defined(PVR_SECURE_HANDLES)
++ else
++ {
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW)
++IMG_INT
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++#else
++static IMG_INT
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemHeapInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2);
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psAllocDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
++ psAllocDeviceMemIN->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVAllocDeviceMemKM(hDevCookieInt,
++ psPerProc,
++ hDevMemHeapInt,
++ psAllocDeviceMemIN->ui32Attribs,
++ psAllocDeviceMemIN->ui32Size,
++ psAllocDeviceMemIN->ui32Alignment,
++ &psMemInfo,
++ "" );
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psAllocDeviceMemOUT->sClientMemInfo));
++
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++#if defined (__linux__)
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
++#else
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
++#endif
++ psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++
++ OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo,
++ 0,
++ sizeof (PVRSRV_CLIENT_SYNC_INFO));
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL;
++ psAllocDeviceMemOUT->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++ psAllocDeviceMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
++
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psAllocDeviceMemOUT->sClientSyncInfo;
++
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#endif
++
++static IMG_INT
++PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_VOID *pvKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psFreeDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)pvKernelMemInfo;
++
++ if (psKernelMemInfo->ui32RefCount == 1)
++ {
++ psRetOUT->eError =
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVFreeDeviceMemBW: mappings are open "
++ "in other processes, deferring free!"));
++
++ psKernelMemInfo->bPendingFree = IMG_TRUE;
++ psRetOUT->eError = PVRSRV_OK;
++ }
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVExportDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EXPORT_DEVICEMEM);
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psExportDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find devcookie"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_PVOID *)&psKernelMemInfo,
++ psExportDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find kernel meminfo"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psExportDeviceMemOUT->eError == PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: allocation is already exported"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list"));
++ return 0;
++ }
++
++
++ psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED;
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVMapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL;
++ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hDstDevMemHeap = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEV_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2);
++
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (IMG_VOID**)&psSrcKernelMemInfo,
++ psMapDevMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDstDevMemHeap,
++ psMapDevMemIN->hDstDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc,
++ psSrcKernelMemInfo,
++ hDstDevMemHeap,
++ &psDstKernelMemInfo);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psMapDevMemOUT->sDstClientMemInfo,
++ 0,
++ sizeof(psMapDevMemOUT->sDstClientMemInfo));
++ OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo,
++ 0,
++ sizeof(psMapDevMemOUT->sDstClientSyncInfo));
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM =
++ psDstKernelMemInfo->pvLinAddrKM;
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0;
++ psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = psDstKernelMemInfo->sDevVAddr;
++ psMapDevMemOUT->sDstClientMemInfo.ui32Flags = psDstKernelMemInfo->ui32Flags;
++ psMapDevMemOUT->sDstClientMemInfo.ui32AllocSize = psDstKernelMemInfo->ui32AllocSize;
++ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = psDstKernelMemInfo->sMemBlk.hOSMemHandle;
++
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo,
++ psDstKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL;
++ psMapDevMemOUT->psDstKernelSyncInfo = IMG_NULL;
++
++
++ if(psDstKernelMemInfo->psKernelSyncInfo)
++ {
++ psMapDevMemOUT->psDstKernelSyncInfo = psDstKernelMemInfo->psKernelSyncInfo;
++
++ psMapDevMemOUT->sDstClientSyncInfo.psSyncData =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = &psMapDevMemOUT->sDstClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo,
++ psDstKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID**)&psKernelMemInfo,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++
++
++static IMG_INT
++PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_HANDLE hOSMapInfo;
++ IMG_HANDLE hDeviceClassBufferInt;
++ IMG_HANDLE hDevMemContextInt;
++ PVRSRV_HANDLE_TYPE eHandleType;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2);
++
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, &hDeviceClassBufferInt,
++ &eHandleType,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psMapDevClassMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ switch(eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
++ case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++ break;
++ default:
++ psMapDevClassMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVMapDeviceClassMemoryKM(psPerProc,
++ hDevMemContextInt,
++ hDeviceClassBufferInt,
++ &psMemInfo,
++ &hOSMapInfo);
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psMapDevClassMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psMapDevClassMemOUT->sClientMemInfo));
++ OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo,
++ 0,
++ sizeof(psMapDevClassMemOUT->sClientSyncInfo));
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psMapDevClassMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL;
++ psMapDevClassMemOUT->psKernelSyncInfo = IMG_NULL;
++
++
++ if(psMemInfo->psKernelSyncInfo)
++ {
++ psMapDevClassMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++ psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++
++#if defined(OS_PVRSRV_WRAP_EXT_MEM_BW)
++IMG_INT
++PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++#else
++static IMG_INT
++PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_UINT32 ui32PageTableSize = 0;
++ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2);
++
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psWrapExtMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psWrapExtMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psWrapExtMemIN->ui32NumPageTableEntries)
++ {
++ ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
++ * sizeof(IMG_SYS_PHYADDR);
++
++ ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize,
++ (IMG_VOID **)&psSysPAddr, 0,
++ "Page Table"));
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSysPAddr,
++ psWrapExtMemIN->psSysPAddr,
++ ui32PageTableSize) != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0);
++
++ return -EFAULT;
++ }
++ }
++
++ psWrapExtMemOUT->eError =
++ PVRSRVWrapExtMemoryKM(hDevCookieInt,
++ psPerProc,
++ hDevMemContextInt,
++ psWrapExtMemIN->ui32ByteSize,
++ psWrapExtMemIN->ui32PageOffset,
++ psWrapExtMemIN->bPhysContig,
++ psSysPAddr,
++ psWrapExtMemIN->pvLinAddr,
++ psWrapExtMemIN->ui32Flags,
++ &psMemInfo);
++ if(psWrapExtMemIN->ui32NumPageTableEntries)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize,
++ (IMG_VOID *)psSysPAddr, 0);
++
++ }
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psWrapExtMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psWrapExtMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++
++ psWrapExtMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ (IMG_HANDLE)psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc);
++
++ return 0;
++}
++#endif
++
++static IMG_INT
++PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psGetFreeDeviceMemOUT->eError =
++ PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
++ &psGetFreeDeviceMemOUT->ui32Total,
++ &psGetFreeDeviceMemOUT->ui32Free,
++ &psGetFreeDeviceMemOUT->ui32LargestBlock);
++
++ return 0;
++}
++
++static IMG_INT
++PVRMMapOSMemHandleToMMapDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *psMMapDataIN,
++ PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA);
++
++#if defined (__linux__)
++ psMMapDataOUT->eError =
++ PVRMMapOSMemHandleToMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->ui32MMapOffset,
++ &psMMapDataOUT->ui32ByteOffset,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++#else
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
++
++ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++#endif
++ return 0;
++}
++
++
++static IMG_INT
++PVRMMapReleaseMMapDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN,
++ PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA);
++
++#if defined (__linux__)
++ psMMapDataOUT->eError =
++ PVRMMapReleaseMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->bMUnmap,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++#else
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
++
++ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++#endif
++ return 0;
++}
++
++
++#ifdef PDUMP
++static IMG_INT
++PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
++ psPDumpIsCapturingOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PDumpCommentBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
++ psPDumpCommentIN->ui32Flags);
++ return 0;
++}
++
++static IMG_INT
++PDumpSetFrameBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpRegWithFlagsKM(psPDumpRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpRegDumpIN->sHWReg.ui32RegVal,
++ psPDumpRegDumpIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpRegPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpRegPolWithFlagsKM(psPDumpRegPolIN->sHWReg.ui32RegAddr,
++ psPDumpRegPolIN->sHWReg.ui32RegVal,
++ psPDumpRegPolIN->ui32Mask,
++ psPDumpRegPolIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpMemPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemPolIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
++ psPDumpMemPolIN->ui32Offset,
++ psPDumpMemPolIN->ui32Value,
++ psPDumpMemPolIN->ui32Mask,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ psPDumpMemPolIN->ui32Flags,
++ MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemDumpIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc,
++ psPDumpMemDumpIN->pvAltLinAddr,
++ psPDumpMemDumpIN->pvLinAddr,
++ pvMemInfo,
++ psPDumpMemDumpIN->ui32Offset,
++ psPDumpMemDumpIN->ui32Bytes,
++ psPDumpMemDumpIN->ui32Flags,
++ MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpBitmapBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psRetOUT->eError =
++ PDumpBitmapKM(&psPDumpBitmapIN->szFileName[0],
++ psPDumpBitmapIN->ui32FileOffset,
++ psPDumpBitmapIN->ui32Width,
++ psPDumpBitmapIN->ui32Height,
++ psPDumpBitmapIN->ui32StrideInBytes,
++ psPDumpBitmapIN->sDevBaseAddr,
++ psPDumpBitmapIN->ui32Size,
++ psPDumpBitmapIN->ePixelFormat,
++ psPDumpBitmapIN->eMemFormat,
++ psPDumpBitmapIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpReadRegBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpReadRegKM(&psPDumpReadRegIN->szFileName[0],
++ psPDumpReadRegIN->ui32FileOffset,
++ psPDumpReadRegIN->ui32Address,
++ psPDumpReadRegIN->ui32Size,
++ psPDumpReadRegIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32PDumpFlags;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ ui32PDumpFlags = 0;
++ if(psPDumpDriverInfoIN->bContinuous)
++ {
++ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
++ }
++ psRetOUT->eError =
++ PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
++ ui32PDumpFlags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
++ IMG_VOID *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncDumpIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc,
++ psPDumpSyncDumpIN->pvAltLinAddr,
++ IMG_NULL,
++ ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++ psPDumpSyncDumpIN->ui32Offset,
++ ui32Bytes,
++ 0,
++ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpSyncPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32Offset;
++ IMG_VOID *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncPolIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psPDumpSyncPolIN->bIsRead)
++ {
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++ }
++ else
++ {
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ }
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++ ui32Offset,
++ psPDumpSyncPolIN->ui32Value,
++ psPDumpSyncPolIN->ui32Mask,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpPDRegBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG *psPDumpPDRegDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_PDREG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PDumpPDReg(psPDumpPDRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpPDRegDumpIN->sHWReg.ui32RegVal,
++ PDUMP_PD_UNIQUETAG);
++
++ psRetOUT->eError = PVRSRV_OK;
++ return 0;
++}
++
++static IMG_INT
++PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PDumpCycleCountRegRead(psPDumpCycleCountRegReadIN->ui32RegOffset,
++ psPDumpCycleCountRegReadIN->bLastFrame);
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
++ psPDumpPDDevPAddrIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
++ psPDumpPDDevPAddrIN->ui32Offset,
++ psPDumpPDDevPAddrIN->sPDDevPAddr,
++ MAKEUNIQUETAG(pvMemInfo),
++ PDUMP_PD_UNIQUETAG);
++ return 0;
++}
++
++static IMG_INT
++PDumpStartInitPhaseBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STARTINITPHASE);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpStartInitPhaseKM();
++
++ return 0;
++}
++
++static IMG_INT
++PDumpStopInitPhaseBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STOPINITPHASE);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpStopInitPhaseKM();
++
++ return 0;
++}
++
++#endif
++
++
++static IMG_INT
++PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
++
++ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
++ &psGetMiscInfoIN->sMiscInfo,
++ sizeof(PVRSRV_MISC_INFO));
++
++ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) &&
++ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0))
++ {
++
++ psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) ||
++ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0))
++ {
++
++ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0,
++ "Output string buffer"));
++
++ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++
++
++ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
++ psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = IMG_NULL;
++
++
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
++
++ if(eError != PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user"));
++ return -EFAULT;
++ }
++ }
++ else
++ {
++ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++ }
++
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
++ {
++ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++ psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle)
++ {
++
++ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
++ psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVConnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
++
++ psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
++ psConnectServicesOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES);
++
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
++ PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
++
++ psEnumDispClassOUT->eError =
++ PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
++ &psEnumDispClassOUT->ui32NumDevices,
++ &psEnumDispClassOUT->ui32DevID[0]);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
++ PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, 1);
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenDispClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVOpenDCDeviceKM(psPerProc,
++ psOpenDispClassDeviceIN->ui32DeviceID,
++ hDevCookieInt,
++ &hDispClassInfoInt);
++
++ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenDispClassDeviceOUT->hDeviceKM,
++ hDispClassInfoInt,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
++ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassFormatsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
++ &psEnumDispClassFormatsOUT->ui32Count,
++ psEnumDispClassFormatsOUT->asFormat);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
++ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassDimsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psEnumDispClassDimsOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVEnumDCDimsKM(pvDispClassInfoInt,
++ &psEnumDispClassDimsIN->sFormat,
++ &psEnumDispClassDimsOUT->ui32Count,
++ psEnumDispClassDimsOUT->asDim);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hBufferInt;
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, 1);
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psGetDispClassSysBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt,
++ &hBufferInt);
++
++ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetDispClassSysBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassSysBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVGetDCInfoKM(pvDispClassInfo,
++ &psGetDispClassInfoOUT->sDisplayInfo);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN,
++ PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_HANDLE hSwapChainInt;
++ IMG_UINT32 ui32SwapChainID;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc, 1);
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psCreateDispClassSwapChainIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID;
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo,
++ psCreateDispClassSwapChainIN->ui32Flags,
++ &psCreateDispClassSwapChainIN->sDstSurfAttrib,
++ &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
++ psCreateDispClassSwapChainIN->ui32BufferCount,
++ psCreateDispClassSwapChainIN->ui32OEMFlags,
++ &hSwapChainInt,
++ &ui32SwapChainID);
++
++ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psCreateDispClassSwapChainOUT->hSwapChain,
++ hSwapChainInt,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDispClassSwapChainIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVDestroyDCSwapChainKM(pvSwapChain);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassDstRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassDstRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstRectKM(pvDispClassInfo,
++ pvSwapChain,
++ &psSetDispClassDstRectIN->sRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassSrcRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassSrcRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcRectKM(pvDispClassInfo,
++ pvSwapChain,
++ &psSetDispClassSrcRectIN->sRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassBuffersIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psGetDispClassBuffersIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVGetDCBuffersKM(pvDispClassInfo,
++ pvSwapChain,
++ &psGetDispClassBuffersOUT->ui32BufferCount,
++ psGetDispClassBuffersOUT->ahBuffer);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++)
++ {
++ IMG_HANDLE hBufferExt;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &hBufferExt,
++ psGetDispClassBuffersOUT->ahBuffer[i],
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassBuffersIN->hSwapChain);
++
++ psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChainBuf;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChainBuf,
++ psSwapDispClassBufferIN->hBuffer,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ psSwapDispClassBufferIN->hDeviceKM);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSwapToDCBufferKM(pvDispClassInfo,
++ pvSwapChainBuf,
++ psSwapDispClassBufferIN->ui32SwapInterval,
++ psSwapDispClassBufferIN->hPrivateTag,
++ psSwapDispClassBufferIN->ui32ClipRectCount,
++ psSwapDispClassBufferIN->sClipRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassSystemIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSwapDispClassSystemIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ psSwapDispClassSystemIN->hDeviceKM);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ psRetOUT->eError =
++ PVRSRVSwapToDCSystemKM(pvDispClassInfo,
++ pvSwapChain);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
++ PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, 1);
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenBufferClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVOpenBCDeviceKM(psPerProc,
++ psOpenBufferClassDeviceIN->ui32DeviceID,
++ hDevCookieInt,
++ &hBufClassInfo);
++ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenBufferClassDeviceOUT->hDeviceKM,
++ hBufClassInfo,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psCloseBufferClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseBufferClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psGetBufferClassInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVGetBCInfoKM(pvBufClassInfo,
++ &psGetBufferClassInfoOUT->sBufferInfo);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
++ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++ IMG_HANDLE hBufferInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, 1);
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVGetBCBufferKM(pvBufClassInfo,
++ psGetBufferClassBufferIN->ui32BufferIndex,
++ &hBufferInt);
++
++ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetBufferClassBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetBufferClassBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1);
++
++ psAllocSharedSysMemOUT->eError =
++ PVRSRVAllocSharedSysMemoryKM(psPerProc,
++ psAllocSharedSysMemIN->ui32Flags,
++ psAllocSharedSysMemIN->ui32Size,
++ &psKernelMemInfo);
++ if(psAllocSharedSysMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
++ PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psKernelMemInfo,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++
++ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_HANDLE_TYPE eHandleType;
++ IMG_HANDLE hParent;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2);
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++ (IMG_VOID **)&psKernelMemInfo,
++ &eHandleType,
++ psMapMemInfoMemIN->hKernelMemInfo);
++ if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ switch (eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++ break;
++ default:
++ psMapMemInfoMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVGetParentHandle(psPerProc->psHandleBase,
++ &hParent,
++ psMapMemInfoMemIN->hKernelMemInfo,
++ eHandleType);
++ if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ if (hParent == IMG_NULL)
++ {
++ hParent = psMapMemInfoMemIN->hKernelMemInfo;
++ }
++
++ OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psMapMemInfoMemOUT->sClientMemInfo));
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
++ psKernelMemInfo->sDevVAddr;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ hParent);
++
++ if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++
++ OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo,
++ 0,
++ sizeof (PVRSRV_CLIENT_SYNC_INFO));
++ psMapMemInfoMemOUT->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++ psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
++ psKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo =
++ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++
++static IMG_INT
++MMU_GetPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrIN,
++ PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevMemContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR);
++
++ psGetMmuPDDevPAddrOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetMmuPDDevPAddrIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if(psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetMmuPDDevPAddrOUT->sPDDevPAddr =
++ BM_GetDeviceNode(hDevMemContextInt)->pfnMMUGetPDDevPAddr(BM_GetMMUContextFromMemContext(hDevMemContextInt));
++ if(psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr)
++ {
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK;
++ }
++ else
++ {
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_GENERIC;
++ }
++ return 0;
++}
++
++
++
++IMG_INT
++DummyBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if !defined(DEBUG)
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++#endif
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu (%s) mapped to "
++ "Dummy Wrapper (probably not what you want!)",
++ __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu mapped to "
++ "Dummy Wrapper (probably not what you want!)",
++ __FUNCTION__, ui32BridgeID));
++#endif
++ return -ENOTTY;
++}
++
++
++IMG_VOID
++_SetDispatchTableEntry(IMG_UINT32 ui32Index,
++ const IMG_CHAR *pszIOCName,
++ BridgeWrapperFunction pfFunction,
++ const IMG_CHAR *pszFunctionName)
++{
++ static IMG_UINT32 ui32PrevIndex = ~0UL;
++#if !defined(DEBUG)
++ PVR_UNREFERENCED_PARAMETER(pszIOCName);
++#endif
++#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
++ PVR_UNREFERENCED_PARAMETER(pszFunctionName);
++#endif
++
++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName));
++#endif
++
++
++ if(g_BridgeDispatchTable[ui32Index].pfFunction)
++ {
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
++ __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%lu)",
++ __FUNCTION__, pszIOCName, ui32Index));
++#endif
++ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++ __FUNCTION__));
++ }
++
++
++ if((ui32PrevIndex != ~0UL) &&
++ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
++ (ui32Index <= ui32PrevIndex)))
++ {
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: There is a gap in the dispatch table between indices %lu (%s) and %lu (%s)",
++ __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
++ ui32Index, pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
++ __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
++#endif
++ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++ __FUNCTION__));
++ }
++
++ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
++ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
++ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
++ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
++#endif
++
++ ui32PrevIndex = ui32Index;
++}
++
++static IMG_INT
++PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ if(!OSProcHasPrivSrvInit() || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++#if defined (__linux__)
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
++#endif
++ psPerProc->bInitProcess = IMG_TRUE;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psPerProc->bInitProcess = IMG_FALSE;
++
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
++
++ psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
++
++ PVRSRVSetInitServerState( PVRSRV_INIT_SERVER_SUCCESSFUL,
++ (((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful)))
++ ? IMG_TRUE : IMG_FALSE);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectWaitIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = OSEventObjectWait(hOSEventKM);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
++ PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1);
++
++ psEventObjectOpenOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectOpenIN->sEventObject.hOSEventKM,
++ psEventObjectOpenIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++
++ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEventObjectOpenOUT->eError = OSEventObjectOpen(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent);
++
++ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psEventObjectOpenOUT->hOSEvent,
++ psEventObjectOpenOUT->hOSEvent,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectCloseIN->sEventObject.hOSEventKM,
++ psEventObjectCloseIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectCloseIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = OSEventObjectClose(&psEventObjectCloseIN->sEventObject, hOSEventKM);
++
++ return 0;
++}
++
++
++typedef struct _MODIFY_SYNC_OP_INFO
++{
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++ IMG_UINT32 ui32ReadOpsPendingSnapShot;
++ IMG_UINT32 ui32WriteOpsPendingSnapShot;
++} MODIFY_SYNC_OP_INFO;
++
++
++static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ if (!pvParam)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psModSyncOpInfo = (MODIFY_SYNC_OP_INFO*)pvParam;
++ psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if((psModSyncOpInfo->ui32WriteOpsPendingSnapShot == psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)
++ && (psModSyncOpInfo->ui32ReadOpsPendingSnapShot == psKernelSyncInfo->psSyncData->ui32ReadOpsComplete))
++ {
++ goto OpFlushedComplete;
++ }
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: waiting for old Ops to flush"));
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: waiting for old Ops to flush timed out"));
++
++ return PVRSRV_ERROR_TIMEOUT;
++
++OpFlushedComplete:
++
++
++ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
++ }
++
++
++ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0);
++
++
++
++ PVRSRVCommandCompleteCallbacks();
++
++ return PVRSRV_OK;
++}
++
++
++static IMG_INT
++PVRSRVModifyPendingSyncOpsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsIN,
++ PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hKernelSyncInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hKernelSyncInfo,
++ psModifySyncOpsIN->hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
++ return 0;
++ }
++
++ psKernelSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hKernelSyncInfo;
++
++ if(psKernelSyncInfo->hResItem != IMG_NULL)
++ {
++
++ psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY;
++ return 0;
++ }
++
++ ASSIGN_AND_EXIT_ON_ERROR(psModifySyncOpsOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MODIFY_SYNC_OP_INFO),
++ (IMG_VOID **)&psModSyncOpInfo, 0,
++ "ModSyncOpInfo (MODIFY_SYNC_OP_INFO)"));
++
++
++ psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo;
++ psModSyncOpInfo->ui32ModifyFlags = psModifySyncOpsIN->ui32ModifyFlags;
++ psModSyncOpInfo->ui32ReadOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++ psModSyncOpInfo->ui32WriteOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++
++
++ psModifySyncOpsOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++ psModifySyncOpsOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++
++ psKernelSyncInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_MODIFY_SYNC_OPS,
++ psModSyncOpInfo,
++ 0,
++ ModifyCompleteSyncOpsCallBack);
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVModifyCompleteSyncOpsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *psModifySyncOpsIN,
++ PVRSRV_BRIDGE_RETURN *psModifySyncOpsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID**)&psKernelSyncInfo,
++ psModifySyncOpsIN->hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed"));
++ return 0;
++ }
++
++ if(psKernelSyncInfo->hResItem == IMG_NULL)
++ {
++
++ psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++
++
++
++
++
++
++
++
++
++ eError = ResManFreeResByPtr(psKernelSyncInfo->hResItem);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: ResManFreeResByPtr failed"));
++ return 0;
++ }
++
++ psKernelSyncInfo->hResItem = IMG_NULL;
++
++ return 0;
++}
++
++
++PVRSRV_ERROR
++CommonBridgeInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRVGetDeviceMemHeapInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRMMapOSMemHandleToMMapDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRVMapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRVUnmapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRVMapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRVExportDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRMMapReleaseMMapDataBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW);
++
++
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
++#endif
++
++
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_PDREG, PDumpPDRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PDumpStartInitPhaseBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PDumpStopInitPhaseBW);
++#endif
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR, MMU_GetPDDevPAddrBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, PVRSRVInitSrvConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, PVRSRVInitSrvDisconnectBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, PVRSRVEventObjectWaitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, PVRSRVEventObjectOpenBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, PVRSRVEventObjectCloseBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRVModifyPendingSyncOpsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRVModifyCompleteSyncOpsBW);
++
++#if defined (SUPPORT_SGX)
++ SetSGXDispatchTableEntry();
++#endif
++#if defined (SUPPORT_VGX)
++ SetVGXDispatchTableEntry();
++#endif
++#if defined (SUPPORT_MSVDX)
++ SetMSVDXDispatchTableEntry();
++#endif
++
++
++
++
++ for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++)
++ {
++ if(!g_BridgeDispatchTable[i].pfFunction)
++ {
++ g_BridgeDispatchTable[i].pfFunction = DummyBW;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY";
++ g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
++ g_BridgeDispatchTable[i].ui32CallCount = 0;
++ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
++ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
++#endif
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
++{
++
++ IMG_VOID * psBridgeIn;
++ IMG_VOID * psBridgeOut;
++ BridgeWrapperFunction pfBridgeHandler;
++ IMG_UINT32 ui32BridgeID = psBridgePackageKM->ui32BridgeID;
++ IMG_INT err = -EFAULT;
++
++#if defined(DEBUG_TRACE_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR, "%s: %s",
++ __FUNCTION__,
++ g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
++ g_BridgeGlobalStats.ui32IOCTLCount++;
++#endif
++
++ if(!psPerProc->bInitProcess)
++ {
++ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
++ {
++ if(!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Driver unusable.",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ }
++ else
++ {
++ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ else
++ {
++
++ switch(ui32BridgeID)
++ {
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ }
++ }
++ }
++
++
++
++#if defined(__linux__)
++ {
++
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
++ psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE);
++
++ if(psBridgePackageKM->ui32InBufferSize > 0)
++ {
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->ui32InBufferSize))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__));
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psBridgeIn,
++ psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->ui32InBufferSize)
++ != PVRSRV_OK)
++ {
++ goto return_fault;
++ }
++ }
++ }
++#else
++ psBridgeIn = psBridgePackageKM->pvParamIn;
++ psBridgeOut = psBridgePackageKM->pvParamOut;
++#endif
++
++ if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out if range!",
++ __FUNCTION__, ui32BridgeID));
++ goto return_fault;
++ }
++ pfBridgeHandler =
++ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction;
++ err = pfBridgeHandler(ui32BridgeID,
++ psBridgeIn,
++ psBridgeOut,
++ psPerProc);
++ if(err < 0)
++ {
++ goto return_fault;
++ }
++
++
++#if defined(__linux__)
++
++ if(CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psBridgePackageKM->pvParamOut,
++ psBridgeOut,
++ psBridgePackageKM->ui32OutBufferSize)
++ != PVRSRV_OK)
++ {
++ goto return_fault;
++ }
++#endif
++
++ err = 0;
++return_fault:
++ ReleaseHandleBatch(psPerProc);
++ return err;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+new file mode 100644
+index 0000000..95a6377
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+@@ -0,0 +1,231 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_PVR_BRIDGE_H__
++#define __BRIDGED_PVR_BRIDGE_H__
++
++#include "pvr_bridge.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__)
++#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X)
++#else
++#define PVRSRV_GET_BRIDGE_ID(X) (X - PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST))
++#endif
++
++#ifndef ENOMEM
++#define ENOMEM 12
++#endif
++#ifndef EFAULT
++#define EFAULT 14
++#endif
++#ifndef ENOTTY
++#define ENOTTY 25
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size);
++PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size);
++#else
++#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size)
++#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size)
++#endif
++
++
++#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \
++ do \
++ { \
++ (error) = (src); \
++ if ((error) != PVRSRV_OK) \
++ { \
++ return (res); \
++ } \
++ } while (error != PVRSRV_OK)
++
++#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \
++ ASSIGN_AND_RETURN_ON_ERROR(error, src, 0)
++
++#if defined (PVR_SECURE_HANDLES)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NewHandleBatch)
++#endif
++static INLINE PVRSRV_ERROR
++NewHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32BatchSize)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(!psPerProc->bHandlesBatched);
++
++ eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize);
++
++ if (eError == PVRSRV_OK)
++ {
++ psPerProc->bHandlesBatched = IMG_TRUE;
++ }
++
++ return eError;
++}
++
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, ui32BatchSize))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(CommitHandleBatch)
++#endif
++static INLINE PVRSRV_ERROR
++CommitHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_ASSERT(psPerProc->bHandlesBatched);
++
++ psPerProc->bHandlesBatched = IMG_FALSE;
++
++ return PVRSRVCommitHandleBatch(psPerProc->psHandleBase);
++}
++
++
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ReleaseHandleBatch)
++#endif
++static INLINE IMG_VOID
++ReleaseHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->bHandlesBatched)
++ {
++ psPerProc->bHandlesBatched = IMG_FALSE;
++
++ PVRSRVReleaseHandleBatch(psPerProc->psHandleBase);
++ }
++}
++#else
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize)
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc)
++#define ReleaseHandleBatch(psPerProc)
++#endif
++
++IMG_INT
++DummyBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++{
++ BridgeWrapperFunction pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++ const IMG_CHAR *pszIOCName;
++ const IMG_CHAR *pszFunctionName;
++ IMG_UINT32 ui32CallCount;
++ IMG_UINT32 ui32CopyFromUserTotalBytes;
++ IMG_UINT32 ui32CopyToUserTotalBytes;
++#endif
++}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
++
++#if defined(SUPPORT_VGX) || defined(SUPPORT_MSVDX)
++ #if defined(SUPPORT_VGX)
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_VGX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_VGX_CMD
++ #else
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_MSVDX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_MSVDX_CMD
++ #endif
++#else
++ #if defined(SUPPORT_SGX)
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_SGX_CMD
++ #else
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD
++ #endif
++#endif
++
++extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++IMG_VOID
++_SetDispatchTableEntry(IMG_UINT32 ui32Index,
++ const IMG_CHAR *pszIOCName,
++ BridgeWrapperFunction pfFunction,
++ const IMG_CHAR *pszFunctionName);
++
++
++#define SetDispatchTableEntry(ui32Index, pfFunction) \
++ _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction)
++
++#define DISPATCH_TABLE_GAP_THRESHOLD 5
++
++#if defined(DEBUG)
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y))
++#else
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X)
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
++{
++ IMG_UINT32 ui32IOCTLCount;
++ IMG_UINT32 ui32TotalCopyFromUserBytes;
++ IMG_UINT32 ui32TotalCopyToUserBytes;
++}PVRSRV_BRIDGE_GLOBAL_STATS;
++
++extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++
++PVRSRV_ERROR CommonBridgeInit(IMG_VOID);
++
++IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.c
+new file mode 100644
+index 0000000..adc9610
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.c
+@@ -0,0 +1,85 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "servicesint.h"
++#include "bridged_support.h"
++
++
++PVRSRV_ERROR
++PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psHandleBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle)
++{
++ IMG_HANDLE hMHandleInt;
++ PVRSRV_HANDLE_TYPE eHandleType;
++ PVRSRV_ERROR eError;
++
++
++ eError = PVRSRVLookupHandleAnyType(psHandleBase, &hMHandleInt,
++ &eHandleType,
++ hMHandle);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ switch(eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++ {
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hMHandleInt;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SYNC_INFO:
++ {
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hMHandleInt;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = psSyncInfo->psSyncDataMemInfoKM;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SOC_TIMER:
++ {
++ *phOSMemHandle = (IMG_VOID *)hMHandleInt;
++ break;
++ }
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++ *phOSMemHandle = (IMG_VOID *)hMHandleInt;
++ break;
++#endif
++ default:
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.h
+new file mode 100644
+index 0000000..9785d37
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SUPPORT_H__
++#define __BRIDGED_SUPPORT_H__
++
++#include "handle.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c
+new file mode 100644
+index 0000000..be7e23d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c
+@@ -0,0 +1,2511 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++#include "img_defs.h"
++
++#if defined(SUPPORT_SGX)
++
++#include "services.h"
++#include "pvr_debug.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge.h"
++#include "perproc.h"
++#include "power.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++
++#if defined(SUPPORT_MSVDX)
++ #include "msvdx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++#include "bridged_sgx_bridge.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++static IMG_INT
++SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
++ PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO);
++
++ psGetClientInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psGetClientInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetClientInfoOUT->eError =
++ SGXGetClientInfoKM(hDevCookieInt,
++ &psGetClientInfoOUT->sClientInfo);
++ return 0;
++}
++
++static IMG_INT
++SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psReleaseClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0);
++
++ psDevInfo->ui32ClientRefCount--;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
++ PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO);
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXGetInternalDevInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXGetInternalDevInfoOUT->eError =
++ SGXGetInternalDevInfoKM(hDevCookieInt,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo);
++
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
++ psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXDoKickBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 i;
++ IMG_INT ret = 0;
++ IMG_UINT32 ui32NumDstSyncs;
++ IMG_HANDLE *phKernelSyncInfoHandles = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psDoKickIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if(psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTASyncInfo,
++ psDoKickIN->sCCBKick.hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if(psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.h3DSyncInfo,
++ psDoKickIN->sCCBKick.h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ if (psDoKickIN->sCCBKick.ui32NumTASrcSyncs > SGX_MAX_TA_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTASrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32NumTADstSyncs > SGX_MAX_TA_DST_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTADstSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DSrcSyncs > SGX_MAX_3D_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32Num3DSrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#else
++
++ if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++
++ if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
++ {
++ psRetOUT->eError =
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
++ psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++#else
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++#endif
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
++ {
++ psRetOUT->eError =
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
++ psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++#else
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++#endif
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ ui32NumDstSyncs = psDoKickIN->sCCBKick.ui32NumDstSyncObjects;
++
++ if(ui32NumDstSyncs > 0)
++ {
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psDoKickIN->sCCBKick.pahDstSyncHandles,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: SGXDoKickBW:"
++ " Invalid pasDstSyncHandles pointer", __FUNCTION__));
++ return -EFAULT;
++ }
++
++ psRetOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE),
++ (IMG_VOID **)&phKernelSyncInfoHandles,
++ 0,
++ "Array of Synchronization Info Handles");
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelSyncInfoHandles,
++ psDoKickIN->sCCBKick.pahDstSyncHandles,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE)) != PVRSRV_OK)
++ {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++
++
++ psDoKickIN->sCCBKick.pahDstSyncHandles = phKernelSyncInfoHandles;
++
++ for( i = 0; i < ui32NumDstSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.pahDstSyncHandles[i],
++ psDoKickIN->sCCBKick.pahDstSyncHandles[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
++ psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++ }
++
++ psRetOUT->eError =
++ SGXDoKickKM(hDevCookieInt,
++ &psDoKickIN->sCCBKick);
++
++PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT:
++
++ if(phKernelSyncInfoHandles)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE),
++ (IMG_VOID *)phKernelSyncInfoHandles,
++ 0);
++
++ }
++
++ return ret;
++}
++
++
++static IMG_INT
++SGXScheduleProcessQueuesBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psScheduleProcQIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXScheduleProcessQueuesKM(hDevCookieInt);
++
++ return 0;
++}
++
++
++#if defined(TRANSFER_QUEUE)
++static IMG_INT
++SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_TRANSFER_SGX_KICK *psKick;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psKick = &psSubmitTransferIN->sKick;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmitTransferIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumDstSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahDstSyncInfo[i],
++ psKick->ahDstSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static IMG_INT
++SGXSubmit2DBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_2D_SGX_KICK *psKick;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmit2DIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psKick = &psSubmit2DIN->sKick;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hDstSyncInfo,
++ psKick->hDstSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ psRetOUT->eError =
++ SGXSubmit2DKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++#endif
++#endif
++
++
++static IMG_INT
++SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt = 0;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ SGX_MISC_INFO sMiscInfo;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXGetMiscInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++
++ if (psSGXGetMiscInfoIN->psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMREAD)
++ {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevMemContextInt,
++ psSGXGetMiscInfoIN->psMiscInfo->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++
++ psDeviceNode = hDevCookieInt;
++ PVR_ASSERT(psDeviceNode != IMG_NULL);
++ if (psDeviceNode == IMG_NULL)
++ {
++ return -EFAULT;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++
++ psRetOUT->eError = CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ &sMiscInfo,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ sizeof(SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++
++#ifdef SUPPORT_SGX_HWPERF
++ if (sMiscInfo.eRequest == SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB)
++ {
++
++ IMG_VOID * pAllocated;
++ IMG_HANDLE hAllocatedHandle;
++ IMG_VOID * psTmpUserData;
++ IMG_UINT32 allocatedSize;
++
++ allocatedSize = (IMG_UINT32)(sMiscInfo.uData.sRetrieveCB.ui32ArraySize * sizeof(PVRSRV_SGX_HWPERF_CBDATA));
++
++ ASSIGN_AND_EXIT_ON_ERROR(psRetOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ &pAllocated,
++ &hAllocatedHandle,
++ "Array of Hardware Performance Circular Buffer Data"));
++
++
++ psTmpUserData = sMiscInfo.uData.sRetrieveCB.psHWPerfData;
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = pAllocated;
++
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, 0);
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ pAllocated,
++ hAllocatedHandle);
++
++ return 0;
++ }
++
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psTmpUserData,
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData,
++ allocatedSize);
++
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = psTmpUserData;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ pAllocated,
++ hAllocatedHandle);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++ }
++ else
++#endif
++ {
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, hDevMemContextInt);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ &sMiscInfo,
++ sizeof(SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++ return 0;
++}
++
++
++#if defined(SUPPORT_SGX_HWPERF)
++static IMG_INT
++SGXReadDiffCountersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersIN,
++ PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS);
++
++ psSGXReadDiffCountersOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXReadDiffCountersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXReadDiffCountersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXReadDiffCountersOUT->eError = SGXReadDiffCountersKM(hDevCookieInt,
++ psSGXReadDiffCountersIN->ui32Reg,
++ &psSGXReadDiffCountersOUT->ui32Old,
++ psSGXReadDiffCountersIN->bNew,
++ psSGXReadDiffCountersIN->ui32New,
++ psSGXReadDiffCountersIN->ui32NewReset,
++ psSGXReadDiffCountersIN->ui32CountersReg,
++ psSGXReadDiffCountersIN->ui32Reg2,
++ &psSGXReadDiffCountersOUT->bActive,
++ &psSGXReadDiffCountersOUT->sDiffs);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXReadHWPerfCBBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN,
++ PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psAllocated;
++ IMG_HANDLE hAllocatedHandle;
++ IMG_UINT32 ui32AllocatedSize;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_CB);
++
++ psSGXReadHWPerfCBOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXReadHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXReadHWPerfCBOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ ui32AllocatedSize = psSGXReadHWPerfCBIN->ui32ArraySize *
++ sizeof(psSGXReadHWPerfCBIN->psHWPerfCBData[0]);
++ ASSIGN_AND_EXIT_ON_ERROR(psSGXReadHWPerfCBOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ (IMG_VOID **)&psAllocated,
++ &hAllocatedHandle,
++ "Array of Hardware Performance Circular Buffer Data"));
++
++ psSGXReadHWPerfCBOUT->eError = SGXReadHWPerfCBKM(hDevCookieInt,
++ psSGXReadHWPerfCBIN->ui32ArraySize,
++ psAllocated,
++ &psSGXReadHWPerfCBOUT->ui32DataCount,
++ &psSGXReadHWPerfCBOUT->ui32ClockSpeed,
++ &psSGXReadHWPerfCBOUT->ui32HostTimeStamp);
++ if (psSGXReadHWPerfCBOUT->eError == PVRSRV_OK)
++ {
++ psSGXReadHWPerfCBOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXReadHWPerfCBIN->psHWPerfCBData,
++ psAllocated,
++ ui32AllocatedSize);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ psAllocated,
++ hAllocatedHandle);
++
++
++ return 0;
++}
++#endif
++
++
++static IMG_INT
++SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_ERROR eError;
++ IMG_BOOL bDissociateFailed = IMG_FALSE;
++ IMG_BOOL bLookupFailed = IMG_FALSE;
++ IMG_BOOL bReleaseFailed = IMG_FALSE;
++ IMG_HANDLE hDummy;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXDevInitPart2IN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ {
++ continue;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ hHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++ if (bLookupFailed)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle lookup failed"));
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ #if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (*phHandle == IMG_NULL)
++ continue;
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ phHandle,
++ *phHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++ if (bReleaseFailed)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle release failed"));
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ continue;
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++
++ if(bDissociateFailed)
++ {
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ continue;
++
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle);
++
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A dissociate failed"));
++
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++ psRetOUT->eError =
++ DevInitSGXPart2KM(psPerProc,
++ hDevCookieInt,
++ &psSGXDevInitPart2IN->sInitInfo);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHWRenderContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHWRenderContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHWRenderContextInt =
++ SGXRegisterHWRenderContextKM(hDevCookieInt,
++ &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr,
++ psPerProc);
++
++ if (hHWRenderContextInt == IMG_NULL)
++ {
++ psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWRenderContextOUT->hHWRenderContext,
++ hHWRenderContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWRenderContextInt,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHWTransferContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHWTransferContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHWTransferContextInt =
++ SGXRegisterHWTransferContextKM(hDevCookieInt,
++ &psSGXRegHWTransferContextIN->sHWTransferContextDevVAddr,
++ psPerProc);
++
++ if (hHWTransferContextInt == IMG_NULL)
++ {
++ psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWTransferContextOUT->hHWTransferContext,
++ hHWTransferContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWTransferContextInt,
++ psSGXUnregHWTransferContextIN->hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHWTransferContextKM(hHWTransferContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWTransferContextIN->hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++
++ return 0;
++}
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static IMG_INT
++SGXRegisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHW2DContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHW2DContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHW2DContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHW2DContextInt =
++ SGXRegisterHW2DContextKM(hDevCookieInt,
++ &psSGXRegHW2DContextIN->sHW2DContextDevVAddr,
++ psPerProc);
++
++ if (hHW2DContextInt == IMG_NULL)
++ {
++ psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHW2DContextOUT->hHW2DContext,
++ hHW2DContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHW2DContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHW2DContextInt,
++ psSGXUnregHW2DContextIN->hHW2DContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHW2DContextIN->hHW2DContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
++
++ return 0;
++}
++#endif
++
++static IMG_INT
++SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFlushHWRenderTargetIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_VOID *pvSyncInfo;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ ps2DQueryBltsCompleteIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ ps2DQueryBltsCompleteIN->hKernSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ psRetOUT->eError =
++ SGX2DQueryBlitsCompleteKM(psDevInfo,
++ (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
++ ps2DQueryBltsCompleteIN->bWaitForComplete);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL;
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0;
++ IMG_UINT32 i;
++ IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc, PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS + 4);
++
++ psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++ psSGXFindSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFindSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ psSGXFindSharedPBDescOUT->eError =
++ SGXFindSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSGXFindSharedPBDescIN->bLockOnFailure,
++ psSGXFindSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ &psSharedPBDescKernelMemInfo,
++ &psHWPBDescKernelMemInfo,
++ &psBlockKernelMemInfo,
++ &psHWBlockKernelMemInfo,
++ &ppsSharedPBDescSubKernelMemInfos,
++ &ui32SharedPBDescSubKernelMemInfosCount);
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount
++ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++
++ if(hSharedPBDesc == IMG_NULL)
++ {
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
++
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle,
++ psSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle,
++ psHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle,
++ psBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hHWBlockKernelMemInfoHandle,
++ psHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++
++ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut =
++ psSGXFindSharedPBDescOUT;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i],
++ ppsSharedPBDescSubKernelMemInfos[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle);
++ }
++
++PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT:
++ if (ppsSharedPBDescSubKernelMemInfos != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos,
++ IMG_NULL);
++ }
++
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ if(hSharedPBDesc != IMG_NULL)
++ {
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++ }
++ }
++ else
++ {
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc);
++ }
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hSharedPBDesc;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hSharedPBDesc,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++
++ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ IMG_UINT32 ui32KernelMemInfoHandlesCount =
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
++ IMG_INT ret = 0;
++ IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL;
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL;
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc, 1);
++
++ psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++ PVR_ASSERT(ui32KernelMemInfoHandlesCount
++ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXAddSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psSharedPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psHWPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psHWBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:"
++ " Invalid phKernelMemInfos pointer", __FUNCTION__));
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
++ (IMG_VOID **)&phKernelMemInfoHandles,
++ 0,
++ "Array of Handles");
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelMemInfoHandles,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))
++ != PVRSRV_OK)
++ {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++ (IMG_VOID **)&ppsKernelMemInfos,
++ 0,
++ "Array of pointers to Kernel Memory Info");
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++ {
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&ppsKernelMemInfos[i],
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++ }
++
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++ {
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++
++ eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSharedPBDescKernelMemInfo,
++ psHWPBDescKernelMemInfo,
++ psBlockKernelMemInfo,
++ psHWBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ ppsKernelMemInfos,
++ ui32KernelMemInfoHandlesCount);
++
++
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXAddSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT:
++
++ if(phKernelMemInfoHandles)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
++ (IMG_VOID *)phKernelMemInfoHandles,
++ 0);
++ }
++ if(ppsKernelMemInfos)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++ (IMG_VOID *)ppsKernelMemInfos,
++ 0);
++ }
++
++ if(ret == 0 && eError == PVRSRV_OK)
++ {
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc);
++ }
++
++ psSGXAddSharedPBDescOUT->eError = eError;
++
++ return ret;
++}
++
++static IMG_INT
++SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
++ PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 i;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXInfoForSrvinitIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ SGXGetInfoForSrvinitKM(hDevCookieInt,
++ &psSGXInfoForSrvinitOUT->sInitInfo);
++
++ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ PVRSRV_HEAP_INFO *psHeapInfo;
++
++ psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i];
++
++ if (psHeapInfo->ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++ if (psHeapInfo->hDevMemHeap != IMG_NULL)
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psHeapInfo->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ psHeapInfo->hDevMemHeap = hDevMemHeapExt;
++ }
++ }
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#if defined(PDUMP)
++static IMG_VOID
++DumpBufferArray(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PSGX_KICKTA_DUMP_BUFFER psBufferArray,
++ IMG_UINT32 ui32BufferArrayLength,
++ IMG_BOOL bDumpPolls)
++{
++ IMG_UINT32 i;
++
++ for (i=0; i<ui32BufferArrayLength; i++)
++ {
++ PSGX_KICKTA_DUMP_BUFFER psBuffer;
++ PVRSRV_KERNEL_MEM_INFO *psCtrlMemInfoKM;
++ IMG_CHAR * pszName;
++ IMG_HANDLE hUniqueTag;
++ IMG_UINT32 ui32Offset;
++
++ psBuffer = &psBufferArray[i];
++ pszName = psBuffer->pszName;
++ if (!pszName)
++ {
++ pszName = "Nameless buffer";
++ }
++
++ hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo);
++
++ #if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hCtrlKernelMemInfo);
++ ui32Offset = psBuffer->sCtrlDevVAddr.uiAddr - psCtrlMemInfoKM->sDevVAddr.uiAddr;
++ #else
++ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo->psSyncDataMemInfoKM;
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++ #endif
++
++ if (psBuffer->ui32Start <= psBuffer->ui32End)
++ {
++ if (bDumpPolls)
++ {
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32SpaceUsed,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++
++ PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32End - psBuffer->ui32Start,
++ 0,
++ hUniqueTag);
++ }
++ else
++ {
++
++
++ if (bDumpPolls)
++ {
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ 0,
++ hUniqueTag);
++
++ if (bDumpPolls)
++ {
++ PDUMPMEMPOL(psCtrlMemInfoKM,
++ ui32Offset,
++ 0,
++ 0xFFFFFFFF,
++ PDUMP_POLL_OPERATOR_NOTEQUAL,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ 0,
++ psBuffer->ui32End,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ 0,
++ psBuffer->ui32End,
++ 0,
++ hUniqueTag);
++ }
++ }
++}
++static IMG_INT
++SGXPDumpBufferArrayBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 i;
++ SGX_KICKTA_DUMP_BUFFER *psKickTADumpBuffer;
++ IMG_UINT32 ui32BufferArrayLength =
++ psPDumpBufferArrayIN->ui32BufferArrayLength;
++ IMG_UINT32 ui32BufferArraySize =
++ ui32BufferArrayLength * sizeof(SGX_KICKTA_DUMP_BUFFER);
++ PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY);
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32BufferArraySize,
++ (IMG_PVOID *)&psKickTADumpBuffer, 0,
++ "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK)
++ {
++ return -ENOMEM;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psKickTADumpBuffer,
++ psPDumpBufferArrayIN->psBufferArray,
++ ui32BufferArraySize) != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++ return -EFAULT;
++ }
++
++ for(i = 0; i < ui32BufferArrayLength; i++)
++ {
++ IMG_VOID *pvMemInfo;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError));
++ break;
++ }
++ psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo;
++
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].hCtrlKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError));
++ break;
++ }
++ psKickTADumpBuffer[i].hCtrlKernelMemInfo = pvMemInfo;
++#endif
++ }
++
++ if(eError == PVRSRV_OK)
++ {
++ DumpBufferArray(psPerProc,
++ psKickTADumpBuffer,
++ ui32BufferArrayLength,
++ psPDumpBufferArrayIN->bDumpPolls);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++
++ return 0;
++}
++
++static IMG_INT
++SGXPDump3DSignatureRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS *psPDump3DSignatureRegistersIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDump3DSignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL;
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 ui32RegVal = 0;
++#endif
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDump3DSignatureRegistersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
++ goto Exit;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
++ psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++#endif
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: OSAllocMem failed"));
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDump3DSignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDump3DSignatureRegisters(psPDump3DSignatureRegistersIN->ui32DumpFrameNum,
++ psPDump3DSignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDump3DSignatureRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ psRetOUT->eError = PVRSRV_OK;
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ if (psDevInfo != IMG_NULL)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, ui32RegVal,
++ psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++ }
++#endif
++
++ return ret;
++}
++
++static IMG_INT
++SGXPDumpCounterRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS *psPDumpCounterRegistersIN,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: OSAllocMem failed"));
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDumpCounterRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDumpCounterRegisters(psPDumpCounterRegistersIN->ui32DumpFrameNum,
++ psPDumpCounterRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpCounterRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++ return ret;
++}
++
++static IMG_INT
++SGXPDumpTASignatureRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS *psPDumpTASignatureRegistersIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDumpTASignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL;
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 ui32RegVal = 0;
++#endif
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpTASignatureRegistersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
++ goto Exit;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
++ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++#endif
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: OSAllocMem failed"));
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDumpTASignatureRegisters(psPDumpTASignatureRegistersIN->ui32DumpFrameNum,
++ psPDumpTASignatureRegistersIN->ui32TAKickCount,
++ psPDumpTASignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ psRetOUT->eError = PVRSRV_OK;
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ if (psDevInfo != IMG_NULL)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, ui32RegVal,
++ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++ }
++#endif
++
++ return ret;
++}
++static IMG_INT
++SGXPDumpHWPerfCBBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if defined(SUPPORT_SGX_HWPERF)
++#if defined(__linux__)
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PDumpHWPerfCBKM(&psPDumpHWPerfCBIN->szFileName[0],
++ psPDumpHWPerfCBIN->ui32FileOffset,
++ psDevInfo->psKernelHWPerfCBMemInfo->sDevVAddr,
++ psDevInfo->psKernelHWPerfCBMemInfo->ui32AllocSize,
++ psPDumpHWPerfCBIN->ui32PDumpFlags);
++
++ return 0;
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++ PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN);
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ return 0;
++#endif
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++ PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN);
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ return -EFAULT;
++#endif
++}
++
++#endif
++
++
++IMG_VOID SetSGXDispatchTableEntry(IMG_VOID)
++{
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW);
++
++#if defined(TRANSFER_QUEUE)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW);
++#endif
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , SGXGetInfoForSrvinitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW);
++#endif
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES, SGXScheduleProcessQueuesBW);
++
++#if defined(SUPPORT_SGX_HWPERF)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS, SGXReadDiffCountersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB, SGXReadHWPerfCBBW);
++#endif
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY, SGXPDumpBufferArrayBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS, SGXPDump3DSignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS, SGXPDumpCounterRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS, SGXPDumpTASignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB, SGXPDumpHWPerfCBBW);
++#endif
++}
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h
+new file mode 100644
+index 0000000..23f3600
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h
+@@ -0,0 +1,42 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SGX_BRIDGE_H__
++#define __BRIDGED_SGX_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++IMG_VOID SetSGXDispatchTableEntry(IMG_VOID);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/buffer_manager.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/buffer_manager.c
+new file mode 100644
+index 0000000..946fe79
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/buffer_manager.c
+@@ -0,0 +1,2036 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "sysconfig.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++
++#define MIN(a,b) ((a) > (b) ? (b) : (a))
++
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA(BM_HEAP);
++DECLARE_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
++DECLARE_LIST_INSERT(BM_HEAP);
++DECLARE_LIST_REMOVE(BM_HEAP);
++
++DECLARE_LIST_FOR_EACH(BM_CONTEXT);
++DECLARE_LIST_ANY_VA(BM_CONTEXT);
++DECLARE_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL);
++DECLARE_LIST_INSERT(BM_CONTEXT);
++DECLARE_LIST_REMOVE(BM_CONTEXT);
++
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags);
++static IMG_VOID
++BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
++static IMG_BOOL
++BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags, IMG_UINTPTR_T *pBase);
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++ BM_MAPPING *pMapping,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 dev_vaddr_alignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++static IMG_VOID
++DevMemoryFree (BM_MAPPING *pMapping);
++
++static IMG_BOOL
++AllocMemory (BM_CONTEXT *pBMContext,
++ BM_HEAP *psBMHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_BUF *pBuf)
++{
++ BM_MAPPING *pMapping;
++ IMG_UINTPTR_T uOffset;
++ RA_ARENA *pArena = IMG_NULL;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory (pBMContext=%08X, uSize=0x%x, uFlags=0x%x, align=0x%x, pBuf=%08X)",
++ pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf));
++
++
++
++
++ if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++ if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
++ return IMG_FALSE;
++ }
++
++
++
++
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++
++ pArena = psBMHeap->pImportArena;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap"));
++ return IMG_FALSE;
++ }
++
++
++ if (!RA_Alloc(pArena,
++ uSize,
++ IMG_NULL,
++ (IMG_VOID*) &pMapping,
++ uFlags,
++ uDevVAddrAlignment,
++ 0,
++ (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
++ return IMG_FALSE;
++ }
++
++ uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
++ if(pMapping->CpuVAddr)
++ {
++ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset);
++ }
++ else
++ {
++ pBuf->CpuVAddr = IMG_NULL;
++ }
++
++ if(uSize == pMapping->uSize)
++ {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ }
++ else
++ {
++ if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++ uOffset,
++ uSize,
++ psBMHeap->ui32Attribs,
++ &pBuf->hOSMemHandle)!=PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED"));
++ return IMG_FALSE;
++ }
++ }
++
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
++
++ if(uFlags & PVRSRV_MEM_ZERO)
++ {
++ if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
++ {
++ return IMG_FALSE;
++ }
++ }
++ }
++ else
++ {
++ if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++
++ PVR_ASSERT(psDevVAddr != IMG_NULL);
++
++ if (psDevVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: invalid parameter - psDevVAddr"));
++ return IMG_FALSE;
++ }
++
++
++ pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++ uSize,
++ IMG_NULL,
++ PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
++ uDevVAddrAlignment,
++ psDevVAddr);
++
++
++ pBuf->DevVAddr = *psDevVAddr;
++ }
++ else
++ {
++
++
++
++ pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++ uSize,
++ IMG_NULL,
++ 0,
++ uDevVAddrAlignment,
++ &pBuf->DevVAddr);
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (struct _BM_MAPPING_),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Buffer Manager Mapping") != PVRSRV_OK)
++ {
++			PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED", sizeof(struct _BM_MAPPING_)));
++ return IMG_FALSE;
++ }
++
++
++ pBuf->CpuVAddr = IMG_NULL;
++ pBuf->hOSMemHandle = 0;
++ pBuf->CpuPAddr.uiAddr = 0;
++
++
++ pMapping->CpuVAddr = IMG_NULL;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->DevVAddr = pBuf->DevVAddr;
++ pMapping->psSysAddr = IMG_NULL;
++ pMapping->uSize = uSize;
++ pMapping->hOSMemHandle = 0;
++ }
++
++
++ pMapping->pArena = pArena;
++
++
++ pMapping->pBMHeap = psBMHeap;
++ pBuf->pMapping = pMapping;
++
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping,
++ pMapping->DevVAddr.uiAddr,
++ pMapping->CpuVAddr,
++ pMapping->CpuPAddr.uiAddr,
++ pMapping->uSize));
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf,
++ pBuf->DevVAddr.uiAddr,
++ pBuf->CpuVAddr,
++ pBuf->CpuPAddr.uiAddr,
++ uSize));
++
++
++ PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
++
++ return IMG_TRUE;
++}
++
++
++static IMG_BOOL
++WrapMemory (BM_HEAP *psBMHeap,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T ui32BaseOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 uFlags,
++ BM_BUF *pBuf)
++{
++ IMG_DEV_VIRTADDR DevVAddr = {0};
++ BM_MAPPING *pMapping;
++ IMG_BOOL bResult;
++ IMG_SIZE_T const ui32PageSize = HOST_PAGESIZE();
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
++ psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr, uFlags, pBuf));
++
++ PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
++
++ PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0);
++
++ uSize += ui32BaseOffset;
++ uSize = HOST_PAGEALIGN (uSize);
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*pMapping),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Mocked-up mapping") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping)));
++ return IMG_FALSE;
++ }
++
++ OSMemSet(pMapping, 0, sizeof (*pMapping));
++
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = psBMHeap;
++
++ if(pvCPUVAddr)
++ {
++ pMapping->CpuVAddr = pvCPUVAddr;
++
++ if (bPhysContig)
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if(OSRegisterMem(pMapping->CpuPAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem Phys=0x%08X, CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->CpuVAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ else
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter_virtaddr;
++ pMapping->psSysAddr = psAddr;
++
++ if(OSRegisterDiscontigMem(pMapping->psSysAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuVAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ }
++ else
++ {
++ if (bPhysContig)
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if(OSReservePhys(pMapping->CpuPAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys Phys=0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ else
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
++ pMapping->psSysAddr = psAddr;
++
++ if(OSReserveDiscontigPhys(pMapping->psSysAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys Size=%d) failed",
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ }
++
++
++ bResult = DevMemoryAlloc(psBMHeap->pBMContext,
++ pMapping,
++ IMG_NULL,
++ uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
++ IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize),
++ &DevVAddr);
++ if (!bResult)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
++ if(!ui32BaseOffset)
++ {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ }
++ else
++ {
++ if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++ ui32BaseOffset,
++ (pMapping->uSize-ui32BaseOffset),
++ uFlags,
++ &pBuf->hOSMemHandle)!=PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
++ goto fail_cleanup;
++ }
++ }
++ if(pMapping->CpuVAddr)
++ {
++ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
++ }
++ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset);
++
++ if(uFlags & PVRSRV_MEM_ZERO)
++ {
++ if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
++ {
++ return IMG_FALSE;
++ }
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping, pMapping->DevVAddr.uiAddr,
++ pMapping->CpuVAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf, pBuf->DevVAddr.uiAddr,
++ pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr, uSize));
++
++ pBuf->pMapping = pMapping;
++ return IMG_TRUE;
++
++fail_cleanup:
++ if(ui32BaseOffset && pBuf->hOSMemHandle)
++ {
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
++ }
++
++ if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++ {
++ switch(pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++
++
++ return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags)
++{
++ IMG_VOID *pvCpuVAddr;
++
++ if(pBuf->CpuVAddr)
++ {
++ OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
++ }
++ else if(pMapping->eCpuMemoryOrigin == hm_contiguous
++ || pMapping->eCpuMemoryOrigin == hm_wrapped)
++ {
++ pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ if(!pvCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pvCpuVAddr, 0, ui32Bytes);
++ OSUnMapPhysToLin(pvCpuVAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ }
++ else
++ {
++ IMG_SIZE_T ui32BytesRemaining = ui32Bytes;
++ IMG_SIZE_T ui32CurrentOffset = 0;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++
++ PVR_ASSERT(pBuf->hOSMemHandle);
++
++ while(ui32BytesRemaining > 0)
++ {
++ IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
++ CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
++
++ if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
++ {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining, HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++ }
++
++ pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ if(!pvCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
++ OSUnMapPhysToLin(pvCpuVAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++
++ ui32BytesRemaining -= ui32BlockBytes;
++ ui32CurrentOffset += ui32BlockBytes;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID
++FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags)
++{
++ BM_MAPPING *pMapping;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
++ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
++
++
++ pMapping = pBuf->pMapping;
++
++ if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++
++ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
++ }
++ else
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++ pBuf->pMapping = IMG_NULL;
++ }
++ }
++ else
++ {
++
++ if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
++ {
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
++ }
++ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++
++
++
++ RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
++ }
++ else
++ {
++ switch (pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ DevMemoryFree (pMapping);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++ pBuf->pMapping = IMG_NULL;
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
++
++}
++
++PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
++{
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ IMG_BOOL bTestDelete = RA_TestDelete(psBMHeap->pImportArena);
++ if (!bTestDelete)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext_AnyCb: RA_TestDelete failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++BM_DestroyContext(IMG_HANDLE hBMContext,
++ IMG_BOOL *pbDestroyed)
++{
++ PVRSRV_ERROR eError;
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));
++
++ if (pbDestroyed != IMG_NULL)
++ {
++ *pbDestroyed = IMG_FALSE;
++ }
++
++
++
++ if (pBMContext == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBMContext->ui32RefCount--;
++
++ if (pBMContext->ui32RefCount > 0)
++ {
++
++ return PVRSRV_OK;
++ }
++
++
++
++
++ eError = List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap, BM_DestroyContext_AnyCb);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
++#if 0
++
++
++
++
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Cleaning up with ResManFreeSpecial"));
++ if(ResManFreeSpecial() != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeSpecial failed %d",eError));
++ }
++
++#endif
++ return eError;
++ }
++ else
++ {
++
++ eError = ResManFreeResByPtr(pBMContext->hResItem);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d",eError));
++ return eError;
++ }
++
++
++ if (pbDestroyed != IMG_NULL)
++ {
++ *pbDestroyed = IMG_TRUE;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
++
++
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ RA_Delete (psBMHeap->pImportArena);
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ BM_CONTEXT *pBMContext = pvParam;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++
++
++ if(List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
++ BM_DestroyContextCallBack_AnyVaCb,
++ psDeviceNode) != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (pBMContext->psMMUContext)
++ {
++ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++ }
++
++
++
++ if (pBMContext->pBufferHash)
++ {
++ HASH_Delete(pBMContext->pBufferHash);
++ }
++
++ if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext)
++ {
++
++ psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL;
++ }
++ else
++ {
++
++ List_BM_CONTEXT_Remove(pBMContext);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_HANDLE BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT *pBMContext, va_list va)
++{
++ PRESMAN_CONTEXT hResManContext;
++ hResManContext = va_arg(va, PRESMAN_CONTEXT);
++ if(ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) == PVRSRV_OK)
++ {
++
++ pBMContext->ui32RefCount++;
++ return pBMContext;
++ }
++ return IMG_NULL;
++}
++
++IMG_VOID BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_CONTEXT *pBMContext;
++ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
++ pBMContext = va_arg(va, BM_CONTEXT*);
++ switch(psBMHeap->sDevArena.DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
++ break;
++ }
++ }
++}
++
++IMG_HANDLE
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_PHYADDR *psPDDevPAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_BOOL *pbCreated)
++{
++ BM_CONTEXT *pBMContext;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ IMG_BOOL bKernelContext;
++ PRESMAN_CONTEXT hResManContext;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext"));
++
++ if (psPerProc == IMG_NULL)
++ {
++ bKernelContext = IMG_TRUE;
++ hResManContext = psDeviceNode->hResManContext;
++ }
++ else
++ {
++ bKernelContext = IMG_FALSE;
++ hResManContext = psPerProc->hResManContext;
++ }
++
++ if (pbCreated != IMG_NULL)
++ {
++ *pbCreated = IMG_FALSE;
++ }
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ if (bKernelContext == IMG_FALSE)
++ {
++ IMG_HANDLE res = (IMG_HANDLE) List_BM_CONTEXT_Any_va(psDevMemoryInfo->pBMContext,
++ BM_CreateContext_IncRefCount_AnyVaCb,
++ hResManContext);
++ if (res)
++ {
++ return res;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (struct _BM_CONTEXT_),
++ (IMG_PVOID *)&pBMContext, IMG_NULL,
++ "Buffer Manager Context") != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
++ return IMG_NULL;
++ }
++ OSMemSet(pBMContext, 0, sizeof (BM_CONTEXT));
++
++
++ pBMContext->psDeviceNode = psDeviceNode;
++
++
++
++ pBMContext->pBufferHash = HASH_Create(32);
++ if (pBMContext->pBufferHash==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed"));
++ goto cleanup;
++ }
++
++ if(psDeviceNode->pfnMMUInitialise(psDeviceNode,
++ &pBMContext->psMMUContext,
++ psPDDevPAddr) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed"));
++ goto cleanup;
++ }
++
++ if(bKernelContext)
++ {
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL);
++ psDevMemoryInfo->pBMKernelContext = pBMContext;
++ }
++ else
++ {
++
++
++
++
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
++
++ if (psDevMemoryInfo->pBMKernelContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: psDevMemoryInfo->pBMKernelContext invalid"));
++ goto cleanup;
++ }
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
++
++
++
++
++
++ pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;
++
++
++
++
++ List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap,
++ BM_CreateContext_InsertHeap_ForEachVaCb,
++ psDeviceNode,
++ pBMContext);
++
++
++ List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext, pBMContext);
++ }
++
++
++ pBMContext->ui32RefCount++;
++
++
++ pBMContext->hResItem = ResManRegisterRes(hResManContext,
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ pBMContext,
++ 0,
++ BM_DestroyContextCallBack);
++ if (pBMContext->hResItem == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed"));
++ goto cleanup;
++ }
++
++ if (pbCreated != IMG_NULL)
++ {
++ *pbCreated = IMG_TRUE;
++ }
++ return (IMG_HANDLE)pBMContext;
++
++cleanup:
++ (IMG_VOID)BM_DestroyContextCallBack(pBMContext, 0);
++
++ return IMG_NULL;
++}
++
++
++IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo;
++ psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO*);
++ if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID)
++ {
++
++ return psBMHeap;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++IMG_HANDLE
++BM_CreateHeap (IMG_HANDLE hBMContext,
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
++{
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_HEAP *psBMHeap;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));
++
++ if(!pBMContext)
++ {
++ return IMG_NULL;
++ }
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++
++
++
++
++
++ if(pBMContext->ui32RefCount > 0)
++ {
++ psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap,
++ BM_CreateHeap_AnyVaCb,
++ psDevMemHeapInfo);
++
++ if (psBMHeap)
++ {
++ return psBMHeap;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_HEAP),
++ (IMG_PVOID *)&psBMHeap, IMG_NULL,
++ "Buffer Manager Heap") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
++ return IMG_NULL;
++ }
++
++ OSMemSet (psBMHeap, 0, sizeof (BM_HEAP));
++
++ psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
++ psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
++ psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
++ psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
++ psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
++ psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize;
++ psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
++ psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
++
++
++ psBMHeap->pBMContext = pBMContext;
++
++ psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
++ &psBMHeap->sDevArena,
++ &psBMHeap->pVMArena);
++ if (!psBMHeap->pMMUHeap)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
++ goto ErrorExit;
++ }
++
++
++ psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
++ 0, 0, IMG_NULL,
++ psBMHeap->sDevArena.ui32DataPageSize,
++ BM_ImportMemory,
++ BM_FreeMemory,
++ IMG_NULL,
++ psBMHeap);
++ if(psBMHeap->pImportArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
++ goto ErrorExit;
++ }
++
++ if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++
++
++
++
++ psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
++ if(psBMHeap->pLocalDevMemArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null"));
++ goto ErrorExit;
++ }
++ }
++
++
++ List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap);
++
++ return (IMG_HANDLE)psBMHeap;
++
++
++ErrorExit:
++
++
++ if (psBMHeap->pMMUHeap != IMG_NULL)
++ {
++ psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++ psDeviceNode->pfnMMUFinalise (pBMContext->psMMUContext);
++ }
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++
++ return IMG_NULL;
++}
++
++IMG_VOID
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
++{
++	BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap;
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++
++	PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap"));
++
++	if(psBMHeap)
++	{
++		psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ RA_Delete (psBMHeap->pImportArena);
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported"));
++ return;
++ }
++
++
++ psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++
++
++ List_BM_HEAP_Remove(psBMHeap);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle"));
++ }
++}
++
++
++IMG_BOOL
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++BM_Alloc ( IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 *pui32Flags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_HANDLE *phBuf)
++{
++ BM_BUF *pBuf;
++ BM_CONTEXT *pBMContext;
++ BM_HEAP *psBMHeap;
++ SYS_DATA *psSysData;
++ IMG_UINT32 uFlags;
++
++ if (pui32Flags == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: invalid parameter"));
++ PVR_DBG_BREAK;
++ return IMG_FALSE;
++ }
++
++ uFlags = *pui32Flags;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
++ uSize, uFlags, uDevVAddrAlignment));
++
++ SysAcquireData(&psSysData);
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ pBMContext = psBMHeap->pBMContext;
++
++ if(uDevVAddrAlignment == 0)
++ {
++ uDevVAddrAlignment = 1;
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_BUF),
++ (IMG_PVOID *)&pBuf, IMG_NULL,
++ "Buffer Manager buffer") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++
++ if (AllocMemory(pBMContext,
++ psBMHeap,
++ psDevVAddr,
++ uSize,
++ uFlags,
++ uDevVAddrAlignment,
++ pBuf) != IMG_TRUE)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
++ return IMG_FALSE;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X",
++ uSize, uFlags, pBuf));
++
++
++ pBuf->ui32RefCount = 1;
++ *phBuf = (BM_HANDLE)pBuf;
++ *pui32Flags = uFlags | psBMHeap->ui32Attribs;
++
++
++ if(uFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ *pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ *pui32Flags |= (uFlags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++ return IMG_TRUE;
++}
++
++
++
++#if defined(PVR_LMA)
++static IMG_BOOL
++ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T ui32PageSize)
++{
++ IMG_UINT32 i;
++
++ for (i = 0; i < ui32PageCount; i++)
++ {
++ IMG_SYS_PHYADDR sStartSysPAddr = psSysPAddr[i];
++ IMG_SYS_PHYADDR sEndSysPAddr;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32PageSize;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_BOOL
++ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T ui32Range)
++{
++ IMG_SYS_PHYADDR sEndSysPAddr;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32Range;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++#define WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) HOST_PAGEALIGN((ui32ByteSize) + (ui32PageOffset))
++
++#define WRAP_PAGE_COUNT(ui32ByteSize, ui32PageOffset, ui32HostPageSize) (WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) / (ui32HostPageSize))
++
++#endif
++
++
++IMG_BOOL
++BM_Wrap ( IMG_HANDLE hDevMemHeap,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Offset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 *pui32Flags,
++ BM_HANDLE *phBuf)
++{
++ BM_BUF *pBuf;
++ BM_CONTEXT *psBMContext;
++ BM_HEAP *psBMHeap;
++ SYS_DATA *psSysData;
++ IMG_SYS_PHYADDR sHashAddress;
++ IMG_UINT32 uFlags;
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ psBMContext = psBMHeap->pBMContext;
++
++ uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
++
++ if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
++ {
++ uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
++ ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LMA)
++ if (bPhysContig)
++ {
++ if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(ui32Size, ui32Offset)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
++ return IMG_FALSE;
++ }
++ }
++ else
++ {
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++
++ if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize), ui32HostPageSize))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
++ return IMG_FALSE;
++ }
++ }
++#endif
++
++ sHashAddress = psSysAddr[0];
++
++
++ sHashAddress.uiAddr += ui32Offset;
++
++
++ pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr);
++
++ if(pBuf)
++ {
++ IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
++
++
++ if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
++ pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
++ ui32Size, ui32Offset, sHashAddress.uiAddr));
++
++ pBuf->ui32RefCount++;
++ *phBuf = (BM_HANDLE)pBuf;
++ if(pui32Flags)
++ *pui32Flags = uFlags;
++
++ return IMG_TRUE;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_BUF),
++ (IMG_PVOID *)&pBuf, IMG_NULL,
++ "Buffer Manager buffer") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++
++ if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++
++ return IMG_FALSE;
++ }
++
++
++ if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++ {
++
++ PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
++
++ if (!HASH_Insert (psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
++ {
++ FreeBuf (pBuf, uFlags);
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
++ return IMG_FALSE;
++ }
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)",
++ ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr));
++
++
++ pBuf->ui32RefCount = 1;
++ *phBuf = (BM_HANDLE)pBuf;
++ if(pui32Flags)
++ {
++
++ *pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
++ }
++
++ return IMG_TRUE;
++}
++
++
++IMG_VOID
++BM_Free (BM_HANDLE hBuf,
++ IMG_UINT32 ui32Flags)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++ SYS_DATA *psSysData;
++ IMG_SYS_PHYADDR sHashAddr;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf));
++ PVR_ASSERT (pBuf!=IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter"));
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ pBuf->ui32RefCount--;
++
++ if(pBuf->ui32RefCount == 0)
++ {
++ if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++ {
++ sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++
++ HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddr.uiAddr);
++ }
++ FreeBuf (pBuf, ui32Flags);
++ }
++}
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToCpuVaddr: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_HandleToCpuVaddr(h=%08X)=%08X",
++ hBuf, pBuf->CpuVAddr));
++ return pBuf->CpuVAddr;
++}
++
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++ if (pBuf == IMG_NULL)
++ {
++ IMG_DEV_VIRTADDR DevVAddr = {0};
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToDevVaddr: invalid parameter"));
++ return DevVAddr;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf, pBuf->DevVAddr));
++ return pBuf->DevVAddr;
++}
++
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ IMG_SYS_PHYADDR PhysAddr = {0};
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter"));
++ return PhysAddr;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf, pBuf->CpuPAddr.uiAddr));
++ return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr);
++}
++
++IMG_HANDLE
++BM_HandleToOSMemHandle(BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToOSMemHandle: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_HandleToOSMemHandle(h=%08X)=%08X",
++ hBuf, pBuf->hOSMemHandle));
++ return pBuf->hOSMemHandle;
++}
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++ IMG_UINT32 *pTotalBytes,
++ IMG_UINT32 *pAvailableBytes)
++{
++ if (pAvailableBytes || pTotalBytes || uFlags);
++ return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++ BM_MAPPING *pMapping,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 dev_vaddr_alignment,
++ IMG_DEV_VIRTADDR *pDevVAddr)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ IMG_UINT32 ui32PDumpSize = pMapping->uSize;
++#endif
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++ if(uFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++
++ pMapping->uSize *= 2;
++ }
++
++#ifdef PDUMP
++ if(uFlags & PVRSRV_MEM_DUMMY)
++ {
++
++ ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++#endif
++
++
++ if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap,
++ pMapping->uSize,
++ pActualSize,
++ 0,
++ dev_vaddr_alignment,
++ &(pMapping->DevVAddr)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
++ return IMG_FALSE;
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pBMContext->psMMUContext);
++#endif
++
++
++
++ PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr, pMapping->hOSMemHandle, ui32PDumpSize, pMapping->pBMHeap->sDevArena.ui32DataPageSize, (IMG_HANDLE)pMapping);
++
++ switch (pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ case hm_wrapped_virtaddr:
++ case hm_contiguous:
++ {
++ psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
++ pMapping->uSize,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ case hm_env:
++ {
++ psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->uSize,
++ pMapping->CpuVAddr,
++ pMapping->hOSMemHandle,
++ pDevVAddr,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++ break;
++ }
++ case hm_wrapped_scatter:
++ case hm_wrapped_scatter_virtaddr:
++ {
++ psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->psSysAddr,
++ pMapping->uSize,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "Illegal value %d for pMapping->eCpuMemoryOrigin",
++ pMapping->eCpuMemoryOrigin));
++ return IMG_FALSE;
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pBMContext->psMMUContext);
++#endif
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID
++DevMemoryFree (BM_MAPPING *pMapping)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ IMG_UINT32 ui32PSize;
++#endif
++
++#ifdef PDUMP
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++
++ ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ ui32PSize = pMapping->uSize;
++ }
++
++ PDUMPFREEPAGES(pMapping->pBMHeap,
++ pMapping->DevVAddr,
++ ui32PSize,
++ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
++ (IMG_HANDLE)pMapping,
++ (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE);
++#endif
++
++ psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSize));
++}
++
++static IMG_BOOL
++BM_ImportMemory (IMG_VOID *pH,
++ IMG_SIZE_T uRequestSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase)
++{
++ BM_MAPPING *pMapping;
++ BM_HEAP *pBMHeap = pH;
++ BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
++ IMG_BOOL bResult;
++ IMG_SIZE_T uSize;
++ IMG_SIZE_T uPSize;
++ IMG_UINT32 uDevVAddrAlignment = 0;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
++ pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
++
++ PVR_ASSERT (ppsMapping != IMG_NULL);
++ PVR_ASSERT (pBMContext != IMG_NULL);
++
++ if (ppsMapping == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
++ goto fail_exit;
++ }
++
++ uSize = HOST_PAGEALIGN (uRequestSize);
++ PVR_ASSERT (uSize >= uRequestSize);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_MAPPING),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Buffer Manager Mapping") != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
++ goto fail_exit;
++ }
++
++ pMapping->hOSMemHandle = 0;
++ pMapping->CpuVAddr = 0;
++ pMapping->DevVAddr.uiAddr = 0;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = pBMHeap;
++ pMapping->ui32Flags = uFlags;
++
++
++ if (pActualSize)
++ {
++ *pActualSize = uSize;
++ }
++
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = pMapping->uSize;
++ }
++
++
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
++
++
++ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++
++ if (OSAllocPages(ui32Attribs,
++ uPSize,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ (IMG_VOID **)&pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: OSAllocPages(0x%x) failed",
++ uPSize));
++ goto fail_mapping_alloc;
++ }
++
++
++ pMapping->eCpuMemoryOrigin = hm_env;
++ }
++ else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
++
++
++ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++
++ PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);
++
++ if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
++ uPSize,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ 0,
++ (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
++ goto fail_mapping_alloc;
++ }
++
++
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ if(OSReservePhys(pMapping->CpuPAddr,
++ uPSize,
++ ui32Attribs,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
++ goto fail_dev_mem_alloc;
++ }
++
++
++ pMapping->eCpuMemoryOrigin = hm_contiguous;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type"));
++ goto fail_mapping_alloc;
++ }
++
++
++ bResult = DevMemoryAlloc (pBMContext,
++ pMapping,
++ IMG_NULL,
++ uFlags,
++ uDevVAddrAlignment,
++ &pMapping->DevVAddr);
++ if (!bResult)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize));
++ goto fail_dev_mem_alloc;
++ }
++
++
++
++ PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
++
++ *pBase = pMapping->DevVAddr.uiAddr;
++ *ppsMapping = pMapping;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
++ return IMG_TRUE;
++
++fail_dev_mem_alloc:
++ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++ {
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
++ {
++ pMapping->uSize /= 2;
++ }
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = pMapping->uSize;
++ }
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ OSFreePages(pBMHeap->ui32Attribs,
++ uPSize,
++ (IMG_VOID *)pMapping->CpuVAddr,
++ pMapping->hOSMemHandle);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ if(pMapping->CpuVAddr)
++ {
++ OSUnReservePhys(pMapping->CpuVAddr,
++ uPSize,
++ pBMHeap->ui32Attribs,
++ pMapping->hOSMemHandle);
++ }
++ sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
++ RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++fail_mapping_alloc:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++
++fail_exit:
++ return IMG_FALSE;
++}
++
++
++static IMG_VOID
++BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
++{
++ BM_HEAP *pBMHeap = h;
++ IMG_SIZE_T uPSize;
++
++ PVR_UNREFERENCED_PARAMETER (_base);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base, psMapping));
++
++ PVR_ASSERT (psMapping != IMG_NULL);
++
++ if (psMapping == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
++ return;
++ }
++
++ DevMemoryFree (psMapping);
++
++
++ if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
++ {
++ psMapping->uSize /= 2;
++ }
++
++ if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = psMapping->uSize;
++ }
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ OSFreePages(pBMHeap->ui32Attribs,
++ uPSize,
++ (IMG_VOID *) psMapping->CpuVAddr,
++ psMapping->hOSMemHandle);
++ }
++ else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
++
++ sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
++
++ RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type"));
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL);
++
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
++ h, _base, psMapping));
++}
++
++IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_DEV_VIRTADDR sDevVPageAddr,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
++
++ PVR_ASSERT (psMemInfo && psDevPAddr)
++
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap,
++ sDevVPageAddr);
++}
++
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, PVRSRV_HEAP_INFO *psHeapInfo)
++{
++ BM_HEAP *psBMHeap = (BM_HEAP *)hDevMemHeap;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetHeapInfo"));
++
++ psHeapInfo->hDevMemHeap = hDevMemHeap;
++ psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr;
++ psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size;
++ psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs;
++
++ return PVRSRV_OK;
++}
++
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
++{
++ BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext"));
++
++ return pBMHeap->pBMContext->psMMUContext;
++}
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
++{
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext;
++
++ PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext"));
++
++ return pBMContext->psMMUContext;
++}
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
++
++ return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap;
++}
++
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
++
++ return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode;
++}
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
++
++ return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/deviceclass.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/deviceclass.c
+new file mode 100644
+index 0000000..3340dd8
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/deviceclass.c
+@@ -0,0 +1,1937 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "kernelbuffer.h"
++#include "pvr_bridge_km.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va);
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID);
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID);
++
++#if defined(SUPPORT_MISR_IN_THREAD)
++void OSVSyncMISR(IMG_HANDLE, IMG_BOOL);
++#endif
++
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef struct PVRSRV_DC_BUFFER_TAG
++{
++
++ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++} PVRSRV_DC_BUFFER;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_TAG
++{
++ IMG_HANDLE hExtSwapChain;
++ IMG_UINT32 ui32SwapChainID;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32RefCount;
++ PVRSRV_QUEUE_INFO *psQueue;
++ PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ IMG_UINT32 ui32BufferCount;
++ PVRSRV_DC_BUFFER *psLastFlipBuffer;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psNext;
++ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++ //IMG_HANDLE hResItem;
++} PVRSRV_DC_SWAPCHAIN;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_REF_TAG
++{
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++ IMG_HANDLE hResItem;
++} PVRSRV_DC_SWAPCHAIN_REF;
++
++
++typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hExtDevice;
++ PPVRSRV_DC_SRV2DISP_KMJTABLE psFuncTable;
++ IMG_HANDLE hDevMemContext;
++ PVRSRV_DC_BUFFER sSystemBuffer;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psDCSwapChainShared;
++} PVRSRV_DISPLAYCLASS_INFO;
++
++
++typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PRESMAN_ITEM hResItem;
++} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO;
++
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG *PPVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++typedef struct PVRSRV_BC_BUFFER_TAG
++{
++
++ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++ struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo;
++} PVRSRV_BC_BUFFER;
++
++
++typedef struct PVRSRV_BUFFERCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hExtDevice;
++ PPVRSRV_BC_SRV2BUFFER_KMJTABLE psFuncTable;
++ IMG_HANDLE hDevMemContext;
++
++ IMG_UINT32 ui32BufferCount;
++ PVRSRV_BC_BUFFER *psBuffer;
++
++} PVRSRV_BUFFERCLASS_INFO;
++
++
++typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ IMG_HANDLE hResItem;
++} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO;
++
++
++static PVRSRV_DISPLAYCLASS_INFO* DCDeviceHandleToDCInfo (IMG_HANDLE hDeviceKM)
++{
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++ return psDCPerContextInfo->psDCInfo;
++}
++
++
++static PVRSRV_BUFFERCLASS_INFO* BCDeviceHandleToBCInfo (IMG_HANDLE hDeviceKM)
++{
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++ return psBCPerContextInfo->psBCInfo;
++}
++
++IMG_VOID PVRSRVEnumerateDCKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT *pui32DevCount;
++ IMG_UINT32 **ppui32DevID;
++ PVRSRV_DEVICE_CLASS peDeviceClass;
++
++ pui32DevCount = va_arg(va, IMG_UINT*);
++ ppui32DevID = va_arg(va, IMG_UINT32**);
++ peDeviceClass = va_arg(va, PVRSRV_DEVICE_CLASS);
++
++ if ((psDeviceNode->sDevId.eDeviceClass == peDeviceClass)
++ && (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT))
++ {
++ (*pui32DevCount)++;
++ if(*ppui32DevID)
++ {
++ *(*ppui32DevID)++ = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++ }
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM (PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID )
++{
++
++ IMG_UINT ui32DevCount = 0;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVEnumerateDCKM_ForEachVaCb,
++ &ui32DevCount,
++ &pui32DevID,
++ DeviceClass);
++
++ if(pui32DevCount)
++ {
++ *pui32DevCount = ui32DevCount;
++ }
++ else if(pui32DevID == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Invalid parameters"));
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterDCDeviceKM (PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable,
++ IMG_UINT32 *pui32DeviceID)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo = IMG_NULL;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ SysAcquireData(&psSysData);
++
++
++
++
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psDCInfo),
++ (IMG_VOID **)&psDCInfo, IMG_NULL,
++ "Display Class Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet (psDCInfo, 0, sizeof(*psDCInfo));
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
++ (IMG_VOID **)&psDCInfo->psFuncTable, IMG_NULL,
++ "Function table for SRVKM->DISPLAY") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc"));
++ goto ErrorExit;
++ }
++ OSMemSet (psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE));
++
++
++ *psDCInfo->psFuncTable = *psFuncTable;
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (IMG_VOID **)&psDeviceNode, IMG_NULL,
++ "Device Node") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc"));
++ goto ErrorExit;
++ }
++ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ psDeviceNode->pvDevice = (IMG_VOID*)psDCInfo;
++ psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo);
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY;
++ psDeviceNode->psSysData = psSysData;
++
++
++ if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID"));
++ goto ErrorExit;
++ }
++ psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ if (pui32DeviceID)
++ {
++ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++
++
++ SysRegisterExternalDevice(psDeviceNode);
++
++
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if(psDCInfo->psFuncTable)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL);
++ psDCInfo->psFuncTable = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL);
++
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_FALSE,
++ PVRSRV_DEVICE_CLASS_DISPLAY);
++ if (!psDeviceNode)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: requested device %d not present", ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++
++
++
++
++ if(psDCInfo->ui32RefCount == 0)
++ {
++
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
++
++
++ SysRemoveExternalDevice(psDeviceNode);
++
++
++
++
++ PVR_ASSERT(psDCInfo->ui32RefCount == 0);
++ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL);
++ psDCInfo->psFuncTable = IMG_NULL;
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL);
++
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: failed as %d Services DC API connections are still open", psDCInfo->ui32RefCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterBCDeviceKM (PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable,
++ IMG_UINT32 *pui32DeviceID)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo = IMG_NULL;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ SysAcquireData(&psSysData);
++
++
++
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBCInfo),
++ (IMG_VOID **)&psBCInfo, IMG_NULL,
++ "Buffer Class Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet (psBCInfo, 0, sizeof(*psBCInfo));
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++ (IMG_VOID **)&psBCInfo->psFuncTable, IMG_NULL,
++ "Function table for SRVKM->BUFFER") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc"));
++ goto ErrorExit;
++ }
++ OSMemSet (psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE));
++
++
++ *psBCInfo->psFuncTable = *psFuncTable;
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (IMG_VOID **)&psDeviceNode, IMG_NULL,
++ "Device Node") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc"));
++ goto ErrorExit;
++ }
++ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ psDeviceNode->pvDevice = (IMG_VOID*)psBCInfo;
++ psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo);
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER;
++ psDeviceNode->psSysData = psSysData;
++
++
++ if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID"));
++ goto ErrorExit;
++ }
++ psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ if (pui32DeviceID)
++ {
++ *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++
++
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if(psBCInfo->psFuncTable)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PPVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL);
++ psBCInfo->psFuncTable = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL);
++
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++
++PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_FALSE,
++ PVRSRV_DEVICE_CLASS_BUFFER);
++
++ if (!psDevNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: requested device %d not present", ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++
++ psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDevNode->pvDevice;
++
++
++
++
++ if(psBCInfo->ui32RefCount == 0)
++ {
++
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDevNode);
++
++
++
++
++ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++
++
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL);
++ psBCInfo->psFuncTable = IMG_NULL;
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL);
++
++ (IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDevNode, IMG_NULL);
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: failed as %d Services BC API connections are still open", psBCInfo->ui32RefCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM (IMG_HANDLE hDeviceKM,
++ IMG_BOOL bResManCallback)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++ PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++
++ eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem);
++
++ return eError;
++}
++
++
++/*
++ * CloseDCDeviceCallBack - ResMan destructor for an open DC device context.
++ * Drops the shared DC info refcount; on the last reference it closes the
++ * external display device, releases the system buffer's kernel sync info
++ * (when its own refcount reaches zero) and clears the cached handles.
++ * The per-context info block is always freed.
++ */
++static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_PVOID pvParam,
++										  IMG_UINT32 ui32Param)
++{
++	PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++	PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++	psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)pvParam;
++	psDCInfo = psDCPerContextInfo->psDCInfo;
++
++	psDCInfo->ui32RefCount--;
++	if(psDCInfo->ui32RefCount == 0)
++	{
++		/* Last open context: close the 3rd-party display driver. */
++		psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice);
++
++		if (--psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++		{
++			PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++		}
++
++		psDCInfo->hDevMemContext = IMG_NULL;
++		psDCInfo->hExtDevice = IMG_NULL;
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO), psDCPerContextInfo, IMG_NULL);
++
++
++	return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVOpenDCDeviceKM - open a display class device for the calling
++ * process.  Looks up the device node by ID, allocates a per-context info
++ * block, and on the first open of the device creates the system buffer's
++ * sync info and opens the external (3rd-party) display driver.  The
++ * per-context info is registered with ResMan so CloseDCDeviceCallBack runs
++ * on process teardown.  Returns the per-context handle in *phDeviceKM.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++								   IMG_UINT32 ui32DeviceID,
++								   IMG_HANDLE hDevCookie,
++								   IMG_HANDLE *phDeviceKM)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++	SYS_DATA *psSysData;
++	PVRSRV_ERROR eError;
++
++	if(!phDeviceKM || !hDevCookie)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Invalid params"));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	SysAcquireData(&psSysData);
++
++	/* Find the DISPLAY-class device node matching ui32DeviceID. */
++	psDeviceNode = (PVRSRV_DEVICE_NODE*)
++			List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++										   MatchDeviceKM_AnyVaCb,
++										   ui32DeviceID,
++										   IMG_FALSE,
++										   PVRSRV_DEVICE_CLASS_DISPLAY);
++	if (!psDeviceNode)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: no devnode matching index %d", ui32DeviceID));
++		return PVRSRV_ERROR_GENERIC;
++	}
++	psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++
++
++
++
++	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++				  sizeof(*psDCPerContextInfo),
++				  (IMG_VOID **)&psDCPerContextInfo, IMG_NULL,
++				  "Display Class per Context Info") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo));
++
++	if(psDCInfo->ui32RefCount++ == 0)
++	{
++		/* First open: bind the kernel memory context and open the driver. */
++		psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++		psDCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++
++		eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++									(IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext,
++									&psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed sync info alloc"));
++			psDCInfo->ui32RefCount--;
++			/* NOTE(review): psDCPerContextInfo is leaked on this path
++			 * (and on the pfnOpenDCDevice failure below) - it is never
++			 * freed before returning. */
++			return eError;
++		}
++
++
++		eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID,
++        	                                            &psDCInfo->hExtDevice,
++				                                        (PVRSRV_SYNC_DATA*)psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to open external DC device"));
++			psDCInfo->ui32RefCount--;
++			PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++			return eError;
++		}
++
++		psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++	}
++
++	psDCPerContextInfo->psDCInfo = psDCInfo;
++	psDCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++													 RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++													 psDCPerContextInfo,
++													 0,
++													 CloseDCDeviceCallBack);
++
++	/* Return the per-context info as the opaque device handle. */
++	*phDeviceKM = (IMG_HANDLE)psDCPerContextInfo;
++
++	return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVEnumDCFormatsKM - enumerate the pixel formats supported by the
++ * external display device.  Thin validated pass-through to the 3rd-party
++ * driver's pfnEnumDCFormats.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM (IMG_HANDLE hDeviceKM,
++									IMG_UINT32 *pui32Count,
++									DISPLAY_FORMAT *psFormat)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++	if(!hDeviceKM || !pui32Count || !psFormat)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCFormatsKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	/* Forward to the external display driver. */
++	return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice, pui32Count, psFormat);
++}
++
++
++
++/*
++ * PVRSRVEnumDCDimsKM - enumerate the display dimensions available for a
++ * given format.  Thin validated pass-through to the 3rd-party driver's
++ * pfnEnumDCDims.  (psDim is not NULL-checked here; presumably the external
++ * driver tolerates it - TODO confirm.)
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM (IMG_HANDLE hDeviceKM,
++								 DISPLAY_FORMAT *psFormat,
++								 IMG_UINT32 *pui32Count,
++								 DISPLAY_DIMS *psDim)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++	if(!hDeviceKM || !pui32Count || !psFormat)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCDimsKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	/* Forward to the external display driver. */
++	return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice, psFormat, pui32Count, psDim);
++}
++
++
++/*
++ * PVRSRVGetDCSystemBufferKM - fetch the external driver's system (primary)
++ * buffer and wrap it in the DC info's embedded sSystemBuffer descriptor.
++ * Returns a handle to that descriptor in *phBuffer.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM (IMG_HANDLE hDeviceKM,
++										IMG_HANDLE *phBuffer)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	IMG_HANDLE hExtBuffer;
++
++	if(!hDeviceKM || !phBuffer)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	/* Ask the external driver for its system buffer handle. */
++	eError = psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice, &hExtBuffer);
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver"));
++		return eError;
++	}
++
++	/* Populate the wrapper so later mapping calls can resolve the buffer. */
++	psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++	psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++	psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++	psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer;
++
++	psDCInfo->sSystemBuffer.psDCInfo = psDCInfo;
++
++
++	*phBuffer = (IMG_HANDLE)&(psDCInfo->sSystemBuffer);
++
++	return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVGetDCInfoKM - query the external display driver's DISPLAY_INFO,
++ * clamping the reported maximum swap-chain buffer count to the Services
++ * compile-time limit PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM (IMG_HANDLE hDeviceKM,
++								DISPLAY_INFO *psDisplayInfo)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_ERROR eError;
++
++	if(!hDeviceKM || !psDisplayInfo)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCInfoKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++
++	eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, psDisplayInfo);
++	if (eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++
++	/* Never advertise more buffers than Services can track. */
++	if (psDisplayInfo->ui32MaxSwapChainBuffers > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++	{
++		psDisplayInfo->ui32MaxSwapChainBuffers = PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS;
++	}
++
++	return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVDestroyDCSwapChainKM - drop one reference to a swap chain.  Frees
++ * the ResMan item, which calls DestroyDCSwapChainRefCallBack; the chain
++ * itself is destroyed only when its last reference goes away.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChainRef)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
++
++	if(!hSwapChainRef)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyDCSwapChainKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psSwapChainRef = hSwapChainRef;
++
++	eError = ResManFreeResByPtr(psSwapChainRef->hResItem);
++
++	return eError;
++}
++
++
++/*
++ * DestroyDCSwapChain - tear down a swap chain once its refcount is zero:
++ * unlink it from the DC info's shared-chain list, destroy its command
++ * queue, ask the external driver to destroy its chain, release each
++ * buffer's sync info, then free the chain structure itself.
++ */
++static PVRSRV_ERROR DestroyDCSwapChain(PVRSRV_DC_SWAPCHAIN *psSwapChain)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo = psSwapChain->psDCInfo;
++	IMG_UINT32 i;
++
++	/* Unlink from the singly-linked shared swap-chain list. */
++
++	if( psDCInfo->psDCSwapChainShared )
++	{
++		if( psDCInfo->psDCSwapChainShared == psSwapChain )
++		{
++			psDCInfo->psDCSwapChainShared = psSwapChain->psNext;
++		}
++		else
++		{
++			PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
++			psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
++			while( psCurrentSwapChain->psNext )
++			{
++				if( psCurrentSwapChain->psNext != psSwapChain )
++				{
++					psCurrentSwapChain = psCurrentSwapChain->psNext;
++					continue;
++				}
++				psCurrentSwapChain->psNext = psSwapChain->psNext;
++				break;
++			}
++		}
++	}
++
++
++	PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue);
++
++	/* Destroy the external (3rd-party) swap chain. */
++	eError = psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice,
++														psSwapChain->hExtSwapChain);
++
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"DestroyDCSwapChainCallBack: Failed to destroy DC swap chain"));
++		return eError;
++	}
++
++	/* Release per-buffer sync infos (shared refcount semantics). */
++	for(i=0; i<psSwapChain->ui32BufferCount; i++)
++	{
++		if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++		{
++			if (--psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++			{
++				PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++			}
++		}
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL);
++
++
++	return eError;
++}
++
++
++/*
++ * DestroyDCSwapChainRefCallBack - ResMan destructor for a swap-chain
++ * reference.  Drops the chain's refcount, destroying the chain on the
++ * last reference, then frees the reference wrapper itself.
++ */
++static PVRSRV_ERROR DestroyDCSwapChainRefCallBack(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF *) pvParam;
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++	if(--psSwapChainRef->psSwapChain->ui32RefCount == 0)
++	{
++		eError = DestroyDCSwapChain(psSwapChainRef->psSwapChain);
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN_REF), psSwapChainRef, IMG_NULL);
++	return eError;
++}
++
++/*
++ * PVRSRVFindSharedDCSwapChainKM - linear search of the DC info's shared
++ * swap-chain list for a chain with the given ID; IMG_NULL if not found.
++ */
++static PVRSRV_DC_SWAPCHAIN* PVRSRVFindSharedDCSwapChainKM(PVRSRV_DISPLAYCLASS_INFO *psDCInfo,
++											IMG_UINT32 ui32SwapChainID)
++{
++	PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
++
++	for(psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
++				 psCurrentSwapChain;
++				 psCurrentSwapChain = psCurrentSwapChain->psNext)
++	{
++		if(psCurrentSwapChain->ui32SwapChainID == ui32SwapChainID)
++			return psCurrentSwapChain;
++	}
++	return IMG_NULL;
++}
++
++/*
++ * PVRSRVCreateDCSwapChainRefKM - allocate a reference wrapper for an
++ * existing swap chain, bump the chain's refcount, and register the
++ * wrapper with ResMan so DestroyDCSwapChainRefCallBack releases it.
++ */
++static PVRSRV_ERROR PVRSRVCreateDCSwapChainRefKM(PVRSRV_PER_PROCESS_DATA	*psPerProc,
++												 PVRSRV_DC_SWAPCHAIN 		*psSwapChain,
++												 PVRSRV_DC_SWAPCHAIN_REF 	**ppsSwapChainRef)
++{
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL;
++
++
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++				  sizeof(PVRSRV_DC_SWAPCHAIN_REF),
++				  (IMG_VOID **)&psSwapChainRef, IMG_NULL,
++				  "Display Class Swapchain Reference") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainRefKM: Failed psSwapChainRef alloc"));
++		return  PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	OSMemSet (psSwapChainRef, 0, sizeof(PVRSRV_DC_SWAPCHAIN_REF));
++
++	/* One reference per wrapper. */
++	psSwapChain->ui32RefCount++;
++
++
++	psSwapChainRef->psSwapChain = psSwapChain;
++	psSwapChainRef->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++												  RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++												  psSwapChainRef,
++												  0,
++												  &DestroyDCSwapChainRefCallBack);
++	*ppsSwapChainRef = psSwapChainRef;
++
++	return PVRSRV_OK;
++}
++
++/*
++ * PVRSRVCreateDCSwapChainKM - create (or, with the QUERY flag, look up) a
++ * display swap chain.  Allocates the chain structure and its command
++ * queue, allocates one kernel sync info per buffer, asks the external
++ * driver to create its chain, wraps the result in a ResMan-tracked
++ * reference, and optionally links it into the shared-chain list.
++ * Buffer count is bounded by [2, PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS].
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM (PVRSRV_PER_PROCESS_DATA	*psPerProc,
++										IMG_HANDLE				hDeviceKM,
++										IMG_UINT32				ui32Flags,
++										DISPLAY_SURF_ATTRIBUTES	*psDstSurfAttrib,
++										DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++										IMG_UINT32				ui32BufferCount,
++										IMG_UINT32				ui32OEMFlags,
++										IMG_HANDLE				*phSwapChainRef,
++										IMG_UINT32				*pui32SwapChainID)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL;
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL;
++	PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++	PVRSRV_QUEUE_INFO *psQueue = IMG_NULL;
++	PVRSRV_ERROR eError;
++	IMG_UINT32 i;
++	DISPLAY_INFO sDisplayInfo;
++
++
++	if(!hDeviceKM
++	|| !psDstSurfAttrib
++	|| !psSrcSurfAttrib
++	|| !phSwapChainRef
++	|| !pui32SwapChainID)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too many buffers"));
++		return PVRSRV_ERROR_TOOMANYBUFFERS;
++	}
++
++	if (ui32BufferCount < 2)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too few buffers"));
++		return PVRSRV_ERROR_TOO_FEW_BUFFERS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_QUERY )
++	{
++		/* QUERY mode: look up an existing shared chain by *pui32SwapChainID
++		 * and return a new reference to it instead of creating one. */
++		psSwapChain = PVRSRVFindSharedDCSwapChainKM(psDCInfo, *pui32SwapChainID );
++		if( psSwapChain  )
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: found query"));
++
++			eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
++												  psSwapChain,
++												  &psSwapChainRef);
++			if( eError != PVRSRV_OK )
++			{
++				PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
++				return eError;
++			}
++
++			*phSwapChainRef = (IMG_HANDLE)psSwapChainRef;
++			return PVRSRV_OK;
++		}
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: No shared SwapChain found for query"));
++		return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
++	}
++
++
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++				  sizeof(PVRSRV_DC_SWAPCHAIN),
++				  (IMG_VOID **)&psSwapChain, IMG_NULL,
++				  "Display Class Swapchain") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc"));
++		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++		/* NOTE(review): jumping to ErrorExit with psSwapChain == IMG_NULL
++		 * makes the cleanup loop below dereference a NULL pointer
++		 * (psSwapChain->asBuffer[i]); the loop should be guarded. */
++		goto ErrorExit;
++	}
++	OSMemSet (psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN));
++
++	/* Per-chain command queue used to serialise flip commands. */
++	eError = PVRSRVCreateCommandQueueKM(1024, &psQueue);
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue"));
++		goto ErrorExit;
++	}
++
++
++	psSwapChain->psQueue = psQueue;
++
++	/* One kernel sync info per swap-chain buffer. */
++	for(i=0; i<ui32BufferCount; i++)
++	{
++		eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++										psDCInfo->hDevMemContext,
++										&psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain"));
++			goto ErrorExit;
++		}
++
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++
++
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++
++
++		psSwapChain->asBuffer[i].psDCInfo = psDCInfo;
++		psSwapChain->asBuffer[i].psSwapChain = psSwapChain;
++
++		/* Sync data array handed to the external driver below. */
++		apsSyncData[i] = (PVRSRV_SYNC_DATA*)psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++	}
++
++	psSwapChain->ui32BufferCount = ui32BufferCount;
++	psSwapChain->psDCInfo = psDCInfo;
++
++	eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, &sDisplayInfo);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to get DC info"));
++		/* NOTE(review): this path returns directly instead of goto
++		 * ErrorExit, leaking psSwapChain, psQueue and all the sync
++		 * infos allocated above. */
++		return eError;
++	}
++
++	/* Create the external (3rd-party) swap chain. */
++	eError =  psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice,
++														ui32Flags,
++														psDstSurfAttrib,
++														psSrcSurfAttrib,
++														ui32BufferCount,
++														apsSyncData,
++														ui32OEMFlags,
++														&psSwapChain->hExtSwapChain,
++														&psSwapChain->ui32SwapChainID);
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain"));
++		goto ErrorExit;
++	}
++
++	/* Wrap the new chain in a ResMan-tracked reference. */
++	eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
++										  psSwapChain,
++										  &psSwapChainRef);
++	if( eError != PVRSRV_OK )
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
++		goto ErrorExit;
++	}
++
++	psSwapChain->ui32RefCount = 1;
++	psSwapChain->ui32Flags = ui32Flags;
++
++	/* Shared chains are published on the DC info's list for QUERY lookups. */
++	if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_SHARED )
++	{
++		if(! psDCInfo->psDCSwapChainShared )
++		{
++			psDCInfo->psDCSwapChainShared = psSwapChain;
++		}
++		else
++		{
++			PVRSRV_DC_SWAPCHAIN *psOldHead = psDCInfo->psDCSwapChainShared;
++			psDCInfo->psDCSwapChainShared = psSwapChain;
++			psSwapChain->psNext = psOldHead;
++		}
++	}
++
++
++	*pui32SwapChainID = psSwapChain->ui32SwapChainID;
++
++
++	*phSwapChainRef= (IMG_HANDLE)psSwapChainRef;
++
++	return eError;
++
++ErrorExit:
++
++	for(i=0; i<ui32BufferCount; i++)
++	{
++		if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++		{
++			if (--psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++			{
++				PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++			}
++		}
++	}
++
++	if(psQueue)
++	{
++		PVRSRVDestroyCommandQueueKM(psQueue);
++	}
++
++	if(psSwapChain)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL);
++
++	}
++
++	return eError;
++}
++
++/*
++ * PVRSRVSetDCDstRectKM - set the destination rectangle for a swap chain.
++ * Thin validated pass-through to the external driver's pfnSetDCDstRect.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE	hDeviceKM,
++								  IMG_HANDLE	hSwapChainRef,
++								  IMG_RECT	*psRect)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++	if(!hDeviceKM || !hSwapChainRef)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstRectKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice,
++													psSwapChain->hExtSwapChain,
++													psRect);
++}
++
++
++/*
++ * PVRSRVSetDCSrcRectKM - set the source rectangle for a swap chain.
++ * Thin validated pass-through to the external driver's pfnSetDCSrcRect.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE	hDeviceKM,
++								  IMG_HANDLE	hSwapChainRef,
++								  IMG_RECT	*psRect)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++	if(!hDeviceKM || !hSwapChainRef)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcRectKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice,
++													psSwapChain->hExtSwapChain,
++													psRect);
++}
++
++
++/*
++ * PVRSRVSetDCDstColourKeyKM - set the destination colour key for a swap
++ * chain.  Thin validated pass-through to pfnSetDCDstColourKey.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE	hDeviceKM,
++									   IMG_HANDLE	hSwapChainRef,
++									   IMG_UINT32	ui32CKColour)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++	if(!hDeviceKM || !hSwapChainRef)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstColourKeyKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice,
++														psSwapChain->hExtSwapChain,
++														ui32CKColour);
++}
++
++
++/*
++ * PVRSRVSetDCSrcColourKeyKM - set the source colour key for a swap chain.
++ * Thin validated pass-through to pfnSetDCSrcColourKey.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE	hDeviceKM,
++									   IMG_HANDLE	hSwapChainRef,
++									   IMG_UINT32	ui32CKColour)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++	if(!hDeviceKM || !hSwapChainRef)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcColourKeyKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice,
++														psSwapChain->hExtSwapChain,
++														ui32CKColour);
++}
++
++
++/*
++ * PVRSRVGetDCBuffersKM - fetch the external buffer handles for a swap
++ * chain and return, per buffer, a handle to the Services-side wrapper in
++ * psSwapChain->asBuffer[].  *pui32BufferCount is written by the external
++ * driver and asserted to be within PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE	hDeviceKM,
++								  IMG_HANDLE	hSwapChainRef,
++								  IMG_UINT32	*pui32BufferCount,
++								  IMG_HANDLE	*phBuffer)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++	IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++	PVRSRV_ERROR eError;
++	IMG_UINT32 i;
++
++	if(!hDeviceKM || !hSwapChainRef || !phBuffer)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++
++	eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice,
++													psSwapChain->hExtSwapChain,
++													pui32BufferCount,
++													ahExtBuffer);
++
++	PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++	/* Record each external buffer handle in its wrapper and hand the
++	 * wrapper back as the buffer handle. */
++
++
++	for(i=0; i<*pui32BufferCount; i++)
++	{
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = ahExtBuffer[i];
++		phBuffer[i] = (IMG_HANDLE)&psSwapChain->asBuffer[i];
++	}
++
++	return eError;
++}
++
++/*
++ * PVRSRVSwapToDCBufferKM - queue a flip to a specific swap-chain buffer.
++ * Builds a DISPLAYCLASS_FLIP_COMMAND (with trailing copy of the clip
++ * rects) on the chain's command queue, synchronised against the buffer's
++ * sync info and - when different - the previously flipped buffer's, then
++ * kicks queue processing and retries until it is no longer blocked or
++ * MAX_HW_TIME_US elapses.  Returns PVRSRV_ERROR_RETRY when the queue has
++ * no space.  Under SUPPORT_LMA the whole operation holds the power lock.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE	hDeviceKM,
++									IMG_HANDLE	hBuffer,
++									IMG_UINT32	ui32SwapInterval,
++									IMG_HANDLE	hPrivateTag,
++									IMG_UINT32	ui32ClipRectCount,
++									IMG_RECT	*psClipRect)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_BUFFER *psBuffer;
++	PVRSRV_QUEUE_INFO *psQueue;
++	DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++	IMG_UINT32 i;
++	IMG_UINT32 ui32NumSrcSyncs = 1;
++	PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++	PVRSRV_COMMAND *psCommand;
++
++	if(!hDeviceKM || !hBuffer || !psClipRect)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++#if defined(SUPPORT_LMA)
++	eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++	if(eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++#endif
++
++	psBuffer = (PVRSRV_DC_BUFFER*)hBuffer;
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++
++	psQueue = psBuffer->psSwapChain->psQueue;
++
++	/* Sync 0: the buffer being flipped to. */
++	apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++	/* Sync 1 (optional): the last flipped buffer, so the new flip waits
++	 * for the previous one when they differ. */
++
++	if(psBuffer->psSwapChain->psLastFlipBuffer &&
++		psBuffer != psBuffer->psSwapChain->psLastFlipBuffer)
++	{
++		apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++
++
++		ui32NumSrcSyncs++;
++	}
++
++	/* Reserve queue space for the flip command plus its clip rects. */
++	eError = PVRSRVInsertCommandKM (psQueue,
++									&psCommand,
++									psDCInfo->ui32DeviceID,
++									DC_FLIP_COMMAND,
++									0,
++									IMG_NULL,
++									ui32NumSrcSyncs,
++									apsSrcSync,
++									sizeof(DISPLAYCLASS_FLIP_COMMAND) + (sizeof(IMG_RECT) * ui32ClipRectCount));
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to get space in queue"));
++		goto Exit;
++	}
++
++
++	psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++
++	psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++
++	psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
++
++
++	psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
++
++
++	psFlipCmd->hPrivateTag = hPrivateTag;
++
++
++	psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
++
++	/* Clip rects live immediately after the command structure. */
++	psFlipCmd->psClipRect = (IMG_RECT*)((IMG_UINT8*)psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND));
++
++	for(i=0; i<ui32ClipRectCount; i++)
++	{
++		psFlipCmd->psClipRect[i] = psClipRect[i];
++	}
++
++
++	psFlipCmd->ui32SwapInterval = ui32SwapInterval;
++
++
++	eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to submit command"));
++		goto Exit;
++	}
++
++	/* Poll queue processing until it is no longer blocked, or time out. */
++
++
++
++
++
++
++
++	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++	{
++		if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++		{
++			goto ProcessedQueues;
++		}
++		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++	} END_LOOP_UNTIL_TIMEOUT();
++
++	PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to process queues"));
++
++	eError = PVRSRV_ERROR_GENERIC;
++	goto Exit;
++
++ProcessedQueues:
++	/* Remember this buffer so the next flip can sync against it. */
++	psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;
++
++Exit:
++
++	if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
++	{
++		eError = PVRSRV_ERROR_RETRY;
++	}
++
++#if defined(SUPPORT_LMA)
++	PVRSRVPowerUnlock(KERNEL_ID);
++#endif
++	return eError;
++}
++
++
++/*
++ * PVRSRVSwapToDCSystemKM - queue a flip back to the display's system
++ * (primary) buffer.  Same queue/sync/poll structure as
++ * PVRSRVSwapToDCBufferKM, but targets psDCInfo->sSystemBuffer, carries no
++ * clip rects and uses a fixed swap interval of 1.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE	hDeviceKM,
++									IMG_HANDLE	hSwapChainRef)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_QUEUE_INFO *psQueue;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
++	DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++	IMG_UINT32 ui32NumSrcSyncs = 1;
++	PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++	PVRSRV_COMMAND *psCommand;
++
++	if(!hDeviceKM || !hSwapChainRef)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++#if defined(SUPPORT_LMA)
++	eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++	if(eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++#endif
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef;
++	psSwapChain = psSwapChainRef->psSwapChain;
++
++
++	psQueue = psSwapChain->psQueue;
++
++	/* Sync 0: the system buffer we are flipping to. */
++	apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
++
++	/* Sync 1 (optional): the previously flipped buffer, unless it is the
++	 * system buffer itself. */
++
++	if(psSwapChain->psLastFlipBuffer)
++	{
++
++		if (apsSrcSync[0] != psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo)
++		{
++			apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++
++
++			ui32NumSrcSyncs++;
++		}
++	}
++
++
++	eError = PVRSRVInsertCommandKM (psQueue,
++									&psCommand,
++									psDCInfo->ui32DeviceID,
++									DC_FLIP_COMMAND,
++									0,
++									IMG_NULL,
++									ui32NumSrcSyncs,
++									apsSrcSync,
++									sizeof(DISPLAYCLASS_FLIP_COMMAND));
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue"));
++		goto Exit;
++	}
++
++
++	psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++
++	psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++
++	psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
++
++
++	psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
++
++
++	psFlipCmd->hPrivateTag = IMG_NULL;
++
++
++	psFlipCmd->ui32ClipRectCount = 0;
++
++	psFlipCmd->ui32SwapInterval = 1;
++
++
++	eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command"));
++		goto Exit;
++	}
++
++	/* Poll queue processing until it is no longer blocked, or time out. */
++
++
++
++
++
++
++	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++	{
++		if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++		{
++			goto ProcessedQueues;
++		}
++
++		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++	} END_LOOP_UNTIL_TIMEOUT();
++
++	PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to process queues"));
++	eError = PVRSRV_ERROR_GENERIC;
++	goto Exit;
++
++ProcessedQueues:
++	/* Remember the system buffer so the next flip can sync against it. */
++	psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
++
++	eError = PVRSRV_OK;
++
++Exit:
++
++	if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
++	{
++		eError = PVRSRV_ERROR_RETRY;
++	}
++
++#if defined(SUPPORT_LMA)
++	PVRSRVPowerUnlock(KERNEL_ID);
++#endif
++	return eError;
++}
++
++
++/*
++ * PVRSRVRegisterSystemISRHandler - attach a device ISR handler and its
++ * private data to the device node matching ui32DeviceID.  The ISR source
++ * mask parameter is currently unused.
++ */
++PVRSRV_ERROR PVRSRVRegisterSystemISRHandler (PFN_ISR_HANDLER	pfnISRHandler,
++											 IMG_VOID				*pvISRHandlerData,
++											 IMG_UINT32				ui32ISRSourceMask,
++											 IMG_UINT32				ui32DeviceID)
++{
++	SYS_DATA			*psSysData;
++	PVRSRV_DEVICE_NODE	*psDevNode;
++
++	PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask);
++
++	SysAcquireData(&psSysData);
++
++	/* Match on device ID only (IMG_TRUE: any device class). */
++	psDevNode = (PVRSRV_DEVICE_NODE*)
++				List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++												MatchDeviceKM_AnyVaCb,
++												ui32DeviceID,
++												IMG_TRUE);
++
++	if (psDevNode == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterSystemISRHandler: Failed to get psDevNode"));
++		PVR_DBG_BREAK;
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++
++	psDevNode->pvISRData = (IMG_VOID*) pvISRHandlerData;
++
++
++	psDevNode->pfnDeviceISR	= pfnISRHandler;
++
++	return PVRSRV_OK;
++}
++
++/*
++ * PVRSRVSetDCState_ForEachVaCb - per-node callback for PVRSRVSetDCState:
++ * for DISPLAY-class nodes with an open external device, forward the state
++ * value (first va_arg, IMG_UINT32) to the driver's pfnSetDCState.
++ */
++IMG_VOID PVRSRVSetDCState_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	IMG_UINT32 ui32State;
++	ui32State = va_arg(va, IMG_UINT32);
++
++	if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
++	{
++		psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice;
++		if (psDCInfo->psFuncTable->pfnSetDCState && psDCInfo->hExtDevice)
++		{
++			psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->hExtDevice, ui32State);
++		}
++	}
++}
++
++
++/*
++ * PVRSRVSetDCState - broadcast a display state change to every display
++ * class device node via PVRSRVSetDCState_ForEachVaCb.
++ */
++IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State)
++{
++	SYS_DATA					*psSysData;
++
++	SysAcquireData(&psSysData);
++
++	List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++										PVRSRVSetDCState_ForEachVaCb,
++										ui32State);
++}
++
++
++/*
++ * PVRGetDisplayClassJTable - fill in the jump table through which 3rd-party
++ * display drivers call back into Services (register/remove device, command
++ * processing, ISR and power registration).  Always returns IMG_TRUE.
++ */
++IMG_EXPORT
++IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable)
++{
++	psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE);
++	psJTable->pfnPVRSRVRegisterDCDevice = PVRSRVRegisterDCDeviceKM;
++	psJTable->pfnPVRSRVRemoveDCDevice = PVRSRVRemoveDCDeviceKM;
++	psJTable->pfnPVRSRVOEMFunction = SysOEMFunction;
++	psJTable->pfnPVRSRVRegisterCmdProcList = PVRSRVRegisterCmdProcListKM;
++	psJTable->pfnPVRSRVRemoveCmdProcList = PVRSRVRemoveCmdProcListKM;
++#if defined(SUPPORT_MISR_IN_THREAD)
++        psJTable->pfnPVRSRVCmdComplete = OSVSyncMISR;
++#else
++        psJTable->pfnPVRSRVCmdComplete = PVRSRVCommandCompleteKM;
++#endif
++	psJTable->pfnPVRSRVRegisterSystemISRHandler = PVRSRVRegisterSystemISRHandler;
++	psJTable->pfnPVRSRVRegisterPowerDevice = PVRSRVRegisterPowerDevice;
++
++	return IMG_TRUE;
++}
++
++
++
++/*
++ * PVRSRVCloseBCDeviceKM - close a per-process buffer class (BC) device
++ * handle.  The real teardown happens in CloseBCDeviceCallBack, invoked by
++ * ResMan when the item registered at open time is freed here.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM (IMG_HANDLE hDeviceKM,
++									IMG_BOOL bResManCallback)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++	PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++	psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++	/* Freeing the ResMan item triggers CloseBCDeviceCallBack. */
++	eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem);
++
++	return eError;
++}
++
++
++/*
++ * CloseBCDeviceCallBack - ResMan destructor for an open BC device context.
++ * On the last reference it closes the external buffer device, releases
++ * each buffer's sync info and frees the buffer array.  The per-context
++ * info block is always freed.
++ */
++static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_PVOID		pvParam,
++										  IMG_UINT32	ui32Param)
++{
++	PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++	PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++	PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++	psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam;
++	psBCInfo = psBCPerContextInfo->psBCInfo;
++
++	psBCInfo->ui32RefCount--;
++	if(psBCInfo->ui32RefCount == 0)
++	{
++		IMG_UINT32 i;
++
++		/* Last open context: close the 3rd-party buffer driver. */
++		psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->hExtDevice);
++
++
++		for(i=0; i<psBCInfo->ui32BufferCount; i++)
++		{
++			if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++			{
++				if (--psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++				{
++					PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++				}
++			}
++		}
++
++		/* NOTE(review): the size passed is sizeof one PVRSRV_BC_BUFFER,
++		 * but the array was allocated as ui32BufferCount elements - the
++		 * size argument does not match the allocation. */
++		if(psBCInfo->psBuffer)
++		{
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer, IMG_NULL);
++			psBCInfo->psBuffer = IMG_NULL;
++		}
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO), psBCPerContextInfo, IMG_NULL);
++
++
++	return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVOpenBCDeviceKM - open a buffer class device for the calling
++ * process.  On the first open it opens the external driver, queries the
++ * buffer count, allocates the wrapper array and a sync info per buffer,
++ * and fetches each external buffer handle.  The per-context info is
++ * registered with ResMan so CloseBCDeviceCallBack runs on teardown.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM (PVRSRV_PER_PROCESS_DATA	*psPerProc,
++								   IMG_UINT32				ui32DeviceID,
++								   IMG_HANDLE				hDevCookie,
++								   IMG_HANDLE				*phDeviceKM)
++{
++	PVRSRV_BUFFERCLASS_INFO	*psBCInfo;
++	PVRSRV_BUFFERCLASS_PERCONTEXT_INFO	*psBCPerContextInfo;
++	PVRSRV_DEVICE_NODE		*psDeviceNode;
++	SYS_DATA 				*psSysData;
++	IMG_UINT32 				i;
++	PVRSRV_ERROR			eError;
++
++	if(!phDeviceKM || !hDevCookie)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Invalid params"));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	SysAcquireData(&psSysData);
++
++	/* Find the BUFFER-class device node matching ui32DeviceID. */
++	psDeviceNode = (PVRSRV_DEVICE_NODE*)
++			List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++										   MatchDeviceKM_AnyVaCb,
++										   ui32DeviceID,
++										   IMG_FALSE,
++										   PVRSRV_DEVICE_CLASS_BUFFER);
++	if (!psDeviceNode)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: No devnode matching index %d", ui32DeviceID));
++		return PVRSRV_ERROR_GENERIC;
++	}
++	psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDeviceNode->pvDevice;
++
++
++
++
++	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++				  sizeof(*psBCPerContextInfo),
++				  (IMG_VOID **)&psBCPerContextInfo, IMG_NULL,
++				  "Buffer Class per Context Info") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo));
++
++	if(psBCInfo->ui32RefCount++ == 0)
++	{
++		BUFFER_INFO sBufferInfo;
++
++		psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++		/* First open: bind the kernel memory context. */
++		psBCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++		/* NOTE(review): on every error return below, psBCPerContextInfo
++		 * is leaked and psBCInfo->ui32RefCount is left incremented. */
++		eError = psBCInfo->psFuncTable->pfnOpenBCDevice(&psBCInfo->hExtDevice);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to open external BC device"));
++			return eError;
++		}
++
++
++		eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, &sBufferInfo);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM : Failed to get BC Info"));
++			return eError;
++		}
++
++
++		psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount;
++
++		/* Wrapper array, one entry per external buffer. */
++
++		eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++							sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount,
++							(IMG_VOID **)&psBCInfo->psBuffer,
++						 	IMG_NULL,
++							"Array of Buffer Class Buffer");
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers"));
++			return eError;
++		}
++		OSMemSet (psBCInfo->psBuffer,
++					0,
++					sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount);
++
++		for(i=0; i<psBCInfo->ui32BufferCount; i++)
++		{
++			/* Sync info per buffer, refcounted like DC buffers. */
++			eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++										psBCInfo->hDevMemContext,
++										&psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++			if(eError != PVRSRV_OK)
++			{
++				PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed sync info alloc"));
++				goto ErrorExit;
++			}
++
++			psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++
++
++
++
++			eError = psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->hExtDevice,
++															i,
++															psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncData,
++															&psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtBuffer);
++			if(eError != PVRSRV_OK)
++			{
++				PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC buffers"));
++				goto ErrorExit;
++			}
++
++
++			psBCInfo->psBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psBCInfo->psFuncTable->pfnGetBufferAddr;
++			psBCInfo->psBuffer[i].sDeviceClassBuffer.hDevMemContext = psBCInfo->hDevMemContext;
++			psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = psBCInfo->hExtDevice;
++		}
++	}
++
++	psBCPerContextInfo->psBCInfo = psBCInfo;
++	psBCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++													 RESMAN_TYPE_BUFFERCLASS_DEVICE,
++													 psBCPerContextInfo,
++													 0,
++													 CloseBCDeviceCallBack);
++
++	/* Return the per-context info as the opaque device handle. */
++	*phDeviceKM = (IMG_HANDLE)psBCPerContextInfo;
++
++	return PVRSRV_OK;
++
++ErrorExit:
++
++	/* Release any sync infos allocated before the failure. */
++	for(i=0; i<psBCInfo->ui32BufferCount; i++)
++	{
++		if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++		{
++			if (--psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++			{
++				PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++			}
++		}
++	}
++
++
++	if(psBCInfo->psBuffer)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer, IMG_NULL);
++		psBCInfo->psBuffer = IMG_NULL;
++	}
++
++	return eError;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM (IMG_HANDLE hDeviceKM,
++ BUFFER_INFO *psBufferInfo)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ PVRSRV_ERROR eError;
++
++ if(!hDeviceKM || !psBufferInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++ eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, psBufferInfo);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM : Failed to get BC Info"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM (IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ if(!hDeviceKM || !phBuffer)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++ if(ui32BufferIndex < psBCInfo->ui32BufferCount)
++ {
++ *phBuffer = (IMG_HANDLE)&psBCInfo->psBuffer[ui32BufferIndex];
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", ui32BufferIndex,psBCInfo->ui32BufferCount));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable)
++{
++ psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++ psJTable->pfnPVRSRVRegisterBCDevice = PVRSRVRegisterBCDeviceKM;
++ psJTable->pfnPVRSRVRemoveBCDevice = PVRSRVRemoveBCDeviceKM;
++
++ return IMG_TRUE;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/devicemem.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/devicemem.c
+new file mode 100644
+index 0000000..ed60870
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/devicemem.c
+@@ -0,0 +1,1448 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "pdump_km.h"
++#include "pvr_bridge_km.h"
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++typedef struct _RESMAN_MAP_DEVICE_MEM_DATA_
++{
++
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo;
++} RESMAN_MAP_DEVICE_MEM_DATA;
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++ PVRSRV_HEAP_INFO *psHeapInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_UINT32 i;
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapsKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++
++ psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[i].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[i].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++ }
++
++ for(; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo));
++ psHeapInfo[i].ui32HeapID = (IMG_UINT32)PVRSRV_UNDEFINED_HEAP_ID;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbCreated,
++ IMG_BOOL *pbShared)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemContext;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_UINT32 i;
++
++#if !defined(PVR_SECURE_HANDLES)
++ PVR_UNREFERENCED_PARAMETER(pbShared);
++#endif
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVCreateDeviceMemContextKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++
++ hDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr,
++ psPerProc,
++ pbCreated);
++ if (hDevMemContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_TRUE;
++#endif
++ ui32ClientHeapCount++;
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ {
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_FALSE;
++#endif
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ }
++
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++ *phDevMemContext = hDevMemContext;
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_BOOL *pbDestroyed)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ return BM_DestroyContext(hDevMemContext, pbDestroyed);
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbShared)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemHeap;
++ IMG_UINT32 i;
++
++#if !defined(PVR_SECURE_HANDLES)
++ PVR_UNREFERENCED_PARAMETER(pbShared);
++#endif
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapInfoKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_TRUE;
++#endif
++ ui32ClientHeapCount++;
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ {
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_FALSE;
++#endif
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ }
++
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ BM_HANDLE hBuffer;
++
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ *ppsMemInfo = IMG_NULL;
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: Failed to alloc memory for block"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++
++ psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION;
++
++ bBMError = BM_Alloc (hDevMemHeap,
++ IMG_NULL,
++ ui32Size,
++ &psMemInfo->ui32Flags,
++ IMG_CAST_TO_DEVVADDR_UINT(ui32Alignment),
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: BM_Alloc Failed"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++
++ psMemInfo->ui32AllocSize = ui32Size;
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ *ppsMemInfo = psMemInfo;
++
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ BM_HANDLE hBuffer;
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ hBuffer = psMemInfo->sMemBlk.hBuffer;
++
++
++ BM_Free(hBuffer, psMemInfo->ui32Flags);
++
++ if(psMemInfo->pvSysBackupBuffer)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->ui32AllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL);
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++
++ return(PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo)
++{
++ IMG_HANDLE hSyncDevMemHeap;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ BM_CONTEXT *pBMContext;
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_SYNC_DATA *psSyncData;
++
++ eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_SYNC_INFO),
++ (IMG_VOID **)&psKernelSyncInfo, IMG_NULL,
++ "Kernel Synchronization Info");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psKernelSyncInfo->ui32RefCount = 0;
++
++
++ pBMContext = (BM_CONTEXT*)hDevMemContext;
++ psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo;
++
++
++ hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32SyncHeapID].hDevMemHeap;
++
++
++
++
++ eError = AllocDeviceMem(hDevCookie,
++ hSyncDevMemHeap,
++ PVRSRV_MEM_CACHE_CONSISTENT,
++ sizeof(PVRSRV_SYNC_DATA),
++ sizeof(IMG_UINT32),
++ &psKernelSyncInfo->psSyncDataMemInfoKM);
++
++ if (eError != PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psKernelSyncInfo->psSyncData = psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++ psSyncData = psKernelSyncInfo->psSyncData;
++
++ psSyncData->ui32WriteOpsPending = 0;
++ psSyncData->ui32WriteOpsComplete = 0;
++ psSyncData->ui32ReadOpsPending = 0;
++ psSyncData->ui32ReadOpsComplete = 0;
++ psSyncData->ui32LastOpDumpVal = 0;
++ psSyncData->ui32LastReadOpDumpVal = 0;
++
++#if defined(PDUMP)
++ PDUMPMEM(psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM,
++ psKernelSyncInfo->psSyncDataMemInfoKM,
++ 0,
++ psKernelSyncInfo->psSyncDataMemInfoKM->ui32AllocSize,
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
++#endif
++
++ psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++
++
++ psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL;
++
++
++ psKernelSyncInfo->hResItem = IMG_NULL;
++
++
++ *ppsKernelSyncInfo = psKernelSyncInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if (psKernelSyncInfo->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "oops: sync info ref count not zero at destruction"));
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM);
++ (IMG_VOID)OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
++
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++
++ psMemInfo->ui32RefCount--;
++
++
++ if(psMemInfo->ui32Flags & PVRSRV_MEM_EXPORTED)
++ {
++ IMG_HANDLE hMemInfo = IMG_NULL;
++
++
++ if (psMemInfo->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeDeviceMemCallBack: mappings are open in other processes"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ eError = PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &hMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeDeviceMemCallBack: can't find exported meminfo in the global handle list"));
++ return eError;
++ }
++
++
++ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
++ hMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeDeviceMemCallBack: PVRSRVReleaseHandle failed for exported meminfo"));
++ return eError;
++ }
++ }
++
++ PVR_ASSERT(psMemInfo->ui32RefCount == 0);
++
++ if (psMemInfo->psKernelSyncInfo)
++ {
++ psMemInfo->psKernelSyncInfo->ui32RefCount--;
++
++ if (psMemInfo->psKernelSyncInfo->ui32RefCount == 0)
++ {
++ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++ }
++ }
++
++ if (eError == PVRSRV_OK)
++ {
++ eError = FreeDeviceMem(psMemInfo);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psMemInfo->sMemBlk.hResItem != IMG_NULL)
++ {
++ eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++ }
++ else
++ {
++
++ eError = FreeDeviceMemCallBack(psMemInfo, 0);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ PVRSRV_ERROR eError;
++ BM_HEAP *psBMHeap;
++ IMG_HANDLE hDevMemContext;
++
++ if (!hDevMemHeap ||
++ (ui32Size == 0))
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ if (((ui32Size % HOST_PAGESIZE()) != 0) ||
++ ((ui32Alignment % HOST_PAGESIZE()) != 0))
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ eError = AllocDeviceMem(hDevCookie,
++ hDevMemHeap,
++ ui32Flags,
++ ui32Size,
++ ui32Alignment,
++ &psMemInfo);
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++ psMemInfo->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ goto free_mainalloc;
++ }
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++ }
++
++
++ *ppsMemInfo = psMemInfo;
++
++ if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
++ {
++ psMemInfo->sMemBlk.hResItem = IMG_NULL;
++ }
++ else
++ {
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ psMemInfo,
++ 0,
++ FreeDeviceMemCallBack);
++ if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
++ {
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto free_mainalloc;
++ }
++ }
++
++
++ psMemInfo->ui32RefCount++;
++
++
++ return (PVRSRV_OK);
++
++free_mainalloc:
++ FreeDeviceMem(psMemInfo);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManDissociateRes(psMemInfo->sMemBlk.hResItem, psDeviceNode->hResManContext);
++
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++ IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Free,
++ IMG_SIZE_T *pui32LargestBlock)
++{
++
++
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(pui32Total);
++ PVR_UNREFERENCED_PARAMETER(pui32Free);
++ PVR_UNREFERENCED_PARAMETER(pui32LargestBlock);
++
++ return PVRSRV_OK;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++ IMG_HANDLE hOSWrapMem;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ hOSWrapMem = psMemInfo->sMemBlk.hOSWrapMem;
++
++ if (psMemInfo->psKernelSyncInfo)
++ {
++ psMemInfo->psKernelSyncInfo->ui32RefCount--;
++ if (psMemInfo->psKernelSyncInfo->ui32RefCount == 0)
++ {
++ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++ }
++ }
++
++
++ if(psMemInfo->sMemBlk.psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL);
++ psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL;
++ }
++
++ if (eError == PVRSRV_OK)
++ {
++
++ psMemInfo->ui32RefCount--;
++
++ eError = FreeDeviceMem(psMemInfo);
++ }
++
++ if(hOSWrapMem)
++ {
++ OSReleasePhysPageAddr(hOSWrapMem);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psExtSysPAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++ IMG_HANDLE hDevMemHeap = IMG_NULL;
++ PVRSRV_DEVICE_NODE* psDeviceNode;
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ BM_HEAP *psBMHeap;
++ PVRSRV_ERROR eError;
++ IMG_VOID *pvPageAlignedCPUVAddr;
++ IMG_SYS_PHYADDR *psIntSysPAddr = IMG_NULL;
++ IMG_HANDLE hOSWrapMem = IMG_NULL;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_SIZE_T ui32PageCount = 0;
++ IMG_UINT32 i;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
++ PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++ if (psDeviceNode == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if(pvLinAddr)
++ {
++
++ ui32PageOffset = (IMG_UINTPTR_T)pvLinAddr & (ui32HostPageSize - 1);
++
++
++ ui32PageCount = HOST_PAGEALIGN(ui32ByteSize + ui32PageOffset) / ui32HostPageSize;
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvLinAddr - ui32PageOffset);
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount * sizeof(IMG_SYS_PHYADDR),
++ (IMG_VOID **)&psIntSysPAddr, IMG_NULL,
++ "Array of Page Addresses") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
++ ui32PageCount * ui32HostPageSize,
++ psIntSysPAddr,
++ &hOSWrapMem,
++ (ui32Flags != 0) ? IMG_TRUE : IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase1;
++ }
++
++
++ psExtSysPAddr = psIntSysPAddr;
++
++
++
++ bPhysContig = IMG_FALSE;
++ }
++ else
++ {
++
++ }
++
++
++ psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
++ {
++ if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
++ {
++
++ hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
++ }
++ else
++ {
++ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if(hDevMemHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: unable to find mapping heap"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ErrorExitPhase2;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase2;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemInfo->ui32Flags = ui32Flags;
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32PageOffset,
++ bPhysContig,
++ psExtSysPAddr,
++ IMG_NULL,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: BM_Wrap Failed"));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExitPhase3;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++ psMemBlock->hOSWrapMem = hOSWrapMem;
++ psMemBlock->psIntSysPAddr = psIntSysPAddr;
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExitPhase4;
++ }
++
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++
++
++ psMemInfo->ui32RefCount++;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_WRAP,
++ psMemInfo,
++ 0,
++ UnwrapExtMemoryCallBack);
++
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++
++
++ErrorExitPhase4:
++ if(psMemInfo)
++ {
++ FreeDeviceMem(psMemInfo);
++
++
++
++ psMemInfo = IMG_NULL;
++ }
++
++ErrorExitPhase3:
++ if(psMemInfo)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ }
++
++ErrorExitPhase2:
++ if(psIntSysPAddr)
++ {
++ OSReleasePhysPageAddr(hOSWrapMem);
++ }
++
++ErrorExitPhase1:
++ if(psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL);
++
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError;
++ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ if(psMapData->psMemInfo->sMemBlk.psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMapData->psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL);
++ psMapData->psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL;
++ }
++
++ psMapData->psMemInfo->psKernelSyncInfo->ui32RefCount--;
++ if (psMapData->psMemInfo->psKernelSyncInfo->ui32RefCount == 0)
++ {
++ eError = PVRSRVFreeSyncInfoKM(psMapData->psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free sync info"));
++ return eError;
++ }
++ }
++
++ eError = FreeDeviceMem(psMapData->psMemInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free DST meminfo"));
++ return eError;
++ }
++
++
++ psMapData->psSrcMemInfo->ui32RefCount--;
++
++ if (psMapData->psSrcMemInfo->ui32RefCount == 1 &&
++ psMapData->psSrcMemInfo->bPendingFree == IMG_TRUE)
++ {
++
++
++
++ if (psMapData->psSrcMemInfo->sMemBlk.hResItem != IMG_NULL)
++ {
++
++
++ eError = ResManFreeResByPtr(psMapData->psSrcMemInfo->sMemBlk.hResItem);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free SRC meminfo"));
++ PVR_DBG_BREAK;
++ }
++ }
++ else
++ {
++
++ eError = FreeDeviceMemCallBack(psMapData->psSrcMemInfo, 0);
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
++
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo)
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++ IMG_SIZE_T ui32PageCount, ui32PageOffset;
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
++ IMG_DEV_PHYADDR sDevPAddr;
++ BM_BUF *psBuf;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_VOID *pvPageAlignedCPUVAddr;
++ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = IMG_NULL;
++
++
++ if(!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ *ppsDstMemInfo = IMG_NULL;
++
++ ui32PageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1);
++ ui32PageCount = HOST_PAGEALIGN(psSrcMemInfo->ui32AllocSize + ui32PageOffset) / ui32HostPageSize;
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psSrcMemInfo->pvLinAddrKM - ui32PageOffset);
++
++
++
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount*sizeof(IMG_SYS_PHYADDR),
++ (IMG_VOID **)&psSysPAddr, IMG_NULL,
++ "Array of Page Addresses") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psBuf = psSrcMemInfo->sMemBlk.hBuffer;
++
++
++ psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++
++ sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - IMG_CAST_TO_DEVVADDR_UINT(ui32PageOffset);
++ for(i=0; i<ui32PageCount; i++)
++ {
++ BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
++
++
++ psSysPAddr[i] = SysDevPAddrToSysPAddr (psDeviceNode->sDevId.eDeviceType, sDevPAddr);
++
++
++ sDevVAddr.uiAddr += IMG_CAST_TO_DEVVADDR_UINT(ui32HostPageSize);
++ }
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(RESMAN_MAP_DEVICE_MEM_DATA),
++ (IMG_VOID **)&psMapData, IMG_NULL,
++ "Resource Manager Map Data") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc resman map data"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemInfo->ui32Flags = psSrcMemInfo->ui32Flags;
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDstDevMemHeap,
++ psSrcMemInfo->ui32AllocSize,
++ ui32PageOffset,
++ IMG_FALSE,
++ psSysPAddr,
++ pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed"));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExit;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++ psMemBlock->psIntSysPAddr = psSysPAddr;
++
++
++ psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;
++
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
++ psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;
++
++
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ psSrcMemInfo->ui32RefCount++;
++
++
++ psMapData->psMemInfo = psMemInfo;
++ psMapData->psSrcMemInfo = psSrcMemInfo;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_MAPPING,
++ psMapData,
++ 0,
++ UnmapDeviceMemoryCallBack);
++
++ *ppsDstMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++
++
++ErrorExit:
++
++ if(psSysPAddr)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psSysPAddr, IMG_NULL);
++
++ }
++
++ if(psMemInfo)
++ {
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ }
++
++ if(psMapData)
++ {
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
++
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ return FreeDeviceMem(psMemInfo);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++ IMG_HANDLE *phOSMapInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr;
++ IMG_BOOL bPhysContig;
++ BM_CONTEXT *psBMContext;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemHeap = IMG_NULL;
++ IMG_SIZE_T ui32ByteSize;
++ IMG_SIZE_T ui32Offset;
++ IMG_SIZE_T ui32PageSize = HOST_PAGESIZE();
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ IMG_UINT32 i;
++
++ if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo || !hDevMemContext)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer;
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice,
++ psDeviceClassBuffer->hExtBuffer,
++ &psSysPAddr,
++ &ui32ByteSize,
++ &pvCPUVAddr,
++ phOSMapInfo,
++ &bPhysContig);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext;
++ psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
++ {
++ if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
++ {
++
++ hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
++ }
++ else
++ {
++ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if(hDevMemHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to find mapping heap"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ ui32Offset = ((IMG_UINTPTR_T)pvCPUVAddr) & (ui32PageSize - 1);
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvCPUVAddr - ui32Offset);
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32Offset,
++ bPhysContig,
++ psSysPAddr,
++ pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++ psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++ psMemInfo,
++ 0,
++ UnmapDeviceClassMemoryCallBack);
++
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/handle.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/handle.c
+new file mode 100644
+index 0000000..6ac016a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/handle.c
+@@ -0,0 +1,1547 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifdef PVR_SECURE_HANDLES
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "handle.h"
++
++#ifdef DEBUG
++#define HANDLE_BLOCK_SIZE 1
++#else
++#define HANDLE_BLOCK_SIZE 256
++#endif
++
++#define HANDLE_HASH_TAB_INIT_SIZE 32
++
++#define DEFAULT_MAX_INDEX_PLUS_ONE 0xfffffffful
++#define DEFAULT_MAX_HANDLE DEFAULT_MAX_INDEX_PLUS_ONE
++
++#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
++
++#define INDEX_TO_HANDLE(psBase, idx) ((IMG_HANDLE)((idx) + 1))
++#define HANDLE_TO_INDEX(psBase, hand) ((IMG_UINT32)(hand) - 1)
++
++#define INDEX_TO_HANDLE_PTR(psBase, i) (((psBase)->psHandleArray) + (i))
++#define HANDLE_TO_HANDLE_PTR(psBase, h) (INDEX_TO_HANDLE_PTR(psBase, HANDLE_TO_INDEX(psBase, h)))
++
++#define HANDLE_PTR_TO_INDEX(psBase, psHandle) (IMG_UINT32)((psHandle) - ((psBase)->psHandleArray))
++#define HANDLE_PTR_TO_HANDLE(psBase, psHandle) \
++ INDEX_TO_HANDLE(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle))
++
++#define ROUND_UP_TO_MULTIPLE(a, b) ((((a) + (b) - 1) / (b)) * (b))
++
++#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0)
++
++#define SET_FLAG(v, f) ((IMG_VOID)((v) |= (f)))
++#define CLEAR_FLAG(v, f) ((IMG_VOID)((v) &= ~(f)))
++#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0))
++
++#define TEST_ALLOC_FLAG(psHandle, f) TEST_FLAG((psHandle)->eFlag, f)
++
++#define SET_INTERNAL_FLAG(psHandle, f) SET_FLAG((psHandle)->eInternalFlag, f)
++#define CLEAR_INTERNAL_FLAG(psHandle, f) CLEAR_FLAG((psHandle)->eInternalFlag, f)
++#define TEST_INTERNAL_FLAG(psHandle, f) TEST_FLAG((psHandle)->eInternalFlag, f)
++
++#define BATCHED_HANDLE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_BATCHED_HANDLE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_UNBATCHED_HANDLE(psHandle) CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define HANDLE_STRUCT_IS_FREE(psHandle) ((psHandle)->eType == PVRSRV_HANDLE_TYPE_NONE && (psHandle)->eInternalFlag == INTERNAL_HANDLE_FLAG_NONE)
++
++#ifdef MIN
++#undef MIN
++#endif
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++
++struct sHandleList
++{
++ IMG_UINT32 ui32Prev;
++ IMG_UINT32 ui32Next;
++ IMG_HANDLE hParent;
++};
++
++enum ePVRSRVInternalHandleFlag
++{
++ INTERNAL_HANDLE_FLAG_NONE = 0x00,
++ INTERNAL_HANDLE_FLAG_BATCHED = 0x01,
++ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE = 0x02,
++};
++
++struct sHandle
++{
++
++ PVRSRV_HANDLE_TYPE eType;
++
++
++ IMG_VOID *pvData;
++
++
++ IMG_UINT32 ui32NextIndexPlusOne;
++
++
++ enum ePVRSRVInternalHandleFlag eInternalFlag;
++
++
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag;
++
++
++ IMG_UINT32 ui32Index;
++
++
++ struct sHandleList sChildren;
++
++
++ struct sHandleList sSiblings;
++};
++
++struct _PVRSRV_HANDLE_BASE_
++{
++
++ IMG_HANDLE hBaseBlockAlloc;
++
++
++ IMG_HANDLE hHandBlockAlloc;
++
++
++ struct sHandle *psHandleArray;
++
++
++ HASH_TABLE *psHashTab;
++
++
++ IMG_UINT32 ui32FreeHandCount;
++
++
++ IMG_UINT32 ui32FirstFreeIndex;
++
++
++ IMG_UINT32 ui32MaxIndexPlusOne;
++
++
++ IMG_UINT32 ui32TotalHandCount;
++
++
++ IMG_UINT32 ui32LastFreeIndexPlusOne;
++
++
++ IMG_UINT32 ui32HandBatchSize;
++
++
++ IMG_UINT32 ui32TotalHandCountPreBatch;
++
++
++ IMG_UINT32 ui32FirstBatchIndexPlusOne;
++
++
++ IMG_UINT32 ui32BatchHandAllocFailures;
++
++
++ IMG_BOOL bPurgingEnabled;
++};
++
++enum eHandKey {
++ HAND_KEY_DATA = 0,
++ HAND_KEY_TYPE,
++ HAND_KEY_PARENT,
++ HAND_KEY_LEN
++};
++
++PVRSRV_HANDLE_BASE *gpsKernelHandleBase = IMG_NULL;
++
++typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN];
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInit)
++#endif
++static INLINE
++IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_HANDLE hParent)
++{
++ psList->ui32Next = ui32Index;
++ psList->ui32Prev = ui32Index;
++ psList->hParent = hParent;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitParentList)
++#endif
++static INLINE
++IMG_VOID InitParentList(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++
++ HandleListInit(ui32Parent, &psHandle->sChildren, INDEX_TO_HANDLE(psBase, ui32Parent));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitChildEntry)
++#endif
++static INLINE
++IMG_VOID InitChildEntry(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HandleListInit(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, IMG_NULL);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIsEmpty)
++#endif
++static INLINE
++IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList)
++{
++ IMG_BOOL bIsEmpty;
++
++ bIsEmpty = (IMG_BOOL)(psList->ui32Next == ui32Index);
++
++#ifdef DEBUG
++ {
++ IMG_BOOL bIsEmpty2;
++
++ bIsEmpty2 = (IMG_BOOL)(psList->ui32Prev == ui32Index);
++ PVR_ASSERT(bIsEmpty == bIsEmpty2);
++ }
++#endif
++
++ return bIsEmpty;
++}
++
++#ifdef DEBUG
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoChildren)
++#endif
++static INLINE
++IMG_BOOL NoChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ PVR_ASSERT(psHandle->sChildren.hParent == HANDLE_PTR_TO_HANDLE(psBase, psHandle));
++
++ return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sChildren);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoParent)
++#endif
++static INLINE
++IMG_BOOL NoParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ if (HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings))
++ {
++ PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL);
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL);
++ }
++ return IMG_FALSE;
++}
++#endif
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentHandle)
++#endif
++static INLINE
++IMG_HANDLE ParentHandle(struct sHandle *psHandle)
++{
++ return psHandle->sSiblings.hParent;
++}
++
++#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \
++ ((struct sHandleList *)((IMG_CHAR *)(INDEX_TO_HANDLE_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo))))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInsertBefore)
++#endif
++static INLINE
++IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32InsIndex, struct sHandleList *psIns, IMG_SIZE_T uiParentOffset, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_UINT32 ui32ParentIndex)
++{
++
++ struct sHandleList *psPrevIns = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, ui32ParentIndex, uiParentOffset, uiEntryOffset);
++
++ PVR_ASSERT(psEntry->hParent == IMG_NULL);
++ PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next);
++ PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, uiParentOffset)->hParent == INDEX_TO_HANDLE(psBase, ui32ParentIndex));
++
++ psEntry->ui32Prev = psIns->ui32Prev;
++ psIns->ui32Prev = ui32EntryIndex;
++ psEntry->ui32Next = ui32InsIndex;
++ psPrevIns->ui32Next = ui32EntryIndex;
++
++ psEntry->hParent = INDEX_TO_HANDLE(psBase, ui32ParentIndex);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(AdoptChild)
++#endif
++static INLINE
++IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, struct sHandle *psChild)
++{
++ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psParent->sChildren.hParent);
++
++ PVR_ASSERT(ui32Parent == HANDLE_PTR_TO_INDEX(psBase, psParent));
++
++ HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, offsetof(struct sHandle, sChildren), HANDLE_PTR_TO_INDEX(psBase, psChild), &psChild->sSiblings, offsetof(struct sHandle, sSiblings), ui32Parent);
++
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListRemove)
++#endif
++static INLINE
++IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_SIZE_T uiParentOffset)
++{
++ if (!HandleListIsEmpty(ui32EntryIndex, psEntry))
++ {
++
++ struct sHandleList *psPrev = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++ struct sHandleList *psNext = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++
++
++ PVR_ASSERT(psEntry->hParent != IMG_NULL);
++
++ psPrev->ui32Next = psEntry->ui32Next;
++ psNext->ui32Prev = psEntry->ui32Prev;
++
++ HandleListInit(ui32EntryIndex, psEntry, IMG_NULL);
++ }
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(UnlinkFromParent)
++#endif
++static INLINE
++IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, offsetof(struct sHandle, sSiblings), offsetof(struct sHandle, sChildren));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIterate)
++#endif
++static INLINE
++PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead, IMG_SIZE_T uiParentOffset, IMG_SIZE_T uiEntryOffset, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++ IMG_UINT32 ui32Index;
++ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psHead->hParent);
++
++ PVR_ASSERT(psHead->hParent != IMG_NULL);
++
++
++ for(ui32Index = psHead->ui32Next; ui32Index != ui32Parent; )
++ {
++ struct sHandle *psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++
++ struct sHandleList *psEntry = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, ui32Parent, uiParentOffset, uiEntryOffset);
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psEntry->hParent == psHead->hParent);
++
++ ui32Index = psEntry->ui32Next;
++
++ eError = (*pfnIterFunc)(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(IterateOverChildren)
++#endif
++static INLINE
++PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++ return HandleListIterate(psBase, &psParent->sChildren, offsetof(struct sHandle, sChildren), offsetof(struct sHandle, sSiblings), pfnIterFunc);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(GetHandleStructure)
++#endif
++static INLINE
++PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ IMG_UINT32 ui32Index = HANDLE_TO_INDEX(psBase, hHandle);
++ struct sHandle *psHandle;
++
++
++ if (!INDEX_IS_VALID(psBase, ui32Index))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle index out of range (%u >= %u)", ui32Index, psBase->ui32TotalHandCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++ if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle not allocated (index: %u)", ui32Index));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle type mismatch (%d != %d)", eType, psHandle->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ *ppsHandle = psHandle;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentIfPrivate)
++#endif
++static INLINE
++IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle)
++{
++ return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ ParentHandle(psHandle) : IMG_NULL;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitKey)
++#endif
++static INLINE
++IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData;
++ aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType;
++ aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent;
++}
++
++static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->psHandleArray != IMG_NULL)
++ {
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psBase->ui32TotalHandCount * sizeof(struct sHandle),
++ psBase->psHandleArray,
++ psBase->hHandBlockAlloc);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleArray: Error freeing memory (%d)", eError));
++ }
++ else
++ {
++ psBase->psHandleArray = IMG_NULL;
++ }
++ }
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HAND_KEY aKey;
++ IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++ PVRSRV_ERROR eError;
++
++
++ InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, ParentIfPrivate(psHandle));
++
++ if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ IMG_HANDLE hHandle;
++ hHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey);
++
++ PVR_ASSERT(hHandle != IMG_NULL);
++ PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++ }
++
++
++ UnlinkFromParent(psBase, psHandle);
++
++
++ eError = IterateOverChildren(psBase, psHandle, FreeHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandle: Error whilst freeing subhandles (%d)", eError));
++ return eError;
++ }
++
++
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++
++ if (BATCHED_HANDLE(psHandle) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle);
++
++ return PVRSRV_OK;
++ }
++
++
++ if (!psBase->bPurgingEnabled)
++ {
++ if (psBase->ui32FreeHandCount == 0)
++ {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = ui32Index;
++ }
++ else
++ {
++
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
++ INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32Index + 1;
++ }
++
++ PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);
++
++
++ psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
++ }
++
++ psBase->ui32FreeHandCount++;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++ {
++ return eError;
++ }
++
++ for (i = 0; i < psBase->ui32TotalHandCount; i++)
++ {
++ struct sHandle *psHandle;
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, i);
++
++ if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE)
++ {
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeAllHandles: FreeHandle failed (%d)", eError));
++ break;
++ }
++
++
++ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++ {
++ break;
++ }
++ }
++ }
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_WARNING, "FreeHandleBase: Uncommitted/Unreleased handle batch"));
++ PVRSRVReleaseHandleBatch(psBase);
++ }
++
++
++ eError = FreeAllHandles(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handles (%d)", eError));
++ return eError;
++ }
++
++
++ eError = FreeHandleArray(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle array (%d)", eError));
++ return eError;
++ }
++
++ if (psBase->psHashTab != IMG_NULL)
++ {
++
++ HASH_Delete(psBase->psHashTab);
++ }
++
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase),
++ psBase,
++ psBase->hBaseBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle base (%d)", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(FindHandle)
++#endif
++static INLINE
++IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++ HAND_KEY aKey;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
++}
++
++static PVRSRV_ERROR ReallocMem(IMG_PVOID *ppvMem, IMG_HANDLE *phBlockAlloc, IMG_UINT32 ui32NewSize, IMG_UINT32 ui32OldSize)
++{
++ IMG_VOID *pvOldMem = *ppvMem;
++ IMG_HANDLE hOldBlockAlloc = *phBlockAlloc;
++ IMG_UINT32 ui32CopySize = MIN(ui32NewSize, ui32OldSize);
++ IMG_VOID *pvNewMem = IMG_NULL;
++ IMG_HANDLE hNewBlockAlloc = IMG_NULL;
++ PVRSRV_ERROR eError;
++
++ if (ui32NewSize == ui32OldSize)
++ {
++ return (PVRSRV_OK);
++ }
++
++ if (ui32NewSize != 0)
++ {
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NewSize,
++ &pvNewMem,
++ &hNewBlockAlloc,
++ "Memory Area");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ReallocMem: Couldn't allocate new memory area (%d)", eError));
++ return eError;
++ }
++ }
++
++ if (ui32CopySize != 0)
++ {
++
++ OSMemCopy(pvNewMem, pvOldMem, ui32CopySize);
++ }
++
++ if (ui32OldSize != 0)
++ {
++
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32OldSize,
++ pvOldMem,
++ hOldBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ReallocMem: Couldn't free old memory area (%d)", eError));
++ }
++ }
++
++ *ppvMem = pvNewMem;
++ *phBlockAlloc = hNewBlockAlloc;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ReallocHandleArray)
++#endif
++static INLINE
++PVRSRV_ERROR ReallocHandleArray(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32NewCount, IMG_UINT32 ui32OldCount)
++{
++ return ReallocMem((IMG_PVOID *)&psBase->psHandleArray,
++ &psBase->hHandBlockAlloc,
++ ui32NewCount * sizeof(struct sHandle),
++ ui32OldCount * sizeof(struct sHandle));
++}
++
++static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Delta)
++{
++ PVRSRV_ERROR eError;
++ struct sHandle *psHandle;
++ IMG_UINT32 ui32DeltaAdjusted = ROUND_UP_TO_MULTIPLE(ui32Delta, HANDLE_BLOCK_SIZE);
++ IMG_UINT32 ui32NewTotalHandCount = psBase->ui32TotalHandCount + ui32DeltaAdjusted;
++;
++
++ PVR_ASSERT(ui32Delta != 0);
++
++
++ if (ui32NewTotalHandCount > psBase->ui32MaxIndexPlusOne || ui32NewTotalHandCount <= psBase->ui32TotalHandCount)
++ {
++ ui32NewTotalHandCount = psBase->ui32MaxIndexPlusOne;
++
++ ui32DeltaAdjusted = ui32NewTotalHandCount - psBase->ui32TotalHandCount;
++
++ if (ui32DeltaAdjusted < ui32Delta)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Maximum handle limit reached (%d)", psBase->ui32MaxIndexPlusOne));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++
++ PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta);
++
++
++ eError = ReallocHandleArray(psBase, ui32NewTotalHandCount, psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: ReallocHandleArray failed (%d)", eError));
++ return eError;
++ }
++
++
++ for(psHandle = psBase->psHandleArray + psBase->ui32TotalHandCount;
++ psHandle < psBase->psHandleArray + ui32NewTotalHandCount;
++ psHandle++)
++ {
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++ psHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psHandle->ui32NextIndexPlusOne = 0;
++ }
++
++
++ psBase->ui32FreeHandCount += ui32DeltaAdjusted;
++
++ if (psBase->ui32FirstFreeIndex == 0)
++ {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
++ }
++ else
++ {
++ if (!psBase->bPurgingEnabled)
++ {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0)
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
++
++ INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = psBase->ui32TotalHandCount + 1;
++ }
++ }
++
++ if (!psBase->bPurgingEnabled)
++ {
++ psBase->ui32LastFreeIndexPlusOne = ui32NewTotalHandCount;
++ }
++
++ psBase->ui32TotalHandCount = ui32NewTotalHandCount;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR EnsureFreeHandles(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Free)
++{
++ PVRSRV_ERROR eError;
++
++ if (ui32Free > psBase->ui32FreeHandCount)
++ {
++ IMG_UINT32 ui32FreeHandDelta = ui32Free - psBase->ui32FreeHandCount;
++ eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "EnsureFreeHandles: Couldn't allocate %u handles to ensure %u free handles (IncreaseHandleArraySize failed with error %d)", ui32FreeHandDelta, ui32Free, eError));
++
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ IMG_UINT32 ui32NewIndex;
++ struct sHandle *psNewHandle = IMG_NULL;
++ IMG_HANDLE hHandle;
++ HAND_KEY aKey;
++ PVRSRV_ERROR eError;
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ PVR_ASSERT(psBase->psHashTab != IMG_NULL);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL);
++ }
++
++ if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_WARNING, "AllocHandle: Handle batch size (%u) was too small, allocating additional space", psBase->ui32HandBatchSize));
++ }
++
++
++ eError = EnsureFreeHandles(psBase, 1);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: EnsureFreeHandles failed (%d)", eError));
++ return eError;
++ }
++ PVR_ASSERT(psBase->ui32FreeHandCount != 0)
++
++ if (!psBase->bPurgingEnabled)
++ {
++
++ ui32NewIndex = psBase->ui32FirstFreeIndex;
++
++
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ }
++ else
++ {
++
++ for(ui32NewIndex = psBase->ui32FirstFreeIndex; ui32NewIndex < psBase->ui32TotalHandCount; ui32NewIndex++)
++ {
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ if (HANDLE_STRUCT_IS_FREE(psNewHandle))
++ {
++ break;
++ }
++
++ }
++ psBase->ui32FirstFreeIndex = 0;
++ PVR_ASSERT(ui32NewIndex < psBase->ui32TotalHandCount);
++ }
++ PVR_ASSERT(psNewHandle != IMG_NULL);
++
++
++ hHandle = INDEX_TO_HANDLE(psBase, ui32NewIndex);
++
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++
++ if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (IMG_UINTPTR_T)hHandle))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ psBase->ui32FreeHandCount--;
++
++
++ if (!psBase->bPurgingEnabled)
++ {
++
++ if (psBase->ui32FreeHandCount == 0)
++ {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1));
++
++ psBase->ui32LastFreeIndexPlusOne = 0;
++ psBase->ui32FirstFreeIndex = 0;
++ }
++ else
++ {
++
++ psBase->ui32FirstFreeIndex = (psNewHandle->ui32NextIndexPlusOne == 0) ?
++ ui32NewIndex + 1 :
++ psNewHandle->ui32NextIndexPlusOne - 1;
++ }
++ }
++
++
++ psNewHandle->eType = eType;
++ psNewHandle->pvData = pvData;
++ psNewHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psNewHandle->eFlag = eFlag;
++ psNewHandle->ui32Index = ui32NewIndex;
++
++ InitParentList(psBase, psNewHandle);
++#if defined(DEBUG)
++ PVR_ASSERT(NoChildren(psBase, psNewHandle));
++#endif
++
++ InitChildEntry(psBase, psNewHandle);
++#if defined(DEBUG)
++ PVR_ASSERT(NoParent(psBase, psNewHandle));
++#endif
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psNewHandle->ui32NextIndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
++
++ psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1;
++
++ SET_BATCHED_HANDLE(psNewHandle);
++ }
++ else
++ {
++ psNewHandle->ui32NextIndexPlusOne = 0;
++ }
++
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ IMG_HANDLE hHandle;
++ PVRSRV_ERROR eError;
++
++ *phHandle = IMG_NULL;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psBase->ui32BatchHandAllocFailures++;
++ }
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
++ if (hHandle != IMG_NULL)
++ {
++ struct sHandle *psHandle;
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Lookup of existing handle failed"));
++ return eError;
++ }
++
++
++ if (TEST_FLAG(psHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED))
++ {
++ *phHandle = hHandle;
++ eError = PVRSRV_OK;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL);
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK))
++ {
++ psBase->ui32BatchHandAllocFailures--;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hParentKey;
++ IMG_HANDLE hHandle;
++
++ *phHandle = IMG_NULL;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psBase->ui32BatchHandAllocFailures++;
++ }
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ hParent : IMG_NULL;
++
++
++ eError = GetHandleStructure(psBase, &psPHand, hParent, PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ hHandle = FindHandle(psBase, pvData, eType, hParentKey);
++ if (hHandle != IMG_NULL)
++ {
++ struct sHandle *psCHandle;
++ PVRSRV_ERROR eErr;
++
++ eErr = GetHandleStructure(psBase, &psCHandle, hHandle, eType);
++ if (eErr != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
++ return eErr;
++ }
++
++ PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent);
++
++
++ if (TEST_FLAG(psCHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent)
++ {
++ *phHandle = hHandle;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ psPHand = HANDLE_TO_HANDLE_PTR(psBase, hParent);
++
++ psCHand = HANDLE_TO_HANDLE_PTR(psBase, hHandle);
++
++ AdoptChild(psBase, psPHand, psCHand);
++
++ *phHandle = hHandle;
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase))
++ {
++ psBase->ui32BatchHandAllocFailures--;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++ IMG_HANDLE hHandle;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++
++ hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL);
++ if (hHandle == IMG_NULL)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++ *peType = psHandle->eType;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", eError));
++ return eError;
++ }
++
++
++ for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor; )
++ {
++ eError = GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ *ppvData = psCHand->pvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Error looking up subhandle (%d)", eError));
++ return eError;
++ }
++
++ *phParent = ParentHandle(psHandle);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
++{
++ PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: There is a handle batch already in use (size %u)", psBase->ui32HandBatchSize));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32BatchSize == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: Invalid batch size (%u)", ui32BatchSize));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = EnsureFreeHandles(psBase, ui32BatchSize);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: EnsureFreeHandles failed (error %d)", eError));
++ return eError;
++ }
++
++ psBase->ui32HandBatchSize = ui32BatchSize;
++
++
++ psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount;
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0);
++
++ PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0);
++
++ PVR_ASSERT(HANDLES_BATCHED(psBase));
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(PVRSRV_HANDLE_BASE *psBase, IMG_BOOL bCommit)
++{
++
++ IMG_UINT32 ui32IndexPlusOne;
++ IMG_BOOL bCommitBatch = bCommit;
++
++ if (!HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: There is no handle batch"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ }
++
++ if (psBase->ui32BatchHandAllocFailures != 0)
++ {
++ if (bCommit)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Attempting to commit batch with handle allocation failures."));
++ }
++ bCommitBatch = IMG_FALSE;
++ }
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit);
++
++ ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
++ while(ui32IndexPlusOne != 0)
++ {
++ struct sHandle *psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32IndexPlusOne - 1);
++ IMG_UINT32 ui32NextIndexPlusOne = psHandle->ui32NextIndexPlusOne;
++ PVR_ASSERT(BATCHED_HANDLE(psHandle));
++
++ psHandle->ui32NextIndexPlusOne = 0;
++
++ if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ PVRSRV_ERROR eError;
++
++
++ if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Error freeing handle (%d)", eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++ else
++ {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ ui32IndexPlusOne = ui32NextIndexPlusOne;
++ }
++
++#ifdef DEBUG
++ if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount)
++ {
++ IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - psBase->ui32TotalHandCountPreBatch;
++
++ PVR_ASSERT(psBase->ui32TotalHandCount > psBase->ui32TotalHandCountPreBatch);
++
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVHandleBatchCommitOrRelease: The batch size was too small. Batch size was %u, but needs to be %u", psBase->ui32HandBatchSize, psBase->ui32HandBatchSize + ui32Delta));
++
++ }
++#endif
++
++ psBase->ui32HandBatchSize = 0;
++ psBase->ui32FirstBatchIndexPlusOne = 0;
++ psBase->ui32TotalHandCountPreBatch = 0;
++ psBase->ui32BatchHandAllocFailures = 0;
++
++ if (psBase->ui32BatchHandAllocFailures != 0 && bCommit)
++ {
++ PVR_ASSERT(!bCommitBatch);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ return PVRSRVHandleBatchCommitOrRelease(psBase, IMG_TRUE);
++}
++
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ (IMG_VOID) PVRSRVHandleBatchCommitOrRelease(psBase, IMG_FALSE);
++}
++
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
++{
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set whilst in batch mode"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (ui32MaxHandle == 0 || ui32MaxHandle >= DEFAULT_MAX_HANDLE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit must be between %u and %u, inclusive", 0, DEFAULT_MAX_HANDLE));
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (psBase->ui32TotalHandCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set becuase handles have already been allocated"));
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->ui32MaxIndexPlusOne = ui32MaxHandle;
++
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
++{
++ return psBase->ui32MaxIndexPlusOne;
++}
++
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
++{
++ if (psBase->bPurgingEnabled)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVEnableHandlePurging: Purging already enabled"));
++ return PVRSRV_OK;
++ }
++
++
++ if (psBase->ui32TotalHandCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVEnableHandlePurging: Handles have already been allocated"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->bPurgingEnabled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ IMG_UINT32 ui32Handle;
++ IMG_UINT32 ui32NewHandCount;
++
++ if (!psBase->bPurgingEnabled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not enabled for this handle base"));
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++ }
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not allowed whilst in batch mode"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ for (ui32Handle = psBase->ui32TotalHandCount; ui32Handle != 0; ui32Handle--)
++ {
++ struct sHandle *psHandle = HANDLE_TO_HANDLE_PTR(psBase, ui32Handle);
++ if (!HANDLE_STRUCT_IS_FREE(psHandle))
++ {
++ break;
++ }
++ }
++
++ ui32NewHandCount = ROUND_UP_TO_MULTIPLE(ui32Handle, HANDLE_BLOCK_SIZE);
++
++
++ if (ui32NewHandCount >= ui32Handle && ui32NewHandCount <= (psBase->ui32TotalHandCount/2))
++ {
++ IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - ui32NewHandCount;
++ PVRSRV_ERROR eError;
++
++
++
++ eError = ReallocHandleArray(psBase, ui32NewHandCount, psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ psBase->ui32TotalHandCount = ui32NewHandCount;
++ psBase->ui32FreeHandCount -= ui32Delta;
++ psBase->ui32FirstFreeIndex = 0;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
++{
++ PVRSRV_HANDLE_BASE *psBase;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ERROR eError;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase),
++ (IMG_PVOID *)&psBase,
++ &hBlockAlloc,
++ "Handle Base");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", eError));
++ return eError;
++ }
++ OSMemSet(psBase, 0, sizeof(*psBase));
++
++
++ psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), HASH_Func_Default, HASH_Key_Comp_Default);
++ if (psBase->psHashTab == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n"));
++ goto failure;
++ }
++
++ psBase->hBaseBlockAlloc = hBlockAlloc;
++
++ psBase->ui32MaxIndexPlusOne = DEFAULT_MAX_INDEX_PLUS_ONE;
++
++ *ppsBase = psBase;
++
++ return PVRSRV_OK;
++failure:
++ (IMG_VOID)PVRSRVFreeHandleBase(psBase);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psBase != gpsKernelHandleBase);
++
++ eError = FreeHandleBase(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeHandleBase: FreeHandleBase failed (%d)", eError));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsKernelHandleBase == IMG_NULL);
++
++ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%d)", eError));
++ goto error;
++ }
++
++ eError = PVRSRVEnableHandlePurging(gpsKernelHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%d)", eError));
++ goto error;
++ }
++
++ return PVRSRV_OK;
++error:
++ (IMG_VOID) PVRSRVHandleDeInit();
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (gpsKernelHandleBase != IMG_NULL)
++ {
++ eError = FreeHandleBase(gpsKernelHandleBase);
++ if (eError == PVRSRV_OK)
++ {
++ gpsKernelHandleBase = IMG_NULL;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleDeInit: FreeHandleBase failed (%d)", eError));
++ }
++ }
++
++ return eError;
++}
++#else
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/hash.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/hash.c
+new file mode 100644
+index 0000000..489a9c5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/hash.c
+@@ -0,0 +1,463 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "pvr_debug.h"
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "hash.h"
++#include "osfunc.h"
++
++#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
++
++#define KEY_TO_INDEX(pHash, key, uSize) \
++ ((pHash)->pfnHashFunc((pHash)->uKeySize, key, uSize) % uSize)
++
++#define KEY_COMPARE(pHash, pKey1, pKey2) \
++ ((pHash)->pfnKeyComp((pHash)->uKeySize, pKey1, pKey2))
++
++struct _BUCKET_
++{
++
++ struct _BUCKET_ *pNext;
++
++
++ IMG_UINTPTR_T v;
++
++
++ IMG_UINTPTR_T k[];
++};
++typedef struct _BUCKET_ BUCKET;
++
++struct _HASH_TABLE_
++{
++
++ BUCKET **ppBucketTable;
++
++
++ IMG_UINT32 uSize;
++
++
++ IMG_UINT32 uCount;
++
++
++ IMG_UINT32 uMinimumSize;
++
++
++ IMG_UINT32 uKeySize;
++
++
++ HASH_FUNC *pfnHashFunc;
++
++
++ HASH_KEY_COMP *pfnKeyComp;
++};
++
++IMG_UINT32
++HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen)
++{
++ IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey;
++ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++ IMG_UINT32 ui;
++ IMG_UINT32 uHashKey = 0;
++
++ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
++
++ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++)
++ {
++ IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
++
++ uHashPart += (uHashPart << 12);
++ uHashPart ^= (uHashPart >> 22);
++ uHashPart += (uHashPart << 4);
++ uHashPart ^= (uHashPart >> 9);
++ uHashPart += (uHashPart << 10);
++ uHashPart ^= (uHashPart >> 2);
++ uHashPart += (uHashPart << 7);
++ uHashPart ^= (uHashPart >> 12);
++
++ uHashKey += uHashPart;
++ }
++
++ return uHashKey;
++}
++
++IMG_BOOL
++HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2)
++{
++ IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1;
++ IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2;
++ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++ IMG_UINT32 ui;
++
++ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++)
++ {
++ if (*p1++ != *p2++)
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++static PVRSRV_ERROR
++_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
++{
++ IMG_UINT32 uIndex;
++
++ PVR_ASSERT (pBucket != IMG_NULL);
++ PVR_ASSERT (ppBucketTable != IMG_NULL);
++ PVR_ASSERT (uSize != 0);
++
++ if ((pBucket == IMG_NULL) || (ppBucketTable == IMG_NULL) || (uSize == 0))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_ChainInsert: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);
++ pBucket->pNext = ppBucketTable[uIndex];
++ ppBucketTable[uIndex] = pBucket;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_Rehash (HASH_TABLE *pHash,
++ BUCKET **ppOldTable, IMG_UINT32 uOldSize,
++ BUCKET **ppNewTable, IMG_UINT32 uNewSize)
++{
++ IMG_UINT32 uIndex;
++ for (uIndex=0; uIndex< uOldSize; uIndex++)
++ {
++ BUCKET *pBucket;
++ pBucket = ppOldTable[uIndex];
++ while (pBucket != IMG_NULL)
++ {
++ BUCKET *pNextBucket = pBucket->pNext;
++ if (_ChainInsert (pHash, pBucket, ppNewTable, uNewSize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_Rehash: call to _ChainInsert failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ pBucket = pNextBucket;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static IMG_BOOL
++_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
++{
++ if (uNewSize != pHash->uSize)
++ {
++ BUCKET **ppNewTable;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x",
++ pHash->uSize, uNewSize, pHash->uCount));
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof (BUCKET *) * uNewSize,
++ (IMG_PVOID*)&ppNewTable, IMG_NULL,
++ "Hash Table Buckets");
++ if (ppNewTable == IMG_NULL)
++ return IMG_FALSE;
++
++ for (uIndex=0; uIndex<uNewSize; uIndex++)
++ ppNewTable[uIndex] = IMG_NULL;
++
++ if (_Rehash (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize) != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ OSFreeMem (PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL);
++
++ pHash->ppBucketTable = ppNewTable;
++ pHash->uSize = uNewSize;
++ }
++ return IMG_TRUE;
++}
++
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
++{
++ HASH_TABLE *pHash;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(HASH_TABLE),
++ (IMG_VOID **)&pHash, IMG_NULL,
++ "Hash Table") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ pHash->uCount = 0;
++ pHash->uSize = uInitialLen;
++ pHash->uMinimumSize = uInitialLen;
++ pHash->uKeySize = uKeySize;
++ pHash->pfnHashFunc = pfnHashFunc;
++ pHash->pfnKeyComp = pfnKeyComp;
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof (BUCKET *) * pHash->uSize,
++ (IMG_PVOID*)&pHash->ppBucketTable, IMG_NULL,
++ "Hash Table Buckets");
++
++ if (pHash->ppBucketTable == IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL);
++
++ return IMG_NULL;
++ }
++
++ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++ pHash->ppBucketTable[uIndex] = IMG_NULL;
++ return pHash;
++}
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
++{
++ return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T),
++ &HASH_Func_Default, &HASH_Key_Comp_Default);
++}
++
++IMG_VOID
++HASH_Delete (HASH_TABLE *pHash)
++{
++ if (pHash != IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
++
++ PVR_ASSERT (pHash->uCount==0);
++ if(pHash->uCount != 0)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "HASH_Delete: leak detected in hash table!"));
++ PVR_DPF ((PVR_DBG_ERROR, "Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
++ }
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL);
++ pHash->ppBucketTable = IMG_NULL;
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL);
++
++ }
++}
++
++IMG_BOOL
++HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v)
++{
++ BUCKET *pBucket;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Insert_Extended: Hash=%08X, pKey=%08X, v=0x%x", pHash, pKey, v));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
++ return IMG_FALSE;
++ }
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET) + pHash->uKeySize,
++ (IMG_VOID **)&pBucket, IMG_NULL,
++ "Hash Table entry") != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ pBucket->v = v;
++
++ OSMemCopy(pBucket->k, pKey, pHash->uKeySize);
++ if (_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize) != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ pHash->uCount++;
++
++
++ if (pHash->uCount << 1 > pHash->uSize)
++ {
++
++
++ _Resize (pHash, pHash->uSize << 1);
++ }
++
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Insert: Hash=%08X, k=0x%x, v=0x%x", pHash, k, v));
++
++ return HASH_Insert_Extended(pHash, &k, v);
++}
++
++IMG_UINTPTR_T
++HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++ BUCKET **ppBucket;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove_Extended: Hash=%08X, pKey=%08X", pHash, pKey));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++ {
++
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++ {
++ BUCKET *pBucket = *ppBucket;
++ IMG_UINTPTR_T v = pBucket->v;
++ (*ppBucket) = pBucket->pNext;
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET) + pHash->uKeySize, pBucket, IMG_NULL);
++
++
++ pHash->uCount--;
++
++
++ if (pHash->uSize > (pHash->uCount << 2) &&
++ pHash->uSize > pHash->uMinimumSize)
++ {
++
++
++ _Resize (pHash,
++ PRIVATE_MAX (pHash->uSize >> 1,
++ pHash->uMinimumSize));
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v));
++ return v;
++ }
++ }
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++ return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, k=0x%x", pHash, k));
++
++ return HASH_Remove_Extended(pHash, &k);
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++ BUCKET **ppBucket;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve_Extended: Hash=%08X, pKey=%08X", pHash,pKey));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++ {
++
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++ {
++ BUCKET *pBucket = *ppBucket;
++ IMG_UINTPTR_T v = pBucket->v;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v));
++ return v;
++ }
++ }
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++ return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, k=0x%x", pHash,k));
++ return HASH_Retrieve_Extended(pHash, &k);
++}
++
++#ifdef HASH_TRACE
++IMG_VOID
++HASH_Dump (HASH_TABLE *pHash)
++{
++ IMG_UINT32 uIndex;
++ IMG_UINT32 uMaxLength=0;
++ IMG_UINT32 uEmptyCount=0;
++
++ PVR_ASSERT (pHash != IMG_NULL);
++ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++ {
++ BUCKET *pBucket;
++ IMG_UINT32 uLength = 0;
++ if (pHash->ppBucketTable[uIndex] == IMG_NULL)
++ uEmptyCount++;
++ for (pBucket=pHash->ppBucketTable[uIndex];
++ pBucket != IMG_NULL;
++ pBucket = pBucket->pNext)
++ uLength++;
++ uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
++ }
++
++ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
++ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
++ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
++}
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/lists.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/lists.c
+new file mode 100644
+index 0000000..cb54071
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/lists.c
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "lists.h"
++#include "services_headers.h"
++
++IMPLEMENT_LIST_ANY_VA(BM_HEAP)
++IMPLEMENT_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH_VA(BM_HEAP)
++IMPLEMENT_LIST_REMOVE(BM_HEAP)
++IMPLEMENT_LIST_INSERT(BM_HEAP)
++
++IMPLEMENT_LIST_ANY_VA(BM_CONTEXT)
++IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL)
++IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH(BM_CONTEXT)
++IMPLEMENT_LIST_REMOVE(BM_CONTEXT)
++IMPLEMENT_LIST_INSERT(BM_CONTEXT)
++
++IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_INSERT(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
++
++IMPLEMENT_LIST_ANY_VA(PVRSRV_POWER_DEV)
++IMPLEMENT_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_INSERT(PVRSRV_POWER_DEV)
++IMPLEMENT_LIST_REMOVE(PVRSRV_POWER_DEV)
++
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va)
++{
++ IMG_UINT32 ui32DevIndex;
++ IMG_BOOL bIgnoreClass;
++ PVRSRV_DEVICE_CLASS eDevClass;
++
++ ui32DevIndex = va_arg(va, IMG_UINT32);
++ bIgnoreClass = va_arg(va, IMG_BOOL);
++ if (!bIgnoreClass)
++ {
++ eDevClass = va_arg(va, PVRSRV_DEVICE_CLASS);
++ }
++ else
++ {
++
++
++ eDevClass = PVRSRV_DEVICE_CLASS_FORCE_I32;
++ }
++
++ if ((bIgnoreClass || psDeviceNode->sDevId.eDeviceClass == eDevClass) &&
++ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
++ {
++ return psDeviceNode;
++ }
++ return IMG_NULL;
++}
++
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va)
++{
++ IMG_UINT32 ui32DeviceIndex;
++
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++
++ if (psPowerDev->ui32DeviceIndex == ui32DeviceIndex)
++ {
++ return psPowerDev;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem.c
+new file mode 100644
+index 0000000..ad2ec50
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem.c
+@@ -0,0 +1,151 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "pvr_bridge_km.h"
++
++
++static PVRSRV_ERROR
++FreeSharedSysMemCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ OSFreePages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ psKernelMemInfo->pvLinAddrKM,
++ psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ psKernelMemInfo,
++ IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psKernelMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));
++
++ ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
++ ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
++ psKernelMemInfo->ui32Flags = ui32Flags;
++ psKernelMemInfo->ui32AllocSize = ui32Size;
++
++ if(OSAllocPages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ HOST_PAGESIZE(),
++ &psKernelMemInfo->pvLinAddrKM,
++ &psKernelMemInfo->sMemBlk.hOSMemHandle)
++ != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ psKernelMemInfo,
++ 0);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psKernelMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ psKernelMemInfo,
++ 0,
++ FreeSharedSysMemCallBack);
++
++ *ppsKernelMemInfo = psKernelMemInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if(psKernelMemInfo->sMemBlk.hResItem)
++ {
++ eError = ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem);
++ }
++ else
++ {
++ eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(!psKernelMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if(psKernelMemInfo->sMemBlk.hResItem)
++ {
++ eError = ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem, IMG_NULL);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDissociateMemFromResmanKM: ResManDissociateRes failed"));
++ PVR_DBG_BREAK;
++ return eError;
++ }
++
++ psKernelMemInfo->sMemBlk.hResItem = IMG_NULL;
++ }
++
++ return eError;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem_debug.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem_debug.c
+new file mode 100644
+index 0000000..eeb86ae
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem_debug.c
+@@ -0,0 +1,250 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef MEM_DEBUG_C
++#define MEM_DEBUG_C
++
++#if defined(PVRSRV_DEBUG_OS_MEMORY)
++
++#include "img_types.h"
++#include "services_headers.h"
++
++#if defined (__cplusplus)
++extern "C"
++{
++#endif
++
++#define STOP_ON_ERROR 0
++
++
++
++
++
++
++
++
++
++ IMG_BOOL MemCheck(const IMG_PVOID pvAddr, const IMG_UINT8 ui8Pattern, IMG_SIZE_T uSize)
++ {
++ IMG_UINT8 *pui8Addr;
++ for (pui8Addr = (IMG_UINT8*)pvAddr; uSize > 0; uSize--, pui8Addr++)
++ {
++ if (*pui8Addr != ui8Pattern)
++ {
++ return IMG_FALSE;
++ }
++ }
++ return IMG_TRUE;
++ }
++
++
++
++ IMG_VOID OSCheckMemDebug(IMG_PVOID pvCpuVAddr, IMG_SIZE_T uSize, const IMG_CHAR *pszFileName, const IMG_UINT32 uLine)
++ {
++ OSMEM_DEBUG_INFO const *psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32)pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
++
++
++ if (pvCpuVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : null pointer"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (((IMG_UINT32)pvCpuVAddr&3) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : invalid alignment"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (!MemCheck((IMG_PVOID)psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region before overwritten"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (uSize != psInfo->uSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : supplied size was different to stored size (0x%X != 0x%X)"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, uSize, psInfo->uSize,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if ((0x01234567 ^ psInfo->uSizeParityCheck) != psInfo->uSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : stored size parity error (0x%X != 0x%X)"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, psInfo->uSize, 0x01234567 ^ psInfo->uSizeParityCheck,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++ else
++ {
++
++ uSize = psInfo->uSize;
++ }
++
++
++ if (uSize)
++ {
++ if (!MemCheck((IMG_VOID*)((IMG_UINT32)pvCpuVAddr + uSize), 0xB2, TEST_BUFFER_PADDING_AFTER))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region after overwritten"
++ " - referenced from %s:%d - allocated from %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ }
++ }
++
++
++ if (psInfo->eValid != isAllocated)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : not allocated (freed? %d)"
++ " - referenced %s:%d - freed %s:%d",
++ pvCpuVAddr, psInfo->eValid == isFree,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++ }
++
++ IMG_VOID debug_strcpy(IMG_CHAR *pDest, const IMG_CHAR *pSrc)
++ {
++ IMG_SIZE_T i = 0;
++
++ for (; i < 128; i++)
++ {
++ *pDest = *pSrc;
++ if (*pSrc == '\0') break;
++ pDest++;
++ pSrc++;
++ }
++ }
++
++ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID *ppvCpuVAddr,
++ IMG_HANDLE *phBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line)
++ {
++ OSMEM_DEBUG_INFO *psInfo;
++
++ PVRSRV_ERROR eError;
++
++ eError = OSAllocMem_Debug_Linux_Memory_Allocations(ui32Flags,
++ ui32Size + TEST_BUFFER_PADDING,
++ ppvCpuVAddr,
++ phBlockAlloc,
++ pszFilename,
++ ui32Line);
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + TEST_BUFFER_PADDING_STATUS, 0xBB, ui32Size);
++ OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + ui32Size + TEST_BUFFER_PADDING_STATUS, 0xB2, TEST_BUFFER_PADDING_AFTER);
++
++
++ psInfo = (OSMEM_DEBUG_INFO *)(*ppvCpuVAddr);
++
++ OSMemSet(psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore));
++ debug_strcpy(psInfo->sFileName, pszFilename);
++ psInfo->uLineNo = ui32Line;
++ psInfo->eValid = isAllocated;
++ psInfo->uSize = ui32Size;
++ psInfo->uSizeParityCheck = 0x01234567 ^ ui32Size;
++
++
++ *ppvCpuVAddr = (IMG_PVOID) ((IMG_UINT32)*ppvCpuVAddr)+TEST_BUFFER_PADDING_STATUS;
++
++#ifdef PVRSRV_LOG_MEMORY_ALLOCS
++
++ PVR_TRACE(("Allocated pointer (after debug info): 0x%X from %s:%d", *ppvCpuVAddr, pszFilename, ui32Line));
++#endif
++
++ return PVRSRV_OK;
++ }
++
++ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID pvCpuVAddr,
++ IMG_HANDLE hBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line)
++ {
++ OSMEM_DEBUG_INFO *psInfo;
++
++
++ OSCheckMemDebug(pvCpuVAddr, ui32Size, pszFilename, ui32Line);
++
++
++ OSMemSet(pvCpuVAddr, 0xBF, ui32Size + TEST_BUFFER_PADDING_AFTER);
++
++
++ psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32) pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
++
++
++ psInfo->uSize = 0;
++ psInfo->uSizeParityCheck = 0;
++ psInfo->eValid = isFree;
++ psInfo->uLineNo = ui32Line;
++ debug_strcpy(psInfo->sFileName, pszFilename);
++
++ return OSFreeMem_Debug_Linux_Memory_Allocations(ui32Flags, ui32Size + TEST_BUFFER_PADDING, psInfo, hBlockAlloc, pszFilename, ui32Line);
++ }
++
++#if defined (__cplusplus)
++
++}
++#endif
++
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/metrics.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/metrics.c
+new file mode 100644
+index 0000000..216696e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/metrics.c
+@@ -0,0 +1,160 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "metrics.h"
++
++#if defined(SUPPORT_VGX)
++#include "vgxapi_km.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgxapi_km.h"
++#endif
++
++#if defined(DEBUG) || defined(TIMING)
++
++static volatile IMG_UINT32 *pui32TimerRegister = 0;
++
++#define PVRSRV_TIMER_TOTAL_IN_TICKS(X) asTimers[X].ui32Total
++#define PVRSRV_TIMER_TOTAL_IN_MS(X) ((1000*asTimers[X].ui32Total)/ui32TicksPerMS)
++#define PVRSRV_TIMER_COUNT(X) asTimers[X].ui32Count
++
++
++Temporal_Data asTimers[PVRSRV_NUM_TIMERS];
++
++
++IMG_UINT32 PVRSRVTimeNow(IMG_VOID)
++{
++ if (!pui32TimerRegister)
++ {
++ static IMG_BOOL bFirstTime = IMG_TRUE;
++
++ if (bFirstTime)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVTimeNow: No timer register set up"));
++
++ bFirstTime = IMG_FALSE;
++ }
++
++ return 0;
++ }
++
++#if defined(__sh__)
++
++ return (0xffffffff-*pui32TimerRegister);
++
++#else
++
++ return 0;
++
++#endif
++}
++
++
++static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
++{
++ IMG_UINT32 ui32Time1, ui32Time2;
++
++ ui32Time1 = PVRSRVTimeNow();
++
++ OSWaitus(1000000);
++
++ ui32Time2 = PVRSRVTimeNow();
++
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32Time2 - ui32Time1));
++
++ return (ui32Time2 - ui32Time1);
++}
++
++
++IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo)
++{
++ IMG_UINT32 ui32Loop;
++
++ PVR_UNREFERENCED_PARAMETER(pvDevInfo);
++
++ for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++ {
++ asTimers[ui32Loop].ui32Total = 0;
++ asTimers[ui32Loop].ui32Count = 0;
++ }
++
++
++ #if defined(__sh__)
++
++
++
++
++
++ *TCR_2 = TIMER_DIVISOR;
++
++
++ *TCOR_2 = *TCNT_2 = (IMG_UINT)0xffffffff;
++
++
++ *TST_REG |= (IMG_UINT8)0x04;
++
++ pui32TimerRegister = (IMG_UINT32 *)TCNT_2;
++
++ #else
++
++ pui32TimerRegister = 0;
++
++ #endif
++
++}
++
++
++IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID)
++{
++ IMG_UINT32 ui32TicksPerMS, ui32Loop;
++
++ ui32TicksPerMS = PVRSRVGetCPUFreq();
++
++ if (!ui32TicksPerMS)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOutputMetricTotals: Failed to get CPU Freq"));
++ return;
++ }
++
++ for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++ {
++ if (asTimers[ui32Loop].ui32Count & 0x80000000L)
++ {
++ PVR_DPF((PVR_DBG_WARNING,"PVRSRVOutputMetricTotals: Timer %u is still ON", ui32Loop));
++ }
++ }
++#if 0
++
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Total = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1)));
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Time = %ums",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1)));
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Count = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1)));
++#endif
++}
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pdump_common.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pdump_common.c
+new file mode 100644
+index 0000000..94bfc09
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pdump_common.c
+@@ -0,0 +1,1723 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(PDUMP)
++#include <stdarg.h>
++
++#include "services_headers.h"
++#if defined(SUPPORT_SGX)
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#endif
++#include "pdump_km.h"
++
++#if !defined(PDUMP_TEMP_BUFFER_SIZE)
++#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024L)
++#endif
++
++#if 1
++#define PDUMP_DBG(a) PDumpOSDebugPrintf a
++#else
++#define PDUMP_DBG(a)
++#endif
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++#define PTR_PLUS(t, p, x) ((t *)(((IMG_CHAR *)(p)) + (x)))
++#define VPTR_PLUS(p, x) PTR_PLUS(IMG_VOID, p, x)
++#define VPTR_INC(p, x) (p = VPTR_PLUS(p, x))
++#define MAX_PDUMP_MMU_CONTEXTS (32)
++static IMG_VOID *gpvTempBuffer = IMG_NULL;
++static IMG_HANDLE ghTempBufferBlockAlloc;
++static IMG_UINT16 gui16MMUContextUsage = 0;
++
++
++
++static IMG_VOID *GetTempBuffer(IMG_VOID)
++{
++
++ if (gpvTempBuffer == IMG_NULL)
++ {
++ PVRSRV_ERROR eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ &gpvTempBuffer,
++ &ghTempBufferBlockAlloc,
++ "PDUMP Temporary Buffer");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed: %d", eError));
++ }
++ }
++
++ return gpvTempBuffer;
++}
++
++static IMG_VOID FreeTempBuffer(IMG_VOID)
++{
++
++ if (gpvTempBuffer != IMG_NULL)
++ {
++ PVRSRV_ERROR eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ gpvTempBuffer,
++ ghTempBufferBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeTempBuffer: OSFreeMem failed: %d", eError));
++ }
++ else
++ {
++ gpvTempBuffer = IMG_NULL;
++ }
++ }
++}
++
++IMG_VOID PDumpInitCommon(IMG_VOID)
++{
++
++ (IMG_VOID) GetTempBuffer();
++
++
++ PDumpInit();
++}
++
++IMG_VOID PDumpDeInitCommon(IMG_VOID)
++{
++
++ FreeTempBuffer();
++
++
++ PDumpDeInit();
++}
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP)
++
++IMG_BOOL PDumpIsSuspended(IMG_VOID)
++{
++ return PDumpOSIsSuspended();
++}
++
++PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32Reg, IMG_UINT32 ui32Data, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING()
++ PDUMP_DBG(("PDumpRegWithFlagsKM"));
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegKM(IMG_UINT32 ui32Reg,IMG_UINT32 ui32Data)
++{
++ return PDumpRegWithFlagsKM(ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask, IMG_UINT32 ui32Flags)
++{
++
++ #define POLL_DELAY 1000UL
++ #define POLL_COUNT_LONG (2000000000UL / POLL_DELAY)
++ #define POLL_COUNT_SHORT (1000000UL / POLL_DELAY)
++
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PollCount;
++
++ PDUMP_GET_SCRIPT_STRING();
++ PDUMP_DBG(("PDumpRegPolWithFlagsKM"));
++
++ if (((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_TA_FINISHED_MASK) != 0) ||
++ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK) != 0) ||
++ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK) != 0))
++ {
++ ui32PollCount = POLL_COUNT_LONG;
++ }
++ else
++ {
++ ui32PollCount = POLL_COUNT_SHORT;
++ }
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :SGXREG:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %lu %d\r\n",
++ ui32RegAddr, ui32RegValue, ui32Mask, 0, ui32PollCount, POLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask)
++{
++ return PDumpRegPolWithFlagsKM(ui32RegAddr, ui32RegValue, ui32Mask, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpMallocPages (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_PUINT8 pui8LinAddr;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32NumPages;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++#if defined(LINUX)
++ PVR_ASSERT(hOSMemHandle);
++#else
++
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_MASK)) == 0);
++#endif
++
++ PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & (SGX_MMU_PAGE_MASK)) == 0);
++ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_MASK)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :SGXMEM:VA_%8.8lX 0x%8.8lX %lu\r\n",
++ ui32DevVAddr, ui32NumBytes, ui32PageSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++ pui8LinAddr = (IMG_PUINT8) pvLinAddr;
++ ui32Offset = 0;
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ while (ui32NumPages)
++ {
++ ui32NumPages--;
++
++
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ ui32PageSize,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr / ui32PageSize;
++
++ pui8LinAddr += ui32PageSize;
++ ui32Offset += ui32PageSize;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :SGXMEM:PA_%8.8lX%8.8lX %lu %lu 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ ui32Page * ui32PageSize,
++ ui32PageSize,
++ ui32PageSize,
++ ui32Page * ui32PageSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMallocPageTable (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32PTSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (ui32PTSize - 1)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :SGXMEM:PAGE_TABLE 0x%8.8lX %lu\r\n", ui32PTSize, SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ {
++
++
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ IMG_NULL,
++ 0,
++ (IMG_PUINT8) pvLinAddr,
++ SGX_MMU_PAGE_SIZE,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :SGXMEM:PA_%8.8lX%8.8lX 0x%lX %lu 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ ui32Page * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ ui32Page * SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpFreePages (BM_HEAP *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages, ui32PageCounter;
++ IMG_DEV_PHYADDR sDevPAddr;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((IMG_UINT32) sDevVAddr.uiAddr & (ui32PageSize - 1)) == 0);
++ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (ui32PageSize - 1)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :SGXMEM:VA_%8.8lX\r\n", sDevVAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++ for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; ui32PageCounter++)
++ {
++ if (!bInterleaved || (ui32PageCounter % 2) == 0)
++ {
++ sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->pMMUHeap, sDevVAddr);
++ {
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :SGXMEM:PA_%8.8lX%8.8lX\r\n", (IMG_UINT32) hUniqueTag, sDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++ else
++ {
++
++ }
++
++ sDevVAddr.uiAddr += ui32PageSize;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpFreePageTable (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32PTSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_UNREFERENCED_PARAMETER(ui32PTSize);
++
++
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (ui32PTSize-1UL)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :SGXMEM:PAGE_TABLE\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++
++
++
++
++
++
++
++
++
++ {
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ IMG_NULL,
++ 0,
++ (IMG_PUINT8) pvLinAddr,
++ SGX_MMU_PAGE_SIZE,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :SGXMEM:PA_%8.8lX%8.8lX\r\n", (IMG_UINT32) hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING()
++
++
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag,
++ (ui32Data & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX: SGXMEM:$1\r\n",
++ ui32Reg);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ ui32Reg,
++ (IMG_UINT32) hUniqueTag,
++ (ui32Data & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PDE_ADDR_ALIGNSHIFT,
++ ui32Data & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++#endif
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDReg (IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_HANDLE hUniqueTag)
++{
++ return PDumpPDRegWithFlags(ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++}
++
++PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ PDUMP_POLL_OPERATOR eOperator,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ #define MEMPOLL_DELAY (1000)
++ #define MEMPOLL_COUNT (2000000000 / MEMPOLL_DELAY)
++
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT8 *pui8LinAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ PDUMP_GET_SCRIPT_STRING();
++
++
++ PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= psMemInfo->ui32AllocSize);
++
++
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "-- POL :SGXMEM:VA_%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++ psMemInfo->sDevVAddr.uiAddr + ui32Offset,
++ ui32Value,
++ ui32Mask,
++ eOperator,
++ MEMPOLL_COUNT,
++ MEMPOLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++ pui8LinAddr = psMemInfo->pvLinAddrKM;
++
++
++ pui8LinAddr += ui32Offset;
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ &ui32PageOffset);
++
++
++ sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "POL :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32Value,
++ ui32Mask,
++ eOperator,
++ MEMPOLL_COUNT,
++ MEMPOLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages;
++ IMG_UINT32 ui32PageByteOffset;
++ IMG_UINT32 ui32BlockBytes;
++ IMG_UINT8* pui8LinAddr;
++ IMG_UINT8* pui8DataLinAddr = IMG_NULL;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32ParamOutPos;
++
++ PDUMP_GET_SCRIPT_AND_FILE_STRING();
++
++
++ PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize);
++
++ if (!PDumpOSJTInitialised())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes == 0 || PDumpOSIsSuspended())
++ {
++ return PVRSRV_OK;
++ }
++
++
++ if(pvAltLinAddr)
++ {
++ pui8DataLinAddr = pvAltLinAddr;
++ }
++ else if(psMemInfo->pvLinAddrKM)
++ {
++ pui8DataLinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM + ui32Offset;
++ }
++ pui8LinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM;
++ sDevVAddr = psMemInfo->sDevVAddr;
++
++
++ sDevVAddr.uiAddr += ui32Offset;
++ pui8LinAddr += ui32Offset;
++
++ PVR_ASSERT(pui8DataLinAddr);
++
++ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags);
++
++ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
++
++
++
++ if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ pui8DataLinAddr,
++ ui32Bytes,
++ ui32Flags))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSGetParamFileNum() == 0)
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
++ }
++ else
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%%lu.prm", PDumpOSGetParamFileNum());
++ }
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "-- LDB :SGXMEM:VA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32)hUniqueTag,
++ psMemInfo->sDevVAddr.uiAddr,
++ ui32Offset,
++ ui32Bytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ &ui32PageByteOffset);
++ ui32NumPages = (ui32PageByteOffset + ui32Bytes + HOST_PAGESIZE() - 1) / HOST_PAGESIZE();
++
++ while(ui32NumPages)
++ {
++#if 0
++ IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++ CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32CurrentOffset);
++#endif
++ ui32NumPages--;
++
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageByteOffset;
++#if 0
++ if(ui32PageByteOffset)
++ {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining, PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++
++ ui32PageByteOffset = 0;
++ }
++#endif
++
++ if (ui32PageByteOffset + ui32Bytes > HOST_PAGESIZE())
++ {
++
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageByteOffset;
++ }
++ else
++ {
++
++ ui32BlockBytes = ui32Bytes;
++ }
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32BlockBytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++
++
++ ui32PageByteOffset = 0;
++
++ ui32Bytes -= ui32BlockBytes;
++
++ sDevVAddr.uiAddr += ui32BlockBytes;
++
++ pui8LinAddr += ui32BlockBytes;
++
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_BOOL bInitialisePages,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT32 ui32BlockBytes;
++ IMG_UINT8* pui8LinAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32ParamOutPos;
++
++ PDUMP_GET_SCRIPT_AND_FILE_STRING();
++
++ if (!pvLinAddr || !PDumpOSJTInitialised())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSIsSuspended())
++ {
++ return PVRSRV_OK;
++ }
++
++ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags);
++
++ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
++
++ if (bInitialisePages)
++ {
++
++
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ pvLinAddr,
++ ui32Bytes,
++ PDUMP_FLAGS_CONTINUOUS))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSGetParamFileNum() == 0)
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
++ }
++ else
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%%lu.prm", PDumpOSGetParamFileNum());
++ }
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++
++
++
++
++ ui32PageOffset = (IMG_UINT32) pvLinAddr & (HOST_PAGESIZE() - 1);
++ ui32NumPages = (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() - 1) / HOST_PAGESIZE();
++ pui8LinAddr = (IMG_UINT8*) pvLinAddr;
++
++ while (ui32NumPages)
++ {
++ ui32NumPages--;
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++
++ if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE())
++ {
++
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
++ }
++ else
++ {
++
++ ui32BlockBytes = ui32Bytes;
++ }
++
++
++
++ if (bInitialisePages)
++ {
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32BlockBytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ else
++ {
++ for (ui32Offset = 0; ui32Offset < ui32BlockBytes; ui32Offset += sizeof(IMG_UINT32))
++ {
++ IMG_UINT32 ui32PTE = *((IMG_UINT32 *) (pui8LinAddr + ui32Offset));
++
++ if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0)
++ {
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag2,
++ (ui32PTE & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PTE_ADDR_ALIGNSHIFT);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "OR :SGXMEM:$1 :SGXMEM:$1 0x%8.8lX\r\n", ui32PTE & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$1\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK),
++ (IMG_UINT32) hUniqueTag2,
++ (ui32PTE & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PTE_ADDR_ALIGNSHIFT,
++ ui32PTE & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++#endif
++ }
++ else
++ {
++ PVR_ASSERT((ui32PTE & SGX_MMU_PTE_VALID) == 0UL);
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK),
++ (ui32PTE << SGX_MMU_PTE_ADDR_ALIGNSHIFT),
++ (IMG_UINT32) hUniqueTag2);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++
++
++
++
++ ui32PageOffset = 0;
++
++ ui32Bytes -= ui32BlockBytes;
++
++ pui8LinAddr += ui32BlockBytes;
++
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageByteOffset;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ (IMG_UINT8 *)&sPDDevPAddr,
++ sizeof(IMG_DEV_PHYADDR),
++ PDUMP_FLAGS_CONTINUOUS))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ sDevVAddr = psMemInfo->sDevVAddr;
++ ui32PageByteOffset = sDevVAddr.uiAddr & (SGX_MMU_PAGE_MASK);
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++ sDevPAddr.uiAddr += ui32PageByteOffset + ui32Offset;
++
++ if ((sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK) != 0UL)
++ {
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag2,
++ sPDDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :SGXMEM:$2 :SGXMEM:$1 0xFFFFFFFF\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$2\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :SGXMEM:$2 :SGXMEM:$1 0x20\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$2\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr + 4) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + 4) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ (IMG_UINT32) hUniqueTag2,
++ sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK,
++ sPDDevPAddr.uiAddr & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++#endif
++ }
++ else
++ {
++ PVR_ASSERT(!(sDevPAddr.uiAddr & SGX_MMU_PTE_VALID));
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ sPDDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_MSG_STRING();
++ PDUMP_DBG(("PDumpCommentKM"));
++
++
++ if (!PDumpOSWriteString2("-- ", ui32Flags))
++ {
++ if(ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++ }
++
++
++ eErr = PDumpOSBufprintf(hMsg, ui32MaxLen, "%s", pszComment);
++ if( (eErr != PVRSRV_OK) &&
++ (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
++ {
++ return eErr;
++ }
++
++
++ PDumpOSVerifyLineEnding(hMsg, ui32MaxLen);
++ PDumpOSWriteString2(hMsg, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_va_list ap;
++ PDUMP_GET_MSG_STRING();
++
++
++ PDUMP_va_start(ap, pszFormat);
++ eErr = PDumpOSVSprintf(hMsg, ui32MaxLen, pszFormat, ap);
++ PDUMP_va_end(ap);
++
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ return PDumpCommentKM(hMsg, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpComment(IMG_CHAR *pszFormat, ...)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_va_list ap;
++ PDUMP_GET_MSG_STRING();
++
++
++ PDUMP_va_start(ap, pszFormat);
++ eErr = PDumpOSVSprintf(hMsg, ui32MaxLen, pszFormat, ap);
++ PDUMP_va_end(ap);
++
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ return PDumpCommentKM(hMsg, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32MsgLen;
++ PDUMP_GET_MSG_STRING();
++
++
++ eErr = PDumpOSBufprintf(hMsg, ui32MaxLen, "%s", pszString);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++
++ PDumpOSVerifyLineEnding(hMsg, ui32MaxLen);
++ ui32MsgLen = PDumpOSBuflen(hMsg, ui32MaxLen);
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_DRIVERINFO),
++ (IMG_UINT8 *)hMsg,
++ ui32MsgLen,
++ ui32Flags))
++ {
++ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpBitmapKM( IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n");
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SII %s %s.bin :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++ pszFileName,
++ pszFileName,
++ PDUMP_DATAMASTER_PIXEL,
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ ePixelFormat,
++ ui32Width,
++ ui32Height,
++ ui32StrideInBytes,
++ eMemFormat);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SII %s %s.bin :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++ pszFileName,
++ pszFileName,
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ ePixelFormat,
++ ui32Width,
++ ui32Height,
++ ui32StrideInBytes,
++ eMemFormat);
++#endif
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2( hScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SAB :SGXREG:0x%08lX 0x%08lX %s\r\n",
++ ui32Address,
++ ui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2( hScript, ui32PDumpFlags);
++
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame)
++{
++ IMG_BOOL bFrameDumped;
++
++
++
++ (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame + 1);
++ bFrameDumped = PDumpIsCaptureFrameKM();
++ (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame);
++
++ return bFrameDumped;
++}
++
++static PVRSRV_ERROR PDumpSignatureRegister (IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 *pui32FileOffset,
++ IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SAB :SGXREG:0x%08X 0x%08X %s\r\n",
++ ui32Address,
++ *pui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32Flags);
++ *pui32FileOffset += ui32Size;
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PDumpRegisterRange(IMG_CHAR *pszFileName,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters,
++ IMG_UINT32 *pui32FileOffset,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 i;
++ for (i = 0; i < ui32NumRegisters; i++)
++ {
++ PDumpSignatureRegister(pszFileName, pui32Registers[i], ui32Size, pui32FileOffset, ui32Flags);
++ }
++}
++
++PVRSRV_ERROR PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = 0;
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump 3D signature registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu_3d.sig", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpTASignatureRegisters (IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(IMG_UINT32);
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump TA signature registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu_ta.sig", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCounterRegisters (IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0UL;
++ ui32FileOffset = 0UL;
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump counter registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu.perf", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegRead(const IMG_UINT32 ui32RegOffset, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCycleCountRegRead(const IMG_UINT32 ui32RegOffset, IMG_BOOL bLastFrame)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpHWPerfCBKM (IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump Hardware Performance Circular Buffer\r\n");
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ "SAB :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX %s.bin\r\n",
++ PDUMP_DATAMASTER_EDM,
++#else
++ "SAB :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX %s.bin\r\n",
++#endif
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT8 *pui8LinAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++
++
++ PDUMP_GET_SCRIPT_STRING();
++
++
++ PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= psROffMemInfo->ui32AllocSize);
++
++ pui8LinAddr = psROffMemInfo->pvLinAddrKM;
++ sDevVAddr = psROffMemInfo->sDevVAddr;
++
++
++ pui8LinAddr += ui32ROffOffset;
++ sDevVAddr.uiAddr += ui32ROffOffset;
++
++
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psROffMemInfo->sMemBlk.hOSMemHandle,
++ ui32ROffOffset,
++ pui8LinAddr,
++ &ui32PageOffset);
++
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "CBP :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32WPosVal,
++ ui32PacketSize,
++ ui32BufferSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDUMP_DBG(("PDumpIDLWithFlags"));
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %lu\r\n", ui32Clocks);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
++{
++ return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
++}
++#endif
++
++
++PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_PVOID pvAltLinAddrUM,
++ IMG_PVOID pvLinAddrUM,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_VOID *pvAddrUM;
++ IMG_VOID *pvAddrKM;
++ IMG_UINT32 ui32BytesDumped;
++ IMG_UINT32 ui32CurrentOffset;
++
++ if (psMemInfo->pvLinAddrKM != IMG_NULL && pvAltLinAddrUM == IMG_NULL)
++ {
++
++ return PDumpMemKM(IMG_NULL,
++ psMemInfo,
++ ui32Offset,
++ ui32Bytes,
++ ui32Flags,
++ hUniqueTag);
++ }
++
++ pvAddrUM = (pvAltLinAddrUM != IMG_NULL) ? pvAltLinAddrUM : ((pvLinAddrUM != IMG_NULL) ? VPTR_PLUS(pvLinAddrUM, ui32Offset) : IMG_NULL);
++
++ pvAddrKM = GetTempBuffer();
++
++
++ PVR_ASSERT(pvAddrUM != IMG_NULL && pvAddrKM != IMG_NULL);
++ if (pvAddrUM == IMG_NULL || pvAddrKM == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: Nothing to dump"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes > PDUMP_TEMP_BUFFER_SIZE)
++ {
++ PDumpCommentWithFlags(ui32Flags, "Dumping 0x%8.8lx bytes of memory, in blocks of 0x%8.8lx bytes", ui32Bytes, (IMG_UINT32)PDUMP_TEMP_BUFFER_SIZE);
++ }
++
++ ui32CurrentOffset = ui32Offset;
++ for (ui32BytesDumped = 0; ui32BytesDumped < ui32Bytes;)
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32BytesToDump = MIN(PDUMP_TEMP_BUFFER_SIZE, ui32Bytes - ui32BytesDumped);
++
++ eError = OSCopyFromUser(psPerProc,
++ pvAddrKM,
++ pvAddrUM,
++ ui32BytesToDump);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: OSCopyFromUser failed (%d), eError"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PDumpMemKM(pvAddrKM,
++ psMemInfo,
++ ui32CurrentOffset,
++ ui32BytesToDump,
++ ui32Flags,
++ hUniqueTag);
++
++ if (eError != PVRSRV_OK)
++ {
++
++ if (ui32BytesDumped != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: PDumpMemKM failed (%d)", eError));
++ }
++ PVR_ASSERT(ui32BytesDumped == 0);
++ return eError;
++ }
++
++ VPTR_INC(pvAddrUM, ui32BytesToDump);
++ ui32CurrentOffset += ui32BytesToDump;
++ ui32BytesDumped += ui32BytesToDump;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
++{
++ IMG_UINT32 i;
++
++
++ for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
++ {
++ if((gui16MMUContextUsage & (1U << i)) == 0)
++ {
++
++ gui16MMUContextUsage |= 1U << i;
++ *pui32MMUContextID = i;
++ return PVRSRV_OK;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
++{
++ if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
++ {
++
++ gui16MMUContextUsage &= ~(1U << ui32MMUContextID);
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context ids invalid"));
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 *pui32MMUContextID,
++ IMG_UINT32 ui32MMUType,
++ IMG_HANDLE hUniqueTag1,
++ IMG_VOID *pvPDCPUAddr)
++{
++ IMG_UINT8 *pui8LinAddr = (IMG_UINT8 *)pvPDCPUAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32MMUContextID;
++ PVRSRV_ERROR eError;
++
++ eError = _PdumpAllocMMUContext(&ui32MMUContextID);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpSetMMUContext: _PdumpAllocMMUContext failed: %d", eError));
++ return eError;
++ }
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++ sDevPAddr.uiAddr &= ~((PVRSRV_4K_PAGE_SIZE) -1);
++
++ PDumpComment("Set MMU Context\r\n");
++
++ PDumpComment("MMU :%s:v%d %d :%s:PA_%8.8lX%8.8lX\r\n",
++ pszMemSpace,
++ ui32MMUContextID,
++ ui32MMUType,
++ pszMemSpace,
++ hUniqueTag1,
++ sDevPAddr.uiAddr);
++
++
++ *pui32MMUContextID = ui32MMUContextID;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 ui32MMUContextID,
++ IMG_UINT32 ui32MMUType)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ PDumpComment("Clear MMU Context for memory space %s\r\n", pszMemSpace);
++
++ PDumpComment("MMU :%s:v%d %d\r\n",
++ pszMemSpace,
++ ui32MMUContextID,
++ ui32MMUType);
++
++ eError = _PdumpFreeMMUContext(ui32MMUContextID);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpClearMMUContext: _PdumpFreeMMUContext failed: %d", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/perproc.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/perproc.c
+new file mode 100644
+index 0000000..982c31f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/perproc.c
+@@ -0,0 +1,283 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++#include "handle.h"
++#include "perproc.h"
++#include "osperproc.h"
++
++#define HASH_TAB_INIT_SIZE 32
++
++static HASH_TABLE *psHashTab = IMG_NULL;
++
++static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_UINTPTR_T uiPerProc;
++
++ PVR_ASSERT(psPerProc != IMG_NULL);
++
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID);
++ if (uiPerProc == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table"));
++
++ PVR_ASSERT(psPerProc->ui32PID == 0);
++ }
++ else
++ {
++ PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc);
++ PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID);
++ }
++
++
++ if (psPerProc->psHandleBase != IMG_NULL)
++ {
++ eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError));
++ return eError;
++ }
++ }
++
++
++ if (psPerProc->hPerProcData != IMG_NULL)
++ {
++ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError));
++ return eError;
++ }
++ }
++
++
++ eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: OSPerProcessPrivateDataDeInit failed (%d)", eError));
++ return eError;
++ }
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc),
++ psPerProc,
++ psPerProc->hBlockAlloc);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID)
++{
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++ return psPerProc;
++}
++
++
++PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID)
++{
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++
++ if (psPerProc == IMG_NULL)
++ {
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc),
++ (IMG_PVOID *)&psPerProc,
++ &hBlockAlloc,
++ "Per Process Data");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)", eError));
++ return eError;
++ }
++ OSMemSet(psPerProc, 0, sizeof(*psPerProc));
++ psPerProc->hBlockAlloc = hBlockAlloc;
++
++ if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto failure;
++ }
++
++ psPerProc->ui32PID = ui32PID;
++ psPerProc->ui32RefCount = 0;
++
++
++ eError = OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: OSPerProcessPrivateDataInit failed (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psPerProc->hPerProcData,
++ psPerProc,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't set handle options (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager"));
++ goto failure;
++ }
++ }
++
++ psPerProc->ui32RefCount++;
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d",
++ ui32PID, psPerProc->ui32RefCount));
++
++ return eError;
++
++failure:
++ (IMG_VOID)FreePerProcessData(psPerProc);
++ return eError;
++}
++
++
++IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDealloc: Couldn't locate per-process data for PID %u", ui32PID));
++ }
++ else
++ {
++ psPerProc->ui32RefCount--;
++ if (psPerProc->ui32RefCount == 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVPerProcessDataDisconnect: "
++ "Last close from process 0x%x received", ui32PID));
++
++
++ PVRSRVResManDisconnect(psPerProc->hResManContext, IMG_FALSE);
++
++
++ eError = FreePerProcessData(psPerProc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Error freeing per-process data"));
++ }
++ }
++ }
++
++ eError = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Purge of global handle pool failed (%d)", eError));
++ }
++}
++
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID)
++{
++ PVR_ASSERT(psHashTab == IMG_NULL);
++
++
++ psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
++ if (psHashTab == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID)
++{
++
++ if (psHashTab != IMG_NULL)
++ {
++
++ HASH_Delete(psHashTab);
++ psHashTab = IMG_NULL;
++ }
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/power.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/power.c
+new file mode 100644
+index 0000000..826aaa2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/power.c
+@@ -0,0 +1,818 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "pdump_km.h"
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
++DECLARE_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_INSERT(PVRSRV_POWER_DEV);
++DECLARE_LIST_REMOVE(PVRSRV_POWER_DEV);
++
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va);
++
++
++static IMG_BOOL gbInitServerRunning = IMG_FALSE;
++static IMG_BOOL gbInitServerRan = IMG_FALSE;
++static IMG_BOOL gbInitSuccessful = IMG_FALSE;
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState)
++{
++
++ switch(eInitServerState)
++ {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ gbInitServerRunning = bState;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ gbInitServerRan = bState;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ gbInitSuccessful = bState;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetInitServerState : Unknown state %lx", eInitServerState));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState)
++{
++ IMG_BOOL bReturnVal;
++
++ switch(eInitServerState)
++ {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ bReturnVal = gbInitServerRunning;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ bReturnVal = gbInitServerRan;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ bReturnVal = gbInitSuccessful;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetInitServerState : Unknown state %lx", eInitServerState));
++ bReturnVal = IMG_FALSE;
++ }
++
++ return bReturnVal;
++}
++
++static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
++{
++ return (IMG_BOOL)(eSystemPowerState < PVRSRV_SYS_POWER_STATE_D2);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bSystemPowerEvent)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++#if !defined(SYS_NO_POWER_LOCK_TIMEOUT)
++ IMG_UINT32 ui32Timeout = 1000000;
++
++#if defined(SUPPORT_LMA)
++ ui32Timeout *= 60;
++#endif
++#endif
++ SysAcquireData(&psSysData);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ eError = SysPowerLockWrap(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++#endif
++ do
++ {
++ eError = OSLockResource(&psSysData->sPowerStateChangeResource,
++ ui32CallerID);
++ if (eError == PVRSRV_OK)
++ {
++ break;
++ }
++ else if (ui32CallerID == ISR_ID)
++ {
++
++
++ eError = PVRSRV_ERROR_RETRY;
++ break;
++ }
++
++ OSWaitus(1);
++#if defined(SYS_NO_POWER_LOCK_TIMEOUT)
++ } while (1);
++#else
++ ui32Timeout--;
++ } while (ui32Timeout > 0);
++#endif
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ if (eError != PVRSRV_OK)
++ {
++ SysPowerLockUnwrap(psSysData);
++ }
++#endif
++ if ((eError == PVRSRV_OK) &&
++ !bSystemPowerEvent &&
++ !_IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++
++ PVRSRVPowerUnlock(ui32CallerID);
++ eError = PVRSRV_ERROR_RETRY;
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID)
++{
++ OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID);
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ SysPowerLockUnwrap(gpsSysData);
++#endif
++}
++
++
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
++{
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bAllDevices;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eNewPowerState;
++
++
++ bAllDevices = va_arg(va, IMG_BOOL);
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
++
++ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++ {
++ eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
++ psPowerDevice->eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++ {
++ if (psPowerDevice->pfnPrePower != IMG_NULL)
++ {
++
++ eError = psPowerDevice->pfnPrePower(psPowerDevice->hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++
++ eError = SysDevicePrePowerState(psPowerDevice->ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL bAllDevices,
++ IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList,
++ PVRSRVDevicePrePowerStateKM_AnyVaCb,
++ bAllDevices,
++ ui32DeviceIndex,
++ eNewPowerState);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
++{
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bAllDevices;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eNewPowerState;
++
++
++ bAllDevices = va_arg(va, IMG_BOOL);
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
++
++ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++ {
++ eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
++ psPowerDevice->eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++ {
++
++ eError = SysDevicePostPowerState(psPowerDevice->ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (psPowerDevice->pfnPostPower != IMG_NULL)
++ {
++
++ eError = psPowerDevice->pfnPostPower(psPowerDevice->hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ psPowerDevice->eCurrentPowerState = eNewDevicePowerState;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL bAllDevices,
++ IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList,
++ PVRSRVDevicePostPowerStateKM_AnyVaCb,
++ bAllDevices,
++ ui32DeviceIndex,
++ eNewPowerState);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVSetDevicePowerStateCoreKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ #if defined(PDUMP)
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++
++
++
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if(eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++
++ PDUMPSUSPEND();
++ }
++ #endif
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++ PDUMPRESUME();
++ }
++ goto Exit;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++ PDUMPRESUME();
++ }
++
++Exit:
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", eNewPowerState, eError));
++ }
++
++ if (!bRetainMutex || (eError != PVRSRV_OK))
++ {
++ PVRSRVPowerUnlock(ui32CallerID);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (_IsSystemStatePowered(eNewSysPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ if (_IsSystemStatePowered(eNewSysPowerState))
++ {
++
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
++ }
++ else
++ {
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ }
++
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ if (eNewSysPowerState != psSysData->eCurrentPowerState)
++ {
++
++ eError = SysSystemPrePowerState(eNewSysPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ return eError;
++
++ErrorExit:
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
++
++
++ psSysData->eFailedPowerState = eNewSysPowerState;
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SYS_DATA *psSysData;
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++
++ SysAcquireData(&psSysData);
++
++ if (eNewSysPowerState != psSysData->eCurrentPowerState)
++ {
++
++ eError = SysSystemPostPowerState(eNewSysPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++ }
++
++ if (_IsSystemStatePowered(eNewSysPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ if (_IsSystemStatePowered(eNewSysPowerState))
++ {
++
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
++ }
++ else
++ {
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ }
++
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK",
++ psSysData->eCurrentPowerState, eNewSysPowerState));
++
++ psSysData->eCurrentPowerState = eNewSysPowerState;
++
++Exit:
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ if (_IsSystemStatePowered(eNewSysPowerState) &&
++ PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
++ {
++
++
++
++ PVRSRVCommandCompleteCallbacks();
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVSystemPrePowerStateKM(eNewSysPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ eError = PVRSRVSystemPostPowerStateKM(eNewSysPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++
++ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
++
++
++ psSysData->eFailedPowerState = eNewSysPowerState;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex,
++ PFN_PRE_POWER pfnPrePower,
++ PFN_POST_POWER pfnPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ if (pfnPrePower == IMG_NULL &&
++ pfnPostPower == IMG_NULL)
++ {
++ return PVRSRVRemovePowerDevice(ui32DeviceIndex);
++ }
++
++ SysAcquireData(&psSysData);
++
++ eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_POWER_DEV),
++ (IMG_VOID **)&psPowerDevice, IMG_NULL,
++ "Power Device");
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV"));
++ return eError;
++ }
++
++
++ psPowerDevice->pfnPrePower = pfnPrePower;
++ psPowerDevice->pfnPostPower = pfnPostPower;
++ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
++ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
++ psPowerDevice->hDevCookie = hDevCookie;
++ psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
++ psPowerDevice->eCurrentPowerState = eCurrentPowerState;
++ psPowerDevice->eDefaultPowerState = eDefaultPowerState;
++
++
++ List_PVRSRV_POWER_DEV_Insert(&(psSysData->psPowerDeviceList), psPowerDevice);
++
++ return (PVRSRV_OK);
++}
++
++
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDev;
++
++ SysAcquireData(&psSysData);
++
++
++ psPowerDev = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDev)
++ {
++ List_PVRSRV_POWER_DEV_Remove(psPowerDev);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_POWER_DEV), psPowerDev, IMG_NULL);
++
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ SysAcquireData(&psSysData);
++
++ if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) ||
++ OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID))
++ {
++ return IMG_FALSE;
++ }
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++ return (psPowerDevice && (psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON))
++ ? IMG_TRUE : IMG_FALSE;
++}
++
++
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ PVR_UNREFERENCED_PARAMETER(pvInfo);
++
++ SysAcquireData(&psSysData);
++
++ if (bIdleDevice)
++ {
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%lx", eError));
++ return eError;
++ }
++ }
++
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++	if (psPowerDevice && psPowerDevice->pfnPreClockSpeedChange)
++ {
++ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
++ bIdleDevice,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePreClockSpeedChange : Device %lu failed, error:0x%lx",
++ ui32DeviceIndex, eError));
++ }
++ }
++
++ if (bIdleDevice && eError != PVRSRV_OK)
++ {
++ PVRSRVPowerUnlock(KERNEL_ID);
++ }
++
++ return eError;
++}
++
++
++IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ PVR_UNREFERENCED_PARAMETER(pvInfo);
++
++ SysAcquireData(&psSysData);
++
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange)
++ {
++ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
++ bIdleDevice,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePostClockSpeedChange : Device %lu failed, error:0x%lx",
++ ui32DeviceIndex, eError));
++ }
++ }
++
++
++ if (bIdleDevice)
++ {
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++ }
++}
++
++
++/*
++ * PVRSRVPowerOnSystemWithDevice
++ *
++ * Description: Power on the System if it is off, but instead of powering all
++ * of the devices to their "default" state, only turn on the specified
++ * device index.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerOnSystemWithDevice(IMG_UINT32 ui32DeviceIndex,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_TRUE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ if (!_IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ eError = SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE_D0);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ eError = SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE_D0);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ErrorExit:
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPowerOnSystemWithDevice : FAILED 0x%x", eError));
++ }
++
++ if (!bRetainMutex || (eError != PVRSRV_OK))
++ {
++ PVRSRVPowerUnlock(ui32CallerID);
++ }
++
++ return eError;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pvrsrv.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pvrsrv.c
+new file mode 100644
+index 0000000..2c33fce
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pvrsrv.c
+@@ -0,0 +1,1195 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "handle.h"
++#include "perproc.h"
++#include "pdump_km.h"
++#include "ra.h"
++
++#include "pvrversion.h"
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK);
++
++DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
++
++DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va);
++
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID)
++{
++ SYS_DEVICE_ID* psDeviceWalker;
++ SYS_DEVICE_ID* psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++
++ while (psDeviceWalker < psDeviceEnd)
++ {
++ if (!psDeviceWalker->bInUse)
++ {
++ psDeviceWalker->bInUse = IMG_TRUE;
++ *pui32DevID = psDeviceWalker->uiID;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!"));
++
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID)
++{
++ SYS_DEVICE_ID* psDeviceWalker;
++ SYS_DEVICE_ID* psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++
++ while (psDeviceWalker < psDeviceEnd)
++ {
++
++ if (
++ (psDeviceWalker->uiID == ui32DevID) &&
++ (psDeviceWalker->bInUse)
++ )
++ {
++ psDeviceWalker->bInUse = IMG_FALSE;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!"));
++
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++#ifndef ReadHWReg
++IMG_EXPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++ return *(volatile IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset);
++}
++#endif
++
++
++#ifndef WriteHWReg
++IMG_EXPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++ PVR_DPF((PVR_DBG_MESSAGE,"WriteHWReg Base:%x, Offset: %x, Value %x",pvLinRegBaseAddr,ui32Offset,ui32Value));
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset) = ui32Value;
++}
++#endif
++
++
++#ifndef WriteHWRegs
++IMG_EXPORT
++IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs)
++{
++ while (ui32Count)
++ {
++ WriteHWReg (pvLinRegBaseAddr, psHWRegs->ui32RegAddr, psHWRegs->ui32RegVal);
++ psHWRegs++;
++ ui32Count--;
++ }
++}
++#endif
++
++IMG_VOID PVRSRVEnumerateDevicesKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT *pui32DevCount;
++ PVRSRV_DEVICE_IDENTIFIER **ppsDevIdList;
++
++ pui32DevCount = va_arg(va, IMG_UINT*);
++ ppsDevIdList = va_arg(va, PVRSRV_DEVICE_IDENTIFIER**);
++
++ if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
++ {
++ *(*ppsDevIdList) = psDeviceNode->sDevId;
++ (*ppsDevIdList)++;
++ (*pui32DevCount)++;
++ }
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *psDevIdList)
++{
++ SYS_DATA *psSysData;
++ IMG_UINT32 i;
++
++ if (!pui32NumDevices || !psDevIdList)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++
++ for (i=0; i<PVRSRV_MAX_DEVICES; i++)
++ {
++ psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN;
++ }
++
++
++ *pui32NumDevices = 0;
++
++
++
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVEnumerateDevicesKM_ForEachVaCb,
++ pui32NumDevices,
++ &psDevIdList);
++
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData)
++{
++ PVRSRV_ERROR eError;
++
++
++ eError = ResManInit();
++ if (eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++ eError = PVRSRVPerProcessDataInit();
++ if(eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ eError = PVRSRVHandleInit();
++ if(eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ eError = OSCreateResource(&psSysData->sPowerStateChangeResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0;
++ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
++
++
++ if(OSAllocMem( PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_EVENTOBJECT) ,
++ (IMG_VOID **)&psSysData->psGlobalEventObject, 0,
++ "Event Object") != PVRSRV_OK)
++ {
++
++ goto Error;
++ }
++
++ if(OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", psSysData->psGlobalEventObject) != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++ return eError;
++
++Error:
++ PVRSRVDeInit(psSysData);
++ return eError;
++}
++
++
++
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ if (psSysData == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed - invalid param"));
++ return;
++ }
++
++
++ if(psSysData->psGlobalEventObject)
++ {
++ OSEventObjectDestroy(psSysData->psGlobalEventObject);
++ OSFreeMem( PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_EVENTOBJECT),
++ psSysData->psGlobalEventObject,
++ 0);
++ psSysData->psGlobalEventObject = IMG_NULL;
++ }
++
++ eError = PVRSRVHandleDeInit();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed"));
++ }
++
++ eError = PVRSRVPerProcessDataDeInit();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed"));
++ }
++
++ ResManDeInit();
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
++ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++ IMG_UINT32 ui32SOCInterruptBit,
++ IMG_UINT32 *pui32DeviceIndex)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (IMG_VOID **)&psDeviceNode, IMG_NULL,
++ "Device Node") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ eError = pfnRegisterDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device"));
++ return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED);
++ }
++
++
++
++
++
++
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->psSysData = psSysData;
++ psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit;
++
++
++ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++
++
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++
++ *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice (IMG_UINT32 ui32DevIndex)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice"));
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_TRUE);
++ if(!psDeviceNode)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: requested device is not present"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++
++
++
++ eError = PVRSRVResManConnect(IMG_NULL, &psDeviceNode->hResManContext);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call"));
++ return eError;
++ }
++
++
++ if(psDeviceNode->pfnInitDevice != IMG_NULL)
++ {
++ eError = psDeviceNode->pfnInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed InitDevice call"));
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVFinaliseSystem_SetPowerState_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_DEFAULT,
++ KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVFinaliseSystem_CompatCheck_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVDevInitCompatCheck call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem"));
++
++ SysAcquireData(&psSysData);
++
++ if (bInitSuccessful)
++ {
++ eError = SysFinalise();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: SysFinalise failed (%d)", eError));
++ return eError;
++ }
++
++
++ eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList,
++ PVRSRVFinaliseSystem_SetPowerState_AnyCb);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList,
++ PVRSRVFinaliseSystem_CompatCheck_AnyCb);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++
++
++
++
++
++
++
++#if !defined(SUPPORT_PDUMP_DELAYED_INITPHASE_TERMINATION)
++ PDUMPENDINITPHASE();
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ if (psDeviceNode->pfnInitDeviceCompatCheck)
++ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
++ else
++ return PVRSRV_OK;
++}
++
++IMG_VOID * PVRSRVAcquireDeviceDataKM_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ IMG_UINT32 ui32DevIndex;
++
++ eDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE);
++ ui32DevIndex = va_arg(va, IMG_UINT32);
++
++ if ((eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN &&
++ psDeviceNode->sDevId.eDeviceType == eDeviceType) ||
++ (eDeviceType == PVRSRV_DEVICE_TYPE_UNKNOWN &&
++ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex))
++ {
++ return psDeviceNode;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32 ui32DevIndex,
++ PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE *phDevCookie)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM"));
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ PVRSRVAcquireDeviceDataKM_Match_AnyVaCb,
++ eDeviceType,
++ ui32DevIndex);
++
++
++ if (!psDeviceNode)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++
++
++ if (phDevCookie)
++ {
++ *phDevCookie = (IMG_HANDLE)psDeviceNode;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ SysAcquireData(&psSysData);
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_TRUE);
++
++ if (!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: requested device %d is not present", ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++
++ eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ KERNEL_ID,
++ IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
++ return eError;
++ }
++
++
++
++ eError = ResManFreeResByCriteria(psDeviceNode->hResManContext,
++ RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ IMG_NULL, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed ResManFreeResByCriteria call"));
++ return eError;
++ }
++
++
++
++ if(psDeviceNode->pfnDeInitDevice != IMG_NULL)
++ {
++ eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed DeInitDevice call"));
++ return eError;
++ }
++ }
++
++
++
++ PVRSRVResManDisconnect(psDeviceNode->hResManContext, IMG_TRUE);
++ psDeviceNode->hResManContext = IMG_NULL;
++
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
++
++
++ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++
++ return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries)
++{
++ {
++ IMG_UINT32 uiMaxTime = ui32Tries * ui32Waitus;
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime)
++ {
++ if((*pui32LinMemAddr & ui32Mask) == ui32Value)
++ {
++ return PVRSRV_OK;
++ }
++ OSWaitus(ui32Waitus);
++ } END_LOOP_UNTIL_TIMEOUT();
++ }
++
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++#if defined (USING_ISR_INTERRUPTS)
++
++extern IMG_UINT32 gui32EventStatusServicesByISR;
++
++PVRSRV_ERROR PollForInterruptKM (IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries)
++{
++ IMG_UINT32 uiMaxTime;
++
++ uiMaxTime = ui32Tries * ui32Waitus;
++
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime)
++ {
++ if ((gui32EventStatusServicesByISR & ui32Mask) == ui32Value)
++ {
++ gui32EventStatusServicesByISR = 0;
++ return PVRSRV_OK;
++ }
++ OSWaitus(ui32Waitus);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ return PVRSRV_ERROR_GENERIC;
++}
++#endif
++
++IMG_VOID PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ IMG_CHAR **ppszStr;
++ IMG_UINT32 *pui32StrLen;
++
++ ppszStr = va_arg(va, IMG_CHAR**);
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++
++ if(psBMHeap->pImportArena)
++ {
++ RA_GetStats(psBMHeap->pImportArena,
++ ppszStr,
++ pui32StrLen);
++ }
++
++ if(psBMHeap->pVMArena)
++ {
++ RA_GetStats(psBMHeap->pVMArena,
++ ppszStr,
++ pui32StrLen);
++ }
++}
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM_BMContext_AnyVaCb(BM_CONTEXT *psBMContext, va_list va)
++{
++
++ IMG_UINT32 *pui32StrLen;
++ IMG_INT32 *pi32Count;
++ IMG_CHAR **ppszStr;
++
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++ pi32Count = va_arg(va, IMG_INT32*);
++ ppszStr = va_arg(va, IMG_CHAR**);
++
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\nApplication Context (hDevMemContext) 0x%08X:\n",
++ (IMG_HANDLE)psBMContext);
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++ List_BM_HEAP_ForEach_va(psBMContext->psBMHeap,
++ PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
++ ppszStr,
++ pui32StrLen);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM_Device_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT32 *pui32StrLen;
++ IMG_INT32 *pi32Count;
++ IMG_CHAR **ppszStr;
++
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++ pi32Count = va_arg(va, IMG_INT32*);
++ ppszStr = va_arg(va, IMG_CHAR**);
++
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\n\nDevice Type %d:\n", psDeviceNode->sDevId.eDeviceType);
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++
++ if(psDeviceNode->sDevMemoryInfo.pBMKernelContext)
++ {
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\nKernel Context:\n");
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++
++ List_BM_HEAP_ForEach_va(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psBMHeap,
++ PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
++ ppszStr,
++ pui32StrLen);
++ }
++
++
++ return List_BM_CONTEXT_PVRSRV_ERROR_Any_va(psDeviceNode->sDevMemoryInfo.pBMContext,
++ PVRSRVGetMiscInfoKM_BMContext_AnyVaCb,
++ pui32StrLen,
++ pi32Count,
++ ppszStr);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
++{
++ SYS_DATA *psSysData;
++
++ if(!psMiscInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psMiscInfo->ui32StatePresent = 0;
++
++
++ if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
++ |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
++ |PVRSRV_MISC_INFO_MEMSTATS_PRESENT
++ |PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
++ |PVRSRV_MISC_INFO_DDKVERSION_PRESENT
++ |PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT
++ |PVRSRV_MISC_INFO_RESET_PRESENT))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
++ (psSysData->pvSOCTimerRegisterKM != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
++ psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
++ }
++ else
++ {
++ psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
++ }
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
++ (psSysData->pvSOCClockGateRegsBase != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
++ psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
++ psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
++ }
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
++ (psMiscInfo->pszMemoryStr != IMG_NULL))
++ {
++ RA_ARENA **ppArena;
++ IMG_CHAR *pszStr;
++ IMG_UINT32 ui32StrLen;
++ IMG_INT32 i32Count;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;
++
++
++ ppArena = &psSysData->apsLocalDevMemArena[0];
++ while(*ppArena)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ RA_GetStats(*ppArena,
++ &pszStr,
++ &ui32StrLen);
++
++ ppArena++;
++ }
++
++
++
++ List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
++ PVRSRVGetMiscInfoKM_Device_AnyVaCb,
++ &ui32StrLen,
++ &i32Count,
++ &pszStr);
++
++
++ i32Count = OSSNPrintf(pszStr, 100, "\n\0");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
++ (psSysData->psGlobalEventObject != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
++ psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
++ }
++
++
++
++ if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
++ && ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
++ && (psMiscInfo->pszMemoryStr != IMG_NULL))
++ {
++ IMG_CHAR *pszStr;
++ IMG_UINT32 ui32StrLen;
++ IMG_UINT32 ui32LenStrPerNum = 12;
++ IMG_INT32 i32Count;
++ IMG_INT i;
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;
++
++
++ psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
++ psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
++ psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BRANCH;
++ psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ for (i=0; i<4; i++)
++ {
++ if (ui32StrLen < ui32LenStrPerNum)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%ld", psMiscInfo->aui32DDKVersion[i]);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ if (i != 3)
++ {
++ i32Count = OSSNPrintf(pszStr, 2, ".");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++ }
++ }
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT) != 0UL)
++ {
++ if(psMiscInfo->bDeferCPUCacheFlush)
++ {
++
++ if(!psMiscInfo->bCPUCacheFlushAll)
++ {
++
++
++
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVGetMiscInfoKM: don't support deferred range flushes"));
++ PVR_DPF((PVR_DBG_MESSAGE," using deferred flush all instead"));
++ }
++
++ psSysData->bFlushAll = IMG_TRUE;
++ }
++ else
++ {
++
++ if(psMiscInfo->bCPUCacheFlushAll)
++ {
++
++ OSFlushCPUCacheKM();
++
++ psSysData->bFlushAll = IMG_FALSE;
++ }
++ else
++ {
++
++ OSFlushCPUCacheRangeKM(psMiscInfo->pvRangeAddrStart, psMiscInfo->pvRangeAddrEnd);
++ }
++ }
++ }
++#endif
++
++#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
++ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
++ {
++ PVR_LOG(("User requested OS reset"));
++ OSPanic();
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFBStatsKM(IMG_UINT32 *pui32Total,
++ IMG_UINT32 *pui32Available)
++{
++ IMG_UINT32 ui32Total = 0, i = 0;
++ IMG_UINT32 ui32Available = 0;
++
++ *pui32Total = 0;
++ *pui32Available = 0;
++
++
++ while(BM_ContiguousStatistics(i, &ui32Total, &ui32Available) == IMG_TRUE)
++ {
++ *pui32Total += ui32Total;
++ *pui32Available += ui32Available;
++
++ i++;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ SYS_DATA *psSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++ IMG_UINT32 ui32InterruptSource;
++
++ if(!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n"));
++ goto out;
++ }
++ psSysData = psDeviceNode->psSysData;
++
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode);
++ if(ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
++ {
++ if(psDeviceNode->pfnDeviceISR != IMG_NULL)
++ {
++ bStatus = (*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData);
++ }
++
++ SysClearInterrupts(psSysData, psDeviceNode->ui32SOCInterruptBit);
++ }
++
++out:
++ return bStatus;
++}
++
++IMG_VOID PVRSRVSystemLISR_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++
++ IMG_BOOL *pbStatus;
++ IMG_UINT32 *pui32InterruptSource;
++ IMG_UINT32 *pui32ClearInterrupts;
++
++ pbStatus = va_arg(va, IMG_BOOL*);
++ pui32InterruptSource = va_arg(va, IMG_UINT32*);
++ pui32ClearInterrupts = va_arg(va, IMG_UINT32*);
++
++
++ if(psDeviceNode->pfnDeviceISR != IMG_NULL)
++ {
++ if(*pui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
++ {
++ if((*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData))
++ {
++
++ *pbStatus = IMG_TRUE;
++ }
++
++ *pui32ClearInterrupts |= psDeviceNode->ui32SOCInterruptBit;
++ }
++ }
++}
++
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = pvSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++ IMG_UINT32 ui32InterruptSource;
++ IMG_UINT32 ui32ClearInterrupts = 0;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n"));
++ }
++ else
++ {
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, IMG_NULL);
++
++
++ if(ui32InterruptSource)
++ {
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVSystemLISR_ForEachVaCb,
++ &bStatus,
++ &ui32InterruptSource,
++ &ui32ClearInterrupts);
++
++ SysClearInterrupts(psSysData, ui32ClearInterrupts);
++ }
++ }
++ return bStatus;
++}
++
++
++IMG_VOID PVRSRVMISR_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if(psDeviceNode->pfnDeviceMISR != IMG_NULL)
++ {
++ (*psDeviceNode->pfnDeviceMISR)(psDeviceNode->pvISRData);
++ }
++}
++
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = pvSysData;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n"));
++ return;
++ }
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVMISR_ForEachCb);
++
++
++ if (PVRSRVProcessQueues(ISR_ID, IMG_FALSE) == PVRSRV_ERROR_PROCESSING_BLOCKED)
++ {
++ PVRSRVProcessQueues(ISR_ID, IMG_FALSE);
++ }
++
++
++ if (psSysData->psGlobalEventObject)
++ {
++ IMG_HANDLE hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM;
++ if(hOSEventKM)
++ {
++ OSEventObjectSignal(hOSEventKM);
++ }
++ }
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID)
++{
++ return PVRSRVPerProcessDataConnect(ui32PID);
++}
++
++
++IMG_EXPORT
++IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID)
++{
++ PVRSRVPerProcessDataDisconnect(ui32PID);
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer,
++ IMG_SIZE_T *puiBufSize, IMG_BOOL bSave)
++{
++ IMG_SIZE_T uiBytesSaved = 0;
++ IMG_PVOID pvLocalMemCPUVAddr;
++ RA_SEGMENT_DETAILS sSegDetails;
++
++ if (hArena == IMG_NULL)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ sSegDetails.uiSize = 0;
++ sSegDetails.sCpuPhyAddr.uiAddr = 0;
++ sSegDetails.hSegment = 0;
++
++
++ while (RA_GetNextLiveSegment(hArena, &sSegDetails))
++ {
++ if (pbyBuffer == IMG_NULL)
++ {
++
++ uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++ }
++ else
++ {
++ if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + sSegDetails.uiSize) > *puiBufSize)
++ {
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x", sSegDetails.sCpuPhyAddr.uiAddr, sSegDetails.uiSize));
++
++
++ pvLocalMemCPUVAddr = OSMapPhysToLin(sSegDetails.sCpuPhyAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (pvLocalMemCPUVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ if (bSave)
++ {
++
++ OSMemCopy(pbyBuffer, &sSegDetails.uiSize, sizeof(sSegDetails.uiSize));
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ }
++ else
++ {
++ IMG_UINT32 uiSize;
++
++ OSMemCopy(&uiSize, pbyBuffer, sizeof(sSegDetails.uiSize));
++
++ if (uiSize != sSegDetails.uiSize)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Segment size error"));
++ }
++ else
++ {
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ }
++ }
++
++
++ uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++
++ OSUnMapPhysToLin(pvLocalMemCPUVAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++
++ if (pbyBuffer == IMG_NULL)
++ {
++ *puiBufSize = uiBytesSaved;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/queue.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/queue.c
+new file mode 100644
+index 0000000..e535ddd
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/queue.c
+@@ -0,0 +1,1137 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "lists.h"
++
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++#include "proc.h"
++
++static IMG_INT
++QueuePrintCommands (PVRSRV_QUEUE_INFO * psQueue, IMG_CHAR * buffer, size_t size)
++{
++ off_t off = 0;
++ IMG_INT cmds = 0;
++ IMG_SIZE_T ui32ReadOffset = psQueue->ui32ReadOffset;
++ IMG_SIZE_T ui32WriteOffset = psQueue->ui32WriteOffset;
++ PVRSRV_COMMAND * psCmd;
++
++ while (ui32ReadOffset != ui32WriteOffset)
++ {
++ psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset);
++
++ off = printAppend(buffer, size, off, "%p %p %5lu %6lu %3lu %5lu %2lu %2lu %3lu \n",
++ psQueue,
++ psCmd,
++ psCmd->ui32ProcessID,
++ psCmd->CommandType,
++ psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex,
++ psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount,
++ psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++ if (cmds == 0)
++ off = printAppend(buffer, size, off, "%p <empty>\n", psQueue);
++ return off;
++}
++
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void ProcSeqShowQueue(struct seq_file *sfile,void* el)
++{
++ PVRSRV_QUEUE_INFO * psQueue = (PVRSRV_QUEUE_INFO*)el;
++ IMG_INT cmds = 0;
++ IMG_SIZE_T ui32ReadOffset;
++ IMG_SIZE_T ui32WriteOffset;
++ PVRSRV_COMMAND * psCmd;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Command Queues\n"
++ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
++ return;
++ }
++
++ ui32ReadOffset = psQueue->ui32ReadOffset;
++ ui32WriteOffset = psQueue->ui32WriteOffset;
++
++ while (ui32ReadOffset != ui32WriteOffset)
++ {
++ psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset);
++
++ seq_printf(sfile, "%p %p %5lu %6lu %3lu %5lu %2lu %2lu %3lu \n",
++ psQueue,
++ psCmd,
++ psCmd->ui32ProcessID,
++ psCmd->CommandType,
++ psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex,
++ psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount,
++ psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++
++ if (cmds == 0)
++ seq_printf(sfile, "%p <empty>\n", psQueue);
++}
++
++void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off)
++{
++ PVRSRV_QUEUE_INFO * psQueue;
++ SYS_DATA * psSysData;
++
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++ for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM);
++ return psQueue;
++}
++
++#endif
++
++off_t
++QueuePrintQueues (IMG_CHAR * buffer, size_t size, off_t off)
++{
++ SYS_DATA * psSysData;
++ PVRSRV_QUEUE_INFO * psQueue;
++
++ SysAcquireData(&psSysData);
++
++ if (!off)
++ return printAppend (buffer, size, 0,
++ "Command Queues\n"
++ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
++
++
++
++ for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM)
++ ;
++
++ return psQueue ? QueuePrintCommands (psQueue, buffer, size) : END_OF_FILE;
++}
++#endif
++
++#define GET_SPACE_IN_CMDQ(psQueue) \
++ (((psQueue->ui32ReadOffset - psQueue->ui32WriteOffset) \
++ + (psQueue->ui32QueueSize - 1)) & (psQueue->ui32QueueSize - 1))
++
++#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \
++ psQueue->ui32WriteOffset = (psQueue->ui32WriteOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \
++ (ui32OpsComplete >= ui32OpsPending)
++
++
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++
++static IMG_VOID QueueDumpCmdComplete(COMMAND_COMPLETE_DATA *psCmdCompleteData,
++ IMG_UINT32 i,
++ IMG_BOOL bIsSrc)
++{
++ PVRSRV_SYNC_OBJECT *psSyncObject;
++
++ psSyncObject = bIsSrc ? psCmdCompleteData->psSrcSync : psCmdCompleteData->psDstSync;
++
++ if (psCmdCompleteData->bInUse)
++ {
++ PVR_LOG(("\t%s %lu: ROC DevVAddr:0x%lX ROP:0x%lx ROC:0x%lx, WOC DevVAddr:0x%lX WOP:0x%lx WOC:0x%lx",
++ bIsSrc ? "SRC" : "DEST", i,
++ psSyncObject[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsPending,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete,
++ psSyncObject[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsPending,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete));
++ }
++ else
++ {
++ PVR_LOG(("\t%s %lu: (Not in use)", bIsSrc ? "SRC" : "DEST", i));
++ }
++}
++
++
++static IMG_VOID QueueDumpDebugInfo_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
++ {
++ IMG_UINT32 i;
++ SYS_DATA *psSysData;
++ COMMAND_COMPLETE_DATA **ppsCmdCompleteData;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++ SysAcquireData(&psSysData);
++
++ ppsCmdCompleteData = psSysData->ppsCmdCompleteData[psDeviceNode->sDevId.ui32DeviceIndex];
++
++ if (ppsCmdCompleteData != IMG_NULL)
++ {
++ psCmdCompleteData = ppsCmdCompleteData[DC_FLIP_COMMAND];
++
++ PVR_LOG(("Command Complete Data for display device %lu:", psDeviceNode->sDevId.ui32DeviceIndex));
++
++ for (i = 0; i < psCmdCompleteData->ui32SrcSyncCount; i++)
++ {
++ QueueDumpCmdComplete(psCmdCompleteData, i, IMG_TRUE);
++ }
++
++ for (i = 0; i < psCmdCompleteData->ui32DstSyncCount; i++)
++ {
++ QueueDumpCmdComplete(psCmdCompleteData, i, IMG_FALSE);
++ }
++ }
++ else
++ {
++ PVR_LOG(("There is no Command Complete Data for display device %u", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ }
++}
++
++
++IMG_VOID QueueDumpDebugInfo(IMG_VOID)
++{
++ SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, QueueDumpDebugInfo_ForEachCb);
++}
++
++
++IMG_SIZE_T NearestPower2(IMG_SIZE_T ui32Value)
++{
++ IMG_SIZE_T ui32Temp, ui32Result = 1;
++
++ if(!ui32Value)
++ return 0;
++
++ ui32Temp = ui32Value - 1;
++ while(ui32Temp)
++ {
++ ui32Result <<= 1;
++ ui32Temp >>= 1;
++ }
++
++ return ui32Result;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo)
++{
++ PVRSRV_QUEUE_INFO *psQueueInfo;
++ IMG_SIZE_T ui32Power2QueueSize = NearestPower2(ui32QueueSize);
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hMemBlock;
++
++ SysAcquireData(&psSysData);
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ (IMG_VOID **)&psQueueInfo, &hMemBlock,
++ "Queue Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct"));
++ goto ErrorExit;
++ }
++ OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO));
++
++ psQueueInfo->hMemBlock[0] = hMemBlock;
++ psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
++ &psQueueInfo->pvLinQueueKM, &hMemBlock,
++ "Command Queue") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer"));
++ goto ErrorExit;
++ }
++
++ psQueueInfo->hMemBlock[1] = hMemBlock;
++ psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;
++
++
++ PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
++ PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);
++
++ psQueueInfo->ui32QueueSize = ui32Power2QueueSize;
++
++
++ if (psSysData->psQueueList == IMG_NULL)
++ {
++ eError = OSCreateResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++
++ if (OSLockResource(&psSysData->sQProcessResource,
++ KERNEL_ID) != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ psQueueInfo->psNextKM = psSysData->psQueueList;
++ psSysData->psQueueList = psQueueInfo;
++
++ if (OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID) != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ *ppsQueueInfo = psQueueInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if(psQueueInfo)
++ {
++ if(psQueueInfo->pvLinQueueKM)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++ SysAcquireData(&psSysData);
++
++ psQueue = psSysData->psQueueList;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
++ {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue"));
++ eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
++ goto ErrorExit;
++ }
++
++
++ eError = OSLockResource(&psSysData->sQProcessResource,
++ KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ if(psQueue == psQueueInfo)
++ {
++ psSysData->psQueueList = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ NearestPower2(psQueueInfo->ui32QueueSize) + PVRSRV_MAX_CMD_SIZE,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++ psQueueInfo = IMG_NULL;
++ }
++ else
++ {
++ while(psQueue)
++ {
++ if(psQueue->psNextKM == psQueueInfo)
++ {
++ psQueue->psNextKM = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++ psQueueInfo = IMG_NULL;
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if(!psQueue)
++ {
++ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto ErrorExit;
++ }
++ }
++
++
++ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++
++ if (psSysData->psQueueList == IMG_NULL)
++ {
++ eError = OSDestroyResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ErrorExit:
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++ IMG_SIZE_T ui32ParamSize,
++ IMG_VOID **ppvSpace)
++{
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++
++ ui32ParamSize = (ui32ParamSize+3) & 0xFFFFFFFC;
++
++ if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE)
++ {
++ PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE));
++ return PVRSRV_ERROR_CMD_TOO_BIG;
++ }
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize)
++ {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout == IMG_TRUE)
++ {
++ *ppvSpace = IMG_NULL;
++
++ return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++ }
++ else
++ {
++ *ppvSpace = (IMG_VOID *)((IMG_UINTPTR_T)psQueue->pvLinQueueUM + psQueue->ui32WriteOffset);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND **ppsCommand,
++ IMG_UINT32 ui32DevIndex,
++ IMG_UINT16 CommandType,
++ IMG_UINT32 ui32DstSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ IMG_UINT32 ui32SrcSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ IMG_SIZE_T ui32DataByteSize )
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_COMMAND *psCommand;
++ IMG_SIZE_T ui32CommandSize;
++ IMG_UINT32 i;
++
++
++ ui32DataByteSize = (ui32DataByteSize + 3UL) & ~3UL;
++
++
++ ui32CommandSize = sizeof(PVRSRV_COMMAND)
++ + ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT))
++ + ui32DataByteSize;
++
++
++ eError = PVRSRVGetQueueSpaceKM (psQueue, ui32CommandSize, (IMG_VOID**)&psCommand);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ psCommand->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++ psCommand->ui32CmdSize = ui32CommandSize;
++ psCommand->ui32DevIndex = ui32DevIndex;
++ psCommand->CommandType = CommandType;
++ psCommand->ui32DstSyncCount = ui32DstSyncCount;
++ psCommand->ui32SrcSyncCount = ui32SrcSyncCount;
++
++
++ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand) + sizeof(PVRSRV_COMMAND));
++
++
++ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psDstSync)
++ + (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psSrcSync)
++ + (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++ psCommand->ui32DataSize = ui32DataByteSize;
++
++
++ for (i=0; i<ui32DstSyncCount; i++)
++ {
++ psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
++ psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE);
++ psCommand->psDstSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCommand->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCommand->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCommand->psDstSync[i].ui32ReadOpsPending,
++ psCommand->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ for (i=0; i<ui32SrcSyncCount; i++)
++ {
++ psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
++ psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE);
++ psCommand->psSrcSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCommand->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCommand->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCommand->psSrcSync[i].ui32ReadOpsPending,
++ psCommand->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++ *ppsCommand = psCommand;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND *psCommand)
++{
++
++
++
++ if (psCommand->ui32DstSyncCount > 0)
++ {
++ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND));
++ }
++
++ if (psCommand->ui32SrcSyncCount > 0)
++ {
++ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++ }
++
++ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))
++ + (psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++
++ UPDATE_QUEUE_WOFF(psQueue, psCommand->ui32CmdSize);
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA *psSysData,
++ PVRSRV_COMMAND *psCommand,
++ IMG_BOOL bFlush)
++{
++ PVRSRV_SYNC_OBJECT *psWalkerObj;
++ PVRSRV_SYNC_OBJECT *psEndObj;
++ IMG_UINT32 i;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ IMG_UINT32 ui32WriteOpsComplete;
++ IMG_UINT32 ui32ReadOpsComplete;
++
++
++ psWalkerObj = psCommand->psDstSync;
++ psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
++ while (psWalkerObj < psEndObj)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++ {
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++ !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++
++ psWalkerObj++;
++ }
++
++
++ psWalkerObj = psCommand->psSrcSync;
++ psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
++ while (psWalkerObj < psEndObj)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++ {
++ if (!bFlush &&
++ SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) &&
++ SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
++ psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending));
++ }
++
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++ !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++ psWalkerObj++;
++ }
++
++
++ if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVProcessCommand: invalid DeviceType 0x%x",
++ psCommand->ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psCmdCompleteData = psSysData->ppsCmdCompleteData[psCommand->ui32DevIndex][psCommand->CommandType];
++ if (psCmdCompleteData->bInUse)
++ {
++
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++
++
++ psCmdCompleteData->bInUse = IMG_TRUE;
++
++
++ psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
++ for (i=0; i<psCommand->ui32DstSyncCount; i++)
++ {
++ psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
++ for (i=0; i<psCommand->ui32SrcSyncCount; i++)
++ {
++ psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++
++
++
++
++
++
++
++
++
++ if (psSysData->ppfnCmdProcList[psCommand->ui32DevIndex][psCommand->CommandType]((IMG_HANDLE)psCmdCompleteData,
++ psCommand->ui32DataSize,
++ psCommand->pvData) == IMG_FALSE)
++ {
++
++
++
++ psCmdCompleteData->bInUse = IMG_FALSE;
++ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++ return eError;
++}
++
++
++IMG_VOID PVRSRVProcessQueues_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if (psDeviceNode->bReProcessDeviceCommandComplete &&
++ psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++ {
++ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++ }
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessQueues(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bFlush)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++ PVRSRV_COMMAND *psCommand;
++ PVRSRV_ERROR eError;
++
++ SysAcquireData(&psSysData);
++
++
++ psSysData->bReProcessQueues = IMG_FALSE;
++
++
++ eError = OSLockResource(&psSysData->sQProcessResource,
++ ui32CallerID);
++ if(eError != PVRSRV_OK)
++ {
++
++ psSysData->bReProcessQueues = IMG_TRUE;
++
++
++ if(ui32CallerID == ISR_ID)
++ {
++ if (bFlush)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVProcessQueues: Couldn't acquire queue processing lock for FLUSH"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVProcessQueues: Couldn't acquire queue processing lock"));
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVProcessQueues: Queue processing lock-acquire failed when called from the Services driver."));
++ PVR_DPF((PVR_DBG_MESSAGE," This is due to MISR queue processing being interrupted by the Services driver."));
++ }
++
++ return PVRSRV_OK;
++ }
++
++ psQueue = psSysData->psQueueList;
++
++ if(!psQueue)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands"));
++ }
++
++ if (bFlush)
++ {
++ PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
++ }
++
++ while (psQueue)
++ {
++ while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset)
++ {
++ psCommand = (PVRSRV_COMMAND*)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + psQueue->ui32ReadOffset);
++
++ if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK)
++ {
++
++ UPDATE_QUEUE_ROFF(psQueue, psCommand->ui32CmdSize)
++
++ if (bFlush)
++ {
++ continue;
++ }
++ }
++
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if (bFlush)
++ {
++ PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
++ }
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVProcessQueues_ForEachCb);
++
++
++
++ OSUnlockResource(&psSysData->sQProcessResource, ui32CallerID);
++
++
++ if(psSysData->bReProcessQueues)
++ {
++ return PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie,
++ IMG_BOOL bScheduleMISR)
++{
++ IMG_UINT32 i;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ for (i=0; i<psCmdCompleteData->ui32DstSyncCount; i++)
++ {
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ for (i=0; i<psCmdCompleteData->ui32SrcSyncCount; i++)
++ {
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete++;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++ psCmdCompleteData->bInUse = IMG_FALSE;
++
++
++ PVRSRVCommandCompleteCallbacks();
++
++#if defined(SYS_USING_INTERRUPTS)
++ if(bScheduleMISR)
++ {
++ OSScheduleMISR(psSysData);
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
++#endif
++}
++
++
++IMG_VOID PVRSRVCommandCompleteCallbacks_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if(psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++ {
++
++ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++ }
++}
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID)
++{
++ SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVCommandCompleteCallbacks_ForEachCb);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ PFN_CMD_PROC *ppfnCmdProcList,
++ IMG_UINT32 ui32MaxSyncsPerCmd[][2],
++ IMG_UINT32 ui32CmdCount)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++ IMG_SIZE_T ui32AllocSize;
++ PFN_CMD_PROC *ppfnCmdProc;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++
++ if(ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++
++ eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(PFN_CMD_PROC),
++ (IMG_VOID **)&psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL,
++ "Internal Queue Info structure");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc queue"));
++ return eError;
++ }
++
++
++ ppfnCmdProc = psSysData->ppfnCmdProcList[ui32DevIndex];
++
++
++ for (i=0; i<ui32CmdCount; i++)
++ {
++ ppfnCmdProc[i] = ppfnCmdProcList[i];
++ }
++
++
++ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*);
++ eError = OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL,
++ "Array of Pointers for Command Store");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data"));
++ goto ErrorExit;
++ }
++
++ for (i=0; i<ui32CmdCount; i++)
++ {
++
++
++ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0]
++ + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(PVRSRV_SYNC_OBJECT));
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex][i],
++ IMG_NULL,
++ "Command Complete Data");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d",i));
++ goto ErrorExit;
++ }
++
++
++ OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex][i], 0x00, ui32AllocSize);
++
++ psCmdCompleteData = psSysData->ppsCmdCompleteData[ui32DevIndex][i];
++
++
++ psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*)
++ (((IMG_UINTPTR_T)psCmdCompleteData)
++ + sizeof(COMMAND_COMPLETE_DATA));
++ psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*)
++ (((IMG_UINTPTR_T)psCmdCompleteData->psDstSync)
++ + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[i][0]));
++
++ psCmdCompleteData->ui32AllocSize = ui32AllocSize;
++ }
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL)
++ {
++ for (i=0; i<ui32CmdCount; i++)
++ {
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++ {
++ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0]
++ + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(PVRSRV_SYNC_OBJECT));
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppsCmdCompleteData[ui32DevIndex][i], IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i] = IMG_NULL;
++ }
++ }
++ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex] = IMG_NULL;
++ }
++
++ if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++ {
++ ui32AllocSize = ui32CmdCount * sizeof(PFN_CMD_PROC);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++ psSysData->ppfnCmdProcList[ui32DevIndex] = IMG_NULL;
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ IMG_UINT32 ui32CmdCount)
++{
++ SYS_DATA *psSysData;
++ IMG_UINT32 i;
++
++
++ if(ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex] == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveCmdProcListKM: Invalid command array"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ else
++ {
++ for(i=0; i<ui32CmdCount; i++)
++ {
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i]->ui32AllocSize,
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i],
++ IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i] = IMG_NULL;
++ }
++ }
++
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*),
++ psSysData->ppsCmdCompleteData[ui32DevIndex],
++ IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex] = IMG_NULL;
++ }
++
++
++ if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(PFN_CMD_PROC),
++ psSysData->ppfnCmdProcList[ui32DevIndex],
++ IMG_NULL);
++ psSysData->ppfnCmdProcList[ui32DevIndex] = IMG_NULL;
++ }
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/ra.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/ra.c
+new file mode 100644
+index 0000000..d4eab59
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/ra.c
+@@ -0,0 +1,1871 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "hash.h"
++#include "ra.h"
++#include "buffer_manager.h"
++#include "osfunc.h"
++
++#ifdef __linux__
++#include <linux/kernel.h>
++#include "proc.h"
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++#include <stdio.h>
++#endif
++
++#define MINIMUM_HASH_SIZE (64)
++
++#if defined(VALIDATE_ARENA_TEST)
++
++typedef enum RESOURCE_DESCRIPTOR_TAG {
++
++ RESOURCE_SPAN_LIVE = 10,
++ RESOURCE_SPAN_FREE,
++ IMPORTED_RESOURCE_SPAN_START,
++ IMPORTED_RESOURCE_SPAN_LIVE,
++ IMPORTED_RESOURCE_SPAN_FREE,
++ IMPORTED_RESOURCE_SPAN_END,
++
++} RESOURCE_DESCRIPTOR;
++
++typedef enum RESOURCE_TYPE_TAG {
++
++ IMPORTED_RESOURCE_TYPE = 20,
++ NON_IMPORTED_RESOURCE_TYPE
++
++} RESOURCE_TYPE;
++
++
++static IMG_UINT32 ui32BoundaryTagID = 0;
++
++IMG_UINT32 ValidateArena(RA_ARENA *pArena);
++#endif
++
++struct _BT_
++{
++ enum bt_type
++ {
++ btt_span,
++ btt_free,
++ btt_live
++ } type;
++
++
++ IMG_UINTPTR_T base;
++ IMG_SIZE_T uSize;
++
++
++ struct _BT_ *pNextSegment;
++ struct _BT_ *pPrevSegment;
++
++ struct _BT_ *pNextFree;
++ struct _BT_ *pPrevFree;
++
++ BM_MAPPING *psMapping;
++
++#if defined(VALIDATE_ARENA_TEST)
++ RESOURCE_DESCRIPTOR eResourceSpan;
++ RESOURCE_TYPE eResourceType;
++
++
++ IMG_UINT32 ui32BoundaryTagID;
++#endif
++
++};
++typedef struct _BT_ BT;
++
++
++struct _RA_ARENA_
++{
++
++ IMG_CHAR *name;
++
++
++ IMG_SIZE_T uQuantum;
++
++
++ IMG_BOOL (*pImportAlloc)(IMG_VOID *,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase);
++ IMG_VOID (*pImportFree) (IMG_VOID *,
++ IMG_UINTPTR_T,
++ BM_MAPPING *psMapping);
++ IMG_VOID (*pBackingStoreFree) (IMG_VOID *, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE);
++
++
++ IMG_VOID *pImportHandle;
++
++
++#define FREE_TABLE_LIMIT 32
++
++
++ BT *aHeadFree [FREE_TABLE_LIMIT];
++
++
++ BT *pHeadSegment;
++ BT *pTailSegment;
++
++
++ HASH_TABLE *pSegmentHash;
++
++#ifdef RA_STATS
++ RA_STATISTICS sStatistics;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++#define PROC_NAME_SIZE 32
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ struct proc_dir_entry* pProcInfo;
++ struct proc_dir_entry* pProcSegs;
++#else
++ IMG_CHAR szProcInfoName[PROC_NAME_SIZE];
++ IMG_CHAR szProcSegsName[PROC_NAME_SIZE];
++#endif
++
++ IMG_BOOL bInitProcEntry;
++#endif
++};
++#if defined(ENABLE_RA_DUMP)
++IMG_VOID RA_Dump (RA_ARENA *pArena);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el);
++static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off);
++
++static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el);
++static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off);
++
++#else
++static IMG_INT
++RA_DumpSegs(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++static IMG_INT
++RA_DumpInfo(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++IMG_VOID CheckBMFreespace(IMG_VOID);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static IMG_CHAR *ReplaceSpaces(IMG_CHAR * const pS)
++{
++ IMG_CHAR *pT;
++
++ for(pT = pS; *pT != 0; pT++)
++ {
++ if (*pT == ' ' || *pT == '\t')
++ {
++ *pT = '_';
++ }
++ }
++
++ return pS;
++}
++#endif
++
++static IMG_BOOL
++_RequestAllocFail (IMG_VOID *_h,
++ IMG_SIZE_T _uSize,
++ IMG_SIZE_T *_pActualSize,
++ BM_MAPPING **_ppsMapping,
++ IMG_UINT32 _uFlags,
++ IMG_UINTPTR_T *_pBase)
++{
++ PVR_UNREFERENCED_PARAMETER (_h);
++ PVR_UNREFERENCED_PARAMETER (_uSize);
++ PVR_UNREFERENCED_PARAMETER (_pActualSize);
++ PVR_UNREFERENCED_PARAMETER (_ppsMapping);
++ PVR_UNREFERENCED_PARAMETER (_uFlags);
++ PVR_UNREFERENCED_PARAMETER (_pBase);
++
++ return IMG_FALSE;
++}
++
++static IMG_UINT32
++pvr_log2 (IMG_SIZE_T n)
++{
++ IMG_UINT32 l = 0;
++ n>>=1;
++ while (n>0)
++ {
++ n>>=1;
++ l++;
++ }
++ return l;
++}
++
++static PVRSRV_ERROR
++_SegmentListInsertAfter (RA_ARENA *pArena,
++ BT *pInsertionPoint,
++ BT *pBT)
++{
++ PVR_ASSERT (pArena != IMG_NULL);
++ PVR_ASSERT (pInsertionPoint != IMG_NULL);
++
++ if ((pInsertionPoint == IMG_NULL) || (pArena == IMG_NULL))
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentListInsertAfter: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBT->pNextSegment = pInsertionPoint->pNextSegment;
++ pBT->pPrevSegment = pInsertionPoint;
++ if (pInsertionPoint->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pBT;
++ else
++ pInsertionPoint->pNextSegment->pPrevSegment = pBT;
++ pInsertionPoint->pNextSegment = pBT;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++
++ if (pArena->pHeadSegment == IMG_NULL)
++ {
++ pArena->pHeadSegment = pArena->pTailSegment = pBT;
++ pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL;
++ }
++ else
++ {
++ BT *pBTScan;
++
++ if (pBT->base < pArena->pHeadSegment->base)
++ {
++
++ pBT->pNextSegment = pArena->pHeadSegment;
++ pArena->pHeadSegment->pPrevSegment = pBT;
++ pArena->pHeadSegment = pBT;
++ pBT->pPrevSegment = IMG_NULL;
++ }
++ else
++ {
++
++
++
++
++ pBTScan = pArena->pHeadSegment;
++
++ while ((pBTScan->pNextSegment != IMG_NULL) && (pBT->base >= pBTScan->pNextSegment->base))
++ {
++ pBTScan = pBTScan->pNextSegment;
++ }
++
++ eError = _SegmentListInsertAfter (pArena, pBTScan, pBT);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ }
++ return eError;
++}
++
++static IMG_VOID
++_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
++{
++ if (pBT->pPrevSegment == IMG_NULL)
++ pArena->pHeadSegment = pBT->pNextSegment;
++ else
++ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
++
++ if (pBT->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pBT->pPrevSegment;
++ else
++ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
++}
++
++static BT *
++_SegmentSplit (RA_ARENA *pArena, BT *pBT, IMG_SIZE_T uSize)
++{
++ BT *pNeighbour;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pNeighbour, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pNeighbour, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pNeighbour->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pNeighbour->pPrevSegment = pBT;
++ pNeighbour->pNextSegment = pBT->pNextSegment;
++ if (pBT->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pNeighbour;
++ else
++ pBT->pNextSegment->pPrevSegment = pNeighbour;
++ pBT->pNextSegment = pNeighbour;
++
++ pNeighbour->type = btt_free;
++ pNeighbour->uSize = pBT->uSize - uSize;
++ pNeighbour->base = pBT->base + uSize;
++ pNeighbour->psMapping = pBT->psMapping;
++ pBT->uSize = uSize;
++
++#if defined(VALIDATE_ARENA_TEST)
++ if (pNeighbour->pPrevSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ pNeighbour->eResourceType = IMPORTED_RESOURCE_TYPE;
++ pNeighbour->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
++ }
++ else if (pNeighbour->pPrevSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ pNeighbour->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
++ pNeighbour->eResourceSpan = RESOURCE_SPAN_FREE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: pNeighbour->pPrevSegment->eResourceType unrecognized"));
++ PVR_DBG_BREAK;
++ }
++#endif
++
++ return pNeighbour;
++}
++
++static IMG_VOID
++_FreeListInsert (RA_ARENA *pArena, BT *pBT)
++{
++ IMG_UINT32 uIndex;
++ uIndex = pvr_log2 (pBT->uSize);
++ pBT->type = btt_free;
++ pBT->pNextFree = pArena->aHeadFree [uIndex];
++ pBT->pPrevFree = IMG_NULL;
++ if (pArena->aHeadFree[uIndex] != IMG_NULL)
++ pArena->aHeadFree[uIndex]->pPrevFree = pBT;
++ pArena->aHeadFree [uIndex] = pBT;
++}
++
++static IMG_VOID
++_FreeListRemove (RA_ARENA *pArena, BT *pBT)
++{
++ IMG_UINT32 uIndex;
++ uIndex = pvr_log2 (pBT->uSize);
++ if (pBT->pNextFree != IMG_NULL)
++ pBT->pNextFree->pPrevFree = pBT->pPrevFree;
++ if (pBT->pPrevFree == IMG_NULL)
++ pArena->aHeadFree[uIndex] = pBT->pNextFree;
++ else
++ pBT->pPrevFree->pNextFree = pBT->pNextFree;
++}
++
++static BT *
++_BuildSpanMarker (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pBT, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pBT, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pBT->type = btt_span;
++ pBT->base = base;
++ pBT->uSize = uSize;
++ pBT->psMapping = IMG_NULL;
++
++ return pBT;
++}
++
++static BT *
++_BuildBT (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pBT, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pBT, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pBT->type = btt_free;
++ pBT->base = base;
++ pBT->uSize = uSize;
++
++ return pBT;
++}
++
++static BT *
++_InsertResource (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++ PVR_ASSERT (pArena!=IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ pBT = _BuildBT (base, uSize);
++ if (pBT != IMG_NULL)
++ {
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->eResourceSpan = RESOURCE_SPAN_FREE;
++ pBT->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
++#endif
++
++ if (_SegmentListInsert (pArena, pBT) != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: call to _SegmentListInsert failed"));
++ return IMG_NULL;
++ }
++ _FreeListInsert (pArena, pBT);
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount+=uSize;
++ pArena->sStatistics.uFreeResourceCount+=uSize;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ }
++ return pBT;
++}
++
++static BT *
++_InsertResourceSpan (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ PVRSRV_ERROR eError;
++ BT *pSpanStart;
++ BT *pSpanEnd;
++ BT *pBT;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResourceSpan: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
++ pArena->name, base, uSize));
++
++ pSpanStart = _BuildSpanMarker (base, uSize);
++ if (pSpanStart == IMG_NULL)
++ {
++ goto fail_start;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pSpanStart->eResourceSpan = IMPORTED_RESOURCE_SPAN_START;
++ pSpanStart->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ pSpanEnd = _BuildSpanMarker (base + uSize, 0);
++ if (pSpanEnd == IMG_NULL)
++ {
++ goto fail_end;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pSpanEnd->eResourceSpan = IMPORTED_RESOURCE_SPAN_END;
++ pSpanEnd->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ pBT = _BuildBT (base, uSize);
++ if (pBT == IMG_NULL)
++ {
++ goto fail_bt;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
++ pBT->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ eError = _SegmentListInsert (pArena, pSpanStart);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++ eError = _SegmentListInsertAfter (pArena, pSpanStart, pBT);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++ _FreeListInsert (pArena, pBT);
++
++ eError = _SegmentListInsertAfter (pArena, pBT, pSpanEnd);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount+=uSize;
++#endif
++ return pBT;
++
++ fail_SegListInsert:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++ fail_bt:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL);
++
++ fail_end:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL);
++
++ fail_start:
++ return IMG_NULL;
++}
++
++static IMG_VOID
++_FreeBT (RA_ARENA *pArena, BT *pBT, IMG_BOOL bFreeBackingStore)
++{
++ BT *pNeighbour;
++ IMG_UINTPTR_T uOrigBase;
++ IMG_SIZE_T uOrigSize;
++
++ PVR_ASSERT (pArena!=IMG_NULL);
++ PVR_ASSERT (pBT!=IMG_NULL);
++
++ if ((pArena == IMG_NULL) || (pBT == IMG_NULL))
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_FreeBT: invalid parameter"));
++ return;
++ }
++
++#ifdef RA_STATS
++ pArena->sStatistics.uLiveSegmentCount--;
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++#endif
++
++ uOrigBase = pBT->base;
++ uOrigSize = pBT->uSize;
++
++
++ pNeighbour = pBT->pPrevSegment;
++ if (pNeighbour!=IMG_NULL
++ && pNeighbour->type == btt_free
++ && pNeighbour->base + pNeighbour->uSize == pBT->base)
++ {
++ _FreeListRemove (pArena, pNeighbour);
++ _SegmentListRemove (pArena, pNeighbour);
++ pBT->base = pNeighbour->base;
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++
++ pNeighbour = pBT->pNextSegment;
++ if (pNeighbour!=IMG_NULL
++ && pNeighbour->type == btt_free
++ && pBT->base + pBT->uSize == pNeighbour->base)
++ {
++ _FreeListRemove (pArena, pNeighbour);
++ _SegmentListRemove (pArena, pNeighbour);
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++
++ if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore)
++ {
++ IMG_UINTPTR_T uRoundedStart, uRoundedEnd;
++
++
++ uRoundedStart = (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedStart < pBT->base)
++ {
++ uRoundedStart += pArena->uQuantum;
++ }
++
++
++ uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum - 1) / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedEnd > (pBT->base + pBT->uSize))
++ {
++ uRoundedEnd -= pArena->uQuantum;
++ }
++
++ if (uRoundedStart < uRoundedEnd)
++ {
++ pArena->pBackingStoreFree(pArena->pImportHandle, uRoundedStart, uRoundedEnd, (IMG_HANDLE)0);
++ }
++ }
++
++ if (pBT->pNextSegment!=IMG_NULL && pBT->pNextSegment->type == btt_span
++ && pBT->pPrevSegment!=IMG_NULL && pBT->pPrevSegment->type == btt_span)
++ {
++ BT *next = pBT->pNextSegment;
++ BT *prev = pBT->pPrevSegment;
++ _SegmentListRemove (pArena, next);
++ _SegmentListRemove (pArena, prev);
++ _SegmentListRemove (pArena, pBT);
++ pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->psMapping);
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++ pArena->sStatistics.uExportCount++;
++ pArena->sStatistics.uFreeSegmentCount--;
++ pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++ pArena->sStatistics.uTotalResourceCount-=pBT->uSize;
++#endif
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++ }
++ else
++ _FreeListInsert (pArena, pBT);
++}
++
++
++static IMG_BOOL
++_AttemptAllocAligned (RA_ARENA *pArena,
++ IMG_SIZE_T uSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *base)
++{
++ IMG_UINT32 uIndex;
++ PVR_ASSERT (pArena!=IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++ if (uAlignment>1)
++ uAlignmentOffset %= uAlignment;
++
++
++
++ uIndex = pvr_log2 (uSize);
++
++#if 0
++
++ if (1u<<uIndex < uSize)
++ uIndex++;
++#endif
++
++ while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex]==IMG_NULL)
++ uIndex++;
++
++ while (uIndex < FREE_TABLE_LIMIT)
++ {
++ if (pArena->aHeadFree[uIndex]!=IMG_NULL)
++ {
++
++ BT *pBT;
++
++ pBT = pArena->aHeadFree [uIndex];
++ while (pBT!=IMG_NULL)
++ {
++ IMG_UINTPTR_T aligned_base;
++
++ if (uAlignment>1)
++ aligned_base = (pBT->base + uAlignmentOffset + uAlignment - 1) / uAlignment * uAlignment - uAlignmentOffset;
++ else
++ aligned_base = pBT->base;
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_AttemptAllocAligned: pBT-base=0x%x "
++ "pBT-size=0x%x alignedbase=0x%x size=0x%x",
++ pBT->base, pBT->uSize, aligned_base, uSize));
++
++ if (pBT->base + pBT->uSize >= aligned_base + uSize)
++ {
++ if(!pBT->psMapping || pBT->psMapping->ui32Flags == uFlags)
++ {
++ _FreeListRemove (pArena, pBT);
++
++ PVR_ASSERT (pBT->type == btt_free);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uLiveSegmentCount++;
++ pArena->sStatistics.uFreeSegmentCount--;
++ pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++#endif
++
++
++ if (aligned_base > pBT->base)
++ {
++ BT *pNeighbour;
++
++ pNeighbour = _SegmentSplit (pArena, pBT, aligned_base-pBT->base);
++
++ if (pNeighbour==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Front split failed"));
++
++ _FreeListInsert (pArena, pBT);
++ return IMG_FALSE;
++ }
++
++ _FreeListInsert (pArena, pBT);
++ #ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++ #endif
++ pBT = pNeighbour;
++ }
++
++
++ if (pBT->uSize > uSize)
++ {
++ BT *pNeighbour;
++ pNeighbour = _SegmentSplit (pArena, pBT, uSize);
++
++ if (pNeighbour==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Back split failed"));
++
++ _FreeListInsert (pArena, pBT);
++ return IMG_FALSE;
++ }
++
++ _FreeListInsert (pArena, pNeighbour);
++ #ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pNeighbour->uSize;
++ #endif
++ }
++
++ pBT->type = btt_live;
++
++#if defined(VALIDATE_ARENA_TEST)
++ if (pBT->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_LIVE;
++ }
++ else if (pBT->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ pBT->eResourceSpan = RESOURCE_SPAN_LIVE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned ERROR: pBT->eResourceType unrecognized"));
++ PVR_DBG_BREAK;
++ }
++#endif
++ if (!HASH_Insert (pArena->pSegmentHash, pBT->base, (IMG_UINTPTR_T) pBT))
++ {
++ _FreeBT (pArena, pBT, IMG_FALSE);
++ return IMG_FALSE;
++ }
++
++ if (ppsMapping!=IMG_NULL)
++ *ppsMapping = pBT->psMapping;
++
++ *base = pBT->base;
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AttemptAllocAligned: mismatch in flags. Import has %x, request was %x", pBT->psMapping->ui32Flags, uFlags));
++
++ }
++ }
++ pBT = pBT->pNextFree;
++ }
++
++ }
++ uIndex++;
++ }
++
++ return IMG_FALSE;
++}
++
++
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++ IMG_UINTPTR_T base,
++ IMG_SIZE_T uSize,
++ BM_MAPPING *psMapping,
++ IMG_SIZE_T uQuantum,
++ IMG_BOOL (*imp_alloc)(IMG_VOID *, IMG_SIZE_T uSize, IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping, IMG_UINT32 _flags, IMG_UINTPTR_T *pBase),
++ IMG_VOID (*imp_free) (IMG_VOID *, IMG_UINTPTR_T, BM_MAPPING *),
++ IMG_VOID (*backingstore_free) (IMG_VOID*, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE),
++ IMG_VOID *pImportHandle)
++{
++ RA_ARENA *pArena;
++ BT *pBT;
++ IMG_INT i;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
++ name, base, uSize, imp_alloc, imp_free));
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (*pArena),
++ (IMG_VOID **)&pArena, IMG_NULL,
++ "Resource Arena") != PVRSRV_OK)
++ {
++ goto arena_fail;
++ }
++
++ pArena->name = name;
++ pArena->pImportAlloc = (imp_alloc!=IMG_NULL) ? imp_alloc : _RequestAllocFail;
++ pArena->pImportFree = imp_free;
++ pArena->pBackingStoreFree = backingstore_free;
++ pArena->pImportHandle = pImportHandle;
++ for (i=0; i<FREE_TABLE_LIMIT; i++)
++ pArena->aHeadFree[i] = IMG_NULL;
++ pArena->pHeadSegment = IMG_NULL;
++ pArena->pTailSegment = IMG_NULL;
++ pArena->uQuantum = uQuantum;
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount = 0;
++ pArena->sStatistics.uLiveSegmentCount = 0;
++ pArena->sStatistics.uFreeSegmentCount = 0;
++ pArena->sStatistics.uFreeResourceCount = 0;
++ pArena->sStatistics.uTotalResourceCount = 0;
++ pArena->sStatistics.uCumulativeAllocs = 0;
++ pArena->sStatistics.uCumulativeFrees = 0;
++ pArena->sStatistics.uImportCount = 0;
++ pArena->sStatistics.uExportCount = 0;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ if(strcmp(pArena->name,"") != 0)
++ {
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++ IMG_INT ret;
++ IMG_INT (*pfnCreateProcEntry)(const IMG_CHAR *, read_proc_t, write_proc_t, IMG_VOID *);
++
++ pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++
++ pfnCreateProcEntry = pArena->bInitProcEntry ? CreateProcEntry : CreatePerProcessProcEntry;
++
++ ret = snprintf(pArena->szProcInfoName, sizeof(pArena->szProcInfoName), "ra_info_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcInfoName))
++ {
++ (IMG_VOID) pfnCreateProcEntry(ReplaceSpaces(pArena->szProcInfoName), RA_DumpInfo, 0, pArena);
++ }
++ else
++ {
++ pArena->szProcInfoName[0] = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name));
++ }
++
++ ret = snprintf(pArena->szProcSegsName, sizeof(pArena->szProcSegsName), "ra_segs_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcSegsName))
++ {
++ (IMG_VOID) pfnCreateProcEntry(ReplaceSpaces(pArena->szProcSegsName), RA_DumpSegs, 0, pArena);
++ }
++ else
++ {
++ pArena->szProcSegsName[0] = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name));
++ }
++#else
++
++ IMG_INT ret;
++ IMG_CHAR szProcInfoName[PROC_NAME_SIZE];
++ IMG_CHAR szProcSegsName[PROC_NAME_SIZE];
++ struct proc_dir_entry* (*pfnCreateProcEntrySeq)(const IMG_CHAR *,
++ IMG_VOID*,
++ pvr_next_proc_seq_t,
++ pvr_show_proc_seq_t,
++ pvr_off2element_proc_seq_t,
++ pvr_startstop_proc_seq_t,
++ write_proc_t);
++
++ pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++
++ pfnCreateProcEntrySeq = pArena->bInitProcEntry ? CreateProcEntrySeq : CreatePerProcessProcEntrySeq;
++
++ ret = snprintf(szProcInfoName, sizeof(szProcInfoName), "ra_info_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(szProcInfoName))
++ {
++ pArena->pProcInfo = pfnCreateProcEntrySeq(ReplaceSpaces(szProcInfoName), pArena, NULL,
++ RA_ProcSeqShowInfo, RA_ProcSeqOff2ElementInfo, NULL, NULL);
++ }
++ else
++ {
++ pArena->pProcInfo = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name));
++ }
++
++ ret = snprintf(szProcSegsName, sizeof(szProcSegsName), "ra_segs_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(szProcInfoName))
++ {
++ pArena->pProcSegs = pfnCreateProcEntrySeq(ReplaceSpaces(szProcSegsName), pArena, NULL,
++ RA_ProcSeqShowRegs, RA_ProcSeqOff2ElementRegs, NULL, NULL);
++ }
++ else
++ {
++ pArena->pProcSegs = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name));
++ }
++
++#endif
++
++ }
++#endif
++
++ pArena->pSegmentHash = HASH_Create (MINIMUM_HASH_SIZE);
++ if (pArena->pSegmentHash==IMG_NULL)
++ {
++ goto hash_fail;
++ }
++ if (uSize>0)
++ {
++ uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum;
++ pBT = _InsertResource (pArena, base, uSize);
++ if (pBT == IMG_NULL)
++ {
++ goto insert_fail;
++ }
++ pBT->psMapping = psMapping;
++
++ }
++ return pArena;
++
++insert_fail:
++ HASH_Delete (pArena->pSegmentHash);
++hash_fail:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL);
++
++arena_fail:
++ return IMG_NULL;
++}
++
++IMG_VOID
++RA_Delete (RA_ARENA *pArena)
++{
++ IMG_UINT32 uIndex;
++
++ PVR_ASSERT(pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
++ return;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Delete: name='%s'", pArena->name));
++
++ for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
++ pArena->aHeadFree[uIndex] = IMG_NULL;
++
++ while (pArena->pHeadSegment != IMG_NULL)
++ {
++ BT *pBT = pArena->pHeadSegment;
++
++ if (pBT->type != btt_free)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: allocations still exist in the arena that is being destroyed"));
++ PVR_DPF ((PVR_DBG_ERROR,"Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: base = 0x%x size=0x%x", pBT->base, pBT->uSize));
++ }
++
++ _SegmentListRemove (pArena, pBT);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++#endif
++ }
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ {
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ IMG_VOID (*pfnRemoveProcEntrySeq)(struct proc_dir_entry*);
++
++ pfnRemoveProcEntrySeq = pArena->bInitProcEntry ? RemoveProcEntrySeq : RemovePerProcessProcEntrySeq;
++
++ if (pArena->pProcInfo != 0)
++ {
++ pfnRemoveProcEntrySeq( pArena->pProcInfo );
++ }
++
++ if (pArena->pProcSegs != 0)
++ {
++ pfnRemoveProcEntrySeq( pArena->pProcSegs );
++ }
++
++#else
++ IMG_VOID (*pfnRemoveProcEntry)(const IMG_CHAR *);
++
++ pfnRemoveProcEntry = pArena->bInitProcEntry ? RemoveProcEntry : RemovePerProcessProcEntry;
++
++ if (pArena->szProcInfoName[0] != 0)
++ {
++ pfnRemoveProcEntry(pArena->szProcInfoName);
++ }
++
++ if (pArena->szProcSegsName[0] != 0)
++ {
++ pfnRemoveProcEntry(pArena->szProcSegsName);
++ }
++
++#endif
++ }
++#endif
++ HASH_Delete (pArena->pSegmentHash);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL);
++
++}
++
++IMG_BOOL
++RA_TestDelete (RA_ARENA *pArena)
++{
++ PVR_ASSERT(pArena != IMG_NULL);
++
++ if (pArena != IMG_NULL)
++ {
++ while (pArena->pHeadSegment != IMG_NULL)
++ {
++ BT *pBT = pArena->pHeadSegment;
++ if (pBT->type != btt_free)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: detected resource leak!"));
++ PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: base = 0x%x size=0x%x", pBT->base, pBT->uSize));
++ return IMG_FALSE;
++ }
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base, uSize));
++
++ uSize = (uSize + pArena->uQuantum - 1) / pArena->uQuantum * pArena->uQuantum;
++ return ((IMG_BOOL)(_InsertResource (pArena, base, uSize) != IMG_NULL));
++}
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena,
++ IMG_SIZE_T uRequestSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *base)
++{
++ IMG_BOOL bResult;
++ IMG_SIZE_T uSize = uRequestSize;
++
++ PVR_ASSERT (pArena!=IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Alloc: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ ValidateArena(pArena);
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++ CheckBMFreespace();
++#endif
++
++ if (pActualSize != IMG_NULL)
++ {
++ *pActualSize = uSize;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x",
++ pArena->name, uSize, uRequestSize, uAlignment, uAlignmentOffset));
++
++
++
++ bResult = _AttemptAllocAligned (pArena, uSize, ppsMapping, uFlags,
++ uAlignment, uAlignmentOffset, base);
++ if (!bResult)
++ {
++ BM_MAPPING *psImportMapping;
++ IMG_UINTPTR_T import_base;
++ IMG_SIZE_T uImportSize = uSize;
++
++
++
++
++ if (uAlignment > pArena->uQuantum)
++ {
++ uImportSize += (uAlignment - 1);
++ }
++
++
++ uImportSize = ((uImportSize + pArena->uQuantum - 1)/pArena->uQuantum)*pArena->uQuantum;
++
++ bResult =
++ pArena->pImportAlloc (pArena->pImportHandle, uImportSize, &uImportSize,
++ &psImportMapping, uFlags, &import_base);
++ if (bResult)
++ {
++ BT *pBT;
++ pBT = _InsertResourceSpan (pArena, import_base, uImportSize);
++
++ if (pBT == IMG_NULL)
++ {
++
++ pArena->pImportFree(pArena->pImportHandle, import_base,
++ psImportMapping);
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x failed!",
++ pArena->name, uSize));
++
++ return IMG_FALSE;
++ }
++ pBT->psMapping = psImportMapping;
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount += uImportSize;
++ pArena->sStatistics.uImportCount++;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags,
++ uAlignment, uAlignmentOffset,
++ base);
++ if (!bResult)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s' uAlignment failed!",
++ pArena->name));
++ }
++ }
++ }
++#ifdef RA_STATS
++ if (bResult)
++ pArena->sStatistics.uCumulativeAllocs++;
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d",
++ pArena->name, uSize, *base, bResult));
++
++
++
++#if defined(VALIDATE_ARENA_TEST)
++ ValidateArena(pArena);
++#endif
++
++ return bResult;
++}
++
++
++#if defined(VALIDATE_ARENA_TEST)
++
++IMG_UINT32 ValidateArena(RA_ARENA *pArena)
++{
++ BT* pSegment;
++ RESOURCE_DESCRIPTOR eNextSpan;
++
++ pSegment = pArena->pHeadSegment;
++
++ if (pSegment == IMG_NULL)
++ {
++ return 0;
++ }
++
++ if (pSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ PVR_ASSERT(pSegment->eResourceSpan == IMPORTED_RESOURCE_SPAN_START);
++
++ while (pSegment->pNextSegment)
++ {
++ eNextSpan = pSegment->pNextSegment->eResourceSpan;
++
++ switch (pSegment->eResourceSpan)
++ {
++ case IMPORTED_RESOURCE_SPAN_LIVE:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_FREE:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_END:
++
++ if ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++
++ case IMPORTED_RESOURCE_SPAN_START:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ break;
++ }
++ pSegment = pSegment->pNextSegment;
++ }
++ }
++ else if (pSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ PVR_ASSERT((pSegment->eResourceSpan == RESOURCE_SPAN_FREE) || (pSegment->eResourceSpan == RESOURCE_SPAN_LIVE));
++
++ while (pSegment->pNextSegment)
++ {
++ eNextSpan = pSegment->pNextSegment->eResourceSpan;
++
++ switch (pSegment->eResourceSpan)
++ {
++ case RESOURCE_SPAN_LIVE:
++
++ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
++ (eNextSpan == RESOURCE_SPAN_LIVE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case RESOURCE_SPAN_FREE:
++
++ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
++ (eNextSpan == RESOURCE_SPAN_LIVE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ break;
++ }
++ pSegment = pSegment->pNextSegment;
++ }
++
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"ValidateArena ERROR: pSegment->eResourceType unrecognized"));
++
++ PVR_DBG_BREAK;
++ }
++
++ return 0;
++}
++
++#endif
++
++
++IMG_VOID
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore)
++{
++ BT *pBT;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
++ return;
++ }
++
++#ifdef USE_BM_FREESPACE_CHECK
++ CheckBMFreespace();
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Free: name='%s', base=0x%x", pArena->name, base));
++
++ pBT = (BT *) HASH_Remove (pArena->pSegmentHash, base);
++ PVR_ASSERT (pBT != IMG_NULL);
++
++ if (pBT)
++ {
++ PVR_ASSERT (pBT->base == base);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uCumulativeFrees++;
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++{
++ IMG_BYTE* p;
++ IMG_BYTE* endp;
++
++ p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset();
++ endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize));
++ while ((IMG_UINT32)p & 3)
++ {
++ *p++ = 0xAA;
++ }
++ while (p < (IMG_BYTE*)((IMG_UINT32)endp & 0xfffffffc))
++ {
++ *(IMG_UINT32*)p = 0xAAAAAAAA;
++ p += sizeof(IMG_UINT32);
++ }
++ while (p < endp)
++ {
++ *p++ = 0xAA;
++ }
++ PVR_DPF((PVR_DBG_MESSAGE,"BM_FREESPACE_CHECK: RA_Free Cleared %08X to %08X (size=0x%x)",(IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(),endp-1,pBT->uSize));
++}
++#endif
++ _FreeBT (pArena, pBT, bFreeBackingStore);
++ }
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails)
++{
++ BT *pBT;
++
++ if (psSegDetails->hSegment)
++ {
++ pBT = (BT *)psSegDetails->hSegment;
++ }
++ else
++ {
++ RA_ARENA *pArena = (RA_ARENA *)hArena;
++
++ pBT = pArena->pHeadSegment;
++ }
++
++ while (pBT != IMG_NULL)
++ {
++ if (pBT->type == btt_live)
++ {
++ psSegDetails->uiSize = pBT->uSize;
++ psSegDetails->sCpuPhyAddr.uiAddr = pBT->base;
++ psSegDetails->hSegment = (IMG_HANDLE)pBT->pNextSegment;
++
++ return IMG_TRUE;
++ }
++
++ pBT = pBT->pNextSegment;
++ }
++
++ psSegDetails->uiSize = 0;
++ psSegDetails->sCpuPhyAddr.uiAddr = 0;
++ psSegDetails->hSegment = (IMG_HANDLE)-1;
++
++ return IMG_FALSE;
++}
++
++
++#ifdef USE_BM_FREESPACE_CHECK
++RA_ARENA* pJFSavedArena = IMG_NULL;
++
++IMG_VOID CheckBMFreespace(IMG_VOID)
++{
++ BT *pBT;
++ IMG_BYTE* p;
++ IMG_BYTE* endp;
++
++ if (pJFSavedArena != IMG_NULL)
++ {
++ for (pBT=pJFSavedArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ if (pBT->type == btt_free)
++ {
++ p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset();
++ endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize) & 0xfffffffc);
++
++ while ((IMG_UINT32)p & 3)
++ {
++ if (*p++ != 0xAA)
++ {
++ fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p);
++ for (;;);
++ break;
++ }
++ }
++ while (p < endp)
++ {
++ if (*(IMG_UINT32*)p != 0xAAAAAAAA)
++ {
++ fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p);
++ for (;;);
++ break;
++ }
++ p += 4;
++ }
++ }
++ }
++ }
++}
++#endif
++
++
++#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS)
++static IMG_CHAR *
++_BTType (IMG_INT eType)
++{
++ switch (eType)
++ {
++ case btt_span: return "span";
++ case btt_free: return "free";
++ case btt_live: return "live";
++ }
++ return "junk";
++}
++#endif
++
++#if defined(ENABLE_RA_DUMP)
++IMG_VOID
++RA_Dump (RA_ARENA *pArena)
++{
++ BT *pBT;
++ PVR_ASSERT (pArena != IMG_NULL);
++ PVR_DPF ((PVR_DBG_MESSAGE,"Arena '%s':", pArena->name));
++ PVR_DPF ((PVR_DBG_MESSAGE," alloc=%08X free=%08X handle=%08X quantum=%d",
++ pArena->pImportAlloc, pArena->pImportFree, pArena->pImportHandle,
++ pArena->uQuantum));
++ PVR_DPF ((PVR_DBG_MESSAGE," segment Chain:"));
++ if (pArena->pHeadSegment != IMG_NULL &&
++ pArena->pHeadSegment->pPrevSegment != IMG_NULL)
++ PVR_DPF ((PVR_DBG_MESSAGE," error: head boundary tag has invalid pPrevSegment"));
++ if (pArena->pTailSegment != IMG_NULL &&
++ pArena->pTailSegment->pNextSegment != IMG_NULL)
++ PVR_DPF ((PVR_DBG_MESSAGE," error: tail boundary tag has invalid pNextSegment"));
++
++ for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,"\tbase=0x%x size=0x%x type=%s ref=%08X",
++ (IMG_UINT32) pBT->base, pBT->uSize, _BTType (pBT->type),
++ pBT->pRef));
++ }
++
++#ifdef HASH_TRACE
++ HASH_Dump (pArena->pSegmentHash);
++#endif
++}
++#endif
++
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ IMG_INT off = (IMG_INT)el;
++
++ switch (off)
++ {
++ case 1:
++ seq_printf(sfile, "quantum\t\t\t%lu\n", pArena->uQuantum);
++ break;
++ case 2:
++ seq_printf(sfile, "import_handle\t\t%08X\n", (IMG_UINT)pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 3:
++ seq_printf(sfile,"span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ break;
++ case 4:
++ seq_printf(sfile, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 5:
++ seq_printf(sfile, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 6:
++ seq_printf(sfile, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ break;
++ case 7:
++ seq_printf(sfile, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 8:
++ seq_printf(sfile, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 9:
++ seq_printf(sfile, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ break;
++ case 10:
++ seq_printf(sfile, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ break;
++#endif
++ }
++
++}
++
++static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off)
++{
++#ifdef RA_STATS
++ if(off <= 9)
++#else
++ if(off <= 1)
++#endif
++ return (void*)(IMG_INT)(off+1);
++ return 0;
++}
++
++static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ BT *pBT = (BT*)el;
++
++ if (el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf(sfile, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name);
++ return;
++ }
++
++ if (pBT)
++ {
++ seq_printf(sfile, "%08x %8x %4s %08x\n",
++ (IMG_UINT)pBT->base, (IMG_UINT)pBT->uSize, _BTType (pBT->type),
++ (IMG_UINT)pBT->psMapping);
++ }
++}
++
++static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ BT *pBT = 0;
++
++ if(off == 0)
++ return PVR_PROC_SEQ_START_TOKEN;
++
++ for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment);
++
++ return (void*)pBT;
++}
++
++
++
++#else
++static IMG_INT
++RA_DumpSegs(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ BT *pBT = 0;
++ IMG_INT len = 0;
++ RA_ARENA *pArena = (RA_ARENA *)data;
++
++ if (count < 80)
++ {
++ *start = (IMG_CHAR *)0;
++ return (0);
++ }
++ *eof = 0;
++ *start = (IMG_CHAR *)1;
++ if (off == 0)
++ {
++ return printAppend(page, count, 0, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name);
++ }
++ for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment)
++ ;
++ if (pBT)
++ {
++ len = printAppend(page, count, 0, "%08x %8x %4s %08x\n",
++ (IMG_UINT)pBT->base, (IMG_UINT)pBT->uSize, _BTType (pBT->type),
++ (IMG_UINT)pBT->psMapping);
++ }
++ else
++ {
++ *eof = 1;
++ }
++ return (len);
++}
++
++static IMG_INT
++RA_DumpInfo(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ IMG_INT len = 0;
++ RA_ARENA *pArena = (RA_ARENA *)data;
++
++ if (count < 80)
++ {
++ *start = (IMG_CHAR *)0;
++ return (0);
++ }
++ *eof = 0;
++ switch (off)
++ {
++ case 0:
++ len = printAppend(page, count, 0, "quantum\t\t\t%lu\n", pArena->uQuantum);
++ break;
++ case 1:
++ len = printAppend(page, count, 0, "import_handle\t\t%08X\n", (IMG_UINT)pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 2:
++ len = printAppend(page, count, 0, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ break;
++ case 3:
++ len = printAppend(page, count, 0, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 4:
++ len = printAppend(page, count, 0, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 5:
++ len = printAppend(page, count, 0, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ break;
++ case 6:
++ len = printAppend(page, count, 0, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 7:
++ len = printAppend(page, count, 0, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 8:
++ len = printAppend(page, count, 0, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ break;
++ case 9:
++ len = printAppend(page, count, 0, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ break;
++#endif
++
++ default:
++ *eof = 1;
++ }
++ *start = (IMG_CHAR *)1;
++ return (len);
++}
++#endif
++#endif
++
++
++#ifdef RA_STATS
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++ IMG_CHAR **ppszStr,
++ IMG_UINT32 *pui32StrLen)
++{
++ IMG_CHAR *pszStr = *ppszStr;
++ IMG_UINT32 ui32StrLen = *pui32StrLen;
++ IMG_INT32 i32Count;
++ BT *pBT;
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " allocCB=%08X freeCB=%08X handle=%08X quantum=%d\n",
++ pArena->pImportAlloc,
++ pArena->pImportFree,
++ pArena->pImportHandle,
++ pArena->uQuantum);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " segment Chain:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ if (pArena->pHeadSegment != IMG_NULL &&
++ pArena->pHeadSegment->pPrevSegment != IMG_NULL)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " error: head boundary tag has invalid pPrevSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if (pArena->pTailSegment != IMG_NULL &&
++ pArena->pTailSegment->pNextSegment != IMG_NULL)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " error: tail boundary tag has invalid pNextSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\tbase=0x%x size=0x%x type=%s ref=%08X\n",
++ (IMG_UINT32) pBT->base,
++ pBT->uSize,
++ _BTType(pBT->type),
++ pBT->psMapping);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ *ppszStr = pszStr;
++ *pui32StrLen = ui32StrLen;
++
++ return PVRSRV_OK;
++}
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/resman.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/resman.c
+new file mode 100644
+index 0000000..3f7ff03
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/resman.c
+@@ -0,0 +1,717 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++
++#ifdef __linux__
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/sched.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++#include <linux/hardirq.h>
++#else
++#include <asm/hardirq.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#include <linux/semaphore.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++static DECLARE_MUTEX(lock);
++
++#define ACQUIRE_SYNC_OBJ do { \
++ if (in_interrupt()) { \
++ printk ("ISR cannot take RESMAN mutex\n"); \
++ BUG(); \
++ } \
++ else down (&lock); \
++} while (0)
++#define RELEASE_SYNC_OBJ up (&lock)
++
++#else
++
++#define ACQUIRE_SYNC_OBJ
++#define RELEASE_SYNC_OBJ
++
++#endif
++
++#define RESMAN_SIGNATURE 0x12345678
++
++typedef struct _RESMAN_ITEM_
++{
++#ifdef DEBUG
++ IMG_UINT32 ui32Signature;
++#endif
++ struct _RESMAN_ITEM_ **ppsThis;
++ struct _RESMAN_ITEM_ *psNext;
++
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32ResType;
++
++ IMG_PVOID pvParam;
++ IMG_UINT32 ui32Param;
++
++ RESMAN_FREE_FN pfnFreeResource;
++} RESMAN_ITEM;
++
++
++typedef struct _RESMAN_CONTEXT_
++{
++#ifdef DEBUG
++ IMG_UINT32 ui32Signature;
++#endif
++ struct _RESMAN_CONTEXT_ **ppsThis;
++ struct _RESMAN_CONTEXT_ *psNext;
++
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ RESMAN_ITEM *psResItemList;
++
++} RESMAN_CONTEXT;
++
++
++typedef struct
++{
++ RESMAN_CONTEXT *psContextList;
++
++} RESMAN_LIST, *PRESMAN_LIST;
++
++
++PRESMAN_LIST gpsResList = IMG_NULL;
++
++#include "lists.h"
++
++static IMPLEMENT_LIST_ANY_VA(RESMAN_ITEM)
++static IMPLEMENT_LIST_ANY_VA_2(RESMAN_ITEM, IMG_BOOL, IMG_FALSE)
++static IMPLEMENT_LIST_INSERT(RESMAN_ITEM)
++static IMPLEMENT_LIST_REMOVE(RESMAN_ITEM)
++
++static IMPLEMENT_LIST_REMOVE(RESMAN_CONTEXT)
++static IMPLEMENT_LIST_INSERT(RESMAN_CONTEXT)
++
++
++#define PRINT_RESLIST(x, y, z)
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback);
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ IMG_BOOL bExecuteCallback);
++
++
++#ifdef DEBUG
++ static IMG_VOID ValidateResList(PRESMAN_LIST psResList);
++ #define VALIDATERESLIST() ValidateResList(gpsResList)
++#else
++ #define VALIDATERESLIST()
++#endif
++
++
++
++
++
++
++PVRSRV_ERROR ResManInit(IMG_VOID)
++{
++ if (gpsResList == IMG_NULL)
++ {
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*gpsResList),
++ (IMG_VOID **)&gpsResList, IMG_NULL,
++ "Resource Manager List") != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ gpsResList->psContextList = IMG_NULL;
++
++
++ VALIDATERESLIST();
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID ResManDeInit(IMG_VOID)
++{
++ if (gpsResList != IMG_NULL)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), gpsResList, IMG_NULL);
++ gpsResList = IMG_NULL;
++ }
++}
++
++
++PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc,
++ PRESMAN_CONTEXT *phResManContext)
++{
++ PVRSRV_ERROR eError;
++ PRESMAN_CONTEXT psResManContext;
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext),
++ (IMG_VOID **)&psResManContext, IMG_NULL,
++ "Resource Manager Context");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVResManConnect: ERROR allocating new RESMAN context struct"));
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++ }
++
++#ifdef DEBUG
++ psResManContext->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psResManContext->psResItemList = IMG_NULL;
++ psResManContext->psPerProc = hPerProc;
++
++
++ List_RESMAN_CONTEXT_Insert(&gpsResList->psContextList, psResManContext);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ *phResManContext = psResManContext;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT psResManContext,
++ IMG_BOOL bKernelContext)
++{
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ PRINT_RESLIST(gpsResList, psResManContext, IMG_TRUE);
++
++
++
++ if (!bKernelContext)
++ {
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_EVENT_OBJECT, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_MODIFY_SYNC_OPS, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE);
++
++
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, IMG_TRUE);
++ }
++
++
++ PVR_ASSERT(psResManContext->psResItemList == IMG_NULL);
++
++
++ List_RESMAN_CONTEXT_Remove(psResManContext);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_CONTEXT), psResManContext, IMG_NULL);
++
++
++
++
++ VALIDATERESLIST();
++
++
++ PRINT_RESLIST(gpsResList, psResManContext, IMG_FALSE);
++
++
++ RELEASE_SYNC_OBJ;
++}
++
++
++PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ RESMAN_FREE_FN pfnFreeResource)
++{
++ PRESMAN_ITEM psNewResItem;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++ PVR_ASSERT(ui32ResType != 0);
++
++ if (psResManContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: invalid parameter - psResManContext"));
++ return (PRESMAN_ITEM) IMG_NULL;
++ }
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
++ "Context 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, "
++ "FreeFunc %08X",
++ psResManContext, ui32ResType, (IMG_UINT32)pvParam,
++ ui32Param, pfnFreeResource));
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(RESMAN_ITEM), (IMG_VOID **)&psNewResItem,
++ IMG_NULL,
++ "Resource Manager Item") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: "
++ "ERROR allocating new resource item"));
++
++
++ RELEASE_SYNC_OBJ;
++
++ return((PRESMAN_ITEM)IMG_NULL);
++ }
++
++
++#ifdef DEBUG
++ psNewResItem->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psNewResItem->ui32ResType = ui32ResType;
++ psNewResItem->pvParam = pvParam;
++ psNewResItem->ui32Param = ui32Param;
++ psNewResItem->pfnFreeResource = pfnFreeResource;
++ psNewResItem->ui32Flags = 0;
++
++
++ List_RESMAN_ITEM_Insert(&psResManContext->psResItemList, psNewResItem);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return(psNewResItem);
++}
++
++PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM *psResItem)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psResItem != IMG_NULL);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: NULL ptr - nothing to do"));
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: freeing resource at %08X", psResItem));
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ eError = FreeResourceByPtr(psResItem, IMG_TRUE);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return(eError);
++}
++
++
++PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++ "Context 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x",
++ psResManContext, ui32SearchCriteria, ui32ResType,
++ (IMG_UINT32)pvParam, ui32Param));
++
++
++ eError = FreeResourceByCriteria(psResManContext, ui32SearchCriteria,
++ ui32ResType, pvParam, ui32Param,
++ IMG_TRUE);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR ResManDissociateRes(RESMAN_ITEM *psResItem,
++ PRESMAN_CONTEXT psNewResManContext)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psResItem != IMG_NULL);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: invalid parameter - psResItem"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ if (psNewResManContext != IMG_NULL)
++ {
++
++ List_RESMAN_ITEM_Remove(psResItem);
++
++
++ List_RESMAN_ITEM_Insert(&psNewResManContext->psResItemList, psResItem);
++
++ }
++ else
++ {
++ eError = FreeResourceByPtr(psResItem, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: failed to free resource by pointer"));
++ return eError;
++ }
++ }
++
++ return eError;
++}
++
++IMG_BOOL ResManFindResourceByPtr_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
++{
++ RESMAN_ITEM *psItem;
++
++ psItem = va_arg(va, RESMAN_ITEM*);
++
++ return (IMG_BOOL)(psCurItem == psItem);
++}
++
++
++IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT psResManContext,
++ RESMAN_ITEM *psItem)
++{
++ PVRSRV_ERROR eResult;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++ PVR_ASSERT(psItem != IMG_NULL);
++
++ if ((psItem == IMG_NULL) || (psResManContext == IMG_NULL))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManFindResourceByPtr: invalid parameter"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++
++ ACQUIRE_SYNC_OBJ;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FindResourceByPtr: Resource Ctx 0x%x, Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psResManContext,
++ psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++ psItem->pfnFreeResource, psItem->ui32Flags));
++
++
++ if(List_RESMAN_ITEM_IMG_BOOL_Any_va(psResManContext->psResItemList,
++ ResManFindResourceByPtr_AnyVaCb,
++ psItem))
++ {
++ eResult = PVRSRV_OK;
++ }
++ else
++ {
++ eResult = PVRSRV_ERROR_NOT_OWNER;
++ }
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eResult;
++}
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem,
++ IMG_BOOL bExecuteCallback)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psItem != IMG_NULL);
++
++ if (psItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++ psItem->pfnFreeResource, psItem->ui32Flags));
++
++
++ List_RESMAN_ITEM_Remove(psItem);
++
++
++
++ RELEASE_SYNC_OBJ;
++
++
++ if (bExecuteCallback)
++ {
++ eError = psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR calling FreeResource function"));
++ }
++ }
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ if(OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_ITEM), psItem, IMG_NULL) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR freeing resource list item memory"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return(eError);
++}
++
++IMG_VOID* FreeResourceByCriteria_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
++{
++ IMG_UINT32 ui32SearchCriteria;
++ IMG_UINT32 ui32ResType;
++ IMG_PVOID pvParam;
++ IMG_UINT32 ui32Param;
++
++ ui32SearchCriteria = va_arg(va, IMG_UINT32);
++ ui32ResType = va_arg(va, IMG_UINT32);
++ pvParam = va_arg(va, IMG_PVOID);
++ ui32Param = va_arg(va, IMG_UINT32);
++
++
++ if(
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) == 0UL) ||
++ (psCurItem->ui32ResType == ui32ResType))
++ &&
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) == 0UL) ||
++ (psCurItem->pvParam == pvParam))
++ &&
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) == 0UL) ||
++ (psCurItem->ui32Param == ui32Param))
++ )
++ {
++ return psCurItem;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ IMG_BOOL bExecuteCallback)
++{
++ PRESMAN_ITEM psCurItem;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++
++
++ while((psCurItem = (PRESMAN_ITEM)
++ List_RESMAN_ITEM_Any_va(psResManContext->psResItemList,
++ FreeResourceByCriteria_AnyVaCb,
++ ui32SearchCriteria,
++ ui32ResType,
++ pvParam,
++ ui32Param)) != IMG_NULL
++ && eError == PVRSRV_OK)
++ {
++ eError = FreeResourceByPtr(psCurItem, bExecuteCallback);
++ }
++
++ return eError;
++}
++
++
++#ifdef DEBUG
++static IMG_VOID ValidateResList(PRESMAN_LIST psResList)
++{
++ PRESMAN_ITEM psCurItem, *ppsThisItem;
++ PRESMAN_CONTEXT psCurContext, *ppsThisContext;
++
++
++ if (psResList == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "ValidateResList: resman not initialised yet"));
++ return;
++ }
++
++ psCurContext = psResList->psContextList;
++ ppsThisContext = &psResList->psContextList;
++
++
++ while(psCurContext != IMG_NULL)
++ {
++
++ PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE);
++ if (psCurContext->ppsThis != ppsThisContext)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "psCC=%08X psCC->ppsThis=%08X psCC->psNext=%08X ppsTC=%08X",
++ psCurContext, psCurContext->ppsThis,
++ psCurContext->psNext, ppsThisContext));
++ PVR_ASSERT(psCurContext->ppsThis == ppsThisContext);
++ }
++
++
++ psCurItem = psCurContext->psResItemList;
++ ppsThisItem = &psCurContext->psResItemList;
++ while(psCurItem != IMG_NULL)
++ {
++
++ PVR_ASSERT(psCurItem->ui32Signature == RESMAN_SIGNATURE);
++ if (psCurItem->ppsThis != ppsThisItem)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X ppsThisItem=%08X",
++ psCurItem, psCurItem->ppsThis, psCurItem->psNext, ppsThisItem));
++ PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
++ }
++
++
++ ppsThisItem = &psCurItem->psNext;
++ psCurItem = psCurItem->psNext;
++ }
++
++
++ ppsThisContext = &psCurContext->psNext;
++ psCurContext = psCurContext->psNext;
++ }
++}
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c
+new file mode 100644
+index 0000000..7408661
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c
+@@ -0,0 +1,2776 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "mmu.h"
++#include "sgxconfig.h"
++
++#define UINT32_MAX_VALUE 0xFFFFFFFFUL
++
++#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT))
++
++typedef struct _MMU_PT_INFO_
++{
++
++ IMG_VOID *hPTPageOSMemHandle;
++ IMG_CPU_VIRTADDR PTPageCpuVAddr;
++ IMG_UINT32 ui32ValidPTECount;
++} MMU_PT_INFO;
++
++struct _MMU_CONTEXT_
++{
++
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ IMG_CPU_VIRTADDR pvPDCpuVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++
++ IMG_VOID *hPDOSMemHandle;
++
++
++ MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES];
++
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++#if defined(PDUMP)
++ IMG_UINT32 ui32PDumpMMUContextID;
++#endif
++
++ struct _MMU_CONTEXT_ *psNext;
++};
++
++struct _MMU_HEAP_
++{
++
++ MMU_CONTEXT *psMMUContext;
++
++
++
++
++ IMG_UINT32 ui32PDBaseIndex;
++
++ IMG_UINT32 ui32PageTableCount;
++
++ IMG_UINT32 ui32PTETotal;
++
++ IMG_UINT32 ui32PDEPageSizeCtrl;
++
++
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++ IMG_UINT32 ui32DataPageBitWidth;
++
++ IMG_UINT32 ui32DataPageMask;
++
++
++
++
++ IMG_UINT32 ui32PTShift;
++
++ IMG_UINT32 ui32PTBitWidth;
++
++ IMG_UINT32 ui32PTMask;
++
++ IMG_UINT32 ui32PTSize;
++
++ IMG_UINT32 ui32PTECount;
++
++
++
++
++ IMG_UINT32 ui32PDShift;
++
++ IMG_UINT32 ui32PDBitWidth;
++
++ IMG_UINT32 ui32PDMask;
++
++
++
++ RA_ARENA *psVMArena;
++ DEV_ARENA_DESCRIPTOR *psDevArena;
++};
++
++
++
++#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
++#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF
++#endif
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_BOOL bForUnmap,
++ IMG_HANDLE hUniqueTag);
++#endif
++
++#define PAGE_TEST 0
++#if PAGE_TEST
++static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr);
++#endif
++
++#define PT_DEBUG 0
++#if PT_DEBUG
++static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
++{
++ IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
++ IMG_UINT32 i;
++
++
++ for(i = 0; i < 1024; i += 8)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "%.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx\n",
++ p[i + 0], p[i + 1], p[i + 2], p[i + 3],
++ p[i + 4], p[i + 5], p[i + 6], p[i + 7]));
++ }
++}
++
++static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
++{
++ IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr;
++ IMG_UINT32 i, ui32Count = 0;
++
++
++ for(i = 0; i < 1024; i++)
++ if(p[i] & SGX_MMU_PTE_VALID)
++ ui32Count++;
++
++ if(psPTInfoList->ui32ValidPTECount != ui32Count)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "ui32ValidPTECount: %lu ui32Count: %lu\n",
++ psPTInfoList->ui32ValidPTECount, ui32Count));
++ DumpPT(psPTInfoList);
++ BUG();
++ }
++}
++#else
++static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
++{
++ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
++}
++
++static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
++{
++ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
++}
++#endif
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 ui32RegVal;
++ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++
++
++
++ ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
++
++ OSWriteHWReg(pvRegsBaseKM,
++ EUR_CR_BIF_CTRL,
++ ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++
++ PDUMPREG(EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++}
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 ui32RegVal;
++ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++
++
++
++
++ OSWriteHWReg(pvRegsBaseKM,
++ EUR_CR_BIF_CTRL,
++ ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++
++ PDUMPREG(EUR_CR_BIF_CTRL, 0);
++}
++#endif
++
++
++IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ #if defined(SGX_FEATURE_MP)
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_SLCACHE;
++ #else
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ #endif
++}
++
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(psDevInfo);
++ #endif
++}
++
++
++IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(psDevInfo);
++ #endif
++}
++
++
++static IMG_BOOL
++_AllocPageTableMemory (MMU_HEAP *pMMUHeap,
++ MMU_PT_INFO *psPTInfoList,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++
++
++ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++ {
++
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ pMMUHeap->ui32PTSize,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr,
++ &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
++ return IMG_FALSE;
++ }
++
++
++ if(psPTInfoList->PTPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->PTPageCpuVAddr);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0);
++ }
++
++ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++
++
++
++
++ if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
++ return IMG_FALSE;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++
++ psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psPTInfoList->hPTPageOSMemHandle);
++ if(!psPTInfoList->PTPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables"));
++ return IMG_FALSE;
++ }
++
++
++ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ #if PAGE_TEST
++ PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
++ #endif
++ }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ {
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 i;
++
++ pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
++
++ for(i=0; i<pMMUHeap->ui32PTECount; i++)
++ {
++ pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++ }
++ }
++#else
++
++ OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
++#endif
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ *psDevPAddr = sDevPAddr;
++
++ return IMG_TRUE;
++}
++
++
++static IMG_VOID
++_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList)
++{
++
++
++
++
++ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++ {
++
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ pMMUHeap->ui32PTSize,
++ psPTInfoList->PTPageCpuVAddr,
++ psPTInfoList->hPTPageOSMemHandle);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->PTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);
++
++
++
++ OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psPTInfoList->hPTPageOSMemHandle);
++
++
++
++
++ RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++}
++
++
++
++static IMG_VOID
++_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT)
++{
++ IMG_UINT32 *pui32PDEntry;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ SYS_DATA *psSysData;
++ MMU_PT_INFO **ppsPTInfoList;
++
++ SysAcquireData(&psSysData);
++
++
++ ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++#if PT_DEBUG
++ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0)
++ {
++ DumpPT(ppsPTInfoList[ui32PTIndex]);
++
++ }
++#endif
++
++
++ PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
++ }
++
++
++ PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
++ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
++ {
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
++ }
++
++ switch(pMMUHeap->psDevArena->DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED :
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++ {
++
++ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++ while(psMMUContext)
++ {
++
++ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++#else
++
++ if(bOSFreePT)
++ {
++ pui32PDEntry[ui32PTIndex] = 0;
++ }
++#endif
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ psMMUContext = psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT :
++ case DEVICE_MEMORY_HEAP_KERNEL :
++ {
++
++ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++#else
++
++ if(bOSFreePT)
++ {
++ pui32PDEntry[ui32PTIndex] = 0;
++ }
++#endif
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type"));
++ return;
++ }
++ }
++
++
++ if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
++ {
++ if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
++ {
++ IMG_PUINT32 pui32Tmp;
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
++
++
++ for(i=0;
++ (i<pMMUHeap->ui32PTETotal) && (i<pMMUHeap->ui32PTECount);
++ i++)
++ {
++ pui32Tmp[i] = 0;
++ }
++
++
++
++ if(bOSFreePT)
++ {
++ _FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]);
++ }
++
++
++
++
++ pMMUHeap->ui32PTETotal -= i;
++ }
++ else
++ {
++
++ pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
++ }
++
++ if(bOSFreePT)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MMU_PT_INFO),
++ ppsPTInfoList[ui32PTIndex],
++ IMG_NULL);
++ ppsPTInfoList[ui32PTIndex] = IMG_NULL;
++ }
++ }
++ else
++ {
++
++ pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
++ }
++
++ PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
++}
++
++static IMG_VOID
++_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
++{
++ IMG_UINT32 i;
++
++ for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
++ {
++ _DeferredFreePageTable(pMMUHeap, i, IMG_TRUE);
++ }
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++}
++
++
++static IMG_BOOL
++_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++ IMG_UINT32 ui32PageTableCount;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 i;
++ IMG_UINT32 *pui32PDEntry;
++ MMU_PT_INFO **ppsPTInfoList;
++ SYS_DATA *psSysData;
++ IMG_DEV_VIRTADDR sHighDevVAddr;
++
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
++ PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
++#endif
++
++
++ SysAcquireData(&psSysData);
++
++
++ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++
++ if((UINT32_MAX_VALUE - DevVAddr.uiAddr)
++ < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask))
++ {
++
++ sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
++ }
++ else
++ {
++ sHighDevVAddr.uiAddr = DevVAddr.uiAddr
++ + ui32Size
++ + pMMUHeap->ui32DataPageMask
++ + pMMUHeap->ui32PTMask;
++ }
++
++ ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ ui32PageTableCount -= ui32PDIndex;
++
++
++ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PageTableCount);
++ PDUMPCOMMENT("Page directory mods (page count == %08X)", ui32PageTableCount);
++
++
++ for(i=0; i<ui32PageTableCount; i++)
++ {
++ if(ppsPTInfoList[i] == IMG_NULL)
++ {
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_PT_INFO),
++ (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL,
++ "MMU Page Table Info");
++ if (ppsPTInfoList[i] == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
++ return IMG_FALSE;
++ }
++ OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
++ }
++
++ if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
++ && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
++ {
++ IMG_DEV_PHYADDR sDevPAddr;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 j;
++#else
++
++ PVR_ASSERT(pui32PDEntry[i] == 0);
++#endif
++
++ if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
++ return IMG_FALSE;
++ }
++
++ switch(pMMUHeap->psDevArena->DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED :
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++ {
++
++ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++ while(psMMUContext)
++ {
++
++ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++
++ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | pMMUHeap->ui32PDEPageSizeCtrl
++ | SGX_MMU_PDE_VALID;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ psMMUContext = psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT :
++ case DEVICE_MEMORY_HEAP_KERNEL :
++ {
++
++ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | pMMUHeap->ui32PDEPageSizeCtrl
++ | SGX_MMU_PDE_VALID;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
++ return IMG_FALSE;
++ }
++ }
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++
++
++
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++#endif
++ }
++ else
++ {
++
++ PVR_ASSERT(pui32PDEntry[i] != 0);
++ }
++ }
++
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
++ #endif
++
++ return IMG_TRUE;
++}
++
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 i;
++ IMG_CPU_VIRTADDR pvPDCpuVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ MMU_CONTEXT *psMMUContext;
++ IMG_HANDLE hPDOSMemHandle;
++ SYS_DATA *psSysData;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
++
++ SysAcquireData(&psSysData);
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_CONTEXT),
++ (IMG_VOID **)&psMMUContext, IMG_NULL,
++ "MMU Context");
++ if (psMMUContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
++
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ psMMUContext->psDevInfo = psDevInfo;
++
++
++ psMMUContext->psDeviceNode = psDeviceNode;
++
++
++ if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
++ {
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &pvPDCpuVAddr,
++ &hPDOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(pvPDCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
++ }
++ sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ #if PAGE_TEST
++ PageTest(pvPDCpuVAddr, sPDDevPAddr);
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &psDevInfo->pvDummyPTPageCpuVAddr,
++ &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(psDevInfo->pvDummyPTPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
++ }
++ psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &psDevInfo->pvDummyDataPageCpuVAddr,
++ &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(psDevInfo->pvDummyDataPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++ }
++ else
++ {
++ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
++ }
++ psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++#endif
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPDOSMemHandle);
++ if(!pvPDCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ #if PAGE_TEST
++ PageTest(pvPDCpuVAddr, sPDDevPAddr);
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psDevInfo->hDummyPTPageOSMemHandle);
++ if(!psDevInfo->pvDummyPTPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psDevInfo->hDummyDataPageOSMemHandle);
++ if(!psDevInfo->pvDummyDataPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++#endif
++ }
++
++
++ PDUMPCOMMENT("Alloc page directory");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(psMMUContext);
++#endif
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++
++ if (pvPDCpuVAddr)
++ {
++ pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++ pui32Tmp[i] = (psDevInfo->sDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++ }
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++
++
++ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
++ for(i=0; i<SGX_MMU_PT_SIZE; i++)
++ {
++ pui32Tmp[i] = (psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++ }
++
++ PDUMPCOMMENT("Dummy Page table contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++
++ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
++ for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
++ {
++ pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
++ }
++
++ PDUMPCOMMENT("Dummy Data Page contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ }
++#else
++
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++
++ pui32Tmp[i] = 0;
++ }
++#endif
++
++
++ PDUMPCOMMENT("Page directory contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++#if defined(PDUMP)
++ if(PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX,
++ "SGXMEM",
++ &psMMUContext->ui32PDumpMMUContextID,
++ 2,
++ PDUMP_PT_UNIQUETAG,
++ pvPDCpuVAddr) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++
++ psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
++ psMMUContext->sPDDevPAddr = sPDDevPAddr;
++ psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
++
++
++ *ppsMMUContext = psMMUContext;
++
++
++ *psPDDevPAddr = sPDDevPAddr;
++
++
++ psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++ psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(psMMUContext);
++#endif
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 *pui32Tmp, i;
++ SYS_DATA *psSysData;
++ MMU_CONTEXT **ppsMMUContext;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
++ MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++#endif
++
++ SysAcquireData(&psSysData);
++
++
++ PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, "SGXMEM", psMMUContext->ui32PDumpMMUContextID, 2);
++
++
++ PDUMPCOMMENT("Free page directory");
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#endif
++
++ pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;
++
++
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++
++ pui32Tmp[i] = 0;
++ }
++
++
++
++
++
++ if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psMMUContext->pvPDCpuVAddr,
++ psMMUContext->hPDOSMemHandle);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psMMUContextList->psNext)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pvDummyPTPageCpuVAddr,
++ psDevInfo->hDummyPTPageOSMemHandle);
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pvDummyDataPageCpuVAddr,
++ psDevInfo->hDummyDataPageOSMemHandle);
++ }
++#endif
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psMMUContext->hPDOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psMMUContextList->psNext)
++ {
++
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hDummyPTPageOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hDummyDataPageOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++#endif
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
++
++
++ ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
++ while(*ppsMMUContext)
++ {
++ if(*ppsMMUContext == psMMUContext)
++ {
++
++ *ppsMMUContext = psMMUContext->psNext;
++ break;
++ }
++
++
++ ppsMMUContext = &((*ppsMMUContext)->psNext);
++ }
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
++
++}
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
++{
++ IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
++ IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
++ IMG_UINT32 ui32PDEntry;
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++#endif
++
++
++ pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++ pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++
++
++ PDUMPCOMMENT("Page directory shared heap range copy");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(psMMUContext);
++#endif
++
++ for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++)
++ {
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
++#endif
++
++
++ pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
++ if (pui32PDCpuVAddr[ui32PDEntry])
++ {
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ bInvalidateDirectoryCache = IMG_TRUE;
++#endif
++ }
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(psMMUContext);
++#endif
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ if (bInvalidateDirectoryCache)
++ {
++
++
++
++
++ MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
++ }
++#endif
++}
++
++
++static IMG_VOID
++MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_DEV_VIRTADDR sTmpDevVAddr;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32Tmp;
++ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for(i=0; i<ui32PageCount; i++)
++ {
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++
++ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
++
++
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++
++
++ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
++
++
++ continue;
++ }
++
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ if (!pui32Tmp)
++ {
++ continue;
++ }
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++ {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++ }
++
++
++ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++#else
++
++ pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++ CheckPT(ppsPTInfoList[0]);
++ }
++
++
++
++ if (ppsPTInfoList[0] && ppsPTInfoList[0]->ui32ValidPTECount == 0)
++ {
++ _DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
++ bInvalidateDirectoryCache = IMG_TRUE;
++ }
++
++
++ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
++ }
++
++ if(bInvalidateDirectoryCache)
++ {
++ MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
++ }
++ else
++ {
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(psMMUHeap,
++ sDevVAddr,
++ psMMUHeap->ui32DataPageSize * ui32PageCount,
++ IMG_TRUE,
++ hUniqueTag);
++#endif
++}
++
++
++IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
++ IMG_SIZE_T ui32Start,
++ IMG_SIZE_T ui32End,
++ IMG_HANDLE hUniqueTag)
++{
++ MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
++ IMG_DEV_VIRTADDR Start;
++
++ Start.uiAddr = ui32Start;
++
++ MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (ui32End - ui32Start) >> pMMUHeap->ui32PTShift, hUniqueTag);
++}
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++ DEV_ARENA_DESCRIPTOR *psDevArena,
++ RA_ARENA **ppsVMArena)
++{
++ MMU_HEAP *pMMUHeap;
++ IMG_UINT32 ui32ScaleSize;
++
++ PVR_ASSERT (psDevArena != IMG_NULL);
++
++ if (psDevArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_HEAP),
++ (IMG_VOID **)&pMMUHeap, IMG_NULL,
++ "MMU Heap");
++ if (pMMUHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
++ return IMG_NULL;
++ }
++
++ pMMUHeap->psMMUContext = psMMUContext;
++ pMMUHeap->psDevArena = psDevArena;
++
++
++
++
++ switch(pMMUHeap->psDevArena->ui32DataPageSize)
++ {
++ case 0x1000:
++ ui32ScaleSize = 0;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K;
++ break;
++#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
++ case 0x4000:
++ ui32ScaleSize = 2;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K;
++ break;
++ case 0x10000:
++ ui32ScaleSize = 4;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K;
++ break;
++ case 0x40000:
++ ui32ScaleSize = 6;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K;
++ break;
++ case 0x100000:
++ ui32ScaleSize = 8;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M;
++ break;
++ case 0x400000:
++ ui32ScaleSize = 10;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M;
++ break;
++#endif
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size"));
++ goto ErrorFreeHeap;
++ }
++
++
++ pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize;
++ pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;
++ pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1;
++
++ pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth;
++ pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;
++ pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<<ui32ScaleSize);
++ pMMUHeap->ui32PTSize = (1UL<<pMMUHeap->ui32PTBitWidth) * sizeof(IMG_UINT32);
++
++ if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32))
++ {
++ pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32);
++ }
++ pMMUHeap->ui32PTECount = pMMUHeap->ui32PTSize >> 2;
++
++
++ pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift;
++ pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth;
++ pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE));
++
++
++
++
++
++ if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask))
++ {
++
++
++
++ PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr
++ & (pMMUHeap->ui32DataPageMask
++ | pMMUHeap->ui32PTMask)) == 0);
++ }
++
++
++ pMMUHeap->ui32PTETotal = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift;
++
++
++ pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift;
++
++
++
++
++ pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotal + pMMUHeap->ui32PTECount - 1)
++ >> pMMUHeap->ui32PTBitWidth;
++
++
++ pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
++ psDevArena->BaseDevVAddr.uiAddr,
++ psDevArena->ui32Size,
++ IMG_NULL,
++ pMMUHeap->ui32DataPageSize,
++ IMG_NULL,
++ IMG_NULL,
++ MMU_FreePageTables,
++ pMMUHeap);
++
++ if (pMMUHeap->psVMArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
++ goto ErrorFreePagetables;
++ }
++
++#if 0
++
++ if(psDevArena->ui32HeapID == SGX_TILED_HEAP_ID)
++ {
++ IMG_UINT32 ui32RegVal;
++ IMG_UINT32 ui32XTileStride;
++
++
++
++
++
++
++ ui32XTileStride = 2;
++
++ ui32RegVal = (EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK
++ & ((psDevArena->BaseDevVAddr.uiAddr>>20)
++ << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT))
++ |(EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK
++ & (((psDevArena->BaseDevVAddr.uiAddr+psDevArena->ui32Size)>>20)
++ << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT))
++ |(EUR_CR_BIF_TILE0_CFG_MASK
++ & (((ui32XTileStride<<1)|8) << EUR_CR_BIF_TILE0_CFG_SHIFT));
++ PDUMPREG(EUR_CR_BIF_TILE0, ui32RegVal);
++ }
++#endif
++
++
++
++ *ppsVMArena = pMMUHeap->psVMArena;
++
++ return pMMUHeap;
++
++
++ErrorFreePagetables:
++ _DeferredFreePageTables (pMMUHeap);
++
++ErrorFreeHeap:
++ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
++
++
++ return IMG_NULL;
++}
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMUHeap)
++{
++ if (pMMUHeap != IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));
++
++ if(pMMUHeap->psVMArena)
++ {
++ RA_Delete (pMMUHeap->psVMArena);
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pMMUHeap->psMMUContext);
++#endif
++ _DeferredFreePageTables (pMMUHeap);
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pMMUHeap->psMMUContext);
++#endif
++
++ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
++
++ }
++}
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMUHeap,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *psDevVAddr)
++{
++ IMG_BOOL bStatus;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
++ uSize, uFlags, uDevVAddrAlignment));
++
++
++
++ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++ {
++ IMG_UINTPTR_T uiAddr;
++
++ bStatus = RA_Alloc (pMMUHeap->psVMArena,
++ uSize,
++ pActualSize,
++ IMG_NULL,
++ 0,
++ uDevVAddrAlignment,
++ 0,
++ &uiAddr);
++ if(!bStatus)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
++ return bStatus;
++ }
++
++ psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr);
++ }
++
++ #ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pMMUHeap->psMMUContext);
++ #endif
++
++
++ bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
++
++ #ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pMMUHeap->psMMUContext);
++ #endif
++
++ if (!bStatus)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
++ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++ {
++
++ RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
++ }
++ }
++
++ return bStatus;
++}
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++ if (pMMUHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
++ return;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap, DevVAddr.uiAddr));
++
++ if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
++ (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
++ {
++ RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
++ return;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't find DevVAddr %08X in a DevArena",DevVAddr.uiAddr));
++}
++
++IMG_VOID
++MMU_Enable (MMU_HEAP *pMMUHeap)
++{
++ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++
++}
++
++IMG_VOID
++MMU_Disable (MMU_HEAP *pMMUHeap)
++{
++ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++
++}
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_BOOL bForUnmap,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 ui32NumPTEntries;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32PTEntry;
++
++ MMU_PT_INFO **ppsPTInfoList;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTDumpCount;
++
++
++ ui32NumPTEntries = (uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift;
++
++
++ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++
++ ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
++
++
++ while(ui32NumPTEntries > 0)
++ {
++ MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
++
++ if(ui32NumPTEntries <= pMMUHeap->ui32PTECount - ui32PTIndex)
++ {
++ ui32PTDumpCount = ui32NumPTEntries;
++ }
++ else
++ {
++ ui32PTDumpCount = pMMUHeap->ui32PTECount - ui32PTIndex;
++ }
++
++ if (psPTInfo)
++ {
++ pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr;
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
++ }
++
++
++ ui32NumPTEntries -= ui32PTDumpCount;
++
++
++ ui32PTIndex = 0;
++ }
++
++ PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
++}
++#endif
++
++
++static IMG_VOID
++MMU_MapPage (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_DEV_PHYADDR DevPAddr,
++ IMG_UINT32 ui32MemFlags)
++{
++ IMG_UINT32 ui32Index;
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 ui32MMUFlags = 0;
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++
++
++ if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
++ {
++
++ ui32MMUFlags = 0;
++ }
++ else if(PVRSRV_MEM_READ & ui32MemFlags)
++ {
++
++ ui32MMUFlags |= SGX_MMU_PTE_READONLY;
++ }
++ else if(PVRSRV_MEM_WRITE & ui32MemFlags)
++ {
++
++ ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
++ }
++
++
++ if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
++ {
++ ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
++ }
++
++#if !defined(FIX_HW_BRN_25503)
++
++ if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
++ {
++ ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
++ }
++#endif
++
++
++
++
++
++ ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08lX PDIdx:%u PTIdx:%u",
++ DevVAddr.uiAddr,
++ DevVAddr.uiAddr >> pMMUHeap->ui32PDShift,
++ ui32Index ));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08lX", pui32Tmp[ui32Index]));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x%08lX", DevPAddr.uiAddr));
++ }
++
++ PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
++#endif
++
++
++ ppsPTInfoList[0]->ui32ValidPTECount++;
++
++
++ pui32Tmp[ui32Index] = ((DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT))
++ | SGX_MMU_PTE_VALID
++ | ui32MMUFlags;
++
++ CheckPT(ppsPTInfoList[0]);
++}
++
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++#if defined(PDUMP)
++ IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ IMG_UINT32 uCount, i;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#else
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ for (i=0, uCount=0; uCount<uSize; i++, uCount+=pMMUHeap->ui32DataPageSize)
++ {
++ IMG_SYS_PHYADDR sSysAddr;
++
++ sSysAddr = psSysAddr[i];
++
++
++
++ PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
++
++ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
++ DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++#if defined(PDUMP)
++ IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ IMG_UINT32 uCount;
++ IMG_UINT32 ui32VAdvance;
++ IMG_UINT32 ui32PAdvance;
++
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapPages: mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
++ pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize));
++
++
++ ui32VAdvance = pMMUHeap->ui32DataPageSize;
++ ui32PAdvance = pMMUHeap->ui32DataPageSize;
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#else
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++#if defined(FIX_HW_BRN_23281)
++ if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++ ui32VAdvance *= 2;
++ }
++#endif
++
++
++
++
++ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++ {
++ ui32PAdvance = 0;
++ }
++
++ for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
++ {
++ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += ui32VAdvance;
++ DevPAddr.uiAddr += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uByteSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR *pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 i;
++ IMG_UINT32 uOffset = 0;
++ IMG_DEV_VIRTADDR MapDevVAddr;
++ IMG_UINT32 ui32VAdvance;
++ IMG_UINT32 ui32PAdvance;
++
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapShadow: %08X, 0x%x, %08X",
++ MapBaseDevVAddr.uiAddr,
++ uByteSize,
++ CpuVAddr));
++
++
++ ui32VAdvance = pMMUHeap->ui32DataPageSize;
++ ui32PAdvance = pMMUHeap->ui32DataPageSize;
++
++
++ PVR_ASSERT(((IMG_UINT32)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++ PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0);
++ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
++
++#if defined(FIX_HW_BRN_23281)
++ if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++ ui32VAdvance *= 2;
++ }
++#endif
++
++
++
++
++ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++ {
++ ui32PAdvance = 0;
++ }
++
++
++ MapDevVAddr = MapBaseDevVAddr;
++ for (i=0; i<uByteSize; i+=ui32VAdvance)
++ {
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ if(CpuVAddr)
++ {
++ CpuPAddr = OSMapLinToCPUPhys ((IMG_VOID *)((IMG_UINT32)CpuVAddr + uOffset));
++ }
++ else
++ {
++ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
++ }
++ DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
++ uOffset,
++ (IMG_UINTPTR_T)CpuVAddr + uOffset,
++ CpuPAddr.uiAddr,
++ MapDevVAddr.uiAddr,
++ DevPAddr.uiAddr));
++
++ MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
++
++
++ MapDevVAddr.uiAddr += ui32VAdvance;
++ uOffset += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *psMMUHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 uPageSize = psMMUHeap->ui32DataPageSize;
++ IMG_DEV_VIRTADDR sTmpDevVAddr;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32Tmp;
++
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for(i=0; i<ui32PageCount; i++)
++ {
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++
++ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
++
++
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr,
++ sDevVAddr.uiAddr,
++ i,
++ ui32PDIndex,
++ ui32PTIndex));
++
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++
++
++ continue;
++ }
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++ {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr,
++ sDevVAddr.uiAddr,
++ i,
++ ui32PDIndex,
++ ui32PTIndex));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08lX", pui32Tmp[ui32PTIndex]));
++ }
++
++
++ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++#else
++
++ pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++ }
++
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
++#endif
++}
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
++{
++ IMG_UINT32 *pui32PageTable;
++ IMG_UINT32 ui32Index;
++ IMG_DEV_PHYADDR sDevPAddr;
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
++ sDevPAddr.uiAddr = 0;
++ return sDevPAddr;
++ }
++
++
++ ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ sDevPAddr.uiAddr = pui32PageTable[ui32Index];
++
++
++ sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT);
++
++
++ sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT;
++
++ return sDevPAddr;
++}
++
++
++IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
++{
++ return (pMMUContext->sPDDevPAddr);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEV_PHYADDR *pDevPAddr,
++ IMG_CPU_PHYADDR *pCpuPAddr)
++{
++ MMU_HEAP *pMMUHeap;
++ IMG_DEV_PHYADDR DevPAddr;
++
++
++
++ pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
++
++ DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
++ pCpuPAddr->uiAddr = DevPAddr.uiAddr;
++ pDevPAddr->uiAddr = DevPAddr.uiAddr;
++
++ return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++ if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hOSMemHandle = IMG_NULL;
++ IMG_BYTE *pui8MemBlock = IMG_NULL;
++ IMG_SYS_PHYADDR sMemBlockSysPAddr;
++ IMG_CPU_PHYADDR sMemBlockCpuPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ 3 * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui8MemBlock,
++ &hOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui8MemBlock)
++ {
++ sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
++ }
++ else
++ {
++
++ sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
++ }
++ }
++ else
++ {
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ 3 * SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
++ pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 3,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hOSMemHandle);
++ if(!pui8MemBlock)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++
++ psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
++ psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
++ psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++ psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++
++
++ psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
++ psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
++
++
++ OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
++ OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
++
++ OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_SYS_PHYADDR sPDSysPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ 3 * SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BIFResetPD,
++ psDevInfo->hBIFResetPDOSMemHandle);
++ }
++ else
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
++ 3 * SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBIFResetPDOSMemHandle);
++
++ sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
++ RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
++ }
++}
++
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
++ IMG_HANDLE hPDPageOSMemHandle = IMG_NULL;
++ IMG_UINT32 *pui32PD = IMG_NULL;
++ IMG_UINT32 *pui32PT = IMG_NULL;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sPTDevPAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PT,
++ &hPTPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PD,
++ &hPDPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui32PT)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PT);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
++ }
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ if(pui32PD)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PD);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPDPageOSMemHandle, 0);
++ }
++ sPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ }
++ else
++ {
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE * 2,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(psDevInfo->sBRN22997SysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(psDevInfo->sBRN22997SysPAddr);
++ pui32PT = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 2,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPTPageOSMemHandle);
++ if(!pui32PT)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ pui32PD = pui32PT + 1024;
++ sPDDevPAddr.uiAddr = sPTDevPAddr.uiAddr + 4096;
++ }
++
++ OSMemSet(pui32PD, 0, SGX_MMU_PAGE_SIZE);
++ OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ psDevInfo->hBRN22997PTPageOSMemHandle = hPTPageOSMemHandle;
++ psDevInfo->hBRN22997PDPageOSMemHandle = hPDPageOSMemHandle;
++ psDevInfo->sBRN22997PTDevPAddr = sPTDevPAddr;
++ psDevInfo->sBRN22997PDDevPAddr = sPDDevPAddr;
++ psDevInfo->pui32BRN22997PD = pui32PD;
++ psDevInfo->pui32BRN22997PT = pui32PT;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ IMG_UINT32 *pui32PD = psDevInfo->pui32BRN22997PD;
++ IMG_UINT32 *pui32PT = psDevInfo->pui32BRN22997PT;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ volatile IMG_UINT32 *pui32HostPort;
++ IMG_UINT32 ui32BIFCtrl;
++
++
++
++
++ pui32HostPort = (volatile IMG_UINT32*)(((IMG_UINT8*)psDevInfo->pvHostPortBaseKM) + SYS_SGX_HOSTPORT_BRN23030_OFFSET);
++
++
++ sDevVAddr.uiAddr = SYS_SGX_HOSTPORT_BASE_DEVVADDR + SYS_SGX_HOSTPORT_BRN23030_OFFSET;
++
++ ui32PDIndex = (sDevVAddr.uiAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (sDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ pui32PD[ui32PDIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_VALID;
++
++ pui32PT[ui32PTIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
++ psDevInfo->sBRN22997PDDevPAddr.uiAddr);
++ PDUMPPDREG(EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sBRN22997PDDevPAddr.uiAddr, PDUMP_PD_UNIQUETAG);
++
++
++ ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl);
++
++
++ if (pui32HostPort)
++ {
++
++ IMG_UINT32 ui32Tmp;
++ ui32Tmp = *pui32HostPort;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Host Port not present for BRN22997 workaround"));
++ }
++
++
++
++
++
++
++
++ PDUMPCOMMENT("RDW :SGXMEM:v4:%08lX\r\n", sDevVAddr.uiAddr);
++
++ PDUMPCOMMENT("SAB :SGXMEM:v4:%08lX 4 0 hostport.bin", sDevVAddr.uiAddr);
++
++
++ pui32PD[ui32PDIndex] = 0;
++ pui32PT[ui32PTIndex] = 0;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl);
++}
++
++
++IMG_VOID WorkaroundBRN22997Free(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32BRN22997PD, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32BRN22997PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ if (psDevInfo->pui32BRN22997PD != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BRN22997PD,
++ psDevInfo->hBRN22997PDPageOSMemHandle);
++ }
++
++ if (psDevInfo->pui32BRN22997PT != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BRN22997PT,
++ psDevInfo->hBRN22997PTPageOSMemHandle);
++ }
++ }
++ else
++ {
++ if (psDevInfo->pui32BRN22997PT != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32BRN22997PT,
++ SGX_MMU_PAGE_SIZE * 2,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBRN22997PTPageOSMemHandle);
++
++
++ RA_Free(psLocalDevMemArena, psDevInfo->sBRN22997SysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++}
++#endif
++
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
++ IMG_UINT32 *pui32PD;
++ IMG_UINT32 *pui32PT = IMG_NULL;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sPTDevPAddr;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ pui32PD = (IMG_UINT32*)psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->pvPDCpuVAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PT,
++ &hPTPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui32PT)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PT);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
++ }
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ pui32PT = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPTPageOSMemHandle);
++ if(!pui32PT)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++
++ psDevInfo->sExtSystemCacheRegsPTSysPAddr = sSysPAddr;
++ }
++
++ OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
++
++ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ pui32PD[ui32PDIndex] = (sPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_VALID;
++
++ pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++
++ psDevInfo->pui32ExtSystemCacheRegsPT = pui32PT;
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle = hPTPageOSMemHandle;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 *pui32PD;
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ pui32PD = (IMG_UINT32*)psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->pvPDCpuVAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ pui32PD[ui32PDIndex] = 0;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32ExtSystemCacheRegsPT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ if (psDevInfo->pui32ExtSystemCacheRegsPT != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32ExtSystemCacheRegsPT,
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle);
++ }
++ }
++ else
++ {
++ if (psDevInfo->pui32ExtSystemCacheRegsPT != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32ExtSystemCacheRegsPT,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle);
++
++ RA_Free(psLocalDevMemArena, psDevInfo->sExtSystemCacheRegsPTSysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++#endif
++
++
++#if PAGE_TEST
++static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr)
++{
++ volatile IMG_UINT32 ui32WriteData;
++ volatile IMG_UINT32 ui32ReadData;
++ volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
++ IMG_INT n;
++ IMG_BOOL bOK=IMG_TRUE;
++
++ ui32WriteData = 0xffffffff;
++
++ for (n=0; n<1024; n++)
++ {
++ pMem32[n] = ui32WriteData;
++ ui32ReadData = pMem32[n];
++
++ if (ui32WriteData != ui32ReadData)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++ PVR_DBG_BREAK;
++ bOK = IMG_FALSE;
++ }
++ }
++
++ ui32WriteData = 0;
++
++ for (n=0; n<1024; n++)
++ {
++ pMem32[n] = ui32WriteData;
++ ui32ReadData = pMem32[n];
++
++ if (ui32WriteData != ui32ReadData)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++ PVR_DBG_BREAK;
++ bOK = IMG_FALSE;
++ }
++ }
++
++ if (bOK)
++ {
++ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr));
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr));
++ }
++}
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h
+new file mode 100644
+index 0000000..7313769
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _MMU_H_
++#define _MMU_H_
++
++#include "sgxinfokm.h"
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap);
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++ DEV_ARENA_DESCRIPTOR *psDevArena,
++ RA_ARENA **ppsVMArena);
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMU);
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMU,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_UINT32 ui32Size);
++
++IMG_VOID
++MMU_Enable (MMU_HEAP *pMMU);
++
++IMG_VOID
++MMU_Disable (MMU_HEAP *pMMU);
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR devVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP * pMMU,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR * pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR dev_vaddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext);
++
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext);
++#endif
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID WorkaroundBRN22997Free(PVRSRV_SGXDEV_INFO *psDevInfo);
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c
+new file mode 100644
+index 0000000..afeb78a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c
+@@ -0,0 +1,458 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pvr_bridge_km.h"
++#include "pdump_km.h"
++#include "sgxutils.h"
++
++#ifndef __linux__
++#pragma message("TODO: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE")
++#endif
++
++#include "lists.h"
++
++static IMPLEMENT_LIST_INSERT(PVRSRV_STUB_PBDESC)
++static IMPLEMENT_LIST_REMOVE(PVRSRV_STUB_PBDESC)
++
++static PRESMAN_ITEM psResItemCreateSharedPB = IMG_NULL;
++static PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB = IMG_NULL;
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++
++IMG_EXPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ IMG_BOOL bLockOnFailure,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc;
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos=IMG_NULL;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ PVRSRV_ERROR eError;
++
++ psSGXDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != IMG_NULL)
++ {
++ IMG_UINT32 i;
++ PRESMAN_ITEM psResItem;
++
++ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *)
++ * psStubPBDesc->ui32SubKernelMemInfosCount,
++ (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos,
++ IMG_NULL,
++ "Array of Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed"));
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ExitNotFound;
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDesc->ui32SubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos,
++ 0);
++
++
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++
++ *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo;
++ *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo;
++ *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo;
++ *ppsHWBlockKernelMemInfo = psStubPBDesc->psHWBlockKernelMemInfo;
++
++ *ui32SharedPBDescSubKernelMemInfosCount =
++ psStubPBDesc->ui32SubKernelMemInfosCount;
++
++ *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos;
++
++ for(i=0; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
++ {
++ ppsSharedPBDescSubKernelMemInfos[i] =
++ psStubPBDesc->ppsSubKernelMemInfos[i];
++ }
++
++ psStubPBDesc->ui32RefCount++;
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++ return PVRSRV_OK;
++ }
++
++ eError = PVRSRV_OK;
++ if (bLockOnFailure)
++ {
++ if (psResItemCreateSharedPB == IMG_NULL)
++ {
++ psResItemCreateSharedPB = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ psPerProc,
++ 0,
++ &SGXCleanupSharedPBDescCreateLockCallback);
++
++ if (psResItemCreateSharedPB == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
++ psPerProcCreateSharedPB = psPerProc;
++ }
++ else
++ {
++ eError = PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++ }
++ExitNotFound:
++ *phSharedPBDesc = IMG_NULL;
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR
++SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
++{
++
++ IMG_UINT32 i;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)psStubPBDescIn->hDevCookie;
++
++
++
++
++ psStubPBDescIn->ui32RefCount--;
++ if (psStubPBDescIn->ui32RefCount == 0)
++ {
++ List_PVRSRV_STUB_PBDESC_Remove(psStubPBDescIn);
++ for(i=0 ; i<psStubPBDescIn->ui32SubKernelMemInfosCount; i++)
++ {
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie,
++ psStubPBDescIn->ppsSubKernelMemInfos[i]);
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDescIn->ui32SubKernelMemInfosCount,
++ psStubPBDescIn->ppsSubKernelMemInfos,
++ 0);
++ psStubPBDescIn->ppsSubKernelMemInfos = IMG_NULL;
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psSharedPBDescKernelMemInfo);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ psStubPBDescIn,
++ 0);
++
++
++
++ SGXCleanupRequest(psDeviceNode,
++ IMG_NULL,
++ PVRSRV_CLEANUPCMD_PB);
++ }
++ return PVRSRV_OK;
++
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ return SGXCleanupSharedPBDescKM(psStubPBDesc);
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++#ifdef DEBUG
++ PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam;
++ PVR_ASSERT(psPerProc == psPerProcCreateSharedPB);
++#else
++ PVR_UNREFERENCED_PARAMETER(pvParam);
++#endif
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psPerProcCreateSharedPB = IMG_NULL;
++ psResItemCreateSharedPB = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc)
++{
++ PVR_ASSERT(hSharedPBDesc != IMG_NULL);
++
++ return ResManFreeResByPtr(hSharedPBDesc);
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL;
++ PVRSRV_ERROR eRet = PVRSRV_ERROR_GENERIC;
++ IMG_UINT32 i;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ PRESMAN_ITEM psResItem;
++
++
++ if (psPerProcCreateSharedPB != psPerProc)
++ {
++ goto NoAdd;
++ }
++ else
++ {
++ PVR_ASSERT(psResItemCreateSharedPB != IMG_NULL);
++
++ ResManFreeResByPtr(psResItemCreateSharedPB);
++
++ PVR_ASSERT(psResItemCreateSharedPB == IMG_NULL);
++ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
++ }
++
++ psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != IMG_NULL)
++ {
++ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
++
++ }
++
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXAddSharedPBDescKM: "
++ "Failed to register existing shared "
++ "PBDesc with the resource manager"));
++ goto NoAddKeepPB;
++ }
++
++
++ psStubPBDesc->ui32RefCount++;
++
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++ eRet = PVRSRV_OK;
++ goto NoAddKeepPB;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ (IMG_VOID **)&psStubPBDesc,
++ 0,
++ "Stub Parameter Buffer Description") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
++ "StubPBDesc"));
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++
++ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *)
++ * ui32SharedPBDescSubKernelMemInfosCount,
++ (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos,
++ 0,
++ "Array of Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to alloc "
++ "StubPBDesc->ppsSubKernelMemInfos"));
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psHWBlockKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ psStubPBDesc->ui32RefCount = 1;
++ psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
++ psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
++ psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
++ psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
++ psStubPBDesc->psHWBlockKernelMemInfo = psHWBlockKernelMemInfo;
++
++ psStubPBDesc->ui32SubKernelMemInfosCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ psStubPBDesc->ppsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i];
++ if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i])
++ != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to dissociate shared PBDesc "
++ "from process"));
++ goto NoAdd;
++ }
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to register shared PBDesc "
++ " with the resource manager"));
++ goto NoAdd;
++ }
++ psStubPBDesc->hDevCookie = hDevCookie;
++
++
++ List_PVRSRV_STUB_PBDESC_Insert(&(psSGXDevInfo->psStubPBDescListKM),
++ psStubPBDesc);
++
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++
++ return PVRSRV_OK;
++
++NoAdd:
++ if(psStubPBDesc)
++ {
++ if(psStubPBDesc->ppsSubKernelMemInfos)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
++ psStubPBDesc->ppsSubKernelMemInfos,
++ 0);
++ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ psStubPBDesc,
++ 0);
++
++ }
++
++NoAddKeepPB:
++ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i]);
++ }
++
++ PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWBlockKernelMemInfo);
++
++ return eRet;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+new file mode 100644
+index 0000000..72f025d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
++ SGX_CCB_KICK *psCCBKick);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEV_PHYADDR *pDevPAddr,
++ IMG_CPU_PHYADDR *pCpuPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
++ SGX_CLIENT_INFO* psClientInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGX_MISC_INFO *psMiscInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_HANDLE hDevMemContext);
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_IMPORT
++PVRSRV_ERROR SGXReadDiffCountersKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32Reg,
++ IMG_UINT32 *pui32Old,
++ IMG_BOOL bNew,
++ IMG_UINT32 ui32New,
++ IMG_UINT32 ui32NewReset,
++ IMG_UINT32 ui32CountersReg,
++ IMG_UINT32 ui32Reg2,
++ IMG_BOOL *pbActive,
++ PVRSRV_SGXDEV_DIFF_INFO *psDiffs);
++IMG_IMPORT
++PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32ArraySize,
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData,
++ IMG_UINT32 *pui32DataCount,
++ IMG_UINT32 *pui32ClockSpeed,
++ IMG_UINT32 *pui32HostTimeStamp);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_BOOL bWaitForComplete);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ IMG_BOOL bLockOnFailure,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
++
++
++IMG_IMPORT PVRSRV_ERROR
++SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+new file mode 100644
+index 0000000..63cd151
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+@@ -0,0 +1,134 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXCONFIG_H__
++#define __SGXCONFIG_H__
++
++#include "sgxdefs.h"
++
++#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX
++#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D
++
++#define DEV_MAJOR_VERSION 1
++#define DEV_MINOR_VERSION 0
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32
++ #if defined(SGX_FEATURE_2D_HARDWARE)
++ #define SGX_2D_HEAP_BASE 0x00100000
++ #define SGX_2D_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
++ #else
++ #if defined(FIX_HW_BRN_26915)
++ #define SGX_CGBUFFER_HEAP_BASE 0x00100000
++ #define SGX_CGBUFFER_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
++ #endif
++ #endif
++
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x08000000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x08000000-0x00001000)
++ #endif
++
++ #define SGX_GENERAL_HEAP_BASE 0x10000000
++ #define SGX_GENERAL_HEAP_SIZE (0xC8000000-0x00001000)
++
++ #define SGX_3DPARAMETERS_HEAP_BASE 0xD8000000
++ #define SGX_3DPARAMETERS_HEAP_SIZE (0x10000000-0x00001000)
++
++ #define SGX_TADATA_HEAP_BASE 0xE8000000
++ #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000)
++
++ #define SGX_SYNCINFO_HEAP_BASE 0xF5000000
++ #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000)
++
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xF6000000
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000)
++
++ #define SGX_KERNEL_CODE_HEAP_BASE 0xF8000000
++ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xF8400000
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000)
++
++ #define SGX_KERNEL_DATA_HEAP_BASE 0xFA000000
++ #define SGX_KERNEL_DATA_HEAP_SIZE (0x05000000-0x00001000)
++
++ #define SGX_PIXELSHADER_HEAP_BASE 0xFF000000
++ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++ #define SGX_VERTEXSHADER_HEAP_BASE 0xFF800000
++ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++
++ #define SGX_CORE_IDENTIFIED
++#endif
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01800000-0x00001000-0x00001000)
++ #endif
++
++ #define SGX_GENERAL_HEAP_BASE 0x01800000
++ #define SGX_GENERAL_HEAP_SIZE (0x07000000-0x00001000)
++
++ #define SGX_3DPARAMETERS_HEAP_BASE 0x08800000
++ #define SGX_3DPARAMETERS_HEAP_SIZE (0x04000000-0x00001000)
++
++ #define SGX_TADATA_HEAP_BASE 0x0C800000
++ #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000)
++
++ #define SGX_SYNCINFO_HEAP_BASE 0x0D800000
++ #define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000)
++
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0DC00000
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++ #define SGX_KERNEL_CODE_HEAP_BASE 0x0E400000
++ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++ #define SGX_KERNEL_DATA_HEAP_BASE 0x0F000000
++ #define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-0x00001000)
++
++ #define SGX_PIXELSHADER_HEAP_BASE 0x0F400000
++ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++ #define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000
++ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++
++ #define SGX_CORE_IDENTIFIED
++
++#endif
++
++#if !defined(SGX_CORE_IDENTIFIED)
++ #error "sgxconfig.h: ERROR: unspecified SGX Core version"
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+new file mode 100644
+index 0000000..1ddd709
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+@@ -0,0 +1,352 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXINFOKM_H__
++#define __SGXINFOKM_H__
++
++#include "sgxdefs.h"
++#include "device.h"
++#include "power.h"
++#include "sysconfig.h"
++#include "sgxscript.h"
++#include "sgxinfo.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define SGX_HOSTPORT_PRESENT 0x00000001UL
++
++
++typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
++
++
++typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO;
++
++typedef struct _PVRSRV_SGXDEV_INFO_
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ PVRSRV_DEVICE_CLASS eDeviceClass;
++
++ IMG_UINT8 ui8VersionMajor;
++ IMG_UINT8 ui8VersionMinor;
++ IMG_UINT32 ui32CoreConfig;
++ IMG_UINT32 ui32CoreFlags;
++
++
++ IMG_PVOID pvRegsBaseKM;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ IMG_PVOID pvHostPortBaseKM;
++
++ IMG_UINT32 ui32HPSize;
++
++ IMG_SYS_PHYADDR sHPSysPAddr;
++#endif
++
++
++ IMG_HANDLE hRegMapping;
++
++
++ IMG_SYS_PHYADDR sRegsPhysBase;
++
++ IMG_UINT32 ui32RegSize;
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ IMG_UINT32 ui32ExtSysCacheRegsSize;
++
++ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
++
++ IMG_UINT32 *pui32ExtSystemCacheRegsPT;
++
++ IMG_HANDLE hExtSystemCacheRegsPTPageOSMemHandle;
++
++ IMG_SYS_PHYADDR sExtSystemCacheRegsPTSysPAddr;
++#endif
++
++
++ IMG_UINT32 ui32CoreClockSpeed;
++ IMG_UINT32 ui32uKernelTimerClock;
++
++ PVRSRV_STUB_PBDESC *psStubPBDescListKM;
++
++
++
++ IMG_DEV_PHYADDR sKernelPDDevPAddr;
++
++ IMG_VOID *pvDeviceMemoryHeap;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;
++ PVRSRV_SGX_KERNEL_CCB *psKernelCCB;
++ PPVRSRV_SGX_CCB_INFO psKernelCCBInfo;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;
++ PVRSRV_SGX_CCB_CTL *psKernelCCBCtl;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo;
++ IMG_UINT32 *pui32KernelCCBEventKicker;
++#if defined(PDUMP)
++ IMG_UINT32 ui32KernelCCBEventKickerDumpVal;
++#endif
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo;
++ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
++#if defined(SGX_SUPPORT_HWPROFILING)
++ PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
++#endif
++ IMG_UINT32 ui32KickTACounter;
++ IMG_UINT32 ui32KickTARenderCounter;
++#if defined(SUPPORT_SGX_HWPERF)
++ PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo;
++ IMG_UINT32 ui32HWGroupRequested;
++ IMG_UINT32 ui32HWReset;
++#endif
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ PPVRSRV_KERNEL_MEM_INFO psKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ PPVRSRV_KERNEL_MEM_INFO psKernelTmpDPMStateMemInfo;
++#endif
++
++
++ IMG_UINT32 ui32ClientRefCount;
++
++
++ IMG_UINT32 ui32CacheControl;
++
++
++ IMG_UINT32 ui32ClientBuildOptions;
++
++
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++
++
++
++
++ IMG_VOID *pvMMUContextList;
++
++
++ IMG_BOOL bForcePTOff;
++
++ IMG_UINT32 ui32EDMTaskReg0;
++ IMG_UINT32 ui32EDMTaskReg1;
++
++ IMG_UINT32 ui32ClkGateStatusReg;
++ IMG_UINT32 ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ IMG_UINT32 ui32MasterClkGateStatusReg;
++ IMG_UINT32 ui32MasterClkGateStatusMask;
++#endif
++ SGX_INIT_SCRIPTS sScripts;
++
++
++ IMG_HANDLE hBIFResetPDOSMemHandle;
++ IMG_DEV_PHYADDR sBIFResetPDDevPAddr;
++ IMG_DEV_PHYADDR sBIFResetPTDevPAddr;
++ IMG_DEV_PHYADDR sBIFResetPageDevPAddr;
++ IMG_UINT32 *pui32BIFResetPD;
++ IMG_UINT32 *pui32BIFResetPT;
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++
++ IMG_HANDLE hBRN22997PTPageOSMemHandle;
++ IMG_HANDLE hBRN22997PDPageOSMemHandle;
++ IMG_DEV_PHYADDR sBRN22997PTDevPAddr;
++ IMG_DEV_PHYADDR sBRN22997PDDevPAddr;
++ IMG_UINT32 *pui32BRN22997PT;
++ IMG_UINT32 *pui32BRN22997PD;
++ IMG_SYS_PHYADDR sBRN22997SysPAddr;
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++
++ IMG_HANDLE hTimer;
++
++ IMG_UINT32 ui32TimeStamp;
++#endif
++
++
++ IMG_UINT32 ui32NumResets;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo;
++
++ IMG_UINT32 ui32Flags;
++
++ #if defined(PDUMP)
++ PVRSRV_SGX_PDUMP_CONTEXT sPDContext;
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ IMG_VOID *pvDummyPTPageCpuVAddr;
++ IMG_DEV_PHYADDR sDummyPTDevPAddr;
++ IMG_HANDLE hDummyPTPageOSMemHandle;
++ IMG_VOID *pvDummyDataPageCpuVAddr;
++ IMG_DEV_PHYADDR sDummyDataDevPAddr;
++ IMG_HANDLE hDummyDataPageOSMemHandle;
++#endif
++
++ IMG_UINT32 asSGXDevData[SGX_MAX_DEV_DATA];
++
++} PVRSRV_SGXDEV_INFO;
++
++
++typedef struct _SGX_TIMING_INFORMATION_
++{
++ IMG_UINT32 ui32CoreClockSpeed;
++ IMG_UINT32 ui32HWRecoveryFreq;
++ IMG_BOOL bEnableActivePM;
++ IMG_UINT32 ui32ActivePowManLatencyms;
++ IMG_UINT32 ui32uKernelFreq;
++} SGX_TIMING_INFORMATION;
++
++typedef struct _SGX_DEVICE_MAP_
++{
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_SYS_PHYADDR sRegsSysPBase;
++ IMG_CPU_PHYADDR sRegsCpuPBase;
++ IMG_CPU_VIRTADDR pvRegsCpuVBase;
++ IMG_UINT32 ui32RegsSize;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ IMG_SYS_PHYADDR sHPSysPBase;
++ IMG_CPU_PHYADDR sHPCpuPBase;
++ IMG_UINT32 ui32HPSize;
++#endif
++
++
++ IMG_SYS_PHYADDR sLocalMemSysPBase;
++ IMG_DEV_PHYADDR sLocalMemDevPBase;
++ IMG_CPU_PHYADDR sLocalMemCpuPBase;
++ IMG_UINT32 ui32LocalMemSize;
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++ IMG_UINT32 ui32ExtSysCacheRegsSize;
++ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
++#endif
++
++
++ IMG_UINT32 ui32IRQ;
++
++#if !defined(SGX_DYNAMIC_TIMING_INFO)
++
++ SGX_TIMING_INFORMATION sTimingInfo;
++#endif
++} SGX_DEVICE_MAP;
++
++
++struct _PVRSRV_STUB_PBDESC_
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32TotalPBSize;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos;
++ IMG_UINT32 ui32SubKernelMemInfosCount;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ PVRSRV_STUB_PBDESC *psNext;
++ PVRSRV_STUB_PBDESC **ppsThis;
++};
++
++typedef struct _PVRSRV_SGX_CCB_INFO_
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo;
++ SGXMKIF_COMMAND *psCommands;
++ IMG_UINT32 *pui32WriteOffset;
++ volatile IMG_UINT32 *pui32ReadOffset;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_SGX_CCB_INFO;
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_VOID SGXOSTimer(IMG_VOID *pvData);
++
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags);
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo);
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
++
++PVRSRV_ERROR SGXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++IMG_VOID SGXPanic(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psSGXTimingInfo);
++#endif
++
++#if defined(NO_HARDWARE)
++static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32StatusRegister,
++ IMG_UINT32 ui32StatusValue,
++ IMG_UINT32 ui32StatusMask)
++{
++ IMG_UINT32 ui32RegVal;
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
++
++ ui32RegVal &= ~ui32StatusMask;
++ ui32RegVal |= (ui32StatusValue & ui32StatusMask);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
++}
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c
+new file mode 100644
+index 0000000..d8f6aef
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c
+@@ -0,0 +1,2218 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgx_mkif_km.h"
++#include "sgxconfig.h"
++#include "sysconfig.h"
++#include "pvr_bridge_km.h"
++
++#include "sgx_bridge_km.h"
++
++#include "pdump_km.h"
++#include "ra.h"
++#include "mmu.h"
++#include "handle.h"
++#include "perproc.h"
++
++#include "sgxutils.h"
++#include "pvrversion.h"
++#include "sgx_options.h"
++
++#include "lists.h"
++#include "srvkm.h"
++
++DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va);
++#endif
++
++#define VAR(x) #x
++
++#define CHECK_SIZE(NAME) \
++{ \
++ if (psSGXStructSizes->ui32Sizeof_##NAME != psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME) \
++ { \
++ PVR_DPF((PVR_DBG_ERROR, "SGXDevInitCompatCheck: Size check failed for SGXMKIF_%s (client) = %d bytes, (ukernel) = %d bytes\n", \
++ VAR(NAME), \
++ psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME, \
++ psSGXStructSizes->ui32Sizeof_##NAME )); \
++ bStructSizesFailed = IMG_TRUE; \
++ } \
++}
++
++#if defined (SYS_USING_INTERRUPTS)
++IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
++#endif
++
++IMG_UINT32 gui32EventStatusServicesByISR = 0;
++
++
++static
++PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode);
++
++
++static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++#if defined(OS_SUPPORTS_IN_LISR)
++ if (OSInLISR(psDeviceNode->psSysData))
++ {
++
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ }
++ else
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++#else
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++#endif
++}
++
++static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ if (psDevInfo->psKernelCCBInfo != IMG_NULL)
++ {
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++
++ PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = IMG_NULL;
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ psDevInfo->sScripts = psInitInfo->sScripts;
++
++ psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo;
++ psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo;
++ psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo;
++ psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo;
++ psDevInfo->psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXTA3DCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXTA3DCtlMemInfo;
++
++ psDevInfo->psKernelSGXMiscMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXMiscMemInfo;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo;
++#endif
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ psDevInfo->psKernelEDMStatusBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ psDevInfo->psKernelTmpRgnHeaderMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ psDevInfo->psKernelTmpDPMStateMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelTmpDPMStateMemInfo;
++#endif
++
++ psDevInfo->ui32ClientBuildOptions = psInitInfo->ui32ClientBuildOptions;
++
++
++ psDevInfo->sSGXStructSizes = psInitInfo->sSGXStructSizes;
++
++
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGX_CCB_INFO),
++ (IMG_VOID **)&psKernelCCBInfo, 0,
++ "SGX Circular Command Buffer Info");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory"));
++ goto failed_allockernelccb;
++ }
++
++
++ OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO));
++ psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo;
++ psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo;
++ psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands;
++ psKernelCCBInfo->pui32WriteOffset = &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++ psKernelCCBInfo->pui32ReadOffset = &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
++ psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
++
++
++
++ OSMemCopy(psDevInfo->aui32HostKickAddr, psInitInfo->aui32HostKickAddr,
++ SGXMKIF_CMD_MAX * sizeof(psDevInfo->aui32HostKickAddr[0]));
++
++ psDevInfo->bForcePTOff = IMG_FALSE;
++
++ psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
++
++ psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
++ psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
++ psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg;
++ psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ psDevInfo->ui32MasterClkGateStatusReg = psInitInfo->ui32MasterClkGateStatusReg;
++ psDevInfo->ui32MasterClkGateStatusMask = psInitInfo->ui32MasterClkGateStatusMask;
++#endif
++
++
++
++ OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData));
++
++ return PVRSRV_OK;
++
++failed_allockernelccb:
++ DeinitDevInfo(psDevInfo);
++
++ return eError;
++}
++
++
++
++
++static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
++{
++ IMG_UINT32 ui32PC;
++ SGX_INIT_COMMAND *psComm;
++
++ for (ui32PC = 0, psComm = psScript;
++ ui32PC < ui32NumInitCommands;
++ ui32PC++, psComm++)
++ {
++ switch (psComm->eOp)
++ {
++ case SGX_INIT_OP_WRITE_HW_REG:
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++ PDUMPREG(psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++ break;
++ }
++#if defined(PDUMP)
++ case SGX_INIT_OP_PDUMP_HW_REG:
++ {
++ PDUMPREG(psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value);
++ break;
++ }
++#endif
++ case SGX_INIT_OP_HALT:
++ {
++ return PVRSRV_OK;
++ }
++ case SGX_INIT_OP_ILLEGAL:
++
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
++#if defined(PDUMP)
++ static IMG_BOOL bFirstTime = IMG_TRUE;
++#endif
++
++
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 1\n");
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1, SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 1) failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 1\n");
++
++
++ SGXReset(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++
++#if defined(EUR_CR_POWER)
++#if defined(SGX531)
++
++
++
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 1);
++ PDUMPREG(EUR_CR_POWER, 1);
++#else
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 0);
++ PDUMPREG(EUR_CR_POWER, 0);
++#endif
++#endif
++
++
++ *psDevInfo->pui32KernelCCBEventKicker = 0;
++#if defined(PDUMP)
++ if (bFirstTime)
++ {
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = 0;
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ }
++#endif
++
++
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 2\n");
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2, SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 2) failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 2\n");
++
++
++ psSGXHostCtl->ui32InitStatus = 0;
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "Reset the SGX microkernel initialisation status\n");
++ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++
++#if defined(PDUMP)
++
++
++
++
++
++
++ if (bFirstTime)
++ {
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = 1;
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "First increment of the SGX event kicker value\n");
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo,
++ 0,
++ sizeof(IMG_UINT32),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ PDUMPREG(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK);
++ bFirstTime = IMG_FALSE;
++ }
++#endif
++
++#if !defined(NO_HARDWARE)
++
++
++ if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_RETRY;
++ }
++#endif
++
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "Wait for the SGX microkernel initialisation to complete");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++
++
++
++ WorkaroundBRN22997ReadHostPort(psDevInfo);
++#endif
++
++ PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset == psDevInfo->psKernelCCBCtl->ui32WriteOffset);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie)
++
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie;
++ PVRSRV_ERROR eError;
++
++
++ if (psDevInfo->pvRegsBaseKM == IMG_NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hKernelDevMemContext;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_UINT32 i;
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++ PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGX Initialisation Part 1");
++
++
++ PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME);
++#ifdef SGX_CORE_REV
++ PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV);
++#else
++ PDUMPCOMMENT("SGX Core Revision Information: head rtl");
++#endif
++
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ PDUMPCOMMENT("SGX System Level Cache is present\r\n");
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPCOMMENT("SGX System Level Cache is bypassed\r\n");
++ #endif
++ #endif
++
++
++ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGXDEV_INFO),
++ (IMG_VOID **)&psDevInfo, IMG_NULL,
++ "SGX Device Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO));
++
++
++ psDevInfo->eDeviceType = DEV_DEVICE_TYPE;
++ psDevInfo->eDeviceClass = DEV_DEVICE_CLASS;
++
++
++ psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo;
++
++
++ psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap;
++
++
++ hKernelDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr,
++ IMG_NULL,
++ IMG_NULL);
++ if (hKernelDevMemContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1: Failed BM_CreateContext"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
++
++ for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeap;
++
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ hDevMemHeap = BM_CreateHeap (hKernelDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++
++ psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
++ break;
++ }
++ }
++ }
++
++ eError = MMU_BIFResetPDAlloc(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGX : Failed to alloc memory for BIF reset"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGXGetInfoForSrvinit");
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
++
++ eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ PVRSRV_ERROR eError;
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ PDUMPCOMMENT("SGX Initialisation Part 2");
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++
++
++ eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to load EDM program"));
++ goto failed_init_dev_info;
++ }
++
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++
++ if (psSGXDeviceMap->pvRegsCpuVBase)
++ {
++ psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase;
++ }
++ else
++ {
++
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
++ psSGXDeviceMap->ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++ psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
++ psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
++
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(psSGXDeviceMap->sHPCpuPBase,
++ psSGXDeviceMap->ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ psDevInfo->ui32HPSize = psSGXDeviceMap->ui32HPSize;
++ psDevInfo->sHPSysPAddr = psSGXDeviceMap->sHPSysPBase;
++ }
++#endif
++
++#if defined (SYS_USING_INTERRUPTS)
++
++
++ psDeviceNode->pvISRData = psDeviceNode;
++
++ PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
++
++#endif
++
++
++ psDevInfo->psSGXHostCtl->ui32PowerStatus |= PVRSRV_USSE_EDM_POWMAN_NO_WORK;
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++
++ eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex,
++ SGXPrePowerState, SGXPostPowerState,
++ SGXPreClockSpeedChange, SGXPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager"));
++ return eError;
++ }
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ eError = WorkaroundBRN22997Alloc(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to alloc memory for BRN22997 workaround"));
++ return eError;
++ }
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ psDevInfo->ui32ExtSysCacheRegsSize = psSGXDeviceMap->ui32ExtSysCacheRegsSize;
++ psDevInfo->sExtSysCacheRegsDevPBase = psSGXDeviceMap->sExtSysCacheRegsDevPBase;
++ eError = MMU_MapExtSystemCacheRegs(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to map external system cache registers"));
++ return eError;
++ }
++#endif
++
++
++
++ OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
++ OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL));
++ OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker));
++ PDUMPCOMMENT("Initialise Kernel CCB");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Control");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Event Kicker");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++
++ return PVRSRV_OK;
++
++failed_init_dev_info:
++ return eError;
++}
++
++static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Heap;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++
++ if (!psDevInfo)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo"));
++ return PVRSRV_OK;
++ }
++
++#if defined(SUPPORT_HW_RECOVERY)
++ if (psDevInfo->hTimer)
++ {
++ eError = OSRemoveTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
++ return eError;
++ }
++ psDevInfo->hTimer = IMG_NULL;
++ }
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ eError = MMU_UnmapExtSystemCacheRegs(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to unmap ext system cache registers"));
++ return eError;
++ }
++#endif
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ WorkaroundBRN22997Free(psDevInfo);
++#endif
++
++ MMU_BIFResetPDFree(psDevInfo);
++
++
++
++
++ DeinitDevInfo(psDevInfo);
++
++
++ psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
++ for(ui32Heap=0; ui32Heap<psDeviceNode->sDevMemoryInfo.ui32HeapCount; ui32Heap++)
++ {
++ switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL)
++ {
++ BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap);
++ }
++ break;
++ }
++ }
++ }
++
++
++ eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_NULL);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context"));
++ return eError;
++ }
++
++
++ eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to get device memory map!"));
++ return eError;
++ }
++
++
++ if (!psSGXDeviceMap->pvRegsCpuVBase)
++ {
++
++ if (psDevInfo->pvRegsBaseKM != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32RegSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ psDevInfo->ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++#endif
++
++
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGXDEV_INFO),
++ psDevInfo,
++ 0);
++
++ psDeviceNode->pvDevice = IMG_NULL;
++
++ if (psDeviceMemoryHeap != IMG_NULL)
++ {
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
++ psDeviceMemoryHeap,
++ 0);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID SGXDumpDebugInfo (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_BOOL bDumpSGXRegs)
++{
++ IMG_UINT ui32RegVal;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ if (bDumpSGXRegs)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Linear): 0x%08X", psDevInfo->pvRegsBaseKM));
++ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Physical): 0x%08X", psDevInfo->sRegsPhysBase));
++
++
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ if (ui32RegVal & (EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK | EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK))
++ {
++ PVR_LOG(("DPM out of memory!!"));
++ }
++ PVR_LOG(("EUR_CR_EVENT_STATUS: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ PVR_LOG(("EUR_CR_EVENT_STATUS2: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
++ PVR_LOG(("EUR_CR_BIF_CTRL: %x", ui32RegVal));
++
++ #if defined(EUR_CR_BIF_BANK0)
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0);
++ PVR_LOG(("EUR_CR_BIF_BANK0: %x", ui32RegVal));
++ #endif
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ PVR_LOG(("EUR_CR_BIF_INT_STAT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_LOG(("EUR_CR_BIF_FAULT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_REQ_STAT);
++ PVR_LOG(("EUR_CR_BIF_MEM_REQ_STAT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL);
++ PVR_LOG(("EUR_CR_CLKGATECTL: %x", ui32RegVal));
++
++ #if defined(EUR_CR_PDS_PC_BASE)
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_PDS_PC_BASE);
++ PVR_LOG(("EUR_CR_PDS_PC_BASE: %x", ui32RegVal));
++ #endif
++
++
++ }
++
++ #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ {
++ IMG_UINT32 *pui32MKTraceBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32LastStatusCode, ui32WriteOffset;
++
++ ui32LastStatusCode = *pui32MKTraceBuffer;
++ pui32MKTraceBuffer++;
++ ui32WriteOffset = *pui32MKTraceBuffer;
++ pui32MKTraceBuffer++;
++
++ PVR_LOG(("Last SGX microkernel status code: 0x%x", ui32LastStatusCode));
++
++ #if defined(PVRSRV_DUMP_MK_TRACE)
++
++
++ {
++ IMG_UINT32 ui32LoopCounter;
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < SGXMK_TRACE_BUFFER_SIZE;
++ ui32LoopCounter++)
++ {
++ IMG_UINT32 *pui32BufPtr;
++ pui32BufPtr = pui32MKTraceBuffer +
++ (((ui32WriteOffset + ui32LoopCounter) % SGXMK_TRACE_BUFFER_SIZE) * 4);
++ PVR_LOG(("(MKT%u) %08X %08X %08X %08X", ui32LoopCounter,
++ pui32BufPtr[2], pui32BufPtr[3], pui32BufPtr[1], pui32BufPtr[0]));
++ }
++ }
++ #endif
++ }
++ #endif
++
++ {
++
++
++ IMG_UINT32 *pui32HostCtlBuffer = (IMG_UINT32 *)psDevInfo->psSGXHostCtl;
++ IMG_UINT32 ui32LoopCounter;
++
++ PVR_LOG(("SGX Host control:"));
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < sizeof(*psDevInfo->psSGXHostCtl) / sizeof(*pui32HostCtlBuffer);
++ ui32LoopCounter += 4)
++ {
++ PVR_LOG(("\t0x%X: 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32HostCtlBuffer),
++ pui32HostCtlBuffer[ui32LoopCounter + 0], pui32HostCtlBuffer[ui32LoopCounter + 1],
++ pui32HostCtlBuffer[ui32LoopCounter + 2], pui32HostCtlBuffer[ui32LoopCounter + 3]));
++ }
++ }
++
++ {
++
++
++ IMG_UINT32 *pui32TA3DCtlBuffer = psDevInfo->psKernelSGXTA3DCtlMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32LoopCounter;
++
++ PVR_LOG(("SGX TA/3D control:"));
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < psDevInfo->psKernelSGXTA3DCtlMemInfo->ui32AllocSize / sizeof(*pui32TA3DCtlBuffer);
++ ui32LoopCounter += 4)
++ {
++ PVR_LOG(("\t0x%X: 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32TA3DCtlBuffer),
++ pui32TA3DCtlBuffer[ui32LoopCounter + 0], pui32TA3DCtlBuffer[ui32LoopCounter + 1],
++ pui32TA3DCtlBuffer[ui32LoopCounter + 2], pui32TA3DCtlBuffer[ui32LoopCounter + 3]));
++ }
++ }
++
++ QueueDumpDebugInfo();
++}
++
++
++#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY)
++static
++IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32Component,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Component);
++
++
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++
++
++
++ PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress"));
++ return;
++ }
++
++ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++
++ PVR_LOG(("HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
++
++ SGXDumpDebugInfo(psDeviceNode, IMG_TRUE);
++
++
++ PDUMPSUSPEND();
++
++
++ eError = SGXInitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
++ }
++
++
++ PDUMPRESUME();
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++
++
++
++ PVRSRVProcessQueues(ui32CallerID, IMG_TRUE);
++}
++#endif
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++IMG_VOID SGXOSTimer(IMG_VOID *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ static IMG_UINT32 ui32EDMTasks = 0;
++ static IMG_UINT32 ui32LockupCounter = 0;
++ static IMG_UINT32 ui32NumResets = 0;
++ IMG_UINT32 ui32CurrentEDMTasks;
++ IMG_BOOL bLockup = IMG_FALSE;
++ IMG_BOOL bPoweredDown;
++
++
++ psDevInfo->ui32TimeStamp++;
++
++#if defined(NO_HARDWARE)
++ bPoweredDown = IMG_TRUE;
++#else
++ bPoweredDown = SGXIsDevicePowered(psDeviceNode) ? IMG_FALSE : IMG_TRUE;
++#endif
++
++
++
++ if (bPoweredDown)
++ {
++ ui32LockupCounter = 0;
++ }
++ else
++ {
++
++ ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0);
++ if (psDevInfo->ui32EDMTaskReg1 != 0)
++ {
++ ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1);
++ }
++ if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
++ (psDevInfo->ui32NumResets == ui32NumResets))
++ {
++ ui32LockupCounter++;
++ if (ui32LockupCounter == 3)
++ {
++ ui32LockupCounter = 0;
++ PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected SGX lockup (0x%x tasks)", ui32EDMTasks));
++
++ bLockup = IMG_TRUE;
++ }
++ }
++ else
++ {
++ ui32LockupCounter = 0;
++ ui32EDMTasks = ui32CurrentEDMTasks;
++ ui32NumResets = psDevInfo->ui32NumResets;
++ }
++ }
++
++ if (bLockup)
++ {
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32HostDetectedLockups ++;
++
++
++ HWRecoveryResetSGX(psDeviceNode, 0, KERNEL_ID);
++ }
++}
++#endif
++
++
++#if defined(SYS_USING_INTERRUPTS)
++
++IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData)
++{
++ IMG_BOOL bInterruptProcessed = IMG_FALSE;
++
++
++
++ {
++ IMG_UINT32 ui32EventStatus, ui32EventEnable;
++ IMG_UINT32 ui32EventClear = 0;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++
++ if(pvData == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n"));
++ return bInterruptProcessed;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE);
++
++
++
++ gui32EventStatusServicesByISR = ui32EventStatus;
++
++
++ ui32EventStatus &= ui32EventEnable;
++
++ if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK)
++ {
++ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
++ }
++
++ if (ui32EventClear)
++ {
++ bInterruptProcessed = IMG_TRUE;
++
++
++ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
++ }
++ }
++
++ return bInterruptProcessed;
++}
++
++
++/*
++ * SGX_MISRHandler - deferred (bottom-half) interrupt handler.
++ *
++ * Runs outside hard-IRQ context. If the microkernel has raised the HWR
++ * (hardware-recovery) interrupt flag and the host has not yet acknowledged
++ * it, triggers a full SGX reset. Optionally re-runs command-complete
++ * processing (when the OS allows it from LISR context) and then checks
++ * for a pending active-power event.
++ */
++IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++ /* HWR flag set by ukernel but not yet cleared by host => recover now. */
++ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) != 0UL) &&
++ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) == 0UL))
++ {
++ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
++ }
++
++#if defined(OS_SUPPORTS_IN_LISR)
++ if (psDeviceNode->bReProcessDeviceCommandComplete)
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++#endif
++
++ SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
++}
++#endif
++
++
++/*
++ * SGXRegisterDevice - populate a PVRSRV_DEVICE_NODE for the SGX device.
++ *
++ * Installs the init/deinit, compatibility-check, MMU, ISR/MISR and
++ * command-complete callbacks, then allocates and fills the device memory
++ * heap table (one DEVICE_MEMORY_HEAP_INFO per SGX heap). The heap table
++ * order matters: ui32MappingHeapID / ui32SyncHeapID / ui32HeapCount are
++ * derived from pointer arithmetic against the table base as entries are
++ * appended. Returns PVRSRV_ERROR_OUT_OF_MEMORY if the table allocation
++ * fails, PVRSRV_OK otherwise.
++ */
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++ /* Device identity and lifecycle callbacks. */
++ psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE;
++ psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS;
++
++ psDeviceNode->pfnInitDevice = DevInitSGXPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitSGX;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = SGXDevInitCompatCheck;
++
++ /* MMU callback table. */
++
++ psDeviceNode->pfnMMUInitialise = MMU_Initialise;
++ psDeviceNode->pfnMMUFinalise = MMU_Finalise;
++ psDeviceNode->pfnMMUInsertHeap = MMU_InsertHeap;
++ psDeviceNode->pfnMMUCreate = MMU_Create;
++ psDeviceNode->pfnMMUDelete = MMU_Delete;
++ psDeviceNode->pfnMMUAlloc = MMU_Alloc;
++ psDeviceNode->pfnMMUFree = MMU_Free;
++ psDeviceNode->pfnMMUMapPages = MMU_MapPages;
++ psDeviceNode->pfnMMUMapShadow = MMU_MapShadow;
++ psDeviceNode->pfnMMUUnmapPages = MMU_UnmapPages;
++ psDeviceNode->pfnMMUMapScatter = MMU_MapScatter;
++ psDeviceNode->pfnMMUGetPhysPageAddr = MMU_GetPhysPageAddr;
++ psDeviceNode->pfnMMUGetPDDevPAddr = MMU_GetPDDevPAddr;
++
++#if defined (SYS_USING_INTERRUPTS)
++
++ /* Interrupt handlers (LISR + MISR) when the system uses interrupts. */
++ psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
++ psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
++#endif
++
++
++
++ psDeviceNode->pfnDeviceCommandComplete = SGXCommandComplete;
++
++ /* Build the device memory heap table. */
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_FEATURE_ADDRESS_SPACE_SIZE;
++
++
++ psDevMemoryInfo->ui32Flags = 0;
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
++ (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0,
++ "Array of Device Memory Heap Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID);
++
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++ /* General heap (per-context). */
++
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "General";
++ psDeviceMemoryHeap->pszBSName = "General BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ /* No dedicated mapping heap: the general heap doubles as the mapping heap. */
++ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++#endif
++ psDeviceMemoryHeap++;
++
++ /* TA data heap (per-context). */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_TADATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "TA Data";
++ psDeviceMemoryHeap->pszBSName = "TA Data BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ /* Kernel code heap (shared/exported). */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "Kernel Code";
++ psDeviceMemoryHeap->pszBSName = "Kernel Code BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ /* Kernel data heap (shared/exported). */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_DATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_DATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "KernelData";
++ psDeviceMemoryHeap->pszBSName = "KernelData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ /* Pixel shader USSE heap (per-context). */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PIXELSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PixelShaderUSSE";
++ psDeviceMemoryHeap->pszBSName = "PixelShaderUSSE BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ /* Vertex shader USSE heap (per-context). */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_VERTEXSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "VertexShaderUSSE";
++ psDeviceMemoryHeap->pszBSName = "VertexShaderUSSE BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ /* PDS pixel code/data heap (per-context). */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PDSPixelCodeData";
++ psDeviceMemoryHeap->pszBSName = "PDSPixelCodeData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ /* PDS vertex code/data heap (per-context). */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PDSVertexCodeData";
++ psDeviceMemoryHeap->pszBSName = "PDSVertexCodeData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ /* Sync-info heap; its index is recorded as ui32SyncHeapID. */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "CacheCoherent";
++ psDeviceMemoryHeap->pszBSName = "CacheCoherent BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++
++ psDevMemoryInfo->ui32SyncHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++ psDeviceMemoryHeap++;
++
++ /* 3D parameters heap; per-context vs shared depends on PB model. */
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_3DPARAMETERS_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_3DPARAMETERS_HEAP_SIZE;
++ psDeviceMemoryHeap->pszName = "3DParameters";
++ psDeviceMemoryHeap->pszBSName = "3DParameters BS";
++#if defined(SUPPORT_PERCONTEXT_PB)
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++#else
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++ /* Optional dedicated general mapping heap (not RAM-backed). */
++#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "GeneralMapping";
++ psDeviceMemoryHeap->pszBSName = "GeneralMapping BS";
++ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410)
++ /* BRN23410 workaround: force the mapping heap to be shared/exported
++  * when multiple memory contexts are in use. */
++
++
++
++
++
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#else
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++#endif
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++
++ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++ psDeviceMemoryHeap++;
++#endif
++
++ /* Optional 2D-core heap. */
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_2D_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_2D_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "2D";
++ psDeviceMemoryHeap->pszBSName = "2D BS";
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++#endif
++
++ /* Optional CG buffer heap for the BRN26915 workaround. */
++#if defined(FIX_HW_BRN_26915)
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_CGBUFFER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_CGBUFFER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_CGBUFFER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "CGBuffer";
++ psDeviceMemoryHeap->pszBSName = "CGBuffer BS";
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++#endif
++
++ /* Final heap count = number of entries appended above. */
++ psDevMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++
++ return PVRSRV_OK;
++}
++
++/*
++ * SGXGetClientInfoKM - fill in per-client info for a new SGX client.
++ *
++ * hDevCookie is a PVRSRV_DEVICE_NODE handle. Bumps the device client
++ * refcount, resets the PDUMP CCB write offset (so a new capture starts
++ * from a clean CCB state), and copies the current process ID and the
++ * cached SGX device data into psClientInfo. Always returns PVRSRV_OK.
++ * NOTE(review): the refcount increment is not visibly locked here —
++ * presumably callers hold the bridge/services lock; confirm.
++ */
++IMG_EXPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
++ SGX_CLIENT_INFO* psClientInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++
++
++ psDevInfo->ui32ClientRefCount++;
++
++#if defined(PDUMP)
++
++ psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
++#endif
++
++
++ psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++
++ OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
++
++
++ return PVRSRV_OK;
++}
++
++
++/*
++ * SGXPanic - log, dump SGX debug state, then halt the OS.
++ * Does not return (OSPanic halts/reboots the system).
++ */
++IMG_VOID SGXPanic(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_LOG(("SGX panic"));
++ SGXDumpDebugInfo(psDeviceNode, IMG_FALSE);
++ OSPanic();
++}
++
++
++/*
++ * SGXDevInitCompatCheck - verify client/KM/microkernel/HW compatibility.
++ *
++ * Checks, in order: device type is SGX; client-side vs KM build options
++ * match; (with hardware) device DDK version/build vs driver; HW core
++ * revision vs SW-expected revision (with an exception table of known-OK
++ * pairs); SGXMKIF structure sizes; and driver vs microkernel build
++ * options. Returns PVRSRV_OK on success or a mismatch/param error —
++ * unless IGNORE_SGX_INIT_COMPATIBILITY_CHECK is defined, in which case
++ * failures are detected and logged but PVRSRV_OK is returned anyway.
++ */
++PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch;
++#if !defined(NO_HARDWARE)
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo;
++ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
++ IMG_BOOL bStructSizesFailed;
++
++ /* (HW rev, SW rev) pairs that are known-compatible despite differing. */
++ IMG_BOOL bCheckCoreRev;
++ const IMG_UINT32 aui32CoreRevExceptions[] = {
++ 0x10100, 0x10101
++ };
++ const IMG_UINT32 ui32NumCoreExceptions = sizeof(aui32CoreRevExceptions) / (2*sizeof(IMG_UINT32));
++ IMG_UINT i;
++#endif
++
++
++ if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Device not of type SGX"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto chk_exit;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++ /* 1) Client-side vs kernel-mode driver build options. */
++
++ ui32BuildOptions = (SGX_BUILD_OPTIONS);
++ if (ui32BuildOptions != psDevInfo->ui32ClientBuildOptions)
++ {
++ ui32BuildOptionsMismatch = ui32BuildOptions ^ psDevInfo->ui32ClientBuildOptions;
++ if ( (psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
++ "extra options present in client-side driver: (0x%lx). Please check sgx_options.h",
++ psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
++ }
++
++ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
++ "extra options present in KM: (0x%lx). Please check sgx_options.h",
++ ui32BuildOptions & ui32BuildOptionsMismatch ));
++ }
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Client-side and KM driver build options match. [ OK ]"));
++ }
++
++#if !defined (NO_HARDWARE)
++ psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++ /* 2) Ask the microkernel for its struct sizes / feature info. */
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXMiscInfoInt->ui32MiscInfoFlags = 0;
++ psSGXMiscInfoInt->ui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES;
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Unable to validate device DDK version"));
++ goto chk_exit;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++ if( (psSGXFeatures->ui32DDKVersion !=
++ ((PVRVERSION_MAJ << 16) |
++ (PVRVERSION_MIN << 8) |
++ PVRVERSION_BRANCH) ) ||
++ (psSGXFeatures->ui32DDKBuild != PVRVERSION_BUILD) )
++ {
++ PVR_LOG(("(FAIL) SGXInit: Incompatible driver DDK revision (%ld)/device DDK revision (%ld).",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
++ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
++ PVR_DBG_BREAK;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: driver DDK (%ld) and device DDK (%ld) match. [ OK ]",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
++ }
++
++ /* 3) HW vs SW core revision; ui32CoreRevSW == 0 means "skip check". */
++ if (psSGXFeatures->ui32CoreRevSW == 0)
++ {
++
++
++ PVR_LOG(("SGXInit: HW core rev (%lx) check skipped.",
++ psSGXFeatures->ui32CoreRev));
++ }
++ else
++ {
++ /* NOTE(review): ui32NumCoreExceptions counts PAIRS but the loop
++  * steps i by 2 over it; correct only while there is exactly one
++  * exception pair — a second pair would never be checked. Verify
++  * before extending aui32CoreRevExceptions. */
++ bCheckCoreRev = IMG_TRUE;
++ for(i=0; i<ui32NumCoreExceptions; i+=2)
++ {
++ if( (psSGXFeatures->ui32CoreRev==aui32CoreRevExceptions[i]) &&
++ (psSGXFeatures->ui32CoreRevSW==aui32CoreRevExceptions[i+1]) )
++ {
++ PVR_LOG(("SGXInit: HW core rev (%lx), SW core rev (%lx) check skipped.",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreRevSW));
++ bCheckCoreRev = IMG_FALSE;
++ }
++ }
++
++ if (bCheckCoreRev)
++ {
++ if (psSGXFeatures->ui32CoreRev != psSGXFeatures->ui32CoreRevSW)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Incompatible HW core rev (%lx) and SW core rev (%lx).",
++ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: HW core rev (%lx) and SW core rev (%lx) match. [ OK ]",
++ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
++ }
++ }
++ }
++
++ /* 4) Host vs microkernel SGXMKIF structure sizes. */
++ psSGXStructSizes = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXStructSizes;
++
++ bStructSizesFailed = IMG_FALSE;
++
++ CHECK_SIZE(HOST_CTL);
++ CHECK_SIZE(COMMAND);
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ CHECK_SIZE(2DCMD);
++ CHECK_SIZE(2DCMD_SHARED);
++#endif
++ CHECK_SIZE(CMDTA);
++ CHECK_SIZE(CMDTA_SHARED);
++ CHECK_SIZE(TRANSFERCMD);
++ CHECK_SIZE(TRANSFERCMD_SHARED);
++
++ CHECK_SIZE(3DREGISTERS);
++ CHECK_SIZE(HWPBDESC);
++ CHECK_SIZE(HWRENDERCONTEXT);
++ CHECK_SIZE(HWRENDERDETAILS);
++ CHECK_SIZE(HWRTDATA);
++ CHECK_SIZE(HWRTDATASET);
++ CHECK_SIZE(HWTRANSFERCONTEXT);
++
++ if (bStructSizesFailed == IMG_TRUE)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in SGXMKIF structure sizes."));
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: SGXMKIF structure sizes match. [ OK ]"));
++ }
++
++ /* 5) Driver vs microkernel build options. */
++ ui32BuildOptions = psSGXFeatures->ui32BuildOptions;
++ if (ui32BuildOptions != (SGX_BUILD_OPTIONS))
++ {
++ ui32BuildOptionsMismatch = ui32BuildOptions ^ (SGX_BUILD_OPTIONS);
++ if ( ((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
++ "extra options present in driver: (0x%lx). Please check sgx_options.h",
++ (SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch ));
++ }
++
++ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
++ "extra options present in microkernel: (0x%lx). Please check sgx_options.h",
++ ui32BuildOptions & ui32BuildOptionsMismatch ));
++ }
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Driver and microkernel build options match. [ OK ]"));
++ }
++#endif
++
++ eError = PVRSRV_OK;
++chk_exit:
++#if defined(IGNORE_SGX_INIT_COMPATIBILITY_CHECK)
++ return PVRSRV_OK;
++#else
++ return eError;
++#endif
++}
++
++/*
++ * SGXGetMiscInfoUkernel - synchronously query misc info from the ukernel.
++ *
++ * Clears the READY flag and the feature/struct-size result areas in the
++ * shared misc-info buffer, schedules a GETMISCINFO CCB command carrying
++ * the buffer's device virtual address, then (with real hardware) polls
++ * until the microkernel sets PVRSRV_USSE_MISCINFO_READY or MAX_HW_TIME_US
++ * elapses. Caller sets any request flags in ui32MiscInfoFlags beforehand.
++ * Returns PVRSRV_OK, PVRSRV_ERROR_INVALID_PARAMS (no mapped buffer),
++ * PVRSRV_ERROR_TIMEOUT, or the CCB scheduling error.
++ */
++static
++PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ SGXMKIF_COMMAND sCommandData;
++ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
++
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++ if (! psMemInfo->pvLinAddrKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Invalid address."));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures;
++ psSGXStructSizes = &psSGXMiscInfoInt->sSGXStructSizes;
++
++ psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY;
++
++ /* Clear result areas so stale data can't be mistaken for a reply. */
++ OSMemSet(psSGXFeatures, 0, sizeof(*psSGXFeatures));
++ OSMemSet(psSGXStructSizes, 0, sizeof(*psSGXStructSizes));
++
++ /* NOTE(review): only ui32Data[1] is initialised; ui32Data[0] is sent
++  * uninitialised — presumably unused by SGXMKIF_CMD_GETMISCINFO; confirm. */
++ sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode,
++ SGXMKIF_CMD_GETMISCINFO,
++ &sCommandData,
++ KERNEL_ID,
++ 0);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++ /* Busy-wait for the microkernel to flag the reply as ready. */
++#if !defined(NO_HARDWARE)
++ {
++ IMG_BOOL bExit;
++
++ bExit = IMG_FALSE;
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if ((psSGXMiscInfoInt->ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) != 0)
++ {
++ bExit = IMG_TRUE;
++ break;
++ }
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ if (!bExit)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Timeout occurred waiting for misc info."));
++ return PVRSRV_ERROR_TIMEOUT;
++ }
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++
++/*
++ * SGXGetMiscInfoKM - dispatcher for SGX_MISC_INFO requests from clients.
++ *
++ * Services a variety of query/control requests (clock speed, DDK/core
++ * revision, data breakpoints, EDM memory reads, HWPerf control and
++ * circular-buffer retrieval, debug dump, panic) against the shared
++ * misc-info buffer and device registers. Returns PVRSRV_OK on success,
++ * PVRSRV_ERROR_INVALID_PARAMS for bad arguments or unknown requests, or
++ * the error from the underlying microkernel query.
++ */
++IMG_EXPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGX_MISC_INFO *psMiscInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_HANDLE hDevMemContext)
++{
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++ IMG_UINT32 *pui32MiscInfoFlags = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->ui32MiscInfoFlags;
++
++ /* Start each request from a clean flag state. */
++ *pui32MiscInfoFlags = 0;
++
++#if !defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ PVR_UNREFERENCED_PARAMETER(hDevMemContext);
++#endif
++
++ switch(psMiscInfo->eRequest)
++ {
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ case SGX_MISC_INFO_REQUEST_SET_BREAKPOINT:
++ {
++ IMG_UINT32 ui32RegOffset;
++ IMG_UINT32 ui32RegVal;
++ IMG_UINT32 ui32BaseRegOffset;
++ IMG_UINT32 ui32BaseRegVal;
++ IMG_UINT32 ui32MaskRegOffset;
++ IMG_UINT32 ui32MaskRegVal;
++
++ /* Select the register triplet for the requested breakpoint unit. */
++ switch(psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex)
++ {
++ case 0:
++ ui32RegOffset = EUR_CR_BREAKPOINT0;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT0_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT0_MASK;
++ break;
++ case 1:
++ ui32RegOffset = EUR_CR_BREAKPOINT1;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT1_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT1_MASK;
++ break;
++ case 2:
++ ui32RegOffset = EUR_CR_BREAKPOINT2;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT2_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT2_MASK;
++ break;
++ case 3:
++ ui32RegOffset = EUR_CR_BREAKPOINT3;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT3_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT3_MASK;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"SGXGetMiscInfoKM: SGX_MISC_INFO_REQUEST_SET_BREAKPOINT invalid BP idx %d", psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if(psMiscInfo->uData.sSGXBreakpointInfo.bBPEnable)
++ {
++
++ IMG_DEV_VIRTADDR sBPDevVAddr = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddr;
++
++
++ ui32MaskRegVal = EUR_CR_BREAKPOINT0_MASK_REGION_MASK | EUR_CR_BREAKPOINT0_MASK_DM_MASK;
++
++
++ ui32BaseRegVal = sBPDevVAddr.uiAddr & EUR_CR_BREAKPOINT0_BASE_ADDRESS_MASK;
++
++ /* NOTE(review): WENABLE is OR'd in twice below — the second term
++  * was presumably meant to be the read-enable (RENABLE) mask.
++  * Also, the computed ui32RegVal/ui32BaseRegVal/ui32MaskRegVal
++  * are never written to the selected registers before returning,
++  * so this request is effectively a no-op as committed. Confirm
++  * against a later DDK drop. */
++ ui32RegVal = EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK
++ | EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK
++ | EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK;
++ }
++ else
++ {
++
++ ui32RegVal = ui32BaseRegVal = ui32MaskRegVal = 0;
++ }
++
++
++
++
++
++
++
++
++
++
++ return PVRSRV_OK;
++ }
++#endif
++
++ /* Report the cached core clock speed. */
++ case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
++ {
++ psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ return PVRSRV_OK;
++ }
++
++ /* Query revision/feature data from the running microkernel. */
++ case SGX_MISC_INFO_REQUEST_SGXREV:
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
++ eError));
++ return eError;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: Core 0x%lx, sw ID 0x%lx, sw Rev 0x%lx\n",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreIdSW,
++ psSGXFeatures->ui32CoreRevSW));
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: DDK version 0x%lx, DDK build 0x%lx\n",
++ psSGXFeatures->ui32DDKVersion,
++ psSGXFeatures->ui32DDKBuild));
++
++
++ return PVRSRV_OK;
++ }
++
++ /* Report the driver's own compiled-in DDK revision (no HW round-trip). */
++ case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV:
++ {
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++
++ OSMemSet(psMemInfo->pvLinAddrKM, 0,
++ sizeof(PVRSRV_SGX_MISCINFO_INFO));
++
++ psSGXFeatures->ui32DDKVersion =
++ (PVRVERSION_MAJ << 16) |
++ (PVRVERSION_MIN << 8) |
++ PVRVERSION_BRANCH;
++ psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD;
++
++
++ psSGXFeatures->ui32BuildOptions = (SGX_BUILD_OPTIONS);
++
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ /* Read device memory via the microkernel (EDM memory debug). */
++ case SGX_MISC_INFO_REQUEST_MEMREAD:
++ {
++ PVRSRV_ERROR eError;
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ PVRSRV_SGX_MISCINFO_MEMREAD *psSGXMemReadData;
++
++ psSGXMemReadData = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemReadData;
++
++
++ *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMREAD;
++
++ /* Resolve the page-directory address of the target mem context. */
++ if(psMiscInfo->hDevMemContext != IMG_NULL)
++ {
++ SGXGetMMUPDAddrKM( (IMG_HANDLE)psDeviceNode, hDevMemContext, &psSGXMemReadData->sPDDevPAddr);
++ }
++ else
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if(psMiscInfo->sDevVAddr.uiAddr != 0)
++ {
++ psSGXMemReadData->sDevVAddr = psMiscInfo->sDevVAddr;
++ }
++ else
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
++ eError));
++ return eError;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++#if !defined SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ if(*pui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_MEMREAD_FAIL)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++#endif
++
++#ifdef SUPPORT_SGX_HWPERF
++ /* Set the HWPerf on/off flags; reset ordinals for streams turning on. */
++ case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS:
++ {
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT ui32MatchingFlags;
++
++ /* Reject any flag outside the two supported HWPerf streams. */
++ if ((psMiscInfo->uData.ui32NewHWPerfStatus & ~(PVRSRV_SGX_HWPERF_GRAPHICS_ON | PVRSRV_SGX_HWPERF_MK_EXECUTION_ON)) != 0)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ /* For streams transitioning off->on, reset their ordinal counters. */
++ ui32MatchingFlags = psMiscInfo->uData.ui32NewHWPerfStatus & psDevInfo->psSGXHostCtl->ui32HWPerfFlags;
++ if((ui32MatchingFlags & PVRSRV_SGX_HWPERF_GRAPHICS_ON) == 0UL)
++ {
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffff;
++ }
++ if((ui32MatchingFlags & PVRSRV_SGX_HWPERF_MK_EXECUTION_ON) == 0UL)
++ {
++ psHWPerfCB->ui32OrdinalMK_EXECUTION = 0xffffffffUL;
++ }
++
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags = psMiscInfo->uData.ui32NewHWPerfStatus;
++ #if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX ukernel HWPerf status %lu\n",
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags);
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32HWPerfFlags),
++ sizeof(psDevInfo->psSGXHostCtl->ui32HWPerfFlags), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_ON:
++ {
++ /* Enable the graphics HWPerf stream only. */
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffffUL;
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags |= PVRSRV_SGX_HWPERF_GRAPHICS_ON;
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF:
++ {
++ /* Disable all HWPerf streams. */
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags = 0;
++ return PVRSRV_OK;
++ }
++ /* Drain entries from the HWPerf circular buffer into the client array. */
++ case SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB:
++ {
++
++ SGX_MISC_INFO_HWPERF_RETRIEVE_CB *psRetrieve = &psMiscInfo->uData.sRetrieveCB;
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT i;
++
++ for (i = 0; psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < psRetrieve->ui32ArraySize; i++)
++ {
++ SGXMKIF_HWPERF_CB_ENTRY *psData = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++
++ /* Each CB entry carries a single timestamp, so start == end here. */
++
++ psRetrieve->psHWPerfData[i].ui32FrameNo = psData->ui32FrameNo;
++ psRetrieve->psHWPerfData[i].ui32Type = (psData->ui32Type & PVRSRV_SGX_HWPERF_TYPE_OP_MASK);
++ psRetrieve->psHWPerfData[i].ui32StartTime = psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32StartTimeWraps = psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32EndTime = psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32EndTimeWraps = psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ psRetrieve->psHWPerfData[i].ui32TimeMax = psDevInfo->ui32uKernelTimerClock;
++ psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++ psRetrieve->ui32DataCount = i;
++ psRetrieve->ui32Time = OSClockus();
++ return PVRSRV_OK;
++ }
++#endif
++ case SGX_MISC_INFO_DUMP_DEBUG_INFO:
++ {
++ PVR_LOG(("User requested SGX debug info"));
++
++ /* Dump SGX debug state to the log; does not halt. */
++ SGXDumpDebugInfo(psDeviceNode, IMG_FALSE);
++
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_PANIC:
++ {
++ PVR_LOG(("User requested SGX panic"));
++
++ SGXPanic(psDeviceNode);
++
++ return PVRSRV_OK;
++ }
++
++ default:
++ {
++ /* Unknown request code. */
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++}
++
++#if defined(SUPPORT_SGX_HWPERF)
++/*
++ * SGXReadDiffCountersKM - sample the SGX performance counters.
++ *
++ * Optionally (bNew) records a new requested counter-group selection and
++ * accumulates pending reset bits, then, under the power lock and only if
++ * the device is currently powered, reads the group-select register, the
++ * PVRSRV_SGX_DIFF_NUM_COUNTERS counter registers and the optional second
++ * register (ui32Reg2), and applies any pending group/reset change to HW.
++ * *pbActive reports whether the core was powered; psDiffs receives the
++ * counters plus timestamps and kick markers either way.
++ * Finishes by releasing the power lock and checking for a pending
++ * active-power event. Returns the power-lock error or PVRSRV_OK.
++ */
++IMG_EXPORT
++PVRSRV_ERROR SGXReadDiffCountersKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32Reg,
++ IMG_UINT32 *pui32Old,
++ IMG_BOOL bNew,
++ IMG_UINT32 ui32New,
++ IMG_UINT32 ui32NewReset,
++ IMG_UINT32 ui32CountersReg,
++ IMG_UINT32 ui32Reg2,
++ IMG_BOOL *pbActive,
++ PVRSRV_SGXDEV_DIFF_INFO *psDiffs)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++ IMG_BOOL bPowered = IMG_FALSE;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ /* Latch the requested group selection / pending reset bits. */
++ if(bNew)
++ {
++ psDevInfo->ui32HWGroupRequested = ui32New;
++ }
++ psDevInfo->ui32HWReset |= ui32NewReset;
++
++ /* Registers may only be touched while holding the power lock. */
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ SysAcquireData(&psSysData);
++
++ /* Find this device's power descriptor to learn its power state. */
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ psDeviceNode->sDevId.ui32DeviceIndex);
++
++ if (psPowerDevice)
++ {
++ bPowered = (IMG_BOOL)(psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON);
++ }
++
++
++
++ *pbActive = bPowered;
++
++
++
++ {
++ IMG_UINT32 ui32rval = 0;
++
++ /* Only touch hardware registers while the core is powered. */
++ if(bPowered)
++ {
++ IMG_UINT32 i;
++
++
++ *pui32Old = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg);
++
++ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i)
++ {
++ psDiffs->aui32Counters[i] = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32CountersReg + (i * 4));
++ }
++
++ if(ui32Reg2)
++ {
++ ui32rval = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg2);
++ }
++
++ /* Apply a pending group change; when reset bits are pending the
++  * register is written twice — first with the reset bits set,
++  * then without — presumably to pulse the counter reset; confirm
++  * against the EUR_CR register spec. */
++ if (psDevInfo->ui32HWGroupRequested != *pui32Old)
++ {
++
++ if(psDevInfo->ui32HWReset != 0)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg, psDevInfo->ui32HWGroupRequested | psDevInfo->ui32HWReset);
++ psDevInfo->ui32HWReset = 0;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg, psDevInfo->ui32HWGroupRequested);
++ }
++ }
++
++ psDiffs->ui32Time[0] = OSClockus();
++ psDiffs->ui32Time[1] = psDevInfo->psSGXHostCtl->ui32TimeWraps;
++ psDiffs->ui32Time[2] = ui32rval;
++
++ psDiffs->ui32Marker[0] = psDevInfo->ui32KickTACounter;
++ psDiffs->ui32Marker[1] = psDevInfo->ui32KickTARenderCounter;
++ }
++
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ SGXTestActivePowerEvent(psDeviceNode, KERNEL_ID);
++
++ return eError;
++}
++
++
++/*
++ * SGXReadHWPerfCBKM - drain the HWPerf circular buffer for a client.
++ *
++ * Copies up to ui32ArraySize entries from the kernel-side HWPerf CB into
++ * psClientHWPerfEntry, converting each entry's (wraps, time) pair into a
++ * clocks-x16 timestamp via SGXConvertTimeStamp and advancing the read
++ * offset (the CB size is a power of two, hence the AND-mask wrap).
++ * Outputs the number of entries copied, the core clock speed, and a host
++ * timestamp. Always returns PVRSRV_OK.
++ */
++IMG_EXPORT
++PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32ArraySize,
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psClientHWPerfEntry,
++ IMG_UINT32 *pui32DataCount,
++ IMG_UINT32 *pui32ClockSpeed,
++ IMG_UINT32 *pui32HostTimeStamp)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT i;
++
++ for (i = 0;
++ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize;
++ i++)
++ {
++ SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++
++ psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo;
++ psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type;
++ psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal;
++ psClientHWPerfEntry[i].ui32Clocksx16 = SGXConvertTimeStamp(psDevInfo,
++ psMKPerfEntry->ui32TimeWraps,
++ psMKPerfEntry->ui32Time);
++ OSMemCopy(&psClientHWPerfEntry[i].ui32Counters[0],
++ &psMKPerfEntry->ui32Counters[0],
++ sizeof(psMKPerfEntry->ui32Counters));
++
++ psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++
++ *pui32DataCount = i;
++ *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ *pui32HostTimeStamp = OSClockus();
++
++ return eError;
++}
++#else
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c
+new file mode 100644
+index 0000000..2848313
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c
+@@ -0,0 +1,744 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++#include "services_headers.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#if defined (PDUMP)
++#include "sgxapi_km.h"
++#include "pdump_km.h"
++#endif
++#include "sgx_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++IMG_EXPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK *psCCBKick)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
++ SGXMKIF_CMDTA_SHARED *psTACmd;
++ IMG_UINT32 i;
++#if defined(SUPPORT_SGX_HWPERF)
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ if (psCCBKick->bKickRender)
++ {
++ ++psDevInfo->ui32KickTARenderCounter;
++ }
++ ++psDevInfo->ui32KickTACounter;
++#endif
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psTACmd = CCB_DATA_FROM_OFFSET(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset);
++
++
++ if (psCCBKick->hTA3DSyncInfo)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++ psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->sTA3DDependency.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psCCBKick->bTADependency)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++
++ psTACmd->sTATQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sTATQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui32TATQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui32TATQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++ psTACmd->s3DTQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui323DTQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui323DTQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
++ if (psCCBKick->ui32NumTAStatusVals != 0)
++ {
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psTACmd->sCtlTAStatusInfo[i] = psCCBKick->asTAStatusUpdate[i].sCtlStatus;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++#endif
++ }
++ }
++
++ psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
++ if (psCCBKick->ui32Num3DStatusVals != 0)
++ {
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psTACmd->sCtl3DStatusInfo[i] = psCCBKick->as3DStatusUpdate[i].sCtlStatus;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++#endif
++ }
++ }
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ psTACmd->ui32NumTASrcSyncs = psCCBKick->ui32NumTASrcSyncs;
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++
++ psTACmd->asTASrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asTASrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asTASrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asTASrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTADstSyncs = psCCBKick->ui32NumTADstSyncs;
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++
++ psTACmd->asTADstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asTADstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asTADstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psTACmd->asTADstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ psTACmd->ui32Num3DSrcSyncs = psCCBKick->ui32Num3DSrcSyncs;
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++
++ psTACmd->as3DSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->as3DSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->as3DSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->as3DSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#else
++
++ psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++ psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#endif
++
++ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
++ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects;
++
++ PVR_ASSERT(((PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo)->ui32AllocSize >= (sizeof(SGXMKIF_HWDEVICE_SYNC_LIST) +
++ (sizeof(PVRSRV_DEVICE_SYNC_OBJECT) * ui32NumDstSyncs)));
++
++ psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n");
++ PDUMPMEM(IMG_NULL,
++ psHWDstSyncListMemInfo,
++ 0,
++ sizeof(SGXMKIF_HWDEVICE_SYNC_LIST),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++#endif
++
++ for (i=0; i<ui32NumDstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++
++ if (psSyncInfo)
++ {
++ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++ #if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ IMG_UINT32 ui32SyncOffset = offsetof(SGXMKIF_HWDEVICE_SYNC_LIST, asSyncData)
++ + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT));
++ IMG_UINT32 ui32WOpsOffset = ui32SyncOffset
++ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal);
++ IMG_UINT32 ui32ROpsOffset = ui32SyncOffset
++ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal);
++
++ PDUMPCOMMENT("HWDeviceSyncObject for RT: %i\r\n", i);
++
++ PDUMPMEM(IMG_NULL,
++ psHWDstSyncListMemInfo,
++ ui32SyncOffset,
++ sizeof(PVRSRV_DEVICE_SYNC_OBJECT),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify RT %d WOpPendingVal in HWDevSyncList\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psHWDstSyncListMemInfo,
++ ui32WOpsOffset,
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++
++ ui32ModifiedValue = 0;
++ PDUMPCOMMENT("Modify RT %d ROpsPendingVal in HWDevSyncList\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psHWDstSyncListMemInfo,
++ ui32ROpsOffset,
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++ #endif
++ }
++ else
++ {
++ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr.uiAddr = 0;
++
++ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = 0;
++ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = 0;
++ }
++ }
++ }
++
++
++
++
++ psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY;
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ PDUMPCOMMENT("Shared part of TA command\r\n");
++
++ PDUMPMEM(psTACmd,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_CMDTA_SHARED),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT TA-SRC ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT TA-SRC WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify TA SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify TA SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT TA-DST ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT TA-DST WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify TA DstSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify TA DstSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT 3D-SRC ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT 3D-SRC WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify 3D SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify 3D SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++#else
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++#endif
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psCCBKick->asTAStatusUpdate[i].ui32LastStatusUpdateDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++ }
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++ PDUMPMEM(&psCCBKick->as3DStatusUpdate[i].ui32LastStatusUpdateDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++ }
++ }
++#endif
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 0);
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ for (i=0; i < psCCBKick->ui32NumDstSyncObjects; i++)
++ {
++
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++
++ if (psSyncInfo)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++#endif
++ }
++ }
++ }
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++#else
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++#endif
++
++ return eError;
++ }
++ else if (PVRSRV_OK != eError)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++
++#if defined(NO_HARDWARE)
++
++
++
++ if (psCCBKick->hTA3DSyncInfo)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++
++ if (psCCBKick->bTADependency)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->asTAStatusUpdate[i].hKernelMemInfo;
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
++ + (psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr.uiAddr
++ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++#endif
++ }
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++#else
++
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++#endif
++
++ if (psCCBKick->bTerminateOrAbort)
++ {
++ if (psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
++ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
++
++ for (i=0; i<psCCBKick->ui32NumDstSyncObjects; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++ if (psSyncInfo)
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal+1;
++ }
++ }
++
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->as3DStatusUpdate[i].hKernelMemInfo;
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
++ + (psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr.uiAddr
++ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++#endif
++ }
++ }
++#endif
++
++ return eError;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c
+new file mode 100644
+index 0000000..169ae20
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c
+@@ -0,0 +1,453 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgx_mkif_km.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++static PVRSRV_ERROR SGXAddTimer(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGX_TIMING_INFORMATION *psSGXTimingInfo,
++ IMG_HANDLE *phTimer)
++{
++ *phTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
++ 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
++ if(*phTimer == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXAddTimer : Failed to register timer callback function"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++#endif
++
++static PVRSRV_ERROR SGXUpdateTimingInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++ SGX_TIMING_INFORMATION sSGXTimingInfo = {0};
++#else
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++#endif
++ IMG_UINT32 ui32ActivePowManSampleRate;
++ SGX_TIMING_INFORMATION *psSGXTimingInfo;
++
++
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++ psSGXTimingInfo = &sSGXTimingInfo;
++ SysGetSGXTimingInformation(psSGXTimingInfo);
++#else
++ SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ psSGXTimingInfo = &psSGXDeviceMap->sTimingInfo;
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32OlduKernelFreq;
++
++ if (psDevInfo->hTimer != IMG_NULL)
++ {
++ ui32OlduKernelFreq = psDevInfo->ui32CoreClockSpeed / psDevInfo->ui32uKernelTimerClock;
++ if (ui32OlduKernelFreq != psSGXTimingInfo->ui32uKernelFreq)
++ {
++ IMG_HANDLE hNewTimer;
++
++ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &hNewTimer);
++ if (eError == PVRSRV_OK)
++ {
++ eError = OSRemoveTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXUpdateTimingInfo: Failed to remove timer"));
++ }
++ psDevInfo->hTimer = hNewTimer;
++ }
++ else
++ {
++
++ }
++ }
++ }
++ else
++ {
++ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate =
++ psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
++ }
++#endif
++
++
++ psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
++ psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
++
++
++ psDevInfo->psSGXHostCtl->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock;
++#if defined(PDUMP)
++ PDUMPCOMMENT("Host Control - Microkernel clock");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32uKernelTimerClock),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ if (psSGXTimingInfo->bEnableActivePM)
++ {
++ ui32ActivePowManSampleRate =
++ psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000;
++
++
++
++
++
++
++
++
++ ui32ActivePowManSampleRate += 1;
++ }
++ else
++ {
++ ui32ActivePowManSampleRate = 0;
++ }
++
++ psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate = ui32ActivePowManSampleRate;
++#if defined(PDUMP)
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32ActivePowManSampleRate),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++static IMG_VOID SGXStartTimer(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ #if defined(SUPPORT_HW_RECOVERY)
++ PVRSRV_ERROR eError;
++
++ eError = OSEnableTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXStartTimer : Failed to enable host timer"));
++ }
++ #else
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ #endif
++}
++
++
++static IMG_VOID SGXPollForClockGating (PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32Register,
++ IMG_UINT32 ui32RegisterValue,
++ IMG_CHAR *pszComment)
++{
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ PVR_UNREFERENCED_PARAMETER(ui32Register);
++ PVR_UNREFERENCED_PARAMETER(ui32RegisterValue);
++ PVR_UNREFERENCED_PARAMETER(pszComment);
++
++ #if !defined(NO_HARDWARE)
++ PVR_ASSERT(psDevInfo != IMG_NULL);
++
++
++ if (PollForValueKM((IMG_UINT32 *)psDevInfo->pvRegsBaseKM + (ui32Register >> 2),
++ 0,
++ ui32RegisterValue,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: %s failed.", pszComment));
++ }
++ #endif
++
++ PDUMPCOMMENT(pszComment);
++ PDUMPREGPOL(ui32Register, 0, ui32RegisterValue);
++}
++
++
++PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ IMG_UINT32 ui32PowerCmd, ui32CompleteStatus;
++ SGXMKIF_COMMAND sCommand = {0};
++ IMG_UINT32 ui32Core;
++
++ #if defined(SUPPORT_HW_RECOVERY)
++
++ eError = OSDisableTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer"));
++ return eError;
++ }
++ #endif
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF;
++ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE;
++ PDUMPCOMMENT("SGX power off request");
++ }
++ else
++ {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_IDLE;
++ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE;
++ PDUMPCOMMENT("SGX idle request");
++ }
++
++ sCommand.ui32Data[1] = ui32PowerCmd;
++
++ eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to submit power down command"));
++ return eError;
++ }
++
++
++ #if !defined(NO_HARDWARE)
++ if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus,
++ ui32CompleteStatus,
++ ui32CompleteStatus,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for SGX ukernel power transition failed."));
++ PVR_DBG_BREAK;
++ }
++ #endif
++
++ #if defined(PDUMP)
++ PDUMPCOMMENT("TA/3D CCB Control - Wait for power event on uKernel.");
++ PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
++ ui32CompleteStatus,
++ ui32CompleteStatus,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ for (ui32Core = 0; ui32Core < SGX_FEATURE_MP_CORE_COUNT; ui32Core++)
++ {
++
++ SGXPollForClockGating(psDevInfo,
++ SGX_MP_CORE_SELECT(psDevInfo->ui32ClkGateStatusReg, ui32Core),
++ psDevInfo->ui32ClkGateStatusMask,
++ "Wait for SGX clock gating");
++ }
++
++ #if defined(SGX_FEATURE_MP)
++
++ SGXPollForClockGating(psDevInfo,
++ psDevInfo->ui32MasterClkGateStatusReg,
++ psDevInfo->ui32MasterClkGateStatusMask,
++ "Wait for SGX master clock gating");
++ #endif
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++
++ eError = SGXDeinitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: SGXDeinitialise failed: %lu", eError));
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32PowerStatus = 0;
++ #if defined(PDUMP)
++ PDUMPCOMMENT("TA/3D CCB Control - Reset power status");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++ eError = SGXUpdateTimingInfo(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed"));
++ return eError;
++ }
++
++ eError = SGXInitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
++ return eError;
++ }
++ }
++ else
++ {
++
++
++ SGXMKIF_COMMAND sCommand = {0};
++
++ sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME;
++ eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState failed to schedule CCB command: %lu", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ SGXStartTimer(psDevInfo);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPreClockSpeedChange (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
++ {
++ if (bIdleDevice)
++ {
++
++ PDUMPSUSPEND();
++
++ eError = SGXPrePowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_IDLE,
++ PVRSRV_DEV_POWER_STATE_ON);
++
++ if (eError != PVRSRV_OK)
++ {
++ PDUMPRESUME();
++ return eError;
++ }
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,"SGXPreClockSpeedChange: SGX clock speed was %luHz",
++ psDevInfo->ui32CoreClockSpeed));
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPostClockSpeedChange (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ IMG_UINT32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed;
++
++ PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
++ {
++ PVRSRV_ERROR eError;
++
++ eError = SGXUpdateTimingInfo(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed"));
++ return eError;
++ }
++
++ if (bIdleDevice)
++ {
++ eError = SGXPostPowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_IDLE);
++
++ PDUMPRESUME();
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ else
++ {
++ SGXStartTimer(psDevInfo);
++ }
++
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,"SGXPostClockSpeedChange: SGX clock speed changed from %luHz to %luHz",
++ ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed));
++
++ return PVRSRV_OK;
++}
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c
+new file mode 100644
+index 0000000..5cf2519
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c
+@@ -0,0 +1,489 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++
++#include "pdump_km.h"
++
++
++static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_BOOL bResetBIF,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++ IMG_UINT32 ui32SoftResetRegVal;
++
++#if defined(SGX_FEATURE_MP)
++ ui32SoftResetRegVal =
++ EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK |
++ EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK;
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ ui32SoftResetRegVal |= EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK;
++#endif
++
++ if (bResetBIF)
++ {
++ ui32SoftResetRegVal |= EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32SoftResetRegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
++ }
++#endif
++
++ ui32SoftResetRegVal =
++
++ EUR_CR_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_SOFT_RESET_TA_RESET_MASK |
++ EUR_CR_SOFT_RESET_USE_RESET_MASK |
++ EUR_CR_SOFT_RESET_ISP_RESET_MASK |
++ EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_MTE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MTE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_ISP2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ISP2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_PDS_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PDS_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_PBE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PBE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_MADD_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MADD_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_ITR_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ITR_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TEX_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TEX_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_VDM_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_VDM_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK;
++#endif
++
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ if (bResetBIF)
++ {
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
++ }
++}
++
++
++static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++
++ OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++ if (bPDump)
++ {
++ PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
++#if defined(PDUMP)
++ PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags);
++#endif
++ }
++
++
++
++}
++
++
++static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++ IMG_UINT32 ui32RegVal;
++
++
++#if defined(EUR_CR_BIF_CTRL_INVAL)
++ ui32RegVal = EUR_CR_BIF_CTRL_INVAL_ALL_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL_INVAL, ui32RegVal, ui32PDUMPFlags);
++ }
++#else
++ ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ }
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ }
++#endif
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ {
++
++
++
++ if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
++ 0,
++ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed."));
++ PVR_DBG_BREAK;
++ }
++
++ if (bPDump)
++ {
++ PDUMPREGPOLWITHFLAGS(EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags);
++ }
++ }
++#endif
++}
++
++
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags)
++{
++ IMG_UINT32 ui32RegVal;
++#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK)
++ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK;
++#else
++ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;
++#endif
++
++#ifndef PDUMP
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ psDevInfo->ui32NumResets++;
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
++
++#if defined(FIX_HW_BRN_23944)
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ if (ui32RegVal & ui32BifFaultMask)
++ {
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++ }
++#endif
++
++
++ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags);
++#endif
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++#if defined(SGX_FEATURE_MP)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++#endif
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags);
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ {
++ IMG_UINT32 ui32DirList, ui32DirListReg;
++
++ for (ui32DirList = 1;
++ ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS;
++ ui32DirList++)
++ {
++ ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal);
++ PDUMPREGWITHFLAGS(ui32DirListReg, ui32RegVal, ui32PDUMPFlags);
++ }
++ }
++#endif
++
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++
++
++ ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
++ (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
++ (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags);
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++#if defined(SGX_FEATURE_MP)
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++ #error SGX_BYPASS_SYSTEM_CACHE not supported
++ #else
++ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK |
++ (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
++ PDUMPREG(EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
++
++ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
++ PDUMPREG(EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
++ #endif
++#else
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++
++ ui32RegVal = EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK;
++ #else
++ #if defined(FIX_HW_BRN_26620)
++ ui32RegVal = 0;
++ #else
++
++ ui32RegVal = EUR_CR_MNE_CR_CTRL_BYP_CC_MASK;
++ #endif
++ #endif
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MNE_CR_CTRL, ui32RegVal);
++ PDUMPREG(EUR_CR_MNE_CR_CTRL, ui32RegVal);
++#endif
++#endif
++
++
++
++
++
++
++ ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++
++ for (;;)
++ {
++ IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ IMG_DEV_VIRTADDR sBifFault;
++ IMG_UINT32 ui32PDIndex, ui32PTIndex;
++
++ if ((ui32BifIntStat & ui32BifFaultMask) == 0)
++ {
++ break;
++ }
++
++
++
++
++ sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
++ ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);
++
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr
++ >>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++ }
++
++
++
++
++ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
++
++ #if defined(SGX_FEATURE_2D_HARDWARE)
++
++ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
++ #endif
++
++ #if defined(FIX_HW_BRN_23410)
++
++ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
++ #endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++ #endif
++
++ {
++ IMG_UINT32 ui32EDMDirListReg;
++
++
++ #if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
++ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
++ #else
++
++ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
++ #endif
++
++#if defined(FIX_HW_BRN_28011)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ PDUMPPDREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ PDUMPPDREGWITHFLAGS(ui32EDMDirListReg, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++ }
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++
++ #if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0)
++ #error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment"
++ #endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
++#endif
++
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ PVR_DPF((PVR_DBG_MESSAGE,"Soft Reset of SGX"));
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++ ui32RegVal = 0;
++#if defined(SGX_FEATURE_MP)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++#endif
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+new file mode 100644
+index 0000000..f851b75
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+@@ -0,0 +1,543 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(TRANSFER_QUEUE)
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxinfo.h"
++#include "sysconfig.h"
++#include "regpaths.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge_km.h"
++#include "sgxinfokm.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick)
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++ SGXMKIF_COMMAND sCommand = {0};
++ SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_ERROR eError;
++
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psSharedTransferCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSharedTransferCmd->ui32TASyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui32TASyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++ else
++ {
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSharedTransferCmd->ui323DSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++ else
++ {
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++
++ psSharedTransferCmd->ui32SrcWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32SrcReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sSrcWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sSrcReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ }
++
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++
++ psSharedTransferCmd->ui32DstWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32DstReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sDstWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sDstReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ }
++
++
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++
++ if (psKick->ui32NumDstSync > 1 || psKick->ui32NumSrcSync > 1)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Transfer command doesn't support more than 1 sync object per src/dst\ndst: %d, src: %d",
++ psKick->ui32NumDstSync, psKick->ui32NumSrcSync));
++ }
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ PDUMPCOMMENT("Shared part of transfer command\r\n");
++ PDUMPMEM(psSharedTransferCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_TRANSFERCMD_SHARED),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ if((psKick->ui32NumSrcSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[0];
++
++ PDUMPCOMMENT("Hack src surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32SrcWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack src surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32SrcReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++ if((psKick->ui32NumDstSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = psKick->ahDstSyncInfo[0];
++
++ PDUMPCOMMENT("Hack dest surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32DstWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack dest surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32DstReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++
++
++ if((psKick->ui32NumSrcSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING)== 0UL))
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++ }
++
++ if((psKick->ui32NumDstSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags);
++
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++ }
++#endif
++ }
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ }
++ else if (PVRSRV_OK != eError)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++
++#if defined(NO_HARDWARE)
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE) == 0)
++ {
++ IMG_UINT32 i;
++
++
++ for(i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ for(i = 0; i < psKick->ui32NumDstSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++#endif
++
++ return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick)
++
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++ SGXMKIF_COMMAND sCommand = {0};
++ SGXMKIF_2DCMD_SHARED *ps2DCmd;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmit2DKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ ps2DCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++ OSMemSet(ps2DCmd, 0, sizeof(*ps2DCmd));
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ ps2DCmd->sTASyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ ps2DCmd->sTASyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ ps2DCmd->s3DSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ ps2DCmd->s3DSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++
++ ps2DCmd->sSrcSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ ps2DCmd->sSrcSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++
++ ps2DCmd->sDstSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ ps2DCmd->sDstSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++
++ PDUMPCOMMENT("Shared part of 2D command\r\n");
++ PDUMPMEM(ps2DCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_2DCMD_SHARED),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++
++ PDUMPCOMMENT("Hack src surface write op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32WriteOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack src surface read op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32ReadOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++
++ PDUMPCOMMENT("Hack dest surface write op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32WriteOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack dest surface read op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags);
++
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++ }
++#endif
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ }
++
++#if defined(NO_HARDWARE)
++
++ for(i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#endif
++
++ return eError;
++}
++#endif
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c
+new file mode 100644
+index 0000000..2c31d22
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c
+@@ -0,0 +1,928 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgx_mkif_km.h"
++#include "sysconfig.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++#ifdef __linux__
++#include <linux/tty.h>
++#else
++#include <stdio.h>
++#endif
++#include "ospm_power.h"
++
++#if defined(SYS_CUSTOM_POWERDOWN)
++PVRSRV_ERROR SysPowerDownMISR(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32CallerID);
++#endif
++
++
++
++IMG_VOID SGXPostActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32NumActivePowerEvents++;
++
++ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
++ {
++
++
++
++ if (ui32CallerID == ISR_ID)
++ {
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ }
++ else
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++ }
++}
++
++
++IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0) &&
++ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0))
++ {
++
++ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++
++
++ PDUMPSUSPEND();
++
++#if defined(SYS_CUSTOM_POWERDOWN)
++
++
++
++ eError = SysPowerDownMISR(psDeviceNode, ui32CallerID);
++#else
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ui32CallerID, IMG_FALSE);
++ if (eError == PVRSRV_OK)
++ {
++ SGXPostActivePowerEvent(psDeviceNode, ui32CallerID);
++ }
++#endif
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++
++
++ psSGXHostCtl->ui32InterruptClearFlags &= ~PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++ eError = PVRSRV_OK;
++ }
++
++
++ PDUMPRESUME();
++ }
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%lu", eError));
++ }
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGXAcquireKernelCCBSlot)
++#endif
++static INLINE SGXMKIF_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB)
++{
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset)
++ {
++ return &psCCB->psCommands[*psCCB->pui32WriteOffset];
++ }
++
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ return IMG_NULL;
++}
++
++PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGXMKIF_CMD_TYPE eCmdType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_SGX_CCB_INFO *psKernelCCB;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SGXMKIF_COMMAND *psSGXCommand;
++#if defined(PDUMP)
++ IMG_VOID *pvDumpCommand;
++ IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32CallerID);
++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++#endif
++
++ psKernelCCB = psDevInfo->psKernelCCBInfo;
++
++ psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
++
++
++ if(!psSGXCommand)
++ {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++
++
++ psCommandData->ui32CacheControl = psDevInfo->ui32CacheControl;
++
++#if defined(PDUMP)
++
++ psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
++#endif
++
++
++ psDevInfo->ui32CacheControl = 0;
++
++
++ *psSGXCommand = *psCommandData;
++
++ if (eCmdType >= SGXMKIF_CMD_MAX)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM: Unknown command type: %d", eCmdType)) ;
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++ }
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ if (psSysData->bFlushAll)
++ {
++ OSFlushCPUCacheKM();
++
++ psSysData->bFlushAll = IMG_FALSE;
++ }
++ }
++#endif
++
++ psSGXCommand->ui32ServiceAddress = psDevInfo->aui32HostKickAddr[eCmdType];
++
++#if defined(PDUMP)
++ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE))
++ {
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for space in the Kernel CCB\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff,
++ 0xff,
++ PDUMP_POLL_OPERATOR_NOTEQUAL,
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB command\r\n");
++ pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(SGXMKIF_COMMAND)));
++
++ PDUMPMEM(pvDumpCommand,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND),
++ sizeof(SGXMKIF_COMMAND),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++
++ PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND) +
++ offsetof(SGXMKIF_COMMAND, ui32CacheControl),
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++ if (PDumpIsCaptureFrameKM()
++ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++
++ psDevInfo->sPDContext.ui32CacheControl = 0;
++ }
++ }
++#endif
++
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++
++ eError = PollForValueKM (psKernelCCB->pui32ReadOffset,
++ *psKernelCCB->pui32WriteOffset,
++ 0xFF,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT);
++ if (eError != PVRSRV_OK)
++ {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++#endif
++
++
++
++ *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255;
++
++#if defined(PDUMP)
++ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE))
++ {
++ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for previous Kernel CCB CMD to be read\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff),
++ 0xFF,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++ #endif
++
++ if (PDumpIsCaptureFrameKM()
++ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = (psDevInfo->ui32KernelCCBEventKickerDumpVal + 1) & 0xFF;
++ }
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB write offset\r\n");
++ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++ psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB event kicker\r\n");
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo,
++ 0,
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kick the SGX microkernel\r\n");
++ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK, ui32PDumpFlags);
++ #else
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK, ui32PDumpFlags);
++ #endif
++ }
++#endif
++
++ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
++ EUR_CR_EVENT_KICK2_NOW_MASK);
++#else
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++#endif
++
++#if defined(NO_HARDWARE)
++
++ *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255;
++#endif
++
++Exit:
++ return eError;
++}
++
++
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGXMKIF_CMD_TYPE eCmdType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++
++ PDUMPSUSPEND();
++
++ ospm_power_using_hw_begin(OSPM_GRAPHICS_ISLAND, OSPM_UHB_IGNORE_POWER_OFF);
++
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_ON,
++ ui32CallerID,
++ IMG_TRUE);
++
++ PDUMPRESUME();
++
++ if (eError == PVRSRV_OK)
++ {
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
++ }
++ else
++ {
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++ if (ui32CallerID == ISR_ID)
++ {
++
++
++
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ eError = PVRSRV_OK;
++ }
++ else
++ {
++
++
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - "
++ "ui32CallerID:%ld eError:%lu", ui32CallerID, eError));
++ }
++
++ ospm_power_using_hw_end(OSPM_GRAPHICS_ISLAND);
++ return eError;
++ }
++
++ eError = SGXScheduleCCBCommand(psDevInfo, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags);
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++ ospm_power_using_hw_end(OSPM_GRAPHICS_ISLAND);
++
++ if (ui32CallerID != ISR_ID)
++ {
++
++
++
++ SGXTestActivePowerEvent(psDeviceNode, ui32CallerID);
++ }
++
++ return eError;
++}
++
++
++PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32PowerStatus;
++ SGXMKIF_COMMAND sCommand = {0};
++
++ ui32PowerStatus = psHostCtl->ui32PowerStatus;
++ if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++ {
++
++ return PVRSRV_OK;
++ }
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueuesKM failed to schedule CCB command: %lu", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
++ psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
++
++
++ psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle =
++ (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ IMG_UINT32 ui32CleanupType)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo = psDeviceNode->pvDevice;
++ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psSGXDevInfo->psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
++
++ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++ {
++
++ }
++ else
++ {
++ SGXMKIF_COMMAND sCommand = {0};
++
++	PDUMPCOMMENTWITHFLAGS(0, "Request ukernel resource clean-up");
++ sCommand.ui32Data[0] = ui32CleanupType;
++ sCommand.ui32Data[1] = (psHWDataDevVAddr == IMG_NULL) ? 0 : psHWDataDevVAddr->uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Failed to submit clean-up command"));
++ PVR_DBG_BREAK;
++ }
++
++
++ #if !defined(NO_HARDWARE)
++ if(PollForValueKM(&psSGXHostCtl->ui32CleanupStatus,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up failed"));
++ PVR_DBG_BREAK;
++ }
++ #endif
++
++ #if defined(PDUMP)
++
++ PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for clean-up request to complete");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus),
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++ #endif
++
++ psSGXHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE);
++ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++ }
++}
++
++
++typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_RENDER_CONTEXT_CLEANUP;
++
++
++static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWRenderContextDevVAddr,
++ PVRSRV_CLEANUPCMD_RC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
++
++
++static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWTransferContextDevVAddr,
++ PVRSRV_CLEANUPCMD_TC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware Render Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ (IMG_VOID *)psCleanup,
++ 0,
++ &SGXCleanupHWRenderContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWRenderContext != IMG_NULL);
++
++ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++
++ if (psCleanup == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWRenderContextKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware Transfer Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ psCleanup,
++ 0,
++ &SGXCleanupHWTransferContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWTransferContext != IMG_NULL);
++
++ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
++
++ if (psCleanup == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWTransferContextKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_2D_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHW2DContextDevVAddr,
++ PVRSRV_CLEANUPCMD_2DC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware 2D Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHW2DContextDevVAddr = *psHW2DContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ psCleanup,
++ 0,
++ &SGXCleanupHW2DContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHW2DContext != IMG_NULL);
++
++ if (hHW2DContext == IMG_NULL)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext;
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++#endif
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DQuerySyncOpsComplete)
++#endif
++static INLINE
++IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_UINT32 ui32ReadOpsPending,
++ IMG_UINT32 ui32WriteOpsPending)
++{
++ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ return (IMG_BOOL)(
++ (psSyncData->ui32ReadOpsComplete >= ui32ReadOpsPending) &&
++ (psSyncData->ui32WriteOpsComplete >= ui32WriteOpsPending)
++ );
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_BOOL bWaitForComplete)
++{
++ IMG_UINT32 ui32ReadOpsPending, ui32WriteOpsPending;
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start"));
++
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Blits complete."));
++ return PVRSRV_OK;
++ }
++
++
++ if (!bWaitForComplete)
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending."));
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling."));
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++
++ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete."));
++ return PVRSRV_OK;
++ }
++
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
++
++#if defined(DEBUG)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: %p, Syncdata: %p", psSyncInfo, psSyncData));
++
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending));
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending));
++
++ }
++#endif
++
++ return PVRSRV_ERROR_TIMEOUT;
++}
++
++
++IMG_EXPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++ PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL);
++
++ SGXCleanupRequest(psDeviceNode,
++ &sHWRTDataSetDevVAddr,
++ PVRSRV_CLEANUPCMD_RT);
++}
++
++
++IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32TimeWraps,
++ IMG_UINT32 ui32Time)
++{
++#if defined(EUR_CR_TIMER)
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ PVR_UNREFERENCED_PARAMETER(ui32TimeWraps);
++ return ui32Time;
++#else
++ IMG_UINT64 ui64Clocks;
++ IMG_UINT32 ui32Clocksx16;
++
++ ui64Clocks = ((IMG_UINT64)ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) +
++ (psDevInfo->ui32uKernelTimerClock - (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK));
++ ui32Clocksx16 = (IMG_UINT32)(ui64Clocks / 16);
++
++ return ui32Clocksx16;
++#endif
++}
++
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h
+new file mode 100644
+index 0000000..bc4c053
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "perproc.h"
++#include "sgxinfokm.h"
++
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
++ ((sizeof(type) <= (psCCBMemInfo)->ui32AllocSize) && \
++ ((psCCBKick)->offset <= (psCCBMemInfo)->ui32AllocSize - sizeof(type)))
++
++#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
++ ((type *)(((IMG_CHAR *)(psCCBMemInfo)->pvLinAddrKM) + \
++ (psCCBKick)->offset))
++
++
++IMG_IMPORT
++IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32CallerID);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGXMKIF_CMD_TYPE eCommandType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags);
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGXMKIF_CMD_TYPE eCommandType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext);
++#endif
++
++IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32TimeWraps,
++ IMG_UINT32 ui32Time);
++
++IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ IMG_UINT32 ui32CleanupType);
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_data.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_data.h
+new file mode 100644
+index 0000000..3d41219
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_data.h
+@@ -0,0 +1,66 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _ENV_DATA_
++#define _ENV_DATA_
++
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++#include <linux/workqueue.h>
++#endif
++
++#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000
++#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000
++
++typedef struct _PVR_PCI_DEV_TAG
++{
++ struct pci_dev *psPCIDev;
++ HOST_PCI_INIT_FLAGS ePCIFlags;
++ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} PVR_PCI_DEV;
++
++typedef struct _ENV_DATA_TAG
++{
++ IMG_VOID *pvBridgeData;
++ struct pm_dev *psPowerDevice;
++ IMG_BOOL bLISRInstalled;
++ IMG_BOOL bMISRInstalled;
++ IMG_UINT32 ui32IRQ;
++ IMG_VOID *pvISRCookie;
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct workqueue_struct *psWorkQueue;
++#endif
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct work_struct sMISRWork;
++ IMG_VOID *pvMISRData;
++#else
++ struct tasklet_struct sMISRTasklet;
++#endif
++} ENV_DATA;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_perproc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_perproc.h
+new file mode 100644
+index 0000000..a6e49db
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_perproc.h
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __ENV_PERPROC_H__
++#define __ENV_PERPROC_H__
++
++#include <linux/list.h>
++#include <linux/proc_fs.h>
++
++#include "services.h"
++#include "handle.h"
++
++typedef struct _PVRSRV_ENV_PER_PROCESS_DATA_
++{
++ IMG_HANDLE hBlockAlloc;
++ struct proc_dir_entry *psProcDir;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ struct list_head sDRMAuthListHead;
++#endif
++} PVRSRV_ENV_PER_PROCESS_DATA;
++
++IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++IMG_VOID LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++
++IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.c
+new file mode 100644
+index 0000000..33eca49
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.c
+@@ -0,0 +1,273 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <linux/sched.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++#include "lock.h"
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
++{
++ rwlock_t sLock;
++ struct list_head sList;
++
++} PVRSRV_LINUX_EVENT_OBJECT_LIST;
++
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
++{
++ atomic_t sTimeStamp;
++ IMG_UINT32 ui32TimeStampPrevious;
++#if defined(DEBUG)
++ IMG_UINT ui32Stats;
++#endif
++ wait_queue_head_t sWait;
++ struct list_head sList;
++ IMG_HANDLE hResItem;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++} PVRSRV_LINUX_EVENT_OBJECT;
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST),
++ (IMG_VOID **)&psEvenObjectList, IMG_NULL,
++ "Linux Event Object List") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psEvenObjectList->sList);
++
++ rwlock_init(&psEvenObjectList->sLock);
++
++ *phEventObjectList = (IMG_HANDLE *) psEvenObjectList;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
++{
++
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ;
++
++ if(psEvenObjectList)
++ {
++ if (!list_empty(&psEvenObjectList->sList))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEvenObjectList, IMG_NULL);
++
++ }
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject)
++{
++ if(hOSEventObjectList)
++ {
++ if(hOSEventObject)
++ {
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectListDelete: Event object waits: %lu", psLinuxEventObject->ui32Stats));
++#endif
++ if(ResManFreeResByPtr(psLinuxEventObject->hResItem) != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++ }
++ }
++ return PVRSRV_ERROR_GENERIC;
++
++}
++
++static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_del(&psLinuxEventObject->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDeleteCallback: Event object waits: %lu", psLinuxEventObject->ui32Stats));
++#endif
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
++ {
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: Couldn't find per-process data"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT),
++ (IMG_VOID **)&psLinuxEventObject, IMG_NULL,
++ "Linux Event Object") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psLinuxEventObject->sList);
++
++ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
++ psLinuxEventObject->ui32TimeStampPrevious = 0;
++
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats = 0;
++#endif
++ init_waitqueue_head(&psLinuxEventObject->sWait);
++
++ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
++
++ psLinuxEventObject->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_EVENT_OBJECT,
++ psLinuxEventObject,
++ 0,
++ &LinuxEventObjectDeleteCallback);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++ *phOSEventObject = psLinuxEventObject;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
++ struct list_head *psListEntry, *psListEntryTemp, *psList;
++ psList = &psLinuxEventObjectList->sList;
++
++ list_for_each_safe(psListEntry, psListEntryTemp, psList)
++ {
++
++ psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
++
++ atomic_inc(&psLinuxEventObject->sTimeStamp);
++ wake_up_interruptible(&psLinuxEventObject->sWait);
++ }
++
++ return PVRSRV_OK;
++
++}
++
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout)
++{
++ IMG_UINT32 ui32TimeStamp;
++ DEFINE_WAIT(sWait);
++
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++
++ IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);
++
++ do
++ {
++ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
++ ui32TimeStamp = atomic_read(&psLinuxEventObject->sTimeStamp);
++
++ if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
++ {
++ break;
++ }
++
++ LinuxUnLockMutex(&gPVRSRVLock);
++
++ ui32TimeOutJiffies = (IMG_UINT32)schedule_timeout((IMG_INT32)ui32TimeOutJiffies);
++
++ LinuxLockMutex(&gPVRSRVLock);
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats++;
++#endif
++
++
++ } while (ui32TimeOutJiffies);
++
++ finish_wait(&psLinuxEventObject->sWait, &sWait);
++
++ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
++
++ return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
++
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.h
+new file mode 100644
+index 0000000..d07bc97
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject);
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout);
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/linkage.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/linkage.h
+new file mode 100644
+index 0000000..1ec2696
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/linkage.h
+@@ -0,0 +1,61 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LINKAGE_H__
++#define __LINKAGE_H__
++
++#if !defined(SUPPORT_DRI_DRM)
++IMG_INT32 PVRSRV_BridgeDispatchKM(struct file *file, IMG_UINT cmd, IMG_UINT32 arg);
++#endif
++
++IMG_VOID PVRDPFInit(IMG_VOID);
++PVRSRV_ERROR PVROSFuncInit(IMG_VOID);
++IMG_VOID PVROSFuncDeInit(IMG_VOID);
++
++#ifdef DEBUG
++IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
++IMG_VOID PVRDebugSetLevel(IMG_UINT32 uDebugLevel);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el);
++#else
++IMG_INT PVRDebugProcGetLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el);
++#else
++IMG_INT PVRProcGetPowerLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++
++#endif
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/lock.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/lock.h
+new file mode 100644
+index 0000000..e0bf5ee
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/lock.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LOCK_H__
++#define __LOCK_H__
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.c
+new file mode 100644
+index 0000000..97a4750
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.c
+@@ -0,0 +1,2360 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <linux/highmem.h>
++#include <linux/sched.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "syscommon.h"
++#include "mutils.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++#include "lock.h"
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ #include "lists.h"
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++typedef enum {
++ DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ DEBUG_MEM_ALLOC_TYPE_IO,
++ DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ DEBUG_MEM_ALLOC_TYPE_COUNT
++}DEBUG_MEM_ALLOC_TYPE;
++
++typedef struct _DEBUG_MEM_ALLOC_REC
++{
++ DEBUG_MEM_ALLOC_TYPE eAllocType;
++ IMG_VOID *pvKey;
++ IMG_VOID *pvCpuVAddr;
++ IMG_UINT32 ulCpuPAddr;
++ IMG_VOID *pvPrivateData;
++ IMG_UINT32 ui32Bytes;
++ pid_t pid;
++ IMG_CHAR *pszFileName;
++ IMG_UINT32 ui32Line;
++
++ struct _DEBUG_MEM_ALLOC_REC *psNext;
++ struct _DEBUG_MEM_ALLOC_REC **ppsThis;
++}DEBUG_MEM_ALLOC_REC;
++
++static IMPLEMENT_LIST_ANY_VA_2(DEBUG_MEM_ALLOC_REC, IMG_BOOL, IMG_FALSE)
++static IMPLEMENT_LIST_ANY_VA(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_FOR_EACH(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_INSERT(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_REMOVE(DEBUG_MEM_ALLOC_REC)
++
++
++static DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
++
++static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++
++static IMG_UINT32 g_SysRAMWaterMark;
++static IMG_UINT32 g_SysRAMHighWaterMark;
++
++static IMG_UINT32 g_IOMemWaterMark;
++static IMG_UINT32 g_IOMemHighWaterMark;
++
++static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ IMG_VOID *pvKey,
++ IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ulCpuPAddr,
++ IMG_VOID *pvPrivateData,
++ IMG_UINT32 ui32Bytes,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line);
++
++static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType);
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_SeqFileMemoryRecords =0;
++static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementMemoryRecords(struct seq_file * sfile, loff_t off);
++
++#else
++static off_t printMemoryRecords(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++/* Debug-list node tracking one live LinuxMemArea together with the HAP
++ * flags it was created with and the pid that created it. */
++typedef struct _DEBUG_LINUX_MEM_AREA_REC
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32Flags;
++ pid_t pid;
++
++ struct _DEBUG_LINUX_MEM_AREA_REC *psNext;
++ struct _DEBUG_LINUX_MEM_AREA_REC **ppsThis;
++}DEBUG_LINUX_MEM_AREA_REC;
++
++
++static IMPLEMENT_LIST_ANY_VA(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_FOR_EACH(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_INSERT(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_REMOVE(DEBUG_LINUX_MEM_AREA_REC)
++
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static PVRSRV_LINUX_MUTEX g_sDebugMutex;
++#endif
++
++static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
++static IMG_UINT32 g_LinuxMemAreaCount;
++static IMG_UINT32 g_LinuxMemAreaWaterMark;
++static IMG_UINT32 g_LinuxMemAreaHighWaterMark;
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_SeqFileMemArea=0;
++
++static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowMemArea(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off);
++
++#else
++static off_t printLinuxMemAreaRecords(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#if (defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS))
++static void ProcSeqStartstopDebugMutex(struct seq_file *sfile,IMG_BOOL start);
++#endif
++#endif
++
++static LinuxKMemCache *psLinuxMemAreaCache;
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++#endif
++
++static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID);
++static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea);
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags);
++static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea);
++static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea);
++#endif
++
++/* Module init: create the debug /proc entries ("mem_areas", "meminfo",
++ * depending on build flags) and the kmem cache used for LinuxMemArea
++ * headers. Returns PVRSRV_ERROR_OUT_OF_MEMORY on any failure. */
++PVRSRV_ERROR
++LinuxMMInit(IMG_VOID)
++{
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ LinuxInitMutex(&g_sDebugMutex);
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_SeqFileMemArea = CreateProcReadEntrySeq(
++ "mem_areas",
++ NULL,
++ ProcSeqNextMemArea,
++ ProcSeqShowMemArea,
++ ProcSeqOff2ElementMemArea,
++ ProcSeqStartstopDebugMutex
++ );
++ iStatus = !g_SeqFileMemArea ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("mem_areas", printLinuxMemAreaRecords);
++#endif
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_SeqFileMemoryRecords =CreateProcReadEntrySeq(
++ "meminfo",
++ NULL,
++ ProcSeqNextMemoryRecords,
++ ProcSeqShowMemoryRecords,
++ ProcSeqOff2ElementMemoryRecords,
++ ProcSeqStartstopDebugMutex
++ );
++
++ iStatus = !g_SeqFileMemoryRecords ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("meminfo", printMemoryRecords);
++#endif
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++
++ /* Cache for LinuxMemArea structs; freed again in LinuxMMCleanup(). */
++ psLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0);
++ if(!psLinuxMemAreaCache)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++/* Cleanup callback: any mem-area record still on the list at shutdown is
++ * a leak ("BUG!"); report it and force-free the underlying area. */
++IMG_VOID LinuxMMCleanup_MemAreas_ForEachCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%ld bytes",
++ __FUNCTION__,
++ psCurrentRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType),
++ psCurrentRecord->psLinuxMemArea->ui32ByteSize));
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++}
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++/* Cleanup callback: release a leaked allocation according to its recorded
++ * type. IO and ALLOC_PAGES carry no freeable CPU mapping here, so only
++ * their debug records are dropped. */
++IMG_VOID LinuxMMCleanup_MemRecords_ForEachVa(DEBUG_MEM_ALLOC_REC *psCurrentRecord)
++
++{
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
++ "type=%s "
++ "CpuVAddr=%p "
++ "CpuPAddr=0x%08lx, "
++ "allocated @ file=%s,line=%d",
++ __FUNCTION__,
++ DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType),
++ psCurrentRecord->pvCpuVAddr,
++ psCurrentRecord->ulCpuPAddr,
++ psCurrentRecord->pszFileName,
++ psCurrentRecord->ui32Line));
++ switch(psCurrentRecord->eAllocType)
++ {
++ case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
++ KFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
++ IOUnmapWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IO:
++ /* Nothing to unmap; just drop the record. */
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->pvKey, __FILE__, __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
++ VFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
++ /* Page list is owned elsewhere; just drop the record. */
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->pvKey, __FILE__, __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
++ KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr);
++ break;
++ default:
++ PVR_ASSERT(0);
++ }
++}
++#endif
++
++
++/* Module shutdown: report and reclaim leaked areas/allocations (debug
++ * builds), remove the /proc entries and destroy the kmem cache created
++ * by LinuxMMInit(). */
++IMG_VOID
++LinuxMMCleanup(IMG_VOID)
++{
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ if(g_LinuxMemAreaCount)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%ld bytes)",
++ __FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark));
++ }
++
++ List_DEBUG_LINUX_MEM_AREA_REC_ForEach(g_LinuxMemAreaRecords,
++ LinuxMMCleanup_MemAreas_ForEachCb);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_SeqFileMemArea );
++#else
++ RemoveProcEntry("mem_areas");
++#endif
++ }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++
++
++ List_DEBUG_MEM_ALLOC_REC_ForEach(g_MemoryRecords,
++ LinuxMMCleanup_MemRecords_ForEachVa);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_SeqFileMemoryRecords );
++#else
++ RemoveProcEntry("meminfo");
++#endif
++
++ }
++#endif
++
++ if(psLinuxMemAreaCache)
++ {
++ KMemCacheDestroyWrapper(psLinuxMemAreaCache);
++ psLinuxMemAreaCache=NULL;
++ }
++}
++
++
++/* kmalloc(GFP_KERNEL) wrapper; in debug builds records the allocation
++ * (keyed by the returned pointer) with the caller's file/line. Returns
++ * NULL on failure. */
++IMG_VOID *
++_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvRet;
++ pvRet = kmalloc(ui32ByteSize, GFP_KERNEL);
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvRet)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ pvRet,
++ pvRet,
++ 0,
++ NULL,
++ ui32ByteSize,
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ return pvRet;
++}
++
++
++/* kfree wrapper; in debug builds drops the matching KMALLOC record first. */
++IMG_VOID
++_KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ kfree(pvCpuVAddr);
++}
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++/* Record an allocation in the global debug list and update the per-type,
++ * system-RAM and IO-memory watermarks. pvKey identifies the allocation
++ * on later removal. Runs under g_sDebugMutex. */
++static IMG_VOID
++DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ IMG_VOID *pvKey,
++ IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ulCpuPAddr,
++ IMG_VOID *pvPrivateData,
++ IMG_UINT32 ui32Bytes,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
++ /* BUG FIX: psRecord was dereferenced without a NULL check; on OOM we
++ * now skip recording rather than oops. */
++ if(!psRecord)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate debug record", __FUNCTION__));
++ LinuxUnLockMutex(&g_sDebugMutex);
++ return;
++ }
++
++ psRecord->eAllocType = eAllocType;
++ psRecord->pvKey = pvKey;
++ psRecord->pvCpuVAddr = pvCpuVAddr;
++ psRecord->ulCpuPAddr = ulCpuPAddr;
++ psRecord->pvPrivateData = pvPrivateData;
++ psRecord->pid = current->pid;
++ psRecord->ui32Bytes = ui32Bytes;
++ psRecord->pszFileName = pszFileName;
++ psRecord->ui32Line = ui32Line;
++
++ List_DEBUG_MEM_ALLOC_REC_Insert(&g_MemoryRecords, psRecord);
++
++ /* Per-type watermark, tracking the high-water mark as well. */
++ g_WaterMarkData[eAllocType] += ui32Bytes;
++ if(g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType])
++ {
++ g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
++ }
++
++ /* Aggregate watermarks: system RAM vs IO memory. */
++ if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ g_SysRAMWaterMark += ui32Bytes;
++ if(g_SysRAMWaterMark > g_SysRAMHighWaterMark)
++ {
++ g_SysRAMHighWaterMark = g_SysRAMWaterMark;
++ }
++ }
++ else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++ {
++ g_IOMemWaterMark += ui32Bytes;
++ if(g_IOMemWaterMark > g_IOMemHighWaterMark)
++ {
++ g_IOMemHighWaterMark = g_IOMemWaterMark;
++ }
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++/* List-walk callback: if this record matches the (type, key) pair passed
++ * via va_list, roll back the watermarks, unlink and free it, and return
++ * IMG_TRUE to stop the walk. */
++IMG_BOOL DebugMemAllocRecordRemove_AnyVaCb(DEBUG_MEM_ALLOC_REC *psCurrentRecord, va_list va)
++{
++ DEBUG_MEM_ALLOC_TYPE eAllocType;
++ IMG_VOID *pvKey;
++
++ eAllocType = va_arg(va, DEBUG_MEM_ALLOC_TYPE);
++ pvKey = va_arg(va, IMG_VOID*);
++
++ if(psCurrentRecord->eAllocType == eAllocType
++ && psCurrentRecord->pvKey == pvKey)
++ {
++ eAllocType = psCurrentRecord->eAllocType;
++ g_WaterMarkData[eAllocType] -= psCurrentRecord->ui32Bytes;
++
++ /* Mirror of the aggregate accounting done in DebugMemAllocRecordAdd. */
++ if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ g_SysRAMWaterMark -= psCurrentRecord->ui32Bytes;
++ }
++ else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++ {
++ g_IOMemWaterMark -= psCurrentRecord->ui32Bytes;
++ }
++
++ List_DEBUG_MEM_ALLOC_REC_Remove(psCurrentRecord);
++ kfree(psCurrentRecord);
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ return IMG_FALSE;
++ }
++}
++
++
++/* Remove the record identified by (eAllocType, pvKey); logs an error with
++ * the caller's file/line if no such record exists. */
++static IMG_VOID
++DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++ LinuxLockMutex(&g_sDebugMutex);
++
++
++ if(!List_DEBUG_MEM_ALLOC_REC_IMG_BOOL_Any_va(g_MemoryRecords,
++ DebugMemAllocRecordRemove_AnyVaCb,
++ eAllocType,
++ pvKey))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with pvKey=%p (called from %s, line %d\n",
++ __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), pvKey,
++ pszFileName, ui32Line));
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++/* Map a DEBUG_MEM_ALLOC_TYPE to a printable name. Table order must match
++ * the enum declaration. */
++static IMG_CHAR *
++DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType)
++{
++ /* static: build the table once instead of on every call. */
++ static IMG_CHAR *apszDebugMemoryRecordTypes[] = {
++ "KMALLOC",
++ "VMALLOC",
++ "ALLOC_PAGES",
++ "IOREMAP",
++ "IO",
++ "KMEM_CACHE_ALLOC"
++ };
++ return apszDebugMemoryRecordTypes[eAllocType];
++}
++#endif
++
++
++
++/* __vmalloc wrapper selecting page protection from the HAP cache-type
++ * bits (cached / write-combined / uncached). Rejects unknown cache-type
++ * flags with NULL. Debug builds record the page-aligned size. */
++IMG_VOID *
++_VMallocWrapper(IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AllocFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ pgprot_t PGProtFlags;
++ IMG_VOID *pvRet;
++
++ switch(ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++ PGProtFlags = PAGE_KERNEL;
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ PGProtFlags = PGPROT_WC(PAGE_KERNEL);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ PGProtFlags = PGPROT_UC(PAGE_KERNEL);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "VMAllocWrapper: unknown mapping flags=0x%08lx",
++ ui32AllocFlags));
++ dump_stack();
++ return NULL;
++ }
++
++
++ pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvRet)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ pvRet,
++ pvRet,
++ 0,
++ NULL,
++ PAGE_ALIGN(ui32Bytes),
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvRet;
++}
++
++
++/* vfree wrapper; in debug builds drops the matching VMALLOC record first. */
++IMG_VOID
++_VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ vfree(pvCpuVAddr);
++}
++
++
++/* Allocate a LinuxMemArea backed by vmalloc memory. On pre-2.6.15 kernels
++ * the pages are additionally marked reserved (required for mmap there).
++ * Returns NULL if either the header or the vmalloc allocation fails. */
++LinuxMemArea *
++NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_VOID *pvCpuVAddr;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ goto failed;
++ }
++
++ pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
++ if(!pvCpuVAddr)
++ {
++ goto failed;
++ }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++ ReservePages(pvCpuVAddr, ui32Bytes);
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
++ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed:
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__));
++ if(psLinuxMemArea)
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++}
++
++
++/* Free a VMALLOC-type LinuxMemArea: drop the debug record, un-reserve the
++ * pages on old kernels, vfree the mapping and release the header. */
++IMG_VOID
++FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
++ PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++ UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress,
++ psLinuxMemArea->ui32ByteSize);
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p",
++ __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress));
++ VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++/* Pre-2.6.15 only: mark every page of a vmalloc range reserved so it can
++ * be mmap'd to user space. */
++static IMG_VOID
++ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++ IMG_VOID *pvPage;
++ IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++ for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ SetPageReserved(vmalloc_to_page(pvPage));
++#else
++ mem_map_reserve(vmalloc_to_page(pvPage));
++#endif
++ }
++}
++
++
++/* Inverse of ReservePages: clear the reserved bit on every page of the
++ * range before it is vfreed. */
++static IMG_VOID
++UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++ IMG_VOID *pvPage;
++ IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++ for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ ClearPageReserved(vmalloc_to_page(pvPage));
++#else
++ mem_map_unreserve(vmalloc_to_page(pvPage));
++#endif
++ }
++}
++
++
++/* ioremap wrapper selecting the ioremap variant from the HAP cache-type
++ * bits; unknown flags yield NULL. Debug builds record the mapping keyed
++ * by the returned cookie. */
++IMG_VOID *
++_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvIORemapCookie;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP_WC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP_UC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags"));
++ return NULL;
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvIORemapCookie)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ pvIORemapCookie,
++ pvIORemapCookie,
++ BasePAddr.uiAddr,
++ NULL,
++ ui32Bytes,
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvIORemapCookie;
++}
++
++
++/* iounmap wrapper; in debug builds drops the matching IOREMAP record. */
++IMG_VOID
++_IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ iounmap(pvIORemapCookie);
++}
++
++
++/* Allocate a LinuxMemArea describing an ioremap'd physical range.
++ * Returns NULL if the header allocation or the ioremap fails. */
++LinuxMemArea *
++NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_VOID *pvIORemapCookie;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);
++ if(!pvIORemapCookie)
++ {
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
++ psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
++ psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++/* Free an IOREMAP-type LinuxMemArea: iounmap the cookie and release the
++ * header (plus debug records where enabled). */
++IMG_VOID
++FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++/* Decide whether externally supplied pages can be handled as one
++ * contiguous run. Pass 1: if every page is pfn_valid (kernel-managed RAM)
++ * return IMG_FALSE — the page array can be used as-is. Pass 2 (reached
++ * only when some page is not pfn_valid): for a non-contig array, verify
++ * the addresses are in fact consecutive; otherwise report IMG_TRUE. */
++static IMG_BOOL
++TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR *psSysPhysAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig)
++{
++ IMG_UINT32 ui32;
++ IMG_UINT32 ui32AddrChk;
++ IMG_UINT32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
++
++ /* Walk each page address (stepping by PAGE_SIZE when contiguous,
++ * otherwise indexing the array) and stop at the first invalid pfn. */
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk = (bPhysContig) ? (ui32AddrChk + PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr)
++ {
++ if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk)))
++ {
++ break;
++ }
++ }
++ if (ui32 == ui32NumPages)
++ {
++ return IMG_FALSE;
++ }
++
++ if (!bPhysContig)
++ {
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk += PAGE_SIZE)
++ {
++ if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk)
++ {
++ return IMG_FALSE;
++ }
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++/* Wrap externally-owned memory (kernel virtual address + physical page
++ * list) in a LinuxMemArea. Stores either a single base address or the
++ * caller's address array depending on contiguity; the caller keeps
++ * ownership of pBasePAddr in the non-contiguous case. */
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
++ psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
++ psLinuxMemArea->uData.sExternalKV.bPhysContig = (IMG_BOOL)(bPhysContig || TreatExternalPagesAsContiguous(pBasePAddr, ui32Bytes, bPhysContig));
++
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
++ {
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr = *pBasePAddr;
++ }
++ else
++ {
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr = pBasePAddr;
++ }
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++/* Free an EXTERNAL_KV-type header only; the wrapped memory itself is
++ * owned by the external provider and is not released here. */
++IMG_VOID
++FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++/* Wrap a raw physical IO range (no CPU mapping is created) in a
++ * LinuxMemArea. The debug record is keyed by the physical address. */
++LinuxMemArea *
++NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
++ psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
++ (IMG_VOID *)BasePAddr.uiAddr,
++ 0,
++ BasePAddr.uiAddr,
++ NULL,
++ ui32Bytes,
++ "unknown",
++ 0
++ );
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++/* Free an IO-type header; there is no mapping to tear down, only debug
++ * records and the header itself. */
++IMG_VOID
++FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
++ (IMG_VOID *)psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr, __FILE__, __LINE__);
++#endif
++
++
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++/* Allocate a LinuxMemArea backed by individually allocated order-0 pages
++ * (non-contiguous). On failure, pages already obtained are freed in
++ * reverse order. Pre-2.6.15 kernels also mark each page reserved. */
++LinuxMemArea *
++NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32PageCount;
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ IMG_INT32 i;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ goto failed_area_alloc;
++ }
++
++ ui32PageCount = RANGE_TO_PAGES(ui32Bytes);
++ eError = OSAllocMem(0, sizeof(*pvPageList) * ui32PageCount, (IMG_VOID **)&pvPageList, &hBlockPageList,
++ "Array of pages");
++ if(eError != PVRSRV_OK)
++ {
++ goto failed_page_list_alloc;
++ }
++
++ for(i=0; i<(IMG_INT32)ui32PageCount; i++)
++ {
++ pvPageList[i] = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
++ if(!pvPageList[i])
++ {
++ goto failed_alloc_pages;
++ }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ SetPageReserved(pvPageList[i]);
++#else
++ mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ pvPageList,
++ 0,
++ 0,
++ NULL,
++ PAGE_ALIGN(ui32Bytes),
++ "unknown",
++ 0
++ );
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
++ psLinuxMemArea->uData.sPageList.pvPageList = pvPageList;
++ psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed_alloc_pages:
++ /* i points at the failed slot; free only the pages before it. */
++ for(i--; i >= 0; i--)
++ {
++ __free_pages(pvPageList[i], 0);
++ }
++ (IMG_VOID) OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList, hBlockPageList);
++ psLinuxMemArea->uData.sPageList.pvPageList = IMG_NULL;
++failed_page_list_alloc:
++ LinuxMemAreaStructFree(psLinuxMemArea);
++failed_area_alloc:
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__));
++
++ return NULL;
++}
++
++
++/* Free an ALLOC_PAGES-type LinuxMemArea: drop debug records, clear the
++ * reserved bit on old kernels, free every page, release the page-list
++ * array and finally the header. */
++IMG_VOID
++FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ IMG_UINT32 ui32PageCount;
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ IMG_INT32 i;
++
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ ui32PageCount = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
++ pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++ hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList, __FILE__, __LINE__);
++#endif
++
++ for(i=0;i<(IMG_INT32)ui32PageCount;i++)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ ClearPageReserved(pvPageList[i]);
++#else
++ /* BUG FIX: this path called mem_map_reserve() on the free path;
++ * it must un-reserve, mirroring NewAllocPagesLinuxMemArea. */
++ mem_map_unreserve(pvPageList[i]);
++#endif
++#endif
++ __free_pages(pvPageList[i], 0);
++ }
++
++ (IMG_VOID) OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList, hBlockPageList);
++ psLinuxMemArea->uData.sPageList.pvPageList = IMG_NULL;
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++/* Resolve a byte offset within a mem area to its struct page. Supported
++ * for ALLOC_PAGES (index into the page list), VMALLOC (vmalloc_to_page)
++ * and SUB_ALLOC (recurse into the parent with the combined offset);
++ * other area types return NULL with an error message. */
++struct page*
++LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset)
++{
++ IMG_UINT32 ui32PageIndex;
++ IMG_CHAR *pui8Addr;
++
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ /* PHYS_TO_PFN here converts a byte offset to a page index. */
++ ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ return psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++
++ case LINUX_MEM_AREA_VMALLOC:
++ pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ pui8Addr += ui32ByteOffset;
++ return vmalloc_to_page(pui8Addr);
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++ + ui32ByteOffset);
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Unsupported request for struct page from LinuxMemArea with type=%s",
++ __FUNCTION__, LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType)));
++ return NULL;
++ }
++}
++
++
++/* kmem_cache_create wrapper; debug-slab builds add poisoning/red-zoning.
++ * Kernels <= 2.6.22 take an extra (NULL) destructor argument. */
++LinuxKMemCache *
++KMemCacheCreateWrapper(IMG_CHAR *pszName,
++ size_t Size,
++ size_t Align,
++ IMG_UINT32 ui32Flags)
++{
++#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
++ ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
++#endif
++ return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ , NULL
++#endif
++ );
++}
++
++
++/* Thin wrapper over kmem_cache_destroy. */
++IMG_VOID
++KMemCacheDestroyWrapper(LinuxKMemCache *psCache)
++{
++ kmem_cache_destroy(psCache);
++}
++
++
++/* kmem_cache_alloc wrapper; debug builds record the allocation (size taken
++ * from kmem_cache_size, the cache pointer kept as private data so the
++ * cleanup path can free back into the right cache). NOTE(review): unlike
++ * _KMallocWrapper, the record is added without checking pvRet for NULL. */
++IMG_VOID *
++_KMemCacheAllocWrapper(LinuxKMemCache *psCache,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++ gfp_t Flags,
++#else
++ IMG_INT Flags,
++#endif
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvRet;
++
++ pvRet = kmem_cache_alloc(psCache, Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ pvRet,
++ pvRet,
++ 0,
++ psCache,
++ kmem_cache_size(psCache),
++ pszFileName,
++ ui32Line
++ );
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvRet;
++}
++
++
++/* kmem_cache_free wrapper; debug builds drop the matching record first. */
++IMG_VOID
++_KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ kmem_cache_free(psCache, pvObject);
++}
++
++
++/* Stub: cache names are not exposed here; always returns "". */
++const IMG_CHAR *
++KMemCacheNameWrapper(LinuxKMemCache *psCache)
++{
++ PVR_UNREFERENCED_PARAMETER(psCache);
++
++
++ return "";
++}
++
++
++/* Create a sub-area referencing [ui32ByteOffset, +ui32Bytes) of an
++ * existing parent area. The sub-area inherits the parent's area flags;
++ * the range must lie entirely within the parent (asserted). */
++LinuxMemArea *
++NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_ASSERT((ui32ByteOffset+ui32Bytes) <= psParentLinuxMemArea->ui32ByteSize);
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
++ psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea;
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
++ psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
++ /* BUG FIX: the find can return NULL (record add may fail on OOM);
++ * fall back to the parent's area flags instead of dereferencing. */
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea,
++ psParentRecord ? psParentRecord->ui32Flags
++ : psParentLinuxMemArea->ui32AreaFlags);
++ }
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++/* Free a SUB_ALLOC-type header; the parent area and its backing memory
++ * are untouched. */
++IMG_VOID
++FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++/* Allocate a LinuxMemArea header from the dedicated kmem cache.
++ * (Removed a disabled "#if 0" debug variant that dumped the stack on
++ * every allocation — dead code.) */
++static LinuxMemArea *
++LinuxMemAreaStructAlloc(IMG_VOID)
++{
++ return KMemCacheAllocWrapper(psLinuxMemAreaCache, GFP_KERNEL);
++}
++
++
++/* Return a LinuxMemArea header to the kmem cache. */
++static IMG_VOID
++LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea)
++{
++ KMemCacheFreeWrapper(psLinuxMemAreaCache, psLinuxMemArea);
++
++
++}
++
++
++/* Dispatch to the type-specific free routine for any LinuxMemArea. */
++IMG_VOID
++LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_VMALLOC:
++ FreeVMallocLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ FreeAllocPagesLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IOREMAP:
++ FreeIORemapLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ FreeExternalKVLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IO:
++ FreeIOLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ FreeSubLinuxMemArea(psLinuxMemArea);
++ break;
++ default:
++ /* Fixed typo in the diagnostic ("are type" -> "area type"). */
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown area type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++}
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID
++DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
++ const IMG_CHAR *pi8FlagsString;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
++ if(g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark)
++ {
++ g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
++ }
++ }
++ g_LinuxMemAreaCount++;
++
++
++ psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL);
++ if(psNewRecord)
++ {
++
++ psNewRecord->psLinuxMemArea = psLinuxMemArea;
++ psNewRecord->ui32Flags = ui32Flags;
++ psNewRecord->pid = current->pid;
++
++ List_DEBUG_LINUX_MEM_AREA_REC_Insert(&g_LinuxMemAreaRecords, psNewRecord);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: failed to allocate linux memory area record.",
++ __FUNCTION__));
++ }
++
++
++ pi8FlagsString = HAPFlagsToString(ui32Flags);
++ if(strstr(pi8FlagsString, "UNKNOWN"))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Unexpected flags (0x%08lx) associated with psLinuxMemArea @ 0x%08lx",
++ __FUNCTION__,
++ ui32Flags,
++ psLinuxMemArea));
++
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++
++/* List-walk callback: return the record if it refers to the LinuxMemArea
++ * passed via va_list, otherwise NULL to continue the walk. */
++IMG_VOID* MatchLinuxMemArea_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord,
++ va_list va)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = va_arg(va, LinuxMemArea*);
++ if(psCurrentRecord->psLinuxMemArea == psLinuxMemArea)
++ {
++ return psCurrentRecord;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++
++static DEBUG_LINUX_MEM_AREA_REC *
++DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++ psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ MatchLinuxMemArea_AnyVaCb,
++ psLinuxMemArea);
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++
++ return psCurrentRecord;
++}
++
++
++static IMG_VOID
++DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
++ }
++ g_LinuxMemAreaCount--;
++
++
++ psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ MatchLinuxMemArea_AnyVaCb,
++ psLinuxMemArea);
++ if(psCurrentRecord)
++ {
++
++ List_DEBUG_LINUX_MEM_AREA_REC_Remove(psCurrentRecord);
++ kfree(psCurrentRecord);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n",
++ __FUNCTION__, psLinuxMemArea));
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++#endif
++
++
++IMG_VOID *
++LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_VMALLOC:
++ return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ case LINUX_MEM_AREA_IOREMAP:
++ return psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ IMG_CHAR *pAddr =
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++ if(!pAddr)
++ {
++ return NULL;
++ }
++ return pAddr + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
++ }
++ default:
++ return NULL;
++ }
++}
++
++
++IMG_CPU_PHYADDR
++LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = 0;
++
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ {
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
++ {
++ CpuPAddr = SysSysPAddrToCpuPAddr(psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr);
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ }
++ else
++ {
++ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ IMG_SYS_PHYADDR SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageIndex];
++
++ CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
++ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ }
++ break;
++ }
++ case LINUX_MEM_AREA_IO:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_VMALLOC:
++ {
++ IMG_CHAR *pCpuVAddr;
++ pCpuVAddr =
++ (IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ pCpuVAddr += ui32ByteOffset;
++ CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr);
++ break;
++ }
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ {
++ struct page *page;
++ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ page = psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++ CpuPAddr.uiAddr = page_to_phys(page);
++ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ break;
++ }
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++ + ui32ByteOffset);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++
++ PVR_ASSERT(CpuPAddr.uiAddr);
++ return CpuPAddr;
++}
++
++
++IMG_BOOL
++LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ case LINUX_MEM_AREA_IO:
++ return IMG_TRUE;
++
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.bPhysContig;
++
++ case LINUX_MEM_AREA_VMALLOC:
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return IMG_FALSE;
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++ return IMG_FALSE;
++}
++
++
++const IMG_CHAR *
++LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
++{
++
++ switch(eMemAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ return "LINUX_MEM_AREA_IOREMAP";
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return "LINUX_MEM_AREA_EXTERNAL_KV";
++ case LINUX_MEM_AREA_IO:
++ return "LINUX_MEM_AREA_IO";
++ case LINUX_MEM_AREA_VMALLOC:
++ return "LINUX_MEM_AREA_VMALLOC";
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return "LINUX_MEM_AREA_SUB_ALLOC";
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return "LINUX_MEM_AREA_ALLOC_PAGES";
++ default:
++ PVR_ASSERT(0);
++ }
++
++ return "";
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&g_sDebugMutex);
++ }
++ else
++ {
++ LinuxUnLockMutex(&g_sDebugMutex);
++ }
++}
++#endif
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++
++IMG_VOID* DecOffMemAreaRec_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++ return (void*)psRecord;
++}
++
++static void* ProcSeqOff2ElementMemArea(struct seq_file * sfile, loff_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++ return (void*)psRecord;
++}
++
++
++static void ProcSeqShowMemArea(struct seq_file *sfile,void* el)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord = (DEBUG_LINUX_MEM_AREA_REC*)el;
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ seq_printf( sfile,
++ "Number of Linux Memory Areas: %lu\n"
++ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "Pid",
++ "Flags"
++ );
++#else
++ seq_printf( sfile,
++ "<mem_areas_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n"
++ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n"
++ "</mem_areas_header>\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark
++ );
++#endif
++ return;
++ }
++
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++ "<linux_mem_area>\n"
++ "\t<pointer>%8p</pointer>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%u</pid>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</linux_mem_area>\n",
++#endif
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize,
++ psRecord->pid,
++ psRecord->ui32Flags,
++ HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++}
++
++#else
++
++static off_t
++printLinuxMemAreaRecords(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(!off)
++ {
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ Ret = printAppend(buffer, count, 0,
++ "Number of Linux Memory Areas: %lu\n"
++ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "Pid",
++ "Flags"
++ );
++#else
++ Ret = printAppend(buffer, count, 0,
++ "<mem_areas_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n"
++ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n"
++ "</mem_areas_header>\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark
++ );
++#endif
++ goto unlock_and_return;
++ }
++
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++
++ if(!psRecord)
++ {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++ "<linux_mem_area>\n"
++ "\t<pointer>%8p</pointer>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%u</pid>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</linux_mem_area>\n",
++#endif
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize,
++ psRecord->pid,
++ psRecord->ui32Flags,
++ HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++
++IMG_VOID* DecOffMemAllocRec_AnyVaCb(DEBUG_MEM_ALLOC_REC *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(!psRecord)
++ {
++ seq_printf( sfile, "</meminfo>\n");
++ }
++#endif
++
++ return (void*)psRecord;
++}
++
++static void* ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile, loff_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(!psRecord)
++ {
++ seq_printf( sfile, "</meminfo>\n");
++ }
++#endif
++
++ return (void*)psRecord;
++}
++
++static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord = (DEBUG_MEM_ALLOC_REC*)el;
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes reserved for \"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmem_cache_alloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile, "\n");
++
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ seq_printf( sfile, "\n");
++
++ seq_printf( sfile, "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++ "Type",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "PID",
++ "PrivateData",
++ "Filename:Line");
++
++#else
++
++
++ seq_printf( sfile, "<meminfo>\n<meminfo_header>\n");
++ seq_printf( sfile,
++ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile,
++ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile,
++ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile,
++ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile,
++ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile,
++ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile,
++ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile,
++ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile,"\n" );
++
++ seq_printf( sfile,
++ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++ g_SysRAMWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++ g_SysRAMHighWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++ g_IOMemWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++ g_IOMemHighWaterMark);
++
++ seq_printf( sfile, "</meminfo_header>\n");
++
++#endif
++ return;
++ }
++
++ if(psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ "NULL",
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++ else
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++}
++
++
++
++#else
++
++static off_t
++printMemoryRecords(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(!off)
++ {
++ if(count < 1000)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++
++ Ret = printAppend(buffer, count, 0, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes reserved for \"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmem_cache_alloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret, "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++ "Type",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "PID",
++ "PrivateData",
++ "Filename:Line");
++
++#else
++
++
++ Ret = printAppend(buffer, count, 0, "<meminfo>\n<meminfo_header>\n");
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "</meminfo_header>\n");
++
++#endif
++
++ goto unlock_and_return;
++ }
++
++ if(count < 1000)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++ if(!psRecord)
++ {
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(off == 0)
++ {
++ Ret = printAppend(buffer, count, 0, "</meminfo>\n");
++ goto unlock_and_return;
++ }
++#endif
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ "NULL",
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++ else
++ {
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++#endif
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
++const IMG_CHAR *
++HAPFlagsToString(IMG_UINT32 ui32Flags)
++{
++ static IMG_CHAR szFlags[50];
++ IMG_INT32 i32Pos = 0;
++ IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex;
++ IMG_CHAR *apszCacheTypes[] = {
++ "UNCACHED",
++ "CACHED",
++ "WRITECOMBINE",
++ "UNKNOWN"
++ };
++ IMG_CHAR *apszMapType[] = {
++ "KERNEL_ONLY",
++ "SINGLE_PROCESS",
++ "MULTI_PROCESS",
++ "FROM_EXISTING_PROCESS",
++ "NO_CPU_VIRTUAL",
++ "UNKNOWN"
++ };
++
++
++ if(ui32Flags & PVRSRV_HAP_UNCACHED){
++ ui32CacheTypeIndex=0;
++ }else if(ui32Flags & PVRSRV_HAP_CACHED){
++ ui32CacheTypeIndex=1;
++ }else if(ui32Flags & PVRSRV_HAP_WRITECOMBINE){
++ ui32CacheTypeIndex=2;
++ }else{
++ ui32CacheTypeIndex=3;
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%u)",
++ __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)));
++ }
++
++
++ if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY){
++ ui32MapTypeIndex = 0;
++ }else if(ui32Flags & PVRSRV_HAP_SINGLE_PROCESS){
++ ui32MapTypeIndex = 1;
++ }else if(ui32Flags & PVRSRV_HAP_MULTI_PROCESS){
++ ui32MapTypeIndex = 2;
++ }else if(ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS){
++ ui32MapTypeIndex = 3;
++ }else if(ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL){
++ ui32MapTypeIndex = 4;
++ }else{
++ ui32MapTypeIndex = 5;
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%u)",
++ __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK)));
++ }
++
++ i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
++ if (i32Pos <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: sprintf for cache type %u failed (%d)",
++ __FUNCTION__, ui32CacheTypeIndex, i32Pos));
++ szFlags[0] = 0;
++ }
++ else
++ {
++ sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]);
++ }
++
++ return szFlags;
++}
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.h
+new file mode 100644
+index 0000000..7d2da4e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.h
+@@ -0,0 +1,331 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MM_H__
++#define __IMG_LINUX_MM_H__
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include <asm/io.h>
++
++#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT)
++#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT)
++
++#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
++
++#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1))
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_pfn_range(vma, addr, pfn, size, prot)
++#else
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_pfn_range(vma, addr, pfn, size, prot)
++#else
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#define VM_INSERT_PAGE(vma, addr, page) vm_insert_page(vma, addr, page)
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++#define VM_INSERT_PAGE(vma, addr, page) remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, vma->vm_page_prot);
++#else
++#define VM_INSERT_PAGE(vma, addr, page) remap_page_range(vma, addr, page_to_phys(page), PAGE_SIZE, vma->vm_page_prot);
++#endif
++#endif
++
++static inline IMG_UINT32 VMallocToPhys(IMG_VOID *pCpuVAddr)
++{
++ return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + ADDR_TO_PAGE_OFFSET(pCpuVAddr));
++
++}
++
++typedef enum {
++ LINUX_MEM_AREA_IOREMAP,
++ LINUX_MEM_AREA_EXTERNAL_KV,
++ LINUX_MEM_AREA_IO,
++ LINUX_MEM_AREA_VMALLOC,
++ LINUX_MEM_AREA_ALLOC_PAGES,
++ LINUX_MEM_AREA_SUB_ALLOC,
++ LINUX_MEM_AREA_TYPE_COUNT
++}LINUX_MEM_AREA_TYPE;
++
++typedef struct _LinuxMemArea LinuxMemArea;
++
++
++struct _LinuxMemArea {
++ LINUX_MEM_AREA_TYPE eAreaType;
++ union _uData
++ {
++ struct _sIORemap
++ {
++
++ IMG_CPU_PHYADDR CPUPhysAddr;
++ IMG_VOID *pvIORemapCookie;
++ }sIORemap;
++ struct _sExternalKV
++ {
++
++ IMG_BOOL bPhysContig;
++ union {
++
++ IMG_SYS_PHYADDR SysPhysAddr;
++ IMG_SYS_PHYADDR *pSysPhysAddr;
++ } uPhysAddr;
++ IMG_VOID *pvExternalKV;
++ }sExternalKV;
++ struct _sIO
++ {
++
++ IMG_CPU_PHYADDR CPUPhysAddr;
++ }sIO;
++ struct _sVmalloc
++ {
++
++ IMG_VOID *pvVmallocAddress;
++ }sVmalloc;
++ struct _sPageList
++ {
++
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ }sPageList;
++ struct _sSubAlloc
++ {
++
++ LinuxMemArea *psParentLinuxMemArea;
++ IMG_UINT32 ui32ByteOffset;
++ }sSubAlloc;
++ }uData;
++
++ IMG_UINT32 ui32ByteSize;
++
++ IMG_UINT32 ui32AreaFlags;
++
++ IMG_BOOL bMMapRegistered;
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sMMapOffsetStructList;
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
++typedef kmem_cache_t LinuxKMemCache;
++#else
++typedef struct kmem_cache LinuxKMemCache;
++#endif
++
++
++PVRSRV_ERROR LinuxMMInit(IMG_VOID);
++
++
++IMG_VOID LinuxMMCleanup(IMG_VOID);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, __FILE__, __LINE__)
++#else
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, NULL, 0)
++#endif
++IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *szFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++IMG_VOID _KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
++#else
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0)
++#endif
++IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++IMG_VOID _VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
++#else
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
++#endif
++IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line);
++
++
++LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__)
++#else
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, NULL, 0)
++#endif
++IMG_VOID _IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR *pszName, size_t Size, size_t Align, IMG_UINT32 ui32Flags);
++
++
++IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache *psCache);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__)
++#else
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++#else
++IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, int Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__)
++#else
++#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0)
++#endif
++IMG_VOID _KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache *psCache);
++
++
++LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes);
++
++
++IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(LINUX_MEM_AREAS_DEBUG)
++IMG_VOID LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea);
++#else
++#define LinuxMemAreaRegister(X)
++#endif
++
++
++IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea);
++
++
++IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr)
++
++IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea);
++
++static inline LinuxMemArea *
++LinuxMemAreaRoot(LinuxMemArea *psLinuxMemArea)
++{
++ if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
++ }
++ else
++ {
++ return psLinuxMemArea;
++ }
++}
++
++
++static inline LINUX_MEM_AREA_TYPE
++LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType;
++}
++
++
++const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType);
++
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS)
++const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags);
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.c
+new file mode 100644
+index 0000000..1689bd4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.c
+@@ -0,0 +1,1148 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/shmparam.h>
++#include <asm/pgtable.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <linux/sched.h>
++#include <asm/current.h>
++#endif
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mmap.h"
++#include "mm.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "proc.h"
++#include "mutex.h"
++#include "handle.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "bridged_support.h"
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++
++#if !defined(PVR_SECURE_HANDLES)
++#error "The mmap code requires PVR_SECURE_HANDLES"
++#endif
++
++static PVRSRV_LINUX_MUTEX g_sMMapMutex;
++
++static LinuxKMemCache *g_psMemmapCache = NULL;
++static LIST_HEAD(g_sMMapAreaList);
++static LIST_HEAD(g_sMMapOffsetStructList);
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static IMG_UINT32 g_ui32RegisteredAreas = 0;
++static IMG_UINT32 g_ui32TotalByteSize = 0;
++#endif
++
++
++#if defined(PVR_PROC_USE_SEQ_FILE) && defined(DEBUG_LINUX_MMAP_AREAS)
++static struct proc_dir_entry *g_ProcMMap;
++#endif
++
++#define FIRST_PHYSICAL_PFN 0
++#define LAST_PHYSICAL_PFN 0x7fffffffUL
++#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
++#define LAST_SPECIAL_PFN 0xffffffffUL
++
++#define MAX_MMAP_HANDLE 0x7fffffffUL
++
++static inline IMG_BOOL
++PFNIsPhysical(IMG_UINT32 pfn)
++{
++
++ return ((pfn >= FIRST_PHYSICAL_PFN) && (pfn <= LAST_PHYSICAL_PFN)) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_BOOL
++PFNIsSpecial(IMG_UINT32 pfn)
++{
++
++ return ((pfn >= FIRST_SPECIAL_PFN) && (pfn <= LAST_SPECIAL_PFN)) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_HANDLE
++MMapOffsetToHandle(IMG_UINT32 pfn)
++{
++ if (PFNIsPhysical(pfn))
++ {
++ PVR_ASSERT(PFNIsPhysical(pfn));
++ return IMG_NULL;
++ }
++
++ return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN);
++}
++
++static inline IMG_UINT32
++HandleToMMapOffset(IMG_HANDLE hHandle)
++{
++ IMG_UINT32 ulHandle = (IMG_UINT32)hHandle;
++
++ if (PFNIsSpecial(ulHandle))
++ {
++ PVR_ASSERT(PFNIsSpecial(ulHandle));
++ return 0;
++ }
++
++ return ulHandle + FIRST_SPECIAL_PFN;
++}
++
++static inline IMG_BOOL
++LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea);
++}
++
++static inline IMG_UINT32
++GetCurrentThreadID(IMG_VOID)
++{
++
++ return (IMG_UINT32)current->pid;
++}
++
++static PKV_OFFSET_STRUCT
++CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
++ if(psOffsetStruct == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
++ return IMG_NULL;
++ }
++
++ psOffsetStruct->ui32MMapOffset = ui32Offset;
++
++ psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
++
++ psOffsetStruct->ui32Mapped = 0;
++
++ psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
++
++
++ psOffsetStruct->ui32TID = GetCurrentThreadID();
++
++ psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
++
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++ psOffsetStruct->ui32RefCount = 0;
++
++ psOffsetStruct->ui32UserVAddr = 0;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ psOffsetStruct->pszName = pszName;
++#endif
++
++ list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList);
++
++ return psOffsetStruct;
++}
++
++
++static IMG_VOID
++DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
++{
++ list_del(&psOffsetStruct->sAreaItem);
++
++ if (psOffsetStruct->bOnMMapList)
++ {
++ list_del(&psOffsetStruct->sMMapItem);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
++ "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0)));
++
++ KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
++}
++
++
++static inline IMG_VOID
++DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32ByteOffset)
++{
++ IMG_UINT32 ui32PageAlignmentOffset;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
++
++ *pui32ByteOffset = ui32PageAlignmentOffset;
++
++ *pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++}
++
++
++PVRSRV_ERROR
++PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_UINT32 *pui32MMapOffset,
++ IMG_UINT32 *pui32ByteOffset,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_HANDLE hOSMemHandle;
++ PVRSRV_ERROR eError;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed", __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ pui32RealByteSize,
++ pui32ByteOffset);
++
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psPerProc->ui32PID == psOffsetStruct->ui32PID)
++ {
++
++ PVR_ASSERT(*pui32RealByteSize == psOffsetStruct->ui32RealByteSize);
++
++ *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
++ *pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++
++ *pui32UserVAddr = 0;
++
++ if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea))
++ {
++ *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
++ PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
++ }
++ else
++ {
++ *pui32MMapOffset = HandleToMMapOffset(hMHandle);
++ PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
++ }
++
++ psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset, *pui32RealByteSize);
++ if (psOffsetStruct == IMG_NULL)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto exit_unlock;
++ }
++
++
++ list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
++
++ psOffsetStruct->bOnMMapList = IMG_TRUE;
++
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_BOOL *pbMUnmap,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_HANDLE hOSMemHandle;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed", __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psOffsetStruct->ui32PID == ui32PID)
++ {
++ if (psOffsetStruct->ui32RefCount == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area 0x%p", __FUNCTION__, psOffsetStruct, psLinuxMemArea));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++
++ psOffsetStruct->ui32RefCount--;
++
++ *pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->ui32UserVAddr != 0));
++
++ *pui32UserVAddr = (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
++ *pui32RealByteSize = (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle 0x%lx (memory area 0x%p)", __FUNCTION__, hMHandle, psLinuxMemArea));
++
++ eError = PVRSRV_ERROR_GENERIC;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++static inline PKV_OFFSET_STRUCT
++FindOffsetStructByOffset(IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_UINT32 ui32TID = GetCurrentThreadID();
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
++ {
++ if (ui32Offset == psOffsetStruct->ui32MMapOffset && ui32RealByteSize == psOffsetStruct->ui32RealByteSize && psOffsetStruct->ui32PID == ui32PID)
++ {
++
++ if (!PFNIsPhysical(ui32Offset) || psOffsetStruct->ui32TID == ui32TID)
++ {
++ return psOffsetStruct;
++ }
++ }
++ }
++
++ return IMG_NULL;
++}
++
++
++static IMG_BOOL
++DoMapToUser(LinuxMemArea *psLinuxMemArea,
++ struct vm_area_struct* ps_vma,
++ IMG_UINT32 ui32ByteOffset)
++{
++ IMG_UINT32 ui32ByteSize;
++
++ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea),
++ ps_vma,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset);
++ }
++
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++ PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);
++
++#if defined (__sparc__)
++
++#error "SPARC not supported"
++#endif
++
++ if (PFNIsPhysical(ps_vma->vm_pgoff))
++ {
++ IMG_INT result;
++
++ PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
++ PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) == ps_vma->vm_pgoff);
++
++
++ result = IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, ui32ByteSize, ps_vma->vm_page_prot);
++
++ if(result == 0)
++ {
++ return IMG_TRUE;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result));
++ }
++
++ {
++
++ IMG_UINT32 ulVMAPos;
++ IMG_UINT32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
++ IMG_UINT32 ui32PA;
++
++
++ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
++ {
++ IMG_UINT32 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++
++ if (!pfn_valid(pfn))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x%lx", __FUNCTION__, pfn));
++ return IMG_FALSE;
++ }
++ }
++
++
++ ulVMAPos = ps_vma->vm_start;
++ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
++ {
++ IMG_UINT32 pfn;
++ struct page *psPage;
++ IMG_INT result;
++
++ pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++ PVR_ASSERT(pfn_valid(pfn));
++
++ psPage = pfn_to_page(pfn);
++
++ result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
++ if(result != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: Error - VM_INSERT_PAGE failed (%d)", __FUNCTION__, result));
++ return IMG_FALSE;
++ }
++ ulVMAPos += PAGE_SIZE;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++
++static IMG_VOID
++MMapVOpenNoLock(struct vm_area_struct* ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != IMG_NULL)
++ psOffsetStruct->ui32Mapped++;
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++
++ if (psOffsetStruct->ui32Mapped > 1)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %lu)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped));
++ PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
++ }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %ld, ui32Mapped %d",
++ __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++ psOffsetStruct->ui32MMapOffset,
++ psOffsetStruct->ui32Mapped));
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ MOD_INC_USE_COUNT;
++#endif
++}
++
++
++static void
++MMapVOpen(struct vm_area_struct* ps_vma)
++{
++ LinuxLockMutex(&g_sMMapMutex);
++
++ MMapVOpenNoLock(ps_vma);
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++static IMG_VOID
++MMapVCloseNoLock(struct vm_area_struct* ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != IMG_NULL)
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s: psLinuxMemArea 0x%p, CpuVAddr 0x%p ui32MMapOffset %ld, ui32Mapped %d",
++ __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++ psOffsetStruct->ui32MMapOffset,
++ psOffsetStruct->ui32Mapped));
++#endif
++
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++ psOffsetStruct->ui32Mapped--;
++ if (psOffsetStruct->ui32Mapped == 0)
++ {
++ if (psOffsetStruct->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: psOffsetStruct 0x%p has non-zero reference count (ui32RefCount = %lu). User mode address of start of mapping: 0x%lx", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32RefCount, psOffsetStruct->ui32UserVAddr));
++ }
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ ps_vma->vm_private_data = NULL;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ MOD_DEC_USE_COUNT;
++#endif
++}
++
++static void
++MMapVClose(struct vm_area_struct* ps_vma)
++{
++ LinuxLockMutex(&g_sMMapMutex);
++
++ MMapVCloseNoLock(ps_vma);
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++static struct vm_operations_struct MMapIOOps =
++{
++ .open=MMapVOpen,
++ .close=MMapVClose
++};
++
++
++int
++PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ IMG_UINT32 ui32ByteSize;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ int iRetVal = 0;
++
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
++ " and ui32ByteSize %ld(0x%08lx)",
++ __FUNCTION__,
++ ps_vma->vm_pgoff,
++ ui32ByteSize, ui32ByteSize));
++
++ psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);
++ if (psOffsetStruct == IMG_NULL)
++ {
++#if defined(SUPPORT_DRI_DRM)
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++
++ return drm_mmap(pFile, ps_vma);
++#else
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Attempted to mmap unregistered area at vm_pgoff %ld",
++ __FUNCTION__, ps_vma->vm_pgoff));
++ iRetVal = -EINVAL;
++#endif
++ goto unlock_and_return;
++ }
++ list_del(&psOffsetStruct->sMMapItem);
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++
++ if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
++ ((ps_vma->vm_flags & VM_SHARED) == 0))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__));
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
++ __FUNCTION__, psOffsetStruct->psLinuxMemArea));
++
++ ps_vma->vm_flags |= VM_RESERVED;
++ ps_vma->vm_flags |= VM_IO;
++
++
++ ps_vma->vm_flags |= VM_DONTEXPAND;
++
++
++ ps_vma->vm_flags |= VM_DONTCOPY;
++
++ ps_vma->vm_private_data = (void *)psOffsetStruct;
++
++ switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++
++ ps_vma->vm_ops = &MMapIOOps;
++
++ if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0))
++ {
++ iRetVal = -EAGAIN;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0)
++
++ psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;
++
++
++ MMapVOpenNoLock(ps_vma);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
++ __FUNCTION__, ps_vma->vm_pgoff));
++
++unlock_and_return:
++ if (iRetVal != 0 && psOffsetStruct != IMG_NULL)
++ {
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return iRetVal;
++}
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopMMapRegistations(struct seq_file *sfile,IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&g_sMMapMutex);
++ }
++ else
++ {
++ LinuxUnLockMutex(&g_sMMapMutex);
++ }
++}
++
++
++static void* ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile, loff_t off)
++{
++ LinuxMemArea *psLinuxMemArea;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ PKV_OFFSET_STRUCT psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ off--;
++ if (off == 0)
++ {
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
++ return (void*)psOffsetStruct;
++ }
++ }
++ }
++ return (void*)0;
++}
++
++static void* ProcSeqNextMMapRegistrations(struct seq_file *sfile,void* el,loff_t off)
++{
++ return ProcSeqOff2ElementMMapRegistrations(sfile,off);
++}
++
++
++static void ProcSeqShowMMapRegistrations(struct seq_file *sfile,void* el)
++{
++ KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT*)el;
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32RealByteSize;
++ IMG_UINT32 ui32ByteOffset;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "Allocations registered for mmap: %lu\n"
++ "In total these areas correspond to %lu bytes\n"
++ "psLinuxMemArea "
++ "UserVAddr "
++ "KernelVAddr "
++ "CpuPAddr "
++ "MMapOffset "
++ "ByteLength "
++ "LinuxMemType "
++ "Pid Name Flags\n",
++#else
++ "<mmap_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<bytes>%lu</bytes>\n"
++ "</mmap_header>\n",
++#endif
++ g_ui32RegisteredAreas,
++ g_ui32TotalByteSize
++ );
++ return;
++ }
++
++ psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize,
++ &ui32ByteOffset);
++
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-8p %08lx %-8p %08lx %08lx %-8ld %-24s %-5lu %-8s %08lx(%s)\n",
++#else
++ "<mmap_record>\n"
++ "\t<pointer>%-8p</pointer>\n"
++ "\t<user_virtual>%-8lx</user_virtual>\n"
++ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<mmap_offset>%08lx</mmap_offset>\n"
++ "\t<bytes>%-8ld</bytes>\n"
++ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++ "\t<pid>%-5lu</pid>\n"
++ "\t<name>%-8s</name>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</mmap_record>\n",
++#endif
++ psLinuxMemArea,
++ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
++ psOffsetStruct->ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
++}
++
++#else
++
++static off_t
++PrintMMapRegistrations(IMG_CHAR *buffer, size_t size, off_t off)
++{
++ LinuxMemArea *psLinuxMemArea;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ if(!off)
++ {
++ Ret = printAppend(buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "Allocations registered for mmap: %lu\n"
++ "In total these areas correspond to %lu bytes\n"
++ "psLinuxMemArea "
++ "UserVAddr "
++ "KernelVAddr "
++ "CpuPAddr "
++ "MMapOffset "
++ "ByteLength "
++ "LinuxMemType "
++ "Pid Name Flags\n",
++#else
++ "<mmap_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<bytes>%lu</bytes>\n"
++ "</mmap_header>\n",
++#endif
++ g_ui32RegisteredAreas,
++ g_ui32TotalByteSize
++ );
++
++ goto unlock_and_return;
++ }
++
++ if (size < 135)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(off != 0);
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ PKV_OFFSET_STRUCT psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ off--;
++ if (off == 0)
++ {
++ IMG_UINT32 ui32RealByteSize;
++ IMG_UINT32 ui32ByteOffset;
++
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize,
++ &ui32ByteOffset);
++
++ Ret = printAppend (buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-8p %08lx %-8p %08lx %08lx %-8ld %-24s %-5lu %-8s %08lx(%s)\n",
++#else
++ "<mmap_record>\n"
++ "\t<pointer>%-8p</pointer>\n"
++ "\t<user_virtual>%-8lx</user_virtual>\n"
++ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<mmap_offset>%08lx</mmap_offset>\n"
++ "\t<bytes>%-8ld</bytes>\n"
++ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++ "\t<pid>%-5lu</pid>\n"
++ "\t<name>%-8s</name>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</mmap_record>\n",
++#endif
++ psLinuxMemArea,
++ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
++ psOffsetStruct->ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
++ goto unlock_and_return;
++ }
++ }
++ }
++ Ret = END_OF_FILE;
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sMMapMutex);
++ return Ret;
++}
++#endif
++#endif
++
++
++PVRSRV_ERROR
++PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
++
++
++ if(psLinuxMemArea->bMMapRegistered)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered",
++ __FUNCTION__, psLinuxMemArea));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto exit_unlock;
++ }
++
++ list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
++
++ psLinuxMemArea->bMMapRegistered = IMG_TRUE;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas++;
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize;
++ }
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR
++PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psOffsetStruct->ui32Mapped != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %lu", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++ else
++ {
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct));
++ }
++
++ PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ list_del(&psLinuxMemArea->sMMapItem);
++
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas--;
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize;
++ }
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++ return eError;
++}
++
++
++PVRSRV_ERROR
++LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID
++LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++ IMG_BOOL bWarn = IMG_FALSE;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
++ {
++ if (psOffsetStruct->ui32PID == ui32PID)
++ {
++ if (!bWarn)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__));
++ bWarn = IMG_TRUE;
++ }
++ PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
++ PVR_ASSERT(psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++ }
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError));
++ return eError;
++ }
++
++ return eError;
++}
++
++
++IMG_VOID
++PVRMMapInit(IMG_VOID)
++{
++ LinuxInitMutex(&g_sMMapMutex);
++
++ g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
++ if (!g_psMemmapCache)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++ goto error;
++ }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL,
++ ProcSeqNextMMapRegistrations,
++ ProcSeqShowMMapRegistrations,
++ ProcSeqOff2ElementMMapRegistrations,
++ ProcSeqStartstopMMapRegistations
++ );
++#else
++ CreateProcReadEntry("mmap", PrintMMapRegistrations);
++#endif
++#endif
++ return;
++
++error:
++ PVRMMapCleanup();
++ return;
++}
++
++
++IMG_VOID
++PVRMMapCleanup(IMG_VOID)
++{
++ PVRSRV_ERROR eError;
++
++ if (!list_empty(&g_sMMapAreaList))
++ {
++ LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__));
++
++ PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__));
++ list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ }
++ }
++ PVR_ASSERT(list_empty((&g_sMMapAreaList)));
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcMMap);
++#else
++ RemoveProcEntry("mmap");
++#endif
++#endif
++
++ if(g_psMemmapCache)
++ {
++ KMemCacheDestroyWrapper(g_psMemmapCache);
++ g_psMemmapCache = NULL;
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.h
+new file mode 100644
+index 0000000..5c9f2b2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.h
+@@ -0,0 +1,107 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__MMAP_H__)
++#define __MMAP_H__
++
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include "perproc.h"
++#include "mm.h"
++
++typedef struct KV_OFFSET_STRUCT_TAG
++{
++
++ IMG_UINT32 ui32Mapped;
++
++
++ IMG_UINT32 ui32MMapOffset;
++
++ IMG_UINT32 ui32RealByteSize;
++
++
++ LinuxMemArea *psLinuxMemArea;
++
++
++ IMG_UINT32 ui32TID;
++
++
++ IMG_UINT32 ui32PID;
++
++
++ IMG_BOOL bOnMMapList;
++
++
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName;
++#endif
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sAreaItem;
++}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT;
++
++
++
++IMG_VOID PVRMMapInit(IMG_VOID);
++
++
++IMG_VOID PVRMMapCleanup(IMG_VOID);
++
++
++PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_UINT32 *pui32MMapOffset,
++ IMG_UINT32 *pui32ByteOffset,
++ IMG_UINT32 *pui32RealByteSize, IMG_UINT32 *pui32UserVAddr);
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_BOOL *pbMUnmap,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr);
++
++int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/module.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/module.c
+new file mode 100644
+index 0000000..150fea5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/module.c
+@@ -0,0 +1,765 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ #if defined(LDM_PLATFORM)
++ #define PVR_LDM_PLATFORM_MODULE
++ #define PVR_LDM_MODULE
++ #else
++ #if defined(LDM_PCI)
++ #define PVR_LDM_PCI_MODULE
++ #define PVR_LDM_MODULE
++ #endif
++ #endif
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#include <linux/platform_device.h>
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#include <linux/pci.h>
++#endif
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++#include <asm/uaccess.h>
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "private_data.h"
++#include "lock.h"
++#include "linkage.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++#define DRVNAME "pvrsrvkm"
++#define DEVNAME "pvrsrvkm"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++#ifdef DEBUG
++static IMG_INT debug = DBGPRIV_WARNING;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++#include <linux/moduleparam.h>
++module_param(debug, int, 0);
++#else
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Sets the level of debug output (default=0x4)");
++#endif
++#endif
++
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++extern IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++
++EXPORT_SYMBOL(PVRGetDisplayClassJTable);
++EXPORT_SYMBOL(PVRGetBufferClassJTable);
++
++
++#if defined(PVR_LDM_MODULE)
++static struct class *psPvrClass;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++static IMG_INT AssignedMajorNumber;
++
++static IMG_INT PVRSRVOpen(struct inode* pInode, struct file* pFile);
++static IMG_INT PVRSRVRelease(struct inode* pInode, struct file* pFile);
++
++static struct file_operations pvrsrv_fops = {
++ .owner=THIS_MODULE,
++ .unlocked_ioctl=PVRSRV_BridgeDispatchKM,
++ .open=PVRSRVOpen,
++ .release=PVRSRVRelease,
++ .mmap=PVRMMap,
++};
++#endif
++
++PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++IMG_UINT32 gui32ReleasePID;
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++static IMG_UINT32 gPVRPowerLevel;
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#define LDM_DEV struct platform_device
++#define LDM_DRV struct platform_driver
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#define LDM_DEV struct pci_dev
++#define LDM_DRV struct pci_driver
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverRemove(LDM_DEV *device);
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *device);
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_VOID PVRSRVDriverRemove(LDM_DEV *device);
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
++#endif
++static IMG_INT PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state);
++static IMG_VOID PVRSRVDriverShutdown(LDM_DEV *device);
++static IMG_INT PVRSRVDriverResume(LDM_DEV *device);
++
++#if defined(PVR_LDM_PCI_MODULE)
++struct pci_device_id powervr_id_table[] __devinitdata = {
++ { PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID) },
++ { 0 }
++};
++
++MODULE_DEVICE_TABLE(pci, powervr_id_table);
++#endif
++
++static LDM_DRV powervr_driver = {
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .driver = {
++ .name = DRVNAME,
++ },
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .name = DRVNAME,
++ .id_table = powervr_id_table,
++#endif
++ .probe = PVRSRVDriverProbe,
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .remove = PVRSRVDriverRemove,
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .remove = __devexit_p(PVRSRVDriverRemove),
++#endif
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .shutdown = PVRSRVDriverShutdown,
++};
++
++LDM_DEV *gpsPVRLDMDev;
++
++#if defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE)
++
++static IMG_VOID PVRSRVDeviceRelease(struct device *pDevice)
++{
++ PVR_UNREFERENCED_PARAMETER(pDevice);
++}
++
++static struct platform_device powervr_device = {
++ .name = DEVNAME,
++ .id = -1,
++ .dev = {
++ .release = PVRSRVDeviceRelease
++ }
++};
++
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_INT __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));
++
++#if 0
++
++ if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ {
++ gpsPVRLDMDev = pDevice;
++
++ if (SysInitialise() != PVRSRV_OK)
++ {
++ return -ENODEV;
++ }
++ }
++
++ return 0;
++}
++
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_VOID __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));
++
++ if (SysAcquireData(&psSysData) == PVRSRV_OK)
++ {
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
++ {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++ SysDeinitialise(psSysData);
++
++ gpsPVRLDMDev = IMG_NULL;
++ }
++
++#if 0
++ if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++ return 0;
++#endif
++#if defined (PVR_LDM_PCI_MODULE)
++ return;
++#endif
++}
++
++
++static IMG_VOID PVRSRVDriverShutdown(LDM_DEV *pDevice)
++{
++ PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++
++ (IMG_VOID) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++}
++
++#endif
++
++
++#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM)
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state)
++#else
++static IMG_INT PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVDriverResume(struct drm_device *pDevice)
++#else
++static IMG_INT PVRSRVDriverResume(LDM_DEV *pDevice)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++#endif
++
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)
++IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
++{
++ IMG_CHAR data_buffer[2];
++ IMG_UINT32 PVRPowerLevel;
++
++ if (count != sizeof(data_buffer))
++ {
++ return -EINVAL;
++ }
++ else
++ {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRPowerLevel = data_buffer[0] - '0';
++ if (PVRPowerLevel != gPVRPowerLevel)
++ {
++ if (PVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++ }
++ else
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++ }
++
++ gPVRPowerLevel = PVRPowerLevel;
++ }
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el)
++{
++ seq_printf(sfile, "%lu\n", gPVRPowerLevel);
++}
++
++#else
++IMG_INT PVRProcGetPowerLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ if (off == 0) {
++ *start = (IMG_CHAR *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRPowerLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pFile)
++#else
++static IMG_INT PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ IMG_HANDLE hBlockAlloc;
++ IMG_INT iRet = -ENOMEM;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32PID;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++#else
++ PVR_UNREFERENCED_PARAMETER(pInode);
++#endif
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ if (PVRSRVProcessConnect(ui32PID) != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (psEnvPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data", __FUNCTION__));
++ goto err_unlock;
++ }
++#endif
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ (IMG_PVOID *)&psPrivateData,
++ &hBlockAlloc,
++ "File Private Data");
++
++ if(eError != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ psPrivateData->hKernelMemInfo = NULL;
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psPrivateData->psDRMFile = pFile;
++
++ list_add_tail(&psPrivateData->sDRMAuthListItem, &psEnvPerProc->sDRMAuthListHead);
++#endif
++ psPrivateData->ui32OpenPID = ui32PID;
++ psPrivateData->hBlockAlloc = hBlockAlloc;
++ PRIVATE_DATA(pFile) = psPrivateData;
++ iRet = 0;
++err_unlock:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return iRet;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVRelease(struct drm_device unref__ *dev, struct drm_file *pFile)
++#else
++static IMG_INT PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++#else
++ PVR_UNREFERENCED_PARAMETER(pInode);
++#endif
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ psPrivateData = PRIVATE_DATA(pFile);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ list_del(&psPrivateData->sDRMAuthListItem);
++#endif
++
++
++ gui32ReleasePID = psPrivateData->ui32OpenPID;
++ PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID);
++ gui32ReleasePID = 0;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ psPrivateData, psPrivateData->hBlockAlloc);
++ PRIVATE_DATA(pFile) = NULL;
++
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return 0;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRCore_Init(IMG_VOID)
++#else
++static IMG_INT __init PVRCore_Init(IMG_VOID)
++#endif
++{
++ IMG_INT error;
++#if !defined(PVR_LDM_MODULE)
++ PVRSRV_ERROR eError;
++#else
++ struct device *psDev;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ PVRDPFInit();
++#endif
++ PVR_TRACE(("PVRCore_Init"));
++
++ LinuxInitMutex(&gPVRSRVLock);
++
++#ifdef DEBUG
++ PVRDebugSetLevel(debug);
++#endif
++
++ if (CreateProcEntries ())
++ {
++ error = -ENOMEM;
++ return error;
++ }
++
++ if (PVROSFuncInit() != PVRSRV_OK)
++ {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ PVRLinuxMUtilsInit();
++
++ if(LinuxMMInit() != PVRSRV_OK)
++ {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ LinuxBridgeInit();
++
++ PVRMMapInit();
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ if ((error = platform_driver_register(&powervr_driver)) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
++
++ goto init_failed;
++ }
++
++#if defined(MODULE)
++ if ((error = platform_device_register(&powervr_device)) != 0)
++ {
++ platform_driver_unregister(&powervr_driver);
++
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
++
++ goto init_failed;
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++ if ((error = pci_register_driver(&powervr_driver)) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
++
++ goto init_failed;
++ }
++#endif
++
++#else
++
++ if ((eError = SysInitialise()) != PVRSRV_OK)
++ {
++ error = -ENODEV;
++#if defined(TCF_REV) && (TCF_REV == 110)
++ if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
++ {
++ printk("\nAtlas wrapper (FPGA image) version mismatch");
++ error = -ENODEV;
++ }
++#endif
++ goto init_failed;
++ }
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
++
++ if (AssignedMajorNumber <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
++
++ error = -EBUSY;
++ goto sys_deinit;
++ }
++
++ PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++ psPvrClass = class_create(THIS_MODULE, "pvr");
++
++ if (IS_ERR(psPvrClass))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass)));
++ error = -EBUSY;
++ goto unregister_device;
++ }
++
++ psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ NULL,
++#endif
++ DEVNAME);
++ if (IS_ERR(psDev))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev)));
++ error = -EBUSY;
++ goto destroy_class;
++ }
++#endif
++
++ return 0;
++
++#if defined(PVR_LDM_MODULE)
++destroy_class:
++ class_destroy(psPvrClass);
++unregister_device:
++ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME);
++#endif
++#if !defined(SUPPORT_DRI_DRM)
++sys_deinit:
++#endif
++#if defined(PVR_LDM_MODULE)
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++ if (psSysData != IMG_NULL)
++ {
++ SysDeinitialise(psSysData);
++ }
++ }
++#endif
++init_failed:
++ PVRMMapCleanup();
++ LinuxMMCleanup();
++ LinuxBridgeDeInit();
++ PVROSFuncDeInit();
++ RemoveProcEntries();
++
++ return error;
++
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_VOID PVRCore_Cleanup(IMG_VOID)
++#else
++static IMG_VOID __exit PVRCore_Cleanup(IMG_VOID)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRCore_Cleanup"));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LDM_MODULE)
++ device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
++ class_destroy(psPvrClass);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ if (
++#endif
++ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME)
++#if !(LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ ;
++#else
++ )
++ {
++ PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber));
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
++ {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++
++ SysDeinitialise(psSysData);
++#endif
++
++ PVRMMapCleanup();
++
++ LinuxMMCleanup();
++
++ LinuxBridgeDeInit();
++
++ PVROSFuncDeInit();
++
++ RemoveProcEntries();
++
++ PVR_TRACE(("PVRCore_Cleanup: unloading"));
++}
++
++#if !defined(SUPPORT_DRI_DRM)
++module_init(PVRCore_Init);
++module_exit(PVRCore_Cleanup);
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.c
+new file mode 100644
+index 0000000..d66e697
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.c
+@@ -0,0 +1,136 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++#include <linux/module.h>
++
++#include <img_defs.h>
++#include <services.h>
++
++#include "mutex.h"
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ mutex_init(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ mutex_lock(psPVRSRVMutex);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ if(mutex_lock_interruptible(psPVRSRVMutex) == -EINTR)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_OK;
++ }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ return mutex_trylock(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ mutex_unlock(psPVRSRVMutex);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ return (IMG_BOOL)mutex_is_locked(psPVRSRVMutex);
++}
++
++
++#else
++
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ init_MUTEX(&psPVRSRVMutex->sSemaphore);
++ atomic_set(&psPVRSRVMutex->Count, 0);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ down(&psPVRSRVMutex->sSemaphore);
++ atomic_dec(&psPVRSRVMutex->Count);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ if(down_interruptible(&psPVRSRVMutex->sSemaphore) == -EINTR)
++ {
++
++ return PVRSRV_ERROR_GENERIC;
++ }else{
++ atomic_dec(&psPVRSRVMutex->Count);
++ return PVRSRV_OK;
++ }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ IMG_INT32 Status = down_trylock(&psPVRSRVMutex->sSemaphore);
++ if(Status == 0)
++ {
++ atomic_dec(&psPVRSRVMutex->Count);
++ }
++
++ return Status;
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ atomic_inc(&psPVRSRVMutex->Count);
++ up(&psPVRSRVMutex->sSemaphore);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ IMG_INT32 iCount;
++
++ iCount = atomic_read(&psPVRSRVMutex->Count);
++
++ return (IMG_BOOL)iCount;
++}
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.h
+new file mode 100644
+index 0000000..b24a599
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.h
+@@ -0,0 +1,70 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_LINUX_MUTEX_H_
++#define __INCLUDED_LINUX_MUTEX_H_
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++typedef struct mutex PVRSRV_LINUX_MUTEX;
++
++#else
++
++
++typedef struct {
++ struct semaphore sSemaphore;
++
++ atomic_t Count;
++}PVRSRV_LINUX_MUTEX;
++
++#endif
++
++
++extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.c
+new file mode 100644
+index 0000000..83eab51
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.c
+@@ -0,0 +1,133 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/version.h>
++
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "mutils.h"
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++#define PAT_LINUX_X86_WC 1
++
++#define PAT_X86_ENTRY_BITS 8
++
++#define PAT_X86_BIT_PWT 1U
++#define PAT_X86_BIT_PCD 2U
++#define PAT_X86_BIT_PAT 4U
++#define PAT_X86_BIT_MASK (PAT_X86_BIT_PAT | PAT_X86_BIT_PCD | PAT_X86_BIT_PWT)
++
++static IMG_BOOL g_write_combining_available = IMG_FALSE;
++
++#define PROT_TO_PAT_INDEX(v, B) ((v & _PAGE_ ## B) ? PAT_X86_BIT_ ## B : 0)
++
++static inline IMG_UINT
++pvr_pat_index(pgprotval_t prot_val)
++{
++ IMG_UINT ret = 0;
++ pgprotval_t val = prot_val & _PAGE_CACHE_MASK;
++
++ ret |= PROT_TO_PAT_INDEX(val, PAT);
++ ret |= PROT_TO_PAT_INDEX(val, PCD);
++ ret |= PROT_TO_PAT_INDEX(val, PWT);
++
++ return ret;
++}
++
++static inline IMG_UINT
++pvr_pat_entry(u64 pat, IMG_UINT index)
++{
++ return (IMG_UINT)(pat >> (index * PAT_X86_ENTRY_BITS)) & PAT_X86_BIT_MASK;
++}
++
++static IMG_VOID
++PVRLinuxX86PATProbe(IMG_VOID)
++{
++
++ if (cpu_has_pat)
++ {
++ u64 pat;
++ IMG_UINT pat_index;
++ IMG_UINT pat_entry;
++
++ PVR_TRACE(("%s: PAT available", __FUNCTION__));
++
++ rdmsrl(MSR_IA32_CR_PAT, pat);
++ PVR_TRACE(("%s: Top 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat >> 32)));
++ PVR_TRACE(("%s: Bottom 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat)));
++
++ pat_index = pvr_pat_index(_PAGE_CACHE_WC);
++ PVR_TRACE(("%s: PAT index for write combining: %u", __FUNCTION__, pat_index));
++
++ pat_entry = pvr_pat_entry(pat, pat_index);
++ PVR_TRACE(("%s: PAT entry for write combining: 0x%.2x (should be 0x%.2x)", __FUNCTION__, pat_entry, PAT_LINUX_X86_WC));
++
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ g_write_combining_available = (IMG_BOOL)(pat_entry == PAT_LINUX_X86_WC);
++#endif
++ }
++#if defined(DEBUG)
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ if (g_write_combining_available)
++ {
++ PVR_TRACE(("%s: Write combining available via PAT", __FUNCTION__));
++ }
++ else
++ {
++ PVR_TRACE(("%s: Write combining not available", __FUNCTION__));
++ }
++#else
++ PVR_TRACE(("%s: Write combining disabled in driver build", __FUNCTION__));
++#endif
++#endif
++}
++
++pgprot_t
++pvr_pgprot_writecombine(pgprot_t prot)
++{
++
++
++ return (g_write_combining_available) ?
++ __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) : pgprot_noncached(prot);
++}
++#endif
++
++IMG_VOID
++PVRLinuxMUtilsInit(IMG_VOID)
++{
++#if defined(SUPPORT_LINUX_X86_PAT)
++ PVRLinuxX86PATProbe();
++#endif
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.h
+new file mode 100644
+index 0000000..943c2bd
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.h
+@@ -0,0 +1,101 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MUTILS_H__
++#define __IMG_LINUX_MUTILS_H__
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++
++#if !(defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)))
++#if defined(SUPPORT_LINUX_X86_PAT)
++#undef SUPPORT_LINUX_X86_PAT
++#endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ pgprot_t pvr_pgprot_writecombine(pgprot_t prot);
++ #define PGPROT_WC(pv) pvr_pgprot_writecombine(pv)
++#else
++ #if defined(__arm__) || defined(__sh__)
++ #define PGPROT_WC(pv) pgprot_writecombine(pv)
++ #else
++ #if defined(__i386__)
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #else
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #error Unsupported architecture!
++ #endif
++ #endif
++#endif
++
++#define PGPROT_UC(pv) pgprot_noncached(pv)
++
++#if defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
++ #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes)
++#else
++ #if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ #define IOREMAP(pa, bytes) ioremap_cached(pa, bytes)
++ #else
++ #define IOREMAP(pa, bytes) ioremap(pa, bytes)
++ #endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ #if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#else
++ #if defined(__arm__)
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #else
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17))
++ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE)
++ #else
++			#define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE, 1)
++ #endif
++ #endif
++ #endif
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#endif
++
++#define IOREMAP_UC(pa, bytes) ioremap_nocache(pa, bytes)
++
++IMG_VOID PVRLinuxMUtilsInit(IMG_VOID);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osfunc.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osfunc.c
+new file mode 100644
+index 0000000..0e2b68c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osfunc.c
+@@ -0,0 +1,2564 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++#include <asm/cacheflush.h>
++#endif
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/hugetlb.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++#include <linux/spinlock.h>
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
++ defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
++ defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
++ defined(PVR_LINUX_USING_WORKQUEUES)
++#include <linux/workqueue.h>
++#endif
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++#include "event.h"
++#include "linkage.h"
++
++#define EVENT_OBJECT_TIMEOUT_MS (100)
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS) || \
++ defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++#if defined(__i386__)
++static void per_cpu_cache_flush(void *arg)
++{
++ PVR_UNREFERENCED_PARAMETER(arg);
++ wbinvd();
++}
++#endif
++
++#if !defined(SUPPORT_CPU_CACHED_BUFFERS)
++static
++#endif
++IMG_VOID OSFlushCPUCacheKM(IMG_VOID)
++{
++#if defined(__arm__)
++ flush_cache_all();
++#elif defined(__i386__)
++
++ on_each_cpu(per_cpu_cache_flush, NULL, 1);
++#else
++#error "Implement full CPU cache flush for this CPU!"
++#endif
++}
++
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++
++IMG_VOID OSFlushCPUCacheRangeKM(IMG_VOID *pvRangeAddrStart,
++ IMG_VOID *pvRangeAddrEnd)
++{
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart);
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrEnd);
++
++
++ OSFlushCPUCacheKM();
++}
++
++#endif
++
++#define HOST_ALLOC_MEM_USING_KMALLOC ((IMG_HANDLE)0)
++#define HOST_ALLOC_MEM_USING_VMALLOC ((IMG_HANDLE)1)
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc)
++#else
++PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line)
++#endif
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _KMallocWrapper(ui32Size, pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = KMallocWrapper(ui32Size);
++#endif
++ if(*ppvCpuVAddr)
++ {
++ if (phBlockAlloc)
++ {
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_KMALLOC;
++ }
++ }
++ else
++ {
++ if (!phBlockAlloc)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED, pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED);
++#endif
++ if (!*ppvCpuVAddr)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_VMALLOC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc)
++#else
++PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line)
++#endif
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++
++ if (hBlockAlloc == HOST_ALLOC_MEM_USING_VMALLOC)
++ {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ VFreeWrapper(pvCpuVAddr);
++#endif
++ }
++ else
++ {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ KFreeWrapper(pvCpuVAddr);
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSAllocPages_Impl(IMG_UINT32 ui32AllocFlags,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PageSize,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(ui32PageSize);
++
++#if 0
++
++ if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS)
++ {
++ ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++
++ psLinuxMemArea = NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32AllocFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#if defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++ if(ui32AllocFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED))
++ {
++ OSFlushCPUCacheKM();
++ }
++#endif
++
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++ *phOSMemHandle = psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++				 "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%u, "
++ "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
++ ui32AllocFlags, ui32Bytes, pvCpuVAddr, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n",
++ __FUNCTION__, ui32AllocFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet)
++{
++ LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
++ if(!psLinuxMemArea)
++ {
++ *phOSMemHandleRet = NULL;
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ *phOSMemHandleRet = psLinuxMemArea;
++
++
++ if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ return PVRSRV_OK;
++ }
++
++ eError = PVRMMapRegisterArea(psLinuxMemArea);
++ if(eError != PVRSRV_OK)
++ {
++ goto failed_register_area;
++ }
++
++ return PVRSRV_OK;
++
++failed_register_area:
++ *phOSMemHandleRet = NULL;
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ return eError;
++}
++
++PVRSRV_ERROR
++OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++ if((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0)
++ {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++IMG_CPU_PHYADDR
++OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset)
++{
++ PVR_ASSERT(hOSMemHandle);
++
++ return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
++}
++
++
++
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++ IMG_UINT8 *Src,*Dst;
++ IMG_INT i;
++
++ Src=(IMG_UINT8 *)pvSrc;
++ Dst=(IMG_UINT8 *)pvDst;
++ for(i=0;i<ui32Size;i++)
++ {
++ Dst[i]=Src[i];
++ }
++#else
++ memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMSET)
++ IMG_UINT8 *Buff;
++ IMG_INT i;
++
++ Buff=(IMG_UINT8 *)pvDest;
++ for(i=0;i<ui32Size;i++)
++ {
++ Buff[i]=ui8Value;
++ }
++#else
++ memset(pvDest, (IMG_INT) ui8Value, (size_t) ui32Size);
++#endif
++}
++
++
++IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
++{
++ return (strcpy(pszDest, pszSrc));
++}
++
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...)
++{
++ va_list argList;
++ IMG_INT32 iCount;
++
++ va_start(argList, pszFormat);
++ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
++ va_end(argList);
++
++ return iCount;
++}
++
++IMG_VOID OSBreakResourceLock (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++ if(*pui32Access)
++ {
++ if(psResource->ui32ID == ui32ID)
++ {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process."));
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked"));
++ }
++}
++
++
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource)
++{
++ psResource->ui32ID = 0;
++ psResource->ui32Lock = 0;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource)
++{
++ OSBreakResourceLock (psResource, psResource->ui32ID);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData)
++{
++ ENV_DATA *psEnvData;
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID **)&psEnvData, IMG_NULL,
++ "Environment Data") != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
++ &psEnvData->pvBridgeData, IMG_NULL,
++ "Bridge Data") != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++
++ *ppvEnvSpecificData = psEnvData;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData)
++{
++ ENV_DATA *psEnvData = (ENV_DATA*)pvEnvSpecificData;
++
++ PVR_ASSERT(!psEnvData->bMISRInstalled);
++ PVR_ASSERT(!psEnvData->bLISRInstalled);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, psEnvData->pvBridgeData, IMG_NULL);
++ psEnvData->pvBridgeData = IMG_NULL;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID)
++{
++ schedule();
++}
++
++
++
++IMG_UINT32 OSClockus(IMG_VOID)
++{
++ IMG_UINT32 time, j = jiffies;
++
++ time = j * (1000000 / HZ);
++
++ return time;
++}
++
++
++
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus)
++{
++ udelay(ui32Timeus);
++}
++
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID)
++{
++ if (in_interrupt())
++ {
++ return KERNEL_ID;
++ }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ return (IMG_UINT32)current->pgrp;
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
++ return (IMG_UINT32)task_tgid_nr(current);
++#else
++ return (IMG_UINT32)current->tgid;
++#endif
++#endif
++}
++
++
++IMG_UINT32 OSGetPageSize(IMG_VOID)
++{
++#if defined(__sh__)
++ IMG_UINT32 ui32ReturnValue = PAGE_SIZE;
++
++ return (ui32ReturnValue);
++#else
++ return PAGE_SIZE;
++#endif
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++static irqreturn_t DeviceISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ , struct pt_regs *regs
++#endif
++ )
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ PVR_UNREFERENCED_PARAMETER(regs);
++#endif
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id;
++ if(!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVDeviceLISR(psDeviceNode);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)psDeviceNode->psSysData);
++ }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++
++
++
++static irqreturn_t SystemISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ , struct pt_regs *regs
++#endif
++ )
++{
++ SYS_DATA *psSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ PVR_UNREFERENCED_PARAMETER(regs);
++#endif
++ psSysData = (SYS_DATA *)dev_id;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVSystemLISR(psSysData);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)psSysData);
++ }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++ IMG_UINT32 ui32Irq,
++ IMG_CHAR *pszISRName,
++ IMG_VOID *pvDeviceNode)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled)
++ {
++		PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++	PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %p", pszISRName, ui32Irq, pvDeviceNode));
++
++ if(request_irq(ui32Irq, DeviceISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++ SA_SHIRQ
++#else
++ IRQF_SHARED
++#endif
++ , pszISRName, pvDeviceNode))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvDeviceNode;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++	PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled)
++ {
++		PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++	PVR_TRACE(("Installing system LISR on IRQ %d with cookie %p", ui32Irq, pvSysData));
++
++ if(request_irq(ui32Irq, SystemISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++ SA_SHIRQ
++#else
++ IRQF_SHARED
++#endif
++ , "PowerVR", pvSysData))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvSysData;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++	PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %p", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++static void MISRWrapper(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ void *data
++#else
++ struct work_struct *data
++#endif
++)
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
++
++ psEnvData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
++
++ if (psEnvData->psWorkQueue == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ , (void *)&psEnvData->sMISRWork
++#endif
++ );
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ destroy_workqueue(psEnvData->psWorkQueue);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ queue_work(psEnvData->psWorkQueue, &psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++#else
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
++static void MISRWrapper(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ void *data
++#else
++ struct work_struct *data
++#endif
++)
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++	PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ , (void *)&psEnvData->sMISRWork
++#endif
++ );
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ flush_scheduled_work();
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ schedule_work(&psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++
++
++static void MISRWrapper(unsigned long data)
++{
++ SYS_DATA *psSysData;
++
++ psSysData = (SYS_DATA *)data;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData);
++
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ tasklet_kill(&psEnvData->sMISRTasklet);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ tasklet_schedule(&psEnvData->sMISRTasklet);
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++#endif
++
++#endif
++
/* Fatal-error hook for the services layer: triggers a kernel BUG(),
 * halting the current context with a full oops report. Never returns. */
IMG_VOID OSPanic(IMG_VOID)
{
	BUG();
}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#define OS_TAS(p) xchg((p), 1)
++#else
++#define OS_TAS(p) tas(p)
++#endif
/* Try to take ownership of a services resource lock.
 *
 * Uses an atomic test-and-set (OS_TAS: xchg on >= 2.6.22) on
 * psResource->ui32Lock; on success the caller's ID is recorded so
 * OSUnlockResource can verify the unlocker. Non-blocking: returns
 * PVRSRV_ERROR_GENERIC immediately if the lock is already held.
 */
PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE *psResource,
                                IMG_UINT32 ui32ID)

{
    PVRSRV_ERROR eError = PVRSRV_OK;

    /* xchg returns the previous value: 0 means we acquired the lock. */
    if(!OS_TAS(&psResource->ui32Lock))
        psResource->ui32ID = ui32ID;
    else
        eError = PVRSRV_ERROR_GENERIC;

    return eError;
}
++
++
/* Release a resource lock previously taken with OSLockResource.
 *
 * The lock word is accessed through a volatile pointer so the clear is
 * not optimised away or reordered by the compiler. The stored owner ID
 * must match ui32ID; a mismatch (or an already-unlocked resource) is
 * reported and PVRSRV_ERROR_GENERIC returned without touching the lock.
 */
PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
{
    volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
    PVRSRV_ERROR eError = PVRSRV_OK;

    if(*pui32Access)
    {
        if(psResource->ui32ID == ui32ID)
        {
            /* Clear the owner before the lock word so a racing locker
               never observes a held lock with a stale ID. */
            psResource->ui32ID = 0;
            *pui32Access = 0;
        }
        else
        {
            PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource));
            PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID));
            eError = PVRSRV_ERROR_GENERIC;
        }
    }
    else
    {
        PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource));
        eError = PVRSRV_ERROR_GENERIC;
    }

    return eError;
}
++
++
++IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++ return (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID)
++ ? IMG_TRUE
++ : IMG_FALSE;
++}
++
++
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID *pvLinAddr)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = (IMG_UINTPTR_T)VMallocToPhys(pvLinAddr);
++
++ return CpuPAddr;
++}
++
++
/* Map a physical address range into the kernel's linear address space.
 *
 * Only PVRSRV_HAP_KERNEL_ONLY mappings are supported here (backed by
 * IORemapWrapper); user-visible mappings must go through OSReservePhys.
 *
 * BasePAddr        - CPU physical base of the region.
 * ui32Bytes        - size of the region.
 * ui32MappingFlags - heap/caching flags; must include PVRSRV_HAP_KERNEL_ONLY.
 * phOSMemHandle    - optional; always set to 0 (no handle is produced).
 *
 * Returns the kernel virtual address, or NULL on failure / bad flags.
 */
IMG_VOID *
OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
               IMG_UINT32 ui32Bytes,
               IMG_UINT32 ui32MappingFlags,
               IMG_HANDLE *phOSMemHandle)
{
    if(phOSMemHandle)
    {
        *phOSMemHandle = (IMG_HANDLE)0;
    }

    if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
    {
        IMG_VOID *pvIORemapCookie;
        pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
        if(pvIORemapCookie == IMG_NULL)
        {
            return NULL;
        }
        return pvIORemapCookie;
    }
    else
    {
        PVR_DPF((PVR_DBG_ERROR,
                 "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
                 " (Use OSReservePhys otherwise)"));
        return NULL;
    }
}
++
/* Undo a mapping made by OSMapPhysToLin.
 *
 * Only PVRSRV_HAP_KERNEL_ONLY mappings are accepted; the address is
 * released via IOUnmapWrapper. ui32Bytes and hPageAlloc are unused
 * (the iounmap path needs only the virtual address).
 *
 * Returns IMG_TRUE on success, IMG_FALSE for unsupported flags.
 */
IMG_BOOL
OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hPageAlloc)
{
    PVR_TRACE(("%s: unmapping %d bytes from 0x%08x", __FUNCTION__, ui32Bytes, pvLinAddr));

    PVR_UNREFERENCED_PARAMETER(hPageAlloc);
    PVR_UNREFERENCED_PARAMETER(ui32Bytes);

    if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
    {
        IOUnmapWrapper(pvLinAddr);
        return IMG_TRUE;
    }
    else
    {
        PVR_DPF((PVR_DBG_ERROR,
                 "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
                 " (Use OSUnReservePhys otherwise)"));
        return IMG_FALSE;
    }
}
++
/* Wrap an externally-allocated memory range in a LinuxMemArea and
 * register it with the services memory tracking.
 *
 * pBasePAddr  - system physical address array/base of the memory.
 * pvCPUVAddr  - existing kernel virtual mapping of the memory.
 * bPhysContig - IMG_TRUE if the range is physically contiguous.
 * phOSMemHandle - out: the new LinuxMemArea cast to a handle.
 *
 * For SINGLE_PROCESS / MULTI_PROCESS mappings the area is additionally
 * registered for mmap; NOTE(review): the PVRMMapRegisterArea return
 * value is ignored on both paths — failures there go undetected.
 */
static PVRSRV_ERROR
RegisterExternalMem(IMG_SYS_PHYADDR *pBasePAddr,
          IMG_VOID *pvCPUVAddr,
          IMG_UINT32 ui32Bytes,
          IMG_BOOL bPhysContig,
          IMG_UINT32 ui32MappingFlags,
          IMG_HANDLE *phOSMemHandle)
{
    LinuxMemArea *psLinuxMemArea;

    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
    {
        case PVRSRV_HAP_KERNEL_ONLY:
        {
            psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);

            if(!psLinuxMemArea)
            {
                return PVRSRV_ERROR_GENERIC;
            }
            break;
        }
        case PVRSRV_HAP_SINGLE_PROCESS:
        {
            psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);

            if(!psLinuxMemArea)
            {
                return PVRSRV_ERROR_GENERIC;
            }
            PVRMMapRegisterArea(psLinuxMemArea);
            break;
        }
        case PVRSRV_HAP_MULTI_PROCESS:
        {
            /* On virtually-indexed/virtually-tagged cache architectures a
               cached multi-process mapping would alias; force uncached. */
#if defined(VIVT_CACHE) || defined(__sh__)

            ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
#endif
            psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);

            if(!psLinuxMemArea)
            {
                return PVRSRV_ERROR_GENERIC;
            }
            PVRMMapRegisterArea(psLinuxMemArea);
            break;
        }
        default:
            PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags));
            *phOSMemHandle = (IMG_HANDLE)0;
            return PVRSRV_ERROR_GENERIC;
    }

    *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;

    LinuxMemAreaRegister(psLinuxMemArea);

    return PVRSRV_OK;
}
++
++
++PVRSRV_ERROR
++OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr);
++
++ return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, IMG_TRUE, ui32MappingFlags, phOSMemHandle);
++}
++
++
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE *phOSMemHandle)
++{
++ return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes, IMG_FALSE, ui32MappingFlags, phOSMemHandle);
++}
++
++
/* Unregister and free a LinuxMemArea created by OSRegisterMem /
 * OSRegisterDiscontigMem.
 *
 * For SINGLE_PROCESS / MULTI_PROCESS mappings the mmap registration is
 * removed first; if that fails the area is NOT freed and an error is
 * returned. pvCpuVAddr and ui32Bytes are used only for diagnostics.
 */
PVRSRV_ERROR
OSUnRegisterMem (IMG_VOID *pvCpuVAddr,
                IMG_UINT32 ui32Bytes,
                IMG_UINT32 ui32MappingFlags,
                IMG_HANDLE hOSMemHandle)
{
    LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;

    PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
    PVR_UNREFERENCED_PARAMETER(ui32Bytes);

    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
    {
        case PVRSRV_HAP_KERNEL_ONLY:
            break;
        case PVRSRV_HAP_SINGLE_PROCESS:
        case PVRSRV_HAP_MULTI_PROCESS:
        {
            if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
                        __FUNCTION__, pvCpuVAddr, ui32Bytes,
                        ui32MappingFlags, hOSMemHandle));
                return PVRSRV_ERROR_GENERIC;
            }
            break;
        }
        default:
        {
            PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags));
            return PVRSRV_ERROR_INVALID_PARAMS;
        }
    }

    LinuxMemAreaDeepFree(psLinuxMemArea);

    return PVRSRV_OK;
}
++
++PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++ return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++}
++
/* Reserve and map a physical address range, producing a LinuxMemArea
 * handle and a CPU virtual address.
 *
 * Mapping strategy by heap type:
 *   KERNEL_ONLY   - ioremap only (NewIORemapLinuxMemArea).
 *   SINGLE_PROCESS- IO area (NewIOLinuxMemArea), registered for mmap.
 *   MULTI_PROCESS - ioremap, registered for mmap; cached attribute is
 *                   stripped on VIVT-cache architectures to avoid aliasing.
 *
 * Outputs: *phOSMemHandle (the area) and *ppvCpuVAddr (its kernel VA).
 * NOTE(review): PVRMMapRegisterArea return values are not checked.
 */
PVRSRV_ERROR
OSReservePhys(IMG_CPU_PHYADDR BasePAddr,
              IMG_UINT32 ui32Bytes,
              IMG_UINT32 ui32MappingFlags,
              IMG_VOID **ppvCpuVAddr,
              IMG_HANDLE *phOSMemHandle)
{
    LinuxMemArea *psLinuxMemArea;

#if 0
    /* Disabled: would silently promote single-process mappings to
       multi-process. Kept for reference. */
    if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
    {
        ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
        ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
    }
#endif

    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
    {
        case PVRSRV_HAP_KERNEL_ONLY:
        {

            psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
            if(!psLinuxMemArea)
            {
                return PVRSRV_ERROR_GENERIC;
            }
            break;
        }
        case PVRSRV_HAP_SINGLE_PROCESS:
        {

            psLinuxMemArea = NewIOLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
            if(!psLinuxMemArea)
            {
                return PVRSRV_ERROR_GENERIC;
            }
            PVRMMapRegisterArea(psLinuxMemArea);
            break;
        }
        case PVRSRV_HAP_MULTI_PROCESS:
        {

#if defined(VIVT_CACHE) || defined(__sh__)

            ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
#endif
            psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
            if(!psLinuxMemArea)
            {
                return PVRSRV_ERROR_GENERIC;
            }
            PVRMMapRegisterArea(psLinuxMemArea);
            break;
        }
        default:
            PVR_DPF((PVR_DBG_ERROR,"OSMapPhysToLin : invalid flags 0x%x\n", ui32MappingFlags));
            *ppvCpuVAddr = NULL;
            *phOSMemHandle = (IMG_HANDLE)0;
            return PVRSRV_ERROR_GENERIC;
    }

    *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
    *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);

    LinuxMemAreaRegister(psLinuxMemArea);

    return PVRSRV_OK;
}
++
++PVRSRV_ERROR
++OSUnReservePhys(IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUnMapPhysToLin : invalid flags 0x%x", ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
/* Allocate physically-contiguous kernel memory (kmalloc-backed).
 *
 * Only available in NO_HARDWARE (simulation) builds; on real hardware
 * this path is not used and returns PVRSRV_ERROR_OUT_OF_MEMORY.
 *
 * Outputs: *pvLinAddr (kernel VA) and psPhysAddr->uiAddr (physical
 * address via virt_to_phys — valid because kmalloc memory is contiguous
 * and in the linear map).
 */
PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pvLinAddr, IMG_CPU_PHYADDR *psPhysAddr)
{
#if !defined(NO_HARDWARE)
    PVR_UNREFERENCED_PARAMETER(ui32Size);
    PVR_UNREFERENCED_PARAMETER(pvLinAddr);
    PVR_UNREFERENCED_PARAMETER(psPhysAddr);
    PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__));

    return PVRSRV_ERROR_OUT_OF_MEMORY;
#else
    IMG_VOID *pvKernLinAddr;

    /* Debug builds route through _KMallocWrapper to record the
       allocation site for leak tracking. */
#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
    pvKernLinAddr = _KMallocWrapper(ui32Size, __FILE__, __LINE__);
#else
    pvKernLinAddr = KMallocWrapper(ui32Size);
#endif
    if (!pvKernLinAddr)
    {
        return PVRSRV_ERROR_OUT_OF_MEMORY;
    }

    *pvLinAddr = pvKernLinAddr;

    psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr);

    return PVRSRV_OK;
#endif
}
++
++
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR pvLinAddr, IMG_CPU_PHYADDR psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__));
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ KFreeWrapper(pvLinAddr);
++#endif
++ return PVRSRV_OK;
++}
++
/* Read a 32-bit device register at byte offset ui32Offset from the
 * mapped register base. Uses readl() (ordered MMIO read) on real
 * hardware; a plain memory load in NO_HARDWARE builds. */
IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
{
#if !defined(NO_HARDWARE)
    return (IMG_UINT32) readl((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
#else
    return *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
#endif
}
++
/* Write a 32-bit value to a device register at byte offset ui32Offset
 * from the mapped register base. Uses writel() (ordered MMIO write) on
 * real hardware; a plain memory store in NO_HARDWARE builds. */
IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
{
#if !defined(NO_HARDWARE)
    writel(ui32Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
#else
    *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset) = ui32Value;
#endif
}
++
++#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
++{
++ int err;
++ IMG_UINT32 i;
++ PVR_PCI_DEV *psPVRPCI;
++
++ PVR_TRACE(("OSPCISetDev"));
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID **)&psPVRPCI, IMG_NULL,
++ "PCI Device") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure"));
++ return IMG_NULL;
++ }
++
++ psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
++ psPVRPCI->ePCIFlags = eFlags;
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err));
++ return IMG_NULL;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ {
++ pci_set_master(psPVRPCI->psPCIDev);
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
++ {
++#if defined(CONFIG_PCI_MSI)
++ if (psPVRPCI->psPCIDev->device == PSB_SYS_SGX_DEV_DEVICE_ID_1 ||
++ psPVRPCI->psPCIDev->device == PSB_SYS_SGX_DEV_DEVICE_ID_2) // Disable MSI for Menlow
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ else if(!psPVRPCI->psPCIDev->msi_enabled)
++ {
++ err = pci_enable_msi(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: Couldn't enable MSI (%d)", err));
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ }
++ }
++#else
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: MSI support not enabled in the kernel"));
++#endif
++ }
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++ }
++
++ return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
++}
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
++{
++ struct pci_dev *psPCIDev;
++
++ psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
++ if (psPCIDev == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
++ return IMG_NULL;
++ }
++
++ return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
++}
++
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++ *pui32IRQ = psPVRPCI->psPCIDev->irq;
++
++ return PVRSRV_OK;
++}
++
++enum HOST_PCI_ADDR_RANGE_FUNC
++{
++ HOST_PCI_ADDR_RANGE_FUNC_LEN,
++ HOST_PCI_ADDR_RANGE_FUNC_START,
++ HOST_PCI_ADDR_RANGE_FUNC_END,
++ HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
++ HOST_PCI_ADDR_RANGE_FUNC_RELEASE
++};
++
/* Common worker for the OSPCIAddrRange* wrappers.
 *
 * Dispatches on eFunc to query (LEN/START/END) or claim/release
 * (REQUEST/RELEASE) the PCI BAR resource ui32Index. REQUEST/RELEASE
 * return 1 on success and 0 on failure; the query functions return the
 * requested value. Out-of-range indices return 0.
 */
static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
                                     PVRSRV_PCI_DEV_HANDLE hPVRPCI,
                                     IMG_UINT32 ui32Index)
{
    PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;

    if (ui32Index >= DEVICE_COUNT_RESOURCE)
    {
        PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range"));
        return 0;

    }

    switch (eFunc)
    {
        case HOST_PCI_ADDR_RANGE_FUNC_LEN:
            return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
        case HOST_PCI_ADDR_RANGE_FUNC_START:
            return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
        case HOST_PCI_ADDR_RANGE_FUNC_END:
            return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
        case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
        {
            int err;

            err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, "PowerVR");
            if (err != 0)
            {
                PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err));
                return 0;
            }
            /* Remember the claim so release/suspend/resume paths know
               which regions belong to us. */
            psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
            return 1;
        }
        case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
            if (psPVRPCI->abPCIResourceInUse[ui32Index])
            {
                pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
                psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
            }
            return 1;
        default:
            PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function"));
            break;
    }

    return 0;
}
++
++IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
++}
++
++IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
++}
++
++IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
++}
++
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++ IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
/* Release a device acquired via OSPCIAcquireDev / OSPCISetDev.
 *
 * Releases every BAR region still marked in-use, disables MSI if it
 * was enabled, clears bus mastering (on kernels >= 2.6.29 where
 * pci_clear_master exists), disables the device, and frees the
 * PVR_PCI_DEV structure. Always returns PVRSRV_OK.
 */
PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
{
    PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
    int i;

    PVR_TRACE(("OSPCIReleaseDev"));

    /* Give back any BAR regions we still hold. */
    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
    {
        if (psPVRPCI->abPCIResourceInUse[i])
        {
            PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
            pci_release_region(psPVRPCI->psPCIDev, i);
            psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
        }
    }

#if defined(CONFIG_PCI_MSI)
    if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
    {
        pci_disable_msi(psPVRPCI->psPCIDev);
    }
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
    if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
    {
        pci_clear_master(psPVRPCI->psPCIDev);
    }
#endif
    pci_disable_device(psPVRPCI->psPCIDev);

    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL);


    return PVRSRV_OK;
}
++
/* Prepare the PCI device for system suspend.
 *
 * Releases claimed BAR regions (re-claimed in OSPCIResumeDev), saves
 * PCI config space, disables the device, and requests the D3hot power
 * state. Power-state errors other than save-state failure are logged
 * but treated as non-fatal (the function still returns PVRSRV_OK).
 */
PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
{
    PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
    int i;
    int err;

    PVR_TRACE(("OSPCISuspendDev"));

    /* Regions stay flagged in abPCIResourceInUse so resume knows to
       re-request them. */
    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
    {
        if (psPVRPCI->abPCIResourceInUse[i])
        {
            pci_release_region(psPVRPCI->psPCIDev, i);
        }
    }

    err = pci_save_state(psPVRPCI->psPCIDev);
    if (err != 0)
    {
        PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state_failed (%d)", err));
        return PVRSRV_ERROR_GENERIC;
    }

    pci_disable_device(psPVRPCI->psPCIDev);

    err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D3hot);//pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
    switch(err)
    {
        case 0:
            break;
        case -EIO:
            PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM"));
            break;
        case -EINVAL:
            PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state"));
            break;
        default:
            PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err));
            break;
    }

    return PVRSRV_OK;
}
++
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int err;
++ int i;
++
++ PVR_TRACE(("OSPCIResumeDev"));
++
++ err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D0);//pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
++ switch(err)
++ {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state"));
++ return PVRSRV_ERROR_GENERIC;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_restore_state(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ pci_set_master(psPVRPCI->psPCIDev);
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ err = pci_request_region(psPVRPCI->psPCIDev, i, "PowerVR");
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err));
++ }
++ }
++
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++
++#define OS_MAX_TIMERS 8
++
++typedef struct TIMER_CALLBACK_DATA_TAG
++{
++ IMG_BOOL bInUse;
++ PFN_TIMER_FUNC pfnTimerFunc;
++ IMG_VOID *pvData;
++ struct timer_list sTimer;
++ IMG_UINT32 ui32Delay;
++ IMG_BOOL bActive;
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ struct work_struct sWork;
++#endif
++}TIMER_CALLBACK_DATA;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static struct workqueue_struct *psTimerWorkQueue;
++#endif
++
++static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++DEFINE_MUTEX(sTimerStructLock);
++#else
++static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED;
++#endif
++
++static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
++{
++ if (!psTimerCBData->bActive)
++ return;
++
++
++ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
++
++
++ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
++}
++
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
++
++ OSTimerCallbackBody(psTimerCBData);
++}
++#endif
++
++static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)ui32Data;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ int res;
++
++ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
++ if (res == 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
++ }
++#else
++ OSTimerCallbackBody(psTimerCBData);
++#endif
++}
++
++
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData;
++ IMG_UINT32 ui32i;
++#if !defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ unsigned long ulLockFlags;
++#endif
++
++
++ if(!pfnTimerFunc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
++ return IMG_NULL;
++ }
++
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_lock(&sTimerStructLock);
++#else
++ spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
++#endif
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
++ {
++ psTimerCBData = &sTimers[ui32i];
++ if (!psTimerCBData->bInUse)
++ {
++ psTimerCBData->bInUse = IMG_TRUE;
++ break;
++ }
++ }
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_unlock(&sTimerStructLock);
++#else
++ spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
++#endif
++ if (ui32i >= OS_MAX_TIMERS)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
++ return IMG_NULL;
++ }
++
++ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
++ psTimerCBData->pvData = pvData;
++ psTimerCBData->bActive = IMG_FALSE;
++
++
++
++
++ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
++ ? 1
++ : ((HZ * ui32MsTimeout) / 1000);
++
++ init_timer(&psTimerCBData->sTimer);
++
++
++ psTimerCBData->sTimer.function = (IMG_VOID *)OSTimerCallbackWrapper;
++ psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++ return (IMG_HANDLE)(ui32i + 1);
++}
++
++
++static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
++{
++ IMG_UINT32 ui32i = ((IMG_UINT32)hTimer) - 1;
++
++ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
++
++ return &sTimers[ui32i];
++}
++
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++
++ psTimerCBData->bInUse = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++
++ psTimerCBData->bActive = IMG_TRUE;
++
++
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++
++ add_timer(&psTimerCBData->sTimer);
++
++ return PVRSRV_OK;
++}
++
++
/* Stop an active timer and wait for any in-flight callback to finish.
 *
 * Clears bActive (with a memory barrier so the callback's early-exit
 * check sees it), then del_timer_sync() guarantees the timer handler is
 * not running on return. In the workqueue configuration the work queue
 * is flushed both before (drain items queued by a firing timer) and
 * after (drain an item the timer queued between the two calls).
 */
PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
{
    TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);

    PVR_ASSERT(psTimerCBData->bInUse);
    PVR_ASSERT(psTimerCBData->bActive);

    /* Tell OSTimerCallbackBody to stop re-arming. */
    psTimerCBData->bActive = IMG_FALSE;
    smp_mb();

#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
    flush_workqueue(psTimerWorkQueue);
#endif


    del_timer_sync(&psTimerCBData->sTimer);

#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
    /* A final timer fire may have queued one more work item. */
    flush_workqueue(psTimerWorkQueue);
#endif

    return PVRSRV_OK;
}
++
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
++{
++
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(pszName)
++ {
++
++ strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH);
++ }
++ else
++ {
++
++ static IMG_UINT16 ui16NameIndex = 0;
++ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
++ }
++
++ if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++
++}
++
++
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(psEventObject->hOSEventKM)
++ {
++ LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hOSEventKM is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if(hOSEventKM)
++ {
++ eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE *phOSEvent)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++
++}
++
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if(hOSEventKM)
++ {
++ eError = LinuxEventObjectSignal(hOSEventKM);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID)
++{
++ return (capable(CAP_SYS_MODULE) != 0) ? IMG_TRUE : IMG_FALSE;
++}
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if(copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if(copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes)
++{
++ IMG_INT linuxType;
++
++ if (eVerification == PVR_VERIFY_READ)
++ {
++ linuxType = VERIFY_READ;
++ }
++ else
++ {
++ PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
++ linuxType = VERIFY_WRITE;
++ }
++
++ return access_ok(linuxType, pvUserPtr, ui32Bytes);
++}
++
++typedef enum _eWrapMemType_
++{
++ WRAP_TYPE_CLEANUP,
++ WRAP_TYPE_GET_USER_PAGES,
++ WRAP_TYPE_FIND_VMA_PAGES,
++ WRAP_TYPE_FIND_VMA_PFN
++} eWrapMemType;
++
++typedef struct _sWrapMemInfo_
++{
++ eWrapMemType eType;
++ IMG_INT iNumPages;
++ struct page **ppsPages;
++ IMG_SYS_PHYADDR *psPhysAddr;
++ IMG_INT iPageOffset;
++ IMG_INT iContiguous;
++#if defined(DEBUG)
++ IMG_UINT32 ulStartAddr;
++ IMG_UINT32 ulBeyondEndAddr;
++ struct vm_area_struct *psVMArea;
++#endif
++ IMG_BOOL bWrapWorkaround;
++} sWrapMemInfo;
++
++static IMG_VOID CheckPagesContiguous(sWrapMemInfo *psInfo)
++{
++ IMG_INT i;
++ IMG_UINT32 ui32AddrChk;
++
++ BUG_ON(psInfo == IMG_NULL);
++
++ psInfo->iContiguous = 1;
++
++ for (i = 0, ui32AddrChk = psInfo->psPhysAddr[0].uiAddr;
++ i < psInfo->iNumPages;
++ i++, ui32AddrChk += PAGE_SIZE)
++ {
++ if (psInfo->psPhysAddr[i].uiAddr != ui32AddrChk)
++ {
++ psInfo->iContiguous = 0;
++ break;
++ }
++ }
++}
++
/* Resolve a user virtual address to its struct page by manually walking
 * the 4-level page tables (pgd -> pud -> pmd -> pte) of the VMA's mm.
 *
 * The PTE is examined under its page-table lock; the page must be
 * present, writable, and backed by a valid PFN. On success the page's
 * refcount is raised (get_page) — the caller owns that reference.
 *
 * Returns the referenced page, or NULL if any level of the walk fails
 * or on kernels <= 2.6.10 (no pud level / pte_offset_map_lock).
 */
static struct page *CPUVAddrToPage(struct vm_area_struct *psVMArea, IMG_UINT32 ulCPUVAddr)
{
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))
    pgd_t *psPGD;
    pud_t *psPUD;
    pmd_t *psPMD;
    pte_t *psPTE;
    struct mm_struct *psMM = psVMArea->vm_mm;
    IMG_UINT32 ulPFN;
    spinlock_t *psPTLock;
    struct page *psPage;

    psPGD = pgd_offset(psMM, ulCPUVAddr);
    if (pgd_none(*psPGD) || pgd_bad(*psPGD))
        return NULL;

    psPUD = pud_offset(psPGD, ulCPUVAddr);
    if (pud_none(*psPUD) || pud_bad(*psPUD))
        return NULL;

    psPMD = pmd_offset(psPUD, ulCPUVAddr);
    if (pmd_none(*psPMD) || pmd_bad(*psPMD))
        return NULL;

    psPage = NULL;

    /* Map and lock the PTE; must be unlocked on every exit path. */
    psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, ulCPUVAddr, &psPTLock);
    if ((pte_none(*psPTE) != 0) || (pte_present(*psPTE) == 0) || (pte_write(*psPTE) == 0))
        goto exit_unlock;

    ulPFN = pte_pfn(*psPTE);
    if (!pfn_valid(ulPFN))
        goto exit_unlock;

    psPage = pfn_to_page(ulPFN);

    /* Caller receives this reference and must drop it (put_page). */
    get_page(psPage);

exit_unlock:
    pte_unmap_unlock(psPTE, psPTLock);

    return psPage;
#else
    return NULL;
#endif
}
++PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
++{
++ sWrapMemInfo *psInfo = (sWrapMemInfo *)hOSWrapMem;
++ IMG_INT i;
++
++ BUG_ON(psInfo == IMG_NULL);
++
++ switch (psInfo->eType)
++ {
++ case WRAP_TYPE_CLEANUP:
++ break;
++ case WRAP_TYPE_FIND_VMA_PFN:
++ break;
++ case WRAP_TYPE_GET_USER_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ struct page *psPage = psInfo->ppsPages[i];
++
++
++ if (!PageReserved(psPage));
++ {
++ SetPageDirty(psPage);
++ }
++ page_cache_release(psPage);
++ }
++ break;
++ }
++ case WRAP_TYPE_FIND_VMA_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ if(psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[i]);
++ else
++ put_page_testzero(psInfo->ppsPages[i]);
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSReleasePhysPageAddr: Unknown wrap type (%d)", psInfo->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ if (psInfo->ppsPages != IMG_NULL)
++ {
++ kfree(psInfo->ppsPages);
++ }
++
++ if (psInfo->psPhysAddr != IMG_NULL)
++ {
++ kfree(psInfo->psPhysAddr);
++ }
++
++ kfree(psInfo);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround)
++{
++ IMG_UINT32 ulStartAddrOrig = (IMG_UINT32) pvCPUVAddr;
++ IMG_UINT32 ulAddrRangeOrig = (IMG_UINT32) ui32Bytes;
++ IMG_UINT32 ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig;
++ IMG_UINT32 ulStartAddr;
++ IMG_UINT32 ulAddrRange;
++ IMG_UINT32 ulBeyondEndAddr;
++ IMG_UINT32 ulAddr;
++ IMG_INT iNumPagesMapped;
++ IMG_INT i;
++ struct vm_area_struct *psVMArea;
++ sWrapMemInfo *psInfo;
++
++
++ ulStartAddr = ulStartAddrOrig & PAGE_MASK;
++ ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig);
++ ulAddrRange = ulBeyondEndAddr - ulStartAddr;
++
++
++ psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL);
++ if (psInfo == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate information structure"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ memset(psInfo, 0, sizeof(*psInfo));
++ psInfo->bWrapWorkaround = bWrapWorkaround;
++
++#if defined(DEBUG)
++ psInfo->ulStartAddr = ulStartAddrOrig;
++ psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig;
++#endif
++
++ psInfo->iNumPages = (IMG_INT)(ulAddrRange >> PAGE_SHIFT);
++ psInfo->iPageOffset = (IMG_INT)(ulStartAddrOrig & ~PAGE_MASK);
++
++
++ psInfo->psPhysAddr = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr), GFP_KERNEL);
++ if (psInfo->psPhysAddr == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++
++ psInfo->ppsPages = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL);
++ if (psInfo->ppsPages == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++
++ down_read(&current->mm->mmap_sem);
++ iNumPagesMapped = get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages, 1, 0, psInfo->ppsPages, NULL);
++ up_read(&current->mm->mmap_sem);
++
++ if (iNumPagesMapped >= 0)
++ {
++
++ if (iNumPagesMapped != psInfo->iNumPages)
++ {
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't map all the pages needed (wanted: %d, got %d)", psInfo->iNumPages, iNumPagesMapped));
++
++
++ for (i = 0; i < iNumPagesMapped; i++)
++ {
++ page_cache_release(psInfo->ppsPages[i]);
++
++ }
++ goto error_free;
++ }
++
++
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psInfo->ppsPages[i]) << PAGE_SHIFT;
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++
++ }
++
++ psInfo->eType = WRAP_TYPE_GET_USER_PAGES;
++
++ goto exit_check;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "OSAcquirePhysPageAddr: get_user_pages failed (%d), trying something else", iNumPagesMapped));
++
++
++ down_read(&current->mm->mmap_sem);
++
++ psVMArea = find_vma(current->mm, ulStartAddrOrig);
++ if (psVMArea == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't find memory region containing start address %lx", ulStartAddrOrig));
++
++ goto error_release_mmap_sem;
++ }
++#if defined(DEBUG)
++ psInfo->psVMArea = psVMArea;
++#endif
++
++
++ if (ulStartAddrOrig < psVMArea->vm_start)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Start address %lx is outside of the region returned by find_vma", ulStartAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++
++ if (ulBeyondEndAddrOrig > psVMArea->vm_end)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: End address %lx is outside of the region returned by find_vma", ulBeyondEndAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++
++ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != (VM_IO | VM_RESERVED))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++
++ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: No read/write access to memory region (VMA flags: 0x%lx)", psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
++ {
++ struct page *psPage;
++
++ BUG_ON(i >= psInfo->iNumPages);
++
++ psPage = CPUVAddrToPage(psVMArea, ulAddr);
++ if (psPage == NULL)
++ {
++ IMG_INT j;
++
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't lookup page structure for address 0x%lx, trying something else", ulAddr));
++
++
++ for (j = 0; j < i; j++)
++ {
++ if(psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[j]);
++ else
++ put_page_testzero(psInfo->ppsPages[j]);
++ }
++ break;
++ }
++
++ psInfo->ppsPages[i] = psPage;
++ }
++
++ BUG_ON(i > psInfo->iNumPages);
++ if (i == psInfo->iNumPages)
++ {
++
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ struct page *psPage = psInfo->ppsPages[i];
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psPage) << PAGE_SHIFT;
++
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PAGES;
++ }
++ else
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) && defined(PVR_SECURE_HANDLES)
++
++
++
++ if ((psVMArea->vm_flags & VM_PFNMAP) == 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region isn't a raw PFN mapping. Giving up."));
++ goto error_release_mmap_sem;
++ }
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
++ {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = ((ulAddr - psVMArea->vm_start) + (psVMArea->vm_pgoff << PAGE_SHIFT)) & PAGE_MASK;
++
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++ BUG_ON(i != psInfo->iNumPages);
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PFN;
++
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region can't be locked down"));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Raw PFN mappings not supported. Giving up."));
++ goto error_release_mmap_sem;
++#endif
++ }
++
++ up_read(&current->mm->mmap_sem);
++
++exit_check:
++ CheckPagesContiguous(psInfo);
++
++
++
++ *phOSWrapMem = (IMG_HANDLE)psInfo;
++
++ return PVRSRV_OK;
++
++error_release_mmap_sem:
++ up_read(&current->mm->mmap_sem);
++error_free:
++ psInfo->eType = WRAP_TYPE_CLEANUP;
++ OSReleasePhysPageAddr((IMG_HANDLE)psInfo);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVROSFuncInit(IMG_VOID)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ {
++ IMG_UINT32 ui32i;
++
++ psTimerWorkQueue = create_workqueue("pvr_timer");
++ if (psTimerWorkQueue == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
++ return PVRSRV_ERROR_GENERIC;
++
++ }
++
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
++ {
++ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
++
++ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
++ }
++ }
++#endif
++ return PVRSRV_OK;
++}
++
++IMG_VOID PVROSFuncDeInit(IMG_VOID)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ if (psTimerWorkQueue != NULL)
++ {
++ destroy_workqueue(psTimerWorkQueue);
++ }
++#endif
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osperproc.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osperproc.c
+new file mode 100644
+index 0000000..011c8f3
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osperproc.c
+@@ -0,0 +1,113 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "osperproc.h"
++
++#include "env_perproc.h"
++#include "proc.h"
++
++extern IMG_UINT32 gui32ReleasePID;
++
++PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ phOsPrivateData,
++ &hBlockAlloc,
++ "Environment per Process Data");
++
++ if (eError != PVRSRV_OK)
++ {
++ *phOsPrivateData = IMG_NULL;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)", __FUNCTION__, eError));
++ return eError;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData;
++ OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc));
++
++ psEnvPerProc->hBlockAlloc = hBlockAlloc;
++
++
++ LinuxMMapPerProcessConnect(psEnvPerProc);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead);
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (hOsPrivateData == IMG_NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData;
++
++
++ LinuxMMapPerProcessDisconnect(psEnvPerProc);
++
++
++ RemovePerProcessProcDir(psEnvPerProc);
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ hOsPrivateData,
++ psEnvPerProc->hBlockAlloc);
++
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)", __FUNCTION__, eError));
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ return LinuxMMapPerProcessHandleOptions(psHandleBase);
++}
++
++IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID)
++{
++ if(!gui32ReleasePID)
++ return NULL;
++ return PVRSRVPerProcessPrivateData(gui32ReleasePID);
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pdump.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pdump.c
+new file mode 100644
+index 0000000..11d69d1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pdump.c
+@@ -0,0 +1,662 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined (SUPPORT_SGX)
++#if defined (PDUMP)
++
++#include <asm/atomic.h>
++#include <stdarg.h>
++#include "sgxdefs.h"
++#include "services_headers.h"
++
++#include "pvrversion.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "sgxmmu.h"
++#include "mm.h"
++#include "pdump_km.h"
++
++#include <linux/tty.h>
++
++static IMG_BOOL PDumpWriteString2 (IMG_CHAR * pszString, IMG_UINT32 ui32Flags);
++static IMG_BOOL PDumpWriteILock (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags);
++static IMG_VOID DbgSetFrame (PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++static IMG_UINT32 DbgGetFrame (PDBG_STREAM psStream);
++static IMG_VOID DbgSetMarker (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++static IMG_UINT32 DbgWrite (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags);
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(a,b) (a > b ? b : a)
++
++#define MAX_FILE_SIZE 0x40000000
++
++static atomic_t gsPDumpSuspended = ATOMIC_INIT(0);
++
++static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL;
++
++
++
++IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2",
++ "ScriptStream2",
++ "DriverInfoStream"};
++typedef struct PDBG_PDUMP_STATE_TAG
++{
++ PDBG_STREAM psStream[PDUMP_NUM_STREAMS];
++ IMG_UINT32 ui32ParamFileNum;
++
++ IMG_CHAR *pszMsg;
++ IMG_CHAR *pszScript;
++ IMG_CHAR *pszFile;
++
++} PDBG_PDUMP_STATE;
++
++static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL};
++
++#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_FILENAME_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++
++
++
++
++IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table);
++
++static inline IMG_BOOL PDumpSuspended(IMG_VOID)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
++ *pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
++ if ((!*phScript) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetMessageString(IMG_HANDLE *phMsg,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *phMsg = (IMG_HANDLE)gsDBGPdumpState.pszMsg;
++ *pui32MaxLen = SZ_MSG_SIZE_MAX;
++ if ((!*phMsg) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *ppszFile = gsDBGPdumpState.pszFile;
++ *pui32MaxLen = SZ_FILENAME_SIZE_MAX;
++ if ((!*ppszFile) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags)
++{
++ return PDumpWriteString2(hScript, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
++{
++ IMG_CHAR* pszBuf = hBuf;
++ IMG_UINT32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
++{
++ IMG_UINT32 n;
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...)
++{
++
++}
++
++PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
++{
++ IMG_UINT32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
++{
++ IMG_CHAR* pszBuf = hBuffer;
++ IMG_UINT32 ui32Count = 0;
++
++ while ((pszBuf[ui32Count]!=0) && (ui32Count<ui32BufferSizeMax) )
++ {
++ ui32Count++;
++ }
++ return(ui32Count);
++}
++
++IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
++{
++ IMG_UINT32 ui32Count = 0;
++ IMG_CHAR* pszBuf = hBuffer;
++
++
++ ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
++
++
++ if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
++ {
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++ if ((ui32Count >= 2) && (pszBuf[ui32Count-2] != '\r') && (ui32Count<ui32BufferSizeMax))
++ {
++ pszBuf[ui32Count-1] = '\r';
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++}
++
++IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream)
++{
++ return (IMG_HANDLE)gsDBGPdumpState.psStream[ePDumpStream];
++}
++
++IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream)
++{
++ PDBG_STREAM psStream = gsDBGPdumpState.psStream[ePDumpStream];
++ return gpfnDbgDrv->pfnGetStreamOffset(psStream);
++}
++
++IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID)
++{
++ return gsDBGPdumpState.ui32ParamFileNum;
++}
++
++IMG_BOOL PDumpOSWriteString(IMG_HANDLE hStream,
++ IMG_UINT8 *psui8Data,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags)
++{
++ PDBG_STREAM psStream = (PDBG_STREAM)hStream;
++ return PDumpWriteILock(psStream,
++ psui8Data,
++ ui32Size,
++ ui32Flags);
++}
++
++IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
++{
++
++ PVR_UNREFERENCED_PARAMETER(hStream);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++}
++
++IMG_BOOL PDumpOSJTInitialised(IMG_VOID)
++{
++ if(gpfnDbgDrv)
++ {
++ return IMG_TRUE;
++ }
++ return IMG_FALSE;
++}
++
++inline IMG_BOOL PDumpOSIsSuspended(IMG_VOID)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT8 *pui8LinAddr,
++ IMG_UINT32 ui32PageSize,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ if(hOSMemHandle)
++ {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0);
++
++
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++ else
++ {
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Offset);
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++}
++
++IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_PUINT8 pui8LinAddr,
++ IMG_UINT32 *pui32PageOffset)
++{
++ if(hOSMemHandle)
++ {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ *pui32PageOffset = sCpuPAddr.uiAddr & (HOST_PAGESIZE() -1);
++ }
++ else
++ {
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32Offset);
++
++ *pui32PageOffset = (IMG_UINT32)pui8LinAddr & (HOST_PAGESIZE() - 1);
++ }
++}
++
++
++
++IMG_VOID PDumpInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++
++ if (!gpfnDbgDrv)
++ {
++ DBGDrvGetServiceTable((IMG_VOID **)&gpfnDbgDrv);
++
++
++
++
++ if (gpfnDbgDrv == IMG_NULL)
++ {
++ return;
++ }
++
++ if(!gsDBGPdumpState.pszFile)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0,
++ "Filename string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ if(!gsDBGPdumpState.pszMsg)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0,
++ "Message string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ if(!gsDBGPdumpState.pszScript)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0,
++ "Script string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i],
++ DEBUG_CAPMODE_FRAMED,
++ DEBUG_OUTMODE_STREAMENABLE,
++ 0,
++ 10);
++
++ gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1);
++ gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0);
++ }
++
++ PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME);
++ PDUMPCOMMENT("Driver Product Version: %s (%s)", PVRVERSION_STRING, PVRVERSION_FILE);
++ PDUMPCOMMENT("Start of Init Phase");
++ }
++
++ return;
++
++init_failed:
++
++ if(gsDBGPdumpState.pszFile)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszScript)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszMsg)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = IMG_NULL;
++ }
++
++ gpfnDbgDrv = IMG_NULL;
++}
++
++
++IMG_VOID PDumpDeInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
++ }
++
++ if(gsDBGPdumpState.pszFile)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszScript)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszMsg)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = IMG_NULL;
++ }
++
++ gpfnDbgDrv = IMG_NULL;
++}
++
++PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ if (gpfnDbgDrv)
++ {
++ PDUMPCOMMENT("Start Init Phase");
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ if (gpfnDbgDrv)
++ {
++ PDUMPCOMMENT("Stop Init Phase");
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID)
++{
++ return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++}
++
++
++IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID)
++{
++ if (PDumpSuspended())
++ {
++ return IMG_FALSE;
++ }
++ return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE);
++}
++
++PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame)
++{
++ IMG_UINT32 ui32Stream;
++
++ for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++)
++ {
++ if (gsDBGPdumpState.psStream[ui32Stream])
++ {
++ DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], ui32Frame);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpGetFrameKM(IMG_PUINT32 pui32Frame)
++{
++ *pui32Frame = DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++
++ return PVRSRV_OK;
++}
++
++
++
++static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags)
++{
++ return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], (IMG_UINT8 *) pszString, strlen(pszString), ui32Flags);
++}
++
++
++static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32Written = 0;
++ IMG_UINT32 ui32Off = 0;
++
++ if ((psStream == IMG_NULL) || PDumpSuspended() || ((ui32Flags & PDUMP_FLAGS_NEVER) != 0))
++ {
++ return IMG_TRUE;
++ }
++
++
++
++
++ if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2])
++ {
++ IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++ if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE)
++ {
++ if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags)))
++ {
++ DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos);
++ gsDBGPdumpState.ui32ParamFileNum++;
++ }
++ }
++ }
++
++
++ while (((IMG_UINT32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF))
++ {
++ ui32Written = DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, ui32Flags);
++
++
++
++
++ if (ui32Written == 0)
++ {
++ OSReleaseThreadQuanta();
++ }
++
++ if (ui32Written != 0xFFFFFFFF)
++ {
++ ui32Off += ui32Written;
++ ui32Count -= ui32Written;
++ }
++ }
++
++ if (ui32Written == 0xFFFFFFFF)
++ {
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame)
++{
++ gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
++}
++
++
++static IMG_UINT32 DbgGetFrame(PDBG_STREAM psStream)
++{
++ return gpfnDbgDrv->pfnGetFrame(psStream);
++}
++
++static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
++}
++
++static IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32BytesWritten;
++
++ if ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) != 0)
++ {
++
++
++ if (((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
++ (psStream->ui32Start == 0xFFFFFFFFUL) &&
++ (psStream->ui32End == 0xFFFFFFFFUL) &&
++ psStream->bInitPhaseComplete)
++ {
++ ui32BytesWritten = ui32BCount;
++ }
++ else
++ {
++ ui32BytesWritten = gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, 1);
++ }
++ }
++ else
++ {
++ if (ui32Flags & PDUMP_FLAGS_LASTFRAME)
++ {
++ IMG_UINT32 ui32DbgFlags;
++
++ ui32DbgFlags = 0;
++ if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
++ {
++ ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
++ }
++
++ ui32BytesWritten = gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, 1, ui32DbgFlags);
++ }
++ else
++ {
++ ui32BytesWritten = gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, 1);
++ }
++ }
++
++ return ui32BytesWritten;
++}
++
++
++IMG_VOID PDumpSuspendKM(IMG_VOID)
++{
++ atomic_inc(&gsPDumpSuspended);
++}
++
++IMG_VOID PDumpResumeKM(IMG_VOID)
++{
++ atomic_dec(&gsPDumpSuspended);
++}
++
++#endif
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/private_data.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/private_data.h
+new file mode 100644
+index 0000000..0751765
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/private_data.h
+@@ -0,0 +1,67 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_PRIVATE_DATA_H_
++#define __INCLUDED_PRIVATE_DATA_H_
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include <linux/list.h>
++#include <drm/drmP.h>
++#endif
++
++typedef struct
++{
++
++ IMG_UINT32 ui32OpenPID;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++
++ IMG_HANDLE hKernelMemInfo;
++#endif
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ struct list_head sDRMAuthListItem;
++
++ struct drm_file *psDRMFile;
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++
++ IMG_UINT64 ui64Stamp;
++#endif
++
++
++ IMG_HANDLE hBlockAlloc;
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ IMG_PVOID pPriv;
++#endif
++}
++PVRSRV_FILE_PRIVATE_DATA;
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.c
+new file mode 100644
+index 0000000..1ba2466
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.c
+@@ -0,0 +1,970 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include "services_headers.h"
++
++#include "queue.h"
++#include "resman.h"
++#include "pvrmmap.h"
++#include "pvr_debug.h"
++#include "pvrversion.h"
++#include "proc.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "linkage.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++
++
++static struct proc_dir_entry * dir;
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++static off_t procDumpSysNodes(IMG_CHAR *buf, size_t size, off_t off);
++static off_t procDumpVersion(IMG_CHAR *buf, size_t size, off_t off);
++#endif
++
++
++static const IMG_CHAR PVRProcDirRoot[] = "pvr";
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++static IMG_INT pvr_proc_open(struct inode *inode,struct file *file);
++static void *pvr_proc_seq_start (struct seq_file *m, loff_t *pos);
++static void pvr_proc_seq_stop (struct seq_file *m, void *v);
++static void *pvr_proc_seq_next (struct seq_file *m, void *v, loff_t *pos);
++static int pvr_proc_seq_show (struct seq_file *m, void *v);
++static ssize_t pvr_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
++
++static struct file_operations pvr_proc_operations =
++{
++ .open = pvr_proc_open,
++ .read = seq_read,
++ .write = pvr_proc_write,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++static struct seq_operations pvr_proc_seq_operations =
++{
++ .start = pvr_proc_seq_start,
++ .next = pvr_proc_seq_next,
++ .stop = pvr_proc_seq_stop,
++ .show = pvr_proc_seq_show,
++};
++
++static struct proc_dir_entry* g_pProcQueue;
++static struct proc_dir_entry* g_pProcVersion;
++static struct proc_dir_entry* g_pProcSysNodes;
++
++#ifdef DEBUG
++static struct proc_dir_entry* g_pProcDebugLevel;
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++static struct proc_dir_entry* g_pProcPowerLevel;
++#endif
++
++
++static void ProcSeqShowVersion(struct seq_file *sfile,void* el);
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off);
++
++#endif
++
++off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
++{
++ IMG_INT n;
++ size_t space = size - (size_t)off;
++ va_list ap;
++
++	PVR_ASSERT((size_t)off <= size);
++
++ va_start (ap, format);
++
++ n = vsnprintf (buffer+off, space, format, ap);
++
++ va_end (ap);
++
++ if (n >= (IMG_INT)space || n < 0)
++ {
++
++ buffer[size - 1] = 0;
++ return (off_t)(size - 1);
++ }
++ else
++ {
++ return (off + (off_t)n);
++ }
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off)
++{
++
++ if(!off)
++ return (void*)2;
++ return NULL;
++}
++
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off)
++{
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++
++ if(off == 1)
++ return (void*)2;
++
++ return NULL;
++}
++
++
++static IMG_INT pvr_proc_open(struct inode *inode,struct file *file)
++{
++ IMG_INT ret = seq_open(file, &pvr_proc_seq_operations);
++
++ struct seq_file *seq = (struct seq_file*)file->private_data;
++ struct proc_dir_entry* pvr_proc_entry = PDE(inode);
++
++
++	if (ret == 0) seq->private = pvr_proc_entry->data;
++ return ret;
++}
++
++static ssize_t pvr_proc_write(struct file *file, const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct inode *inode = file->f_path.dentry->d_inode;
++ struct proc_dir_entry * dp;
++
++ dp = PDE(inode);
++
++ if (!dp->write_proc)
++ return -EIO;
++
++ return dp->write_proc(file, buffer, count, dp->data);
++}
++
++
++static void *pvr_proc_seq_start (struct seq_file *proc_seq_file, loff_t *pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ if(handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, IMG_TRUE);
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static void pvr_proc_seq_stop (struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ if(handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, IMG_FALSE);
++}
++
++static void *pvr_proc_seq_next (struct seq_file *proc_seq_file, void *v, loff_t *pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ (*pos)++;
++ if( handlers->next != NULL)
++ return handlers->next( proc_seq_file, v, *pos );
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static int pvr_proc_seq_show (struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ handlers->show( proc_seq_file,v );
++ return 0;
++}
++
++
++
++static struct proc_dir_entry* CreateProcEntryInDirSeq(
++ struct proc_dir_entry *pdir,
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++
++ struct proc_dir_entry * file;
++ mode_t mode;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
++ return NULL;
++ }
++
++ mode = S_IFREG;
++
++ if (show_handler)
++ {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler)
++ {
++ mode |= S_IWUSR;
++ }
++
++ file=create_proc_entry(name, mode, pdir);
++
++ if (file)
++ {
++ PVR_PROC_SEQ_HANDLERS *seq_handlers;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++
++ file->proc_fops = &pvr_proc_operations;
++ file->write_proc = whandler;
++
++
++ file->data = kmalloc(sizeof(PVR_PROC_SEQ_HANDLERS), GFP_KERNEL);
++ if(file->data)
++ {
++ seq_handlers = (PVR_PROC_SEQ_HANDLERS*)file->data;
++ seq_handlers->next = next_handler;
++ seq_handlers->show = show_handler;
++ seq_handlers->off2element = off2element_handler;
++ seq_handlers->startstop = startstop_handler;
++ seq_handlers->data = data;
++
++ return file;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
++ return 0;
++}
++
++
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ )
++{
++ return CreateProcEntrySeq(name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler,
++ startstop_handler,
++ NULL);
++}
++
++struct proc_dir_entry* CreateProcEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++ return CreateProcEntryInDirSeq(
++ dir,
++ name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler,
++ startstop_handler,
++				whandler
++ );
++}
++
++
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ IMG_UINT32 ui32PID;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: /proc/%s doesn't exist", PVRProcDirRoot));
++ return NULL;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: no per process data"));
++
++ return NULL;
++ }
++
++ if (!psPerProc->psProcDir)
++ {
++ IMG_CHAR dirname[16];
++ IMG_INT ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%lu", ui32PID);
++
++ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
++ return NULL;
++ }
++ else
++ {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u",
++ PVRProcDirRoot, ui32PID));
++ return NULL;
++ }
++ }
++ }
++
++ return CreateProcEntryInDirSeq(psPerProc->psProcDir, name, data, next_handler,
++ show_handler,off2element_handler,startstop_handler,whandler);
++}
++
++
++IMG_VOID RemoveProcEntrySeq( struct proc_dir_entry* proc_entry )
++{
++ if (dir)
++ {
++ void* data = proc_entry->data ;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, proc_entry->name));
++
++ remove_proc_entry(proc_entry->name, dir);
++ if( data)
++ kfree( data );
++
++ }
++}
++
++IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc)
++ {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", proc_entry->name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir)
++ {
++ void* data = proc_entry->data ;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", proc_entry->name, psPerProc->psProcDir->name));
++
++ remove_proc_entry(proc_entry->name, psPerProc->psProcDir);
++ if(data)
++ kfree( data );
++ }
++}
++
++#endif
++
++static IMG_INT pvr_read_proc(IMG_CHAR *page, IMG_CHAR **start, off_t off,
++ IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ pvr_read_proc_t *pprn = (pvr_read_proc_t *)data;
++
++ off_t len = pprn (page, (size_t)count, off);
++
++ if (len == END_OF_FILE)
++ {
++ len = 0;
++ *eof = 1;
++ }
++ else if (!len)
++ {
++ *start = (IMG_CHAR *) 0;
++ }
++ else
++ {
++ *start = (IMG_CHAR *) 1;
++ }
++
++ return len;
++}
++
++
++static IMG_INT CreateProcEntryInDir(struct proc_dir_entry *pdir, const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ struct proc_dir_entry * file;
++ mode_t mode;
++
++ if (!pdir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDir: parent directory doesn't exist"));
++
++ return -ENOMEM;
++ }
++
++ mode = S_IFREG;
++
++ if (rhandler)
++ {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler)
++ {
++ mode |= S_IWUSR;
++ }
++
++ file = create_proc_entry(name, mode, pdir);
++
++ if (file)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++ file->read_proc = rhandler;
++ file->write_proc = whandler;
++ file->data = data;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Created proc entry %s in %s", name, pdir->name));
++
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot create proc entry %s in %s", name, pdir->name));
++
++ return -ENOMEM;
++}
++
++
++IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ return CreateProcEntryInDir(dir, name, rhandler, whandler, data);
++}
++
++
++IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ IMG_UINT32 ui32PID;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: /proc/%s doesn't exist", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: no per process data"));
++
++ return -ENOMEM;
++ }
++
++ if (!psPerProc->psProcDir)
++ {
++ IMG_CHAR dirname[16];
++ IMG_INT ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%lu", ui32PID);
++
++ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
++
++ return -ENOMEM;
++ }
++ else
++ {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u", PVRProcDirRoot, ui32PID));
++
++ return -ENOMEM;
++ }
++ }
++ }
++
++ return CreateProcEntryInDir(psPerProc->psProcDir, name, rhandler, whandler, data);
++}
++
++
++IMG_INT CreateProcReadEntry(const IMG_CHAR * name, pvr_read_proc_t handler)
++{
++ struct proc_dir_entry * file;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
++
++ return -ENOMEM;
++ }
++
++ file = create_proc_read_entry (name, S_IFREG | S_IRUGO, dir, pvr_read_proc, (IMG_VOID *)handler);
++
++ if (file)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
++
++ return -ENOMEM;
++}
++
++
++IMG_INT CreateProcEntries(IMG_VOID)
++{
++ dir = proc_mkdir (PVRProcDirRoot, NULL);
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/%s directory", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcQueue = CreateProcReadEntrySeq("queue", NULL, NULL, ProcSeqShowQueue, ProcSeqOff2ElementQueue, NULL);
++ g_pProcVersion = CreateProcReadEntrySeq("version", NULL, NULL, ProcSeqShowVersion, ProcSeq1ElementHeaderOff2Element, NULL);
++ g_pProcSysNodes = CreateProcReadEntrySeq("nodes", NULL, NULL, ProcSeqShowSysNodes, ProcSeqOff2ElementSysNodes, NULL);
++
++ if(!g_pProcQueue || !g_pProcVersion || !g_pProcSysNodes)
++#else
++ if (CreateProcReadEntry("queue", QueuePrintQueues) ||
++ CreateProcReadEntry("version", procDumpVersion) ||
++ CreateProcReadEntry("nodes", procDumpSysNodes))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s files", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcDebugLevel = CreateProcEntrySeq("debug_level", NULL, NULL,
++ ProcSeqShowDebugLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRDebugProcSetLevel);
++ if(!g_pProcDebugLevel)
++#else
++ if (CreateProcEntry ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/debug_level", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcPowerLevel = CreateProcEntrySeq("power_control", NULL, NULL,
++ ProcSeqShowPowerLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRProcSetPowerLevel);
++ if(!g_pProcPowerLevel)
++#else
++ if (CreateProcEntry("power_control", PVRProcGetPowerLevel, PVRProcSetPowerLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/power_control", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++#endif
++#endif
++
++ return 0;
++}
++
++
++IMG_VOID RemoveProcEntry(const IMG_CHAR * name)
++{
++ if (dir)
++ {
++ remove_proc_entry(name, dir);
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, name));
++ }
++}
++
++
++IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR *name)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc)
++ {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir)
++ {
++ remove_proc_entry(name, psPerProc->psProcDir);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", name, psPerProc->psProcDir->name));
++ }
++}
++
++
++IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->psProcDir)
++ {
++ while (psPerProc->psProcDir->subdir)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s/%s", PVRProcDirRoot, psPerProc->psProcDir->name, psPerProc->psProcDir->subdir->name));
++
++ RemoveProcEntry(psPerProc->psProcDir->subdir->name);
++ }
++ RemoveProcEntry(psPerProc->psProcDir->name);
++ }
++}
++
++IMG_VOID RemoveProcEntries(IMG_VOID)
++{
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_pProcDebugLevel );
++#else
++ RemoveProcEntry("debug_level");
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_pProcPowerLevel );
++#else
++ RemoveProcEntry("power_control");
++#endif
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_pProcQueue);
++ RemoveProcEntrySeq(g_pProcVersion);
++ RemoveProcEntrySeq(g_pProcSysNodes);
++#else
++ RemoveProcEntry("queue");
++ RemoveProcEntry("version");
++ RemoveProcEntry("nodes");
++#endif
++
++ while (dir->subdir)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s", PVRProcDirRoot, dir->subdir->name));
++
++ RemoveProcEntry(dir->subdir->name);
++ }
++
++ remove_proc_entry(PVRProcDirRoot, NULL);
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowVersion(struct seq_file *sfile,void* el)
++{
++ SYS_DATA * psSysData;
++ IMG_CHAR *pszSystemVersionString = "None";
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING,
++ PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ if(psSysData->pszVersionString)
++ {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ seq_printf( sfile, "System Version String: %s\n", pszSystemVersionString);
++}
++
++#else
++
++static off_t procDumpVersion(IMG_CHAR *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++
++ if (off == 0)
++ {
++ return printAppend(buf, size, 0,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING,
++ PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ }
++
++	SysAcquireData(&psSysData);
++
++ if (off == 1)
++ {
++ IMG_CHAR *pszSystemVersionString = "None";
++
++ if(psSysData->pszVersionString)
++ {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ if(strlen(pszSystemVersionString)
++ + strlen("System Version String: \n")
++ + 1 > size)
++ {
++ return 0;
++ }
++ return printAppend(buf, size, 0,
++ "System Version String: %s\n",
++ pszSystemVersionString);
++ }
++
++ return END_OF_FILE;
++}
++
++#endif
++
++
++static const IMG_CHAR *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType)
++{
++ switch (deviceType)
++ {
++ default:
++ {
++ static IMG_CHAR text[10];
++
++ sprintf(text, "?%x", (IMG_UINT)deviceType);
++
++ return text;
++ }
++ }
++}
++
++
++static const IMG_CHAR *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass)
++{
++ switch (deviceClass)
++ {
++ case PVRSRV_DEVICE_CLASS_3D:
++ {
++ return "3D";
++ }
++ case PVRSRV_DEVICE_CLASS_DISPLAY:
++ {
++ return "display";
++ }
++ case PVRSRV_DEVICE_CLASS_BUFFER:
++ {
++ return "buffer";
++ }
++ default:
++ {
++ static IMG_CHAR text[10];
++
++ sprintf(text, "?%x", (IMG_UINT)deviceClass);
++ return text;
++ }
++ }
++}
++
++IMG_VOID* DecOffPsDev_AnyVaCb(PVRSRV_DEVICE_NODE *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el)
++{
++ SYS_DATA * psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)el;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ seq_printf( sfile,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++
++}
++
++static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb,
++ &off);
++
++
++ return (void*)psDevNode;
++}
++
++#else
++
++static
++off_t procDumpSysNodes(IMG_CHAR *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ off_t len;
++
++
++ if (size < 80)
++ {
++ return 0;
++ }
++
++ if (off == 0)
++ {
++ return printAppend(buf, size, 0,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb,
++ &off);
++
++ if (!psDevNode)
++ {
++ return END_OF_FILE;
++ }
++
++ len = printAppend(buf, size, 0,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++ return (len);
++}
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.h
+new file mode 100644
+index 0000000..3200961
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.h
+@@ -0,0 +1,115 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_PROC_H__
++#define __SERVICES_PROC_H__
++
++#include <asm/system.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#define END_OF_FILE (off_t) -1
++
++typedef off_t (pvr_read_proc_t)(IMG_CHAR *, size_t, off_t);
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++typedef void* (pvr_next_proc_seq_t)(struct seq_file *,void*,loff_t);
++typedef void* (pvr_off2element_proc_seq_t)(struct seq_file *, loff_t);
++typedef void (pvr_show_proc_seq_t)(struct seq_file *,void*);
++typedef void (pvr_startstop_proc_seq_t)(struct seq_file *, IMG_BOOL start);
++
++typedef struct _PVR_PROC_SEQ_HANDLERS_ {
++ pvr_next_proc_seq_t *next;
++ pvr_show_proc_seq_t *show;
++ pvr_off2element_proc_seq_t *off2element;
++ pvr_startstop_proc_seq_t *startstop;
++ IMG_VOID *data;
++} PVR_PROC_SEQ_HANDLERS;
++
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off);
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off);
++
++
++#endif
++
++off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
++ __attribute__((format(printf, 4, 5)));
++
++IMG_INT CreateProcEntries(IMG_VOID);
++
++IMG_INT CreateProcReadEntry (const IMG_CHAR * name, pvr_read_proc_t handler);
++
++IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
++
++IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
++
++IMG_VOID RemoveProcEntry(const IMG_CHAR * name);
++
++IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR * name);
++
++IMG_VOID RemoveProcEntries(IMG_VOID);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ );
++
++struct proc_dir_entry* CreateProcEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++
++IMG_VOID RemoveProcEntrySeq(struct proc_dir_entry* proc_entry);
++IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry);
++
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+new file mode 100644
+index 0000000..e4e4946
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+@@ -0,0 +1,651 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++#include "mutex.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "private_data.h"
++#include "linkage.h"
++#include "pvr_bridge_km.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#include "pvr_drm.h"
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++
++#ifdef MODULE_TEST
++#include "pvr_test_bridge.h"
++#include "kern_test.h"
++#endif
++
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_ProcBridgeStats =0;
++static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementBridgeStats(struct seq_file * sfile, loff_t off);
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start);
++
++#else
++static off_t printLinuxBridgeStats(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++static IMG_UINT64 ui64Stamp;
++#endif
++
++PVRSRV_ERROR
++LinuxBridgeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcBridgeStats = CreateProcReadEntrySeq(
++ "bridge_stats",
++ NULL,
++ ProcSeqNextBridgeStats,
++ ProcSeqShowBridgeStats,
++ ProcSeqOff2ElementBridgeStats,
++ ProcSeqStartstopBridgeStats
++ );
++ iStatus = !g_ProcBridgeStats ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("bridge_stats", printLinuxBridgeStats);
++#endif
++
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++ return CommonBridgeInit();
++}
++
++IMG_VOID
++LinuxBridgeDeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcBridgeStats);
++#else
++ RemoveProcEntry("bridge_stats");
++#endif
++#endif
++}
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&gPVRSRVLock);
++ }
++ else
++ {
++ LinuxUnLockMutex(&gPVRSRVLock);
++ }
++}
++
++
++static void* ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off)
++{
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++ {
++ return (void*)0;
++ }
++
++
++ return (void*)&g_BridgeDispatchTable[off-1];
++}
++
++static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off)
++{
++ return ProcSeqOff2ElementBridgeStats(sfile,off);
++}
++
++
++static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = ( PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY*)el;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf(sfile,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name",
++ "Wrapper Function",
++ "Call Count",
++ "copy_from_user Bytes",
++ "copy_to_user Bytes"
++ );
++ return;
++ }
++
++ seq_printf(sfile,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++}
++
++#else
++
++static off_t
++printLinuxBridgeStats(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
++ off_t Ret;
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ if(!off)
++ {
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++ Ret = printAppend(buffer, count, 0,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name",
++ "Wrapper Function",
++ "Call Count",
++ "copy_from_user Bytes",
++ "copy_to_user Bytes"
++ );
++ goto unlock_and_return;
++ }
++
++ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++ {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(count < 300)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psEntry = &g_BridgeDispatchTable[off-1];
++ Ret = printAppend(buffer, count, 0,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++
++unlock_and_return:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return Ret;
++}
++#endif
++#endif
++
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT
++PVRSRV_BridgeDispatchKM(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++#else
++IMG_INT32
++PVRSRV_BridgeDispatchKM(struct file *pFile, IMG_UINT unref__ ioctlCmd, IMG_UINT32 arg)
++#endif
++{
++ IMG_UINT32 cmd;
++#if !defined(SUPPORT_DRI_DRM)
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
++#endif
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ IMG_INT err = -EFAULT;
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++
++ psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++ PVR_ASSERT(psBridgePackageKM != IMG_NULL);
++#else
++ PVR_UNREFERENCED_PARAMETER(ioctlCmd);
++
++ psBridgePackageKM = &sBridgePackageKM;
++
++ if(!OSAccessOK(PVR_VERIFY_WRITE,
++ psBridgePackageUM,
++ sizeof(PVRSRV_BRIDGE_PACKAGE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
++ __FUNCTION__));
++
++ goto unlock_and_return;
++ }
++
++
++ if(OSCopyFromUser(IMG_NULL,
++ psBridgePackageKM,
++ psBridgePackageUM,
++ sizeof(PVRSRV_BRIDGE_PACKAGE))
++ != PVRSRV_OK)
++ {
++ goto unlock_and_return;
++ }
++#endif
++
++ cmd = psBridgePackageKM->ui32BridgeID;
++
++#if defined(MODULE_TEST)
++ switch (cmd)
++ {
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM1:
++ {
++ PVRSRV_ERROR eError = MemTest1();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM2:
++ {
++ PVRSRV_ERROR eError = MemTest2();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_RESOURCE:
++ {
++ PVRSRV_ERROR eError = ResourceTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_EVENTOBJECT:
++ {
++ PVRSRV_ERROR eError = EventObjectTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEMMAPPING:
++ {
++ PVRSRV_ERROR eError = MemMappingTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PROCESSID:
++ {
++ PVRSRV_ERROR eError = ProcessIDTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_CLOCKUSWAITUS:
++ {
++ PVRSRV_ERROR eError = ClockusWaitusTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_TIMER:
++ {
++ PVRSRV_ERROR eError = TimerTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PRIVSRV:
++ {
++ PVRSRV_ERROR eError = PrivSrvTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA:
++ {
++ IMG_UINT32 ui32PID;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ PVRSRV_ERROR eError;
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ PVRSRVTrace("PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA %d", ui32PID);
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++
++ eError = CopyDataTest(psBridgePackageKM->pvParamIn, psBridgePackageKM->pvParamOut, psPerProc);
++
++ *(PVRSRV_ERROR*)psBridgePackageKM->pvParamOut = eError;
++ err = 0;
++ goto unlock_and_return;
++ }
++
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_POWERMGMT:
++ {
++ PVRSRV_ERROR eError = PowerMgmtTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ }
++#endif
++
++ if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES)
++ {
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (IMG_PVOID *)&psPerProc,
++ psBridgePackageKM->hKernelServices,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)",
++ __FUNCTION__, eError));
++ goto unlock_and_return;
++ }
++
++ if(psPerProc->ui32PID != ui32PID)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data "
++ "belonging to process %d", __FUNCTION__, ui32PID,
++ psPerProc->ui32PID));
++ goto unlock_and_return;
++ }
++ }
++ else
++ {
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if(psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
++ "Couldn't create per-process data area"));
++ goto unlock_and_return;
++ }
++ }
++
++ psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ switch(cmd)
++ {
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo "
++ "per file descriptor", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN =
++ (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamIn;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(!psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no "
++ "associated MemInfo handle", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ psMapDevMemIN->hKernelMemInfo = psPrivateData->hKernelMemInfo;
++ break;
++ }
++
++ default:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried "
++ "to use privileged service", __FUNCTION__));
++ goto unlock_and_return;
++ }
++ break;
++ }
++ }
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ switch(cmd)
++ {
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ int authenticated = pFile->authenticated;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (authenticated)
++ {
++ break;
++ }
++
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)PVRSRVProcessPrivateData(psPerProc);
++ if (psEnvPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Process private data not allocated", __FUNCTION__));
++ err = -EFAULT;
++ goto unlock_and_return;
++ }
++
++ list_for_each_entry(psPrivateData, &psEnvPerProc->sDRMAuthListHead, sDRMAuthListItem)
++ {
++ struct drm_file *psDRMFile = psPrivateData->psDRMFile;
++
++ if (pFile->master == psDRMFile->master)
++ {
++ authenticated |= psDRMFile->authenticated;
++ if (authenticated)
++ {
++ break;
++ }
++ }
++ }
++
++ if (!authenticated)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Not authenticated for mapping device or device class memory", __FUNCTION__));
++ err = -EPERM;
++ goto unlock_and_return;
++ }
++ break;
++ }
++ default:
++ break;
++ }
++#endif
++
++ err = BridgedDispatchKM(psPerProc, psBridgePackageKM);
++ if(err != PVRSRV_OK)
++ goto unlock_and_return;
++
++ switch(cmd)
++ {
++#if defined(PVR_SECURE_FD_EXPORT)
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT =
++ (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ psPrivateData->hKernelMemInfo = psExportDeviceMemOUT->hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ psExportDeviceMemOUT->ui64Stamp = psPrivateData->ui64Stamp = ++ui64Stamp;
++#endif
++ break;
++ }
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++ psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp = psPrivateData->ui64Stamp;
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)psBridgePackageKM->pvParamOut;
++ psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp = ++ui64Stamp;
++ break;
++ }
++#endif
++
++ default:
++ break;
++ }
++
++unlock_and_return:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return err;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c
+new file mode 100644
+index 0000000..dbd54b1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c
+@@ -0,0 +1,426 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <linux/kernel.h>
++#include <linux/hardirq.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/tty.h>
++#include <stdarg.h>
++#include "img_types.h"
++#include "servicesext.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++#include "linkage.h"
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++#define PVR_MAX_FILEPATH_LEN 256
++
++static IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#endif
++
++#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
++
++static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
++
++static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
++
++static PVRSRV_LINUX_MUTEX gsDebugMutexNonIRQ;
++
++static DEFINE_SPINLOCK(gsDebugLockIRQ);
++
++#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
++
++static inline void GetBufferLock(unsigned long *pulLockFlags)
++{
++ if (USE_SPIN_LOCK)
++ {
++ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
++ }
++ else
++ {
++ LinuxLockMutex(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void ReleaseBufferLock(unsigned long ulLockFlags)
++{
++ if (USE_SPIN_LOCK)
++ {
++ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
++ }
++ else
++ {
++ LinuxUnLockMutex(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
++{
++ if (USE_SPIN_LOCK)
++ {
++ *ppszBuf = gszBufferIRQ;
++ *pui32BufSiz = sizeof(gszBufferIRQ);
++ }
++ else
++ {
++ *ppszBuf = gszBufferNonIRQ;
++ *pui32BufSiz = sizeof(gszBufferNonIRQ);
++ }
++}
++
++static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR* pszFormat, va_list VArgs)
++{
++ IMG_UINT32 ui32Used;
++ IMG_UINT32 ui32Space;
++ IMG_INT32 i32Len;
++
++ ui32Used = strlen(pszBuf);
++ BUG_ON(ui32Used >= ui32BufSiz);
++ ui32Space = ui32BufSiz - ui32Used;
++
++ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
++ pszBuf[ui32BufSiz - 1] = 0;
++
++
++ return (i32Len < 0 || i32Len >= ui32Space);
++}
++
++IMG_VOID PVRDPFInit(IMG_VOID)
++{
++ LinuxInitMutex(&gsDebugMutexNonIRQ);
++}
++
++IMG_VOID PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
++{
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++ va_end(vaArgs);
++
++}
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++IMG_VOID PVRSRVDebugAssertFail(const IMG_CHAR* pszFile, IMG_UINT32 uLine)
++{
++ PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine, "Debug assertion failed!");
++ BUG();
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++IMG_VOID PVRSRVTrace(const IMG_CHAR* pszFormat, ...)
++{
++ va_list VArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(VArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++ strncpy(pszBuf, "PVR: ", (ui32BufSiz -1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end(VArgs);
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
++{
++ va_list VArgs;
++ IMG_BOOL bTrunc;
++
++ va_start (VArgs, pszFormat);
++
++ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
++
++ va_end (VArgs);
++
++ return bTrunc;
++}
++
++IMG_VOID PVRSRVDebugPrintf (
++ IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR* pszFullFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR* pszFormat,
++ ...
++ )
++{
++ IMG_BOOL bTrace, bDebug;
++ const IMG_CHAR *pszFileName = pszFullFileName;
++ IMG_CHAR *pszLeafName;
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug)
++ {
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++
++ if (bDebug)
++ {
++ switch(ui32DebugLevel)
++ {
++ case DBGPRIV_FATAL:
++ {
++ strncpy (pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strncpy (pszBuf, "PVR_K:(Error): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strncpy (pszBuf, "PVR_K:(Warning): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strncpy (pszBuf, "PVR_K:(Message): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strncpy (pszBuf, "PVR_K:(Verbose): ", (ui32BufSiz -1));
++ break;
++ }
++ default:
++ {
++ strncpy (pszBuf, "PVR_K:(Unknown message level)", (ui32BufSiz -1));
++ break;
++ }
++ }
++ }
++ else
++ {
++ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
++ }
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++
++ if (!bTrace)
++ {
++#ifdef DEBUG_LOG_PATH_TRUNCATE
++
++ static IMG_CHAR szFileNameRewrite[PVR_MAX_FILEPATH_LEN];
++
++ IMG_CHAR* pszTruncIter;
++ IMG_CHAR* pszTruncBackInter;
++
++
++ pszFileName = pszFullFileName + strlen(DEBUG_LOG_PATH_TRUNCATE)+1;
++
++
++ strncpy(szFileNameRewrite, pszFileName,PVR_MAX_FILEPATH_LEN);
++
++ if(strlen(szFileNameRewrite) == PVR_MAX_FILEPATH_LEN-1) {
++			IMG_CHAR szTruncateMessage[] = "FILENAME TRUNCATED";
++			strcpy(szFileNameRewrite + (PVR_MAX_FILEPATH_LEN - 1 - strlen(szTruncateMessage)), szTruncateMessage);
++ }
++
++ pszTruncIter = szFileNameRewrite;
++ while(*pszTruncIter++ != 0)
++ {
++ IMG_CHAR* pszNextStartPoint;
++
++ if(
++ !( ( *pszTruncIter == '/' && (pszTruncIter-4 >= szFileNameRewrite) ) &&
++ ( *(pszTruncIter-1) == '.') &&
++ ( *(pszTruncIter-2) == '.') &&
++ ( *(pszTruncIter-3) == '/') )
++ ) continue;
++
++
++ pszTruncBackInter = pszTruncIter - 3;
++ while(*(--pszTruncBackInter) != '/')
++ {
++ if(pszTruncBackInter <= szFileNameRewrite) break;
++ }
++ pszNextStartPoint = pszTruncBackInter;
++
++
++ while(*pszTruncIter != 0)
++ {
++ *pszTruncBackInter++ = *pszTruncIter++;
++ }
++ *pszTruncBackInter = 0;
++
++
++ pszTruncIter = pszNextStartPoint;
++ }
++
++ pszFileName = szFileNameRewrite;
++
++ if(*pszFileName == '/') pszFileName++;
++#endif
++
++#if !defined(__sh__)
++ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '\\');
++
++ if (pszLeafName)
++ {
++ pszFileName = pszLeafName;
++ }
++#endif
++
++ if (BAppend(pszBuf, ui32BufSiz, " [%lu, %s]", ui32Line, pszFileName))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end (vaArgs);
++ }
++}
++
++#endif
++
++#if defined(DEBUG)
++
++IMG_VOID PVRDebugSetLevel(IMG_UINT32 uDebugLevel)
++{
++ printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n",(IMG_UINT)uDebugLevel);
++
++ gPVRDebugLevel = uDebugLevel;
++}
++
++IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
++{
++#define _PROC_SET_BUFFER_SZ 2
++ IMG_CHAR data_buffer[_PROC_SET_BUFFER_SZ];
++
++ if (count != _PROC_SET_BUFFER_SZ)
++ {
++ return -EINVAL;
++ }
++ else
++ {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRDebugSetLevel(data_buffer[0] - '0');
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el)
++{
++ seq_printf(sfile, "%lu\n", gPVRDebugLevel);
++}
++
++#else
++IMG_INT PVRDebugProcGetLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ if (off == 0) {
++ *start = (IMG_CHAR *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRDebugLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c
+new file mode 100644
+index 0000000..9fa678d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c
+@@ -0,0 +1,310 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SUPPORT_DRI_DRM)
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <asm/ioctl.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "pvr_bridge.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "pvrversion.h"
++#include "lock.h"
++#include "linkage.h"
++#include "pvr_drm_shared.h"
++#include "pvr_drm.h"
++
++#define MAKENAME_HELPER(x, y) x ## y
++#define MAKENAME(x, y) MAKENAME_HELPER(x, y)
++
++#define PVR_DRM_NAME "pvrsrvkm"
++#define PVR_DRM_DESC "Imagination Technologies PVR DRM"
++
++#define PVR_PCI_IDS \
++ {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0, 0, 0}
++
++struct pci_dev *gpsPVRLDMDev;
++struct drm_device *gpsPVRDRMDev;
++
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
++#error "Linux kernel version 2.6.25 or later required for PVR DRM support"
++#endif
++
++#define PVR_DRM_FILE struct drm_file *
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static struct pci_device_id asPciIdList[] = {
++ PVR_PCI_IDS
++};
++#endif
++
++IMG_INT PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
++{
++ IMG_INT iRes;
++
++ PVR_TRACE(("PVRSRVDrmLoad"));
++
++ gpsPVRDRMDev = dev;
++ gpsPVRLDMDev = dev->pdev;
++
++#if defined(PDUMP)
++ iRes = dbgdrv_init();
++ if (iRes != 0)
++ {
++ return iRes;
++ }
++#endif
++
++ iRes = PVRCore_Init();
++ if (iRes != 0)
++ {
++ goto exit_dbgdrv_cleanup;
++ }
++
++#if defined(DISPLAY_CONTROLLER)
++ iRes = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(dev);
++ if (iRes != 0)
++ {
++ goto exit_pvrcore_cleanup;
++ }
++#endif
++ return 0;
++
++#if defined(DISPLAY_CONTROLLER)
++exit_pvrcore_cleanup:
++ PVRCore_Cleanup();
++#endif
++exit_dbgdrv_cleanup:
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++ return iRes;
++}
++
++IMG_INT PVRSRVDrmUnload(struct drm_device *dev)
++{
++ PVR_TRACE(("PVRSRVDrmUnload"));
++
++#if defined(DISPLAY_CONTROLLER)
++ PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(dev);
++#endif
++
++ PVRCore_Cleanup();
++
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++
++ return 0;
++}
++
++IMG_INT PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVOpen(dev, file);
++}
++
++IMG_VOID PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ PVRSRVRelease(dev, file);
++}
++
++DRI_DRM_STATIC IMG_INT
++PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++IMG_INT
++PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++#endif
++
++static IMG_INT
++PVRDRMPCIBusIDField(struct drm_device *dev, IMG_UINT32 *pui32Field, IMG_UINT32 ui32FieldType)
++{
++ struct pci_dev *psPCIDev = (struct pci_dev *)dev->pdev;
++
++ switch (ui32FieldType)
++ {
++ case PVR_DRM_PCI_DOMAIN:
++ *pui32Field = pci_domain_nr(psPCIDev->bus);
++ break;
++
++ case PVR_DRM_PCI_BUS:
++ *pui32Field = psPCIDev->bus->number;
++ break;
++
++ case PVR_DRM_PCI_DEV:
++ *pui32Field = PCI_SLOT(psPCIDev->devfn);
++ break;
++
++ case PVR_DRM_PCI_FUNC:
++ *pui32Field = PCI_FUNC(psPCIDev->devfn);
++ break;
++
++ default:
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++DRI_DRM_STATIC IMG_INT
++PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ IMG_UINT32 *pui32Args = (IMG_UINT32 *)arg;
++ IMG_UINT32 ui32Cmd = pui32Args[0];
++ IMG_UINT32 ui32Arg1 = pui32Args[1];
++ IMG_UINT32 *pui32OutArg = (IMG_UINT32 *)arg;
++ IMG_INT ret = 0;
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ switch (ui32Cmd)
++ {
++ case PVR_DRM_UNPRIV_INIT_SUCCESFUL:
++ *pui32OutArg = PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL) ? 1 : 0;
++ break;
++
++ case PVR_DRM_UNPRIV_BUSID_TYPE:
++ *pui32OutArg = PVR_DRM_BUS_TYPE_PCI;
++ break;
++
++		case PVR_DRM_UNPRIV_BUSID_FIELD:
++			ret = PVRDRMPCIBusIDField(dev, pui32OutArg, ui32Arg1);
++			break;
++ default:
++ ret = -EFAULT;
++ }
++
++ LinuxUnLockMutex(&gPVRSRVLock);
++
++ return ret;
++}
++
++#if 0
++struct drm_ioctl_desc sPVRDrmIoctls[] = {
++ DRM_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, 0),
++ DRM_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER),
++ DRM_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, 0),
++#if defined(PDUMP)
++ DRM_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, 0),
++#endif
++};
++
++static IMG_INT pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);
++
++static struct drm_driver sPVRDrmDriver =
++{
++ .driver_features = 0,
++ .dev_priv_size = sizeof(sPVRDrmBuffer),
++ .load = PVRSRVDrmLoad,
++ .unload = PVRSRVDrmUnload,
++ .open = PVRSRVDrmOpen,
++ .postclose = PVRSRVDrmPostClose,
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .get_map_ofs = drm_core_get_map_ofs,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .ioctls = sPVRDrmIoctls,
++ .fops =
++ {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ .ioctl = drm_ioctl,
++ .mmap = PVRMMap,
++ .poll = drm_poll,
++ .fasync = drm_fasync,
++ },
++ .pci_driver =
++ {
++ .name = PVR_DRM_NAME,
++ .id_table = asPciIdList,
++ },
++
++ .name = PVR_DRM_NAME,
++ .desc = PVR_DRM_DESC,
++ .date = PVR_BUILD_DATE,
++ .major = PVRVERSION_MAJ,
++ .minor = PVRVERSION_MIN,
++ .patchlevel = PVRVERSION_BUILD,
++};
++
++static IMG_INT __init PVRSRVDrmInit(IMG_VOID)
++{
++ IMG_INT iRes;
++ sPVRDrmDriver.num_ioctls = pvr_max_ioctl;
++
++
++ PVRDPFInit();
++
++ iRes = drm_init(&sPVRDrmDriver);
++
++ return iRes;
++}
++
++static IMG_VOID __exit PVRSRVDrmExit(IMG_VOID)
++{
++ drm_exit(&sPVRDrmDriver);
++}
++
++module_init(PVRSRVDrmInit);
++module_exit(PVRSRVDrmExit);
++#endif
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h
+new file mode 100644
+index 0000000..fd8c81d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h
+@@ -0,0 +1,80 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_H__)
++#define __PVR_DRM_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PVR_DRM_MAKENAME_HELPER(x, y) x ## y
++#define PVR_DRM_MAKENAME(x, y) PVR_DRM_MAKENAME_HELPER(x, y)
++
++IMG_INT PVRCore_Init(IMG_VOID);
++IMG_VOID PVRCore_Cleanup(IMG_VOID);
++IMG_INT PVRSRVOpen(struct drm_device *dev, struct drm_file *pFile);
++IMG_INT PVRSRVRelease(struct drm_device *dev, struct drm_file *pFile);
++IMG_INT PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state);
++IMG_INT PVRSRVDriverResume(struct drm_device *pDevice);
++
++IMG_INT PVRSRV_BridgeDispatchKM(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++IMG_INT PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags);
++IMG_INT PVRSRVDrmUnload(struct drm_device *dev);
++IMG_INT PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file);
++IMG_VOID PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file);
++IMG_INT PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++IMG_INT PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++IMG_INT PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++#else
++#define DRI_DRM_STATIC static
++#endif
++
++#if defined(DISPLAY_CONTROLLER)
++extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device *);
++extern void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device *);
++#endif
++
++#if defined(PDUMP)
++int dbgdrv_init(void);
++void dbgdrv_cleanup(void);
++IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++#define PVR_DRM_SRVKM_IOCTL _IO(0, PVR_DRM_SRVKM_CMD)
++#define PVR_DRM_IS_MASTER_IOCTL _IO(0, PVR_DRM_IS_MASTER_CMD)
++#define PVR_DRM_UNPRIV_IOCTL _IO(0, PVR_DRM_UNPRIV_CMD)
++#define PVR_DRM_DBGDRV_IOCTL _IO(0, PVR_DRM_DBGDRV_CMD)
++#endif
++
++#endif
++
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h
+new file mode 100644
+index 0000000..a683e9b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h
+@@ -0,0 +1,637 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGX535DEFS_KM_H_
++#define _SGX535DEFS_KM_H_
++
++#define EUR_CR_CLKGATECTL 0x0000
++#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003UL
++#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0
++#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030UL
++#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4
++#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300UL
++#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8
++#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000UL
++#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12
++#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000UL
++#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16
++#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000UL
++#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000UL
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
++#define EUR_CR_CLKGATESTATUS 0x0004
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001UL
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010UL
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100UL
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000UL
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000UL
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000UL
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
++#define EUR_CR_CLKGATECTLOVR 0x0008
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003UL
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030UL
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300UL
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000UL
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000UL
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000UL
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
++#define EUR_CR_CORE_ID 0x0010
++#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFUL
++#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
++#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000UL
++#define EUR_CR_CORE_ID_ID_SHIFT 16
++#define EUR_CR_CORE_REVISION 0x0014
++#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFUL
++#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
++#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00UL
++#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
++#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000UL
++#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
++#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000UL
++#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
++#define EUR_CR_DESIGNER_REV_FIELD1 0x0018
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFUL
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
++#define EUR_CR_DESIGNER_REV_FIELD2 0x001C
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFUL
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
++#define EUR_CR_SOFT_RESET 0x0080
++#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001UL
++#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
++#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002UL
++#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1
++#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004UL
++#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
++#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008UL
++#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3
++#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010UL
++#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4
++#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020UL
++#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
++#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040UL
++#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
++#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
++#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS2 0x0118
++#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS 0x012CUL
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_ENABLE 0x0130
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR 0x0134
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_PDS 0x0ABC
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_MASK 0x00000040UL
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_SHIFT 6
++#define EUR_CR_PDS_EXEC_BASE 0x0AB8
++#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
++#define EUR_CR_EVENT_KICKER 0x0AC4
++#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0UL
++#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
++#define EUR_CR_EVENT_KICK 0x0AC8
++#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001UL
++#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
++#define EUR_CR_EVENT_TIMER 0x0ACC
++#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000UL
++#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
++#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFUL
++#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
++#define EUR_CR_PDS_INV0 0x0AD0
++#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV0_DSC_SHIFT 0
++#define EUR_CR_PDS_INV1 0x0AD4
++#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV1_DSC_SHIFT 0
++#define EUR_CR_PDS_INV2 0x0AD8
++#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV2_DSC_SHIFT 0
++#define EUR_CR_PDS_INV3 0x0ADC
++#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV3_DSC_SHIFT 0
++#define EUR_CR_PDS_INV_CSC 0x0AE0
++#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001UL
++#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
++#define EUR_CR_PDS_PC_BASE 0x0B2C
++#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFUL
++#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_CTRL 0x0C00
++#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001UL
++#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
++#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002UL
++#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
++#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004UL
++#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
++#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008UL
++#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010UL
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK 0x00010000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SHIFT 16
++#define EUR_CR_BIF_INT_STAT 0x0C04
++#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFUL
++#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000UL
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
++#define EUR_CR_BIF_FAULT 0x0C08
++#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
++#define EUR_CR_BIF_TILE0 0x0C0C
++#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE0_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE1 0x0C10
++#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE1_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE2 0x0C14
++#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE2_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE3 0x0C18
++#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE3_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE4 0x0C1C
++#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE4_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE5 0x0C20
++#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE5_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE6 0x0C24
++#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE6_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE7 0x0C28
++#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE7_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE8 0x0C2C
++#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE8_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE9 0x0C30
++#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE9_CFG_SHIFT 24
++#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38
++#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C
++#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40
++#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44
++#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48
++#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C
++#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50
++#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE8 0x0C54
++#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE9 0x0C58
++#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE10 0x0C5C
++#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE11 0x0C60
++#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE12 0x0C64
++#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE13 0x0C68
++#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE14 0x0C6C
++#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE15 0x0C70
++#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SHIFT 12
++#define EUR_CR_BIF_BANK_SET 0x0C74
++#define EUR_CR_BIF_BANK_SET_SELECT_MASK 0x000003FFUL
++#define EUR_CR_BIF_BANK_SET_SELECT_SHIFT 0
++#define EUR_CR_BIF_BANK0 0x0C78
++#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FUL
++#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0
++#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0UL
++#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4
++#define EUR_CR_BIF_BANK0_INDEX_HOST_MASK 0x00000F00UL
++#define EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT 8
++#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000UL
++#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12
++#define EUR_CR_BIF_BANK0_INDEX_2D_MASK 0x000F0000UL
++#define EUR_CR_BIF_BANK0_INDEX_2D_SHIFT 16
++#define EUR_CR_BIF_BANK1 0x0C7C
++#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FUL
++#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0
++#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0UL
++#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4
++#define EUR_CR_BIF_BANK1_INDEX_HOST_MASK 0x00000F00UL
++#define EUR_CR_BIF_BANK1_INDEX_HOST_SHIFT 8
++#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000UL
++#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12
++#define EUR_CR_BIF_BANK1_INDEX_2D_MASK 0x000F0000UL
++#define EUR_CR_BIF_BANK1_INDEX_2D_SHIFT 16
++#define EUR_CR_BIF_ADT_TTE 0x0C80
++#define EUR_CR_BIF_ADT_TTE_VALUE_MASK 0x000000FFUL
++#define EUR_CR_BIF_ADT_TTE_VALUE_SHIFT 0
++#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
++#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1 0x0C94
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_MASK 0x00000007UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_MASK 0x00000038UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_SHIFT 3
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_MASK 0x000001C0UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_SHIFT 6
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_MASK 0x00000E00UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_SHIFT 9
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_MASK 0x00007000UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_SHIFT 12
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_MASK 0x00038000UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_SHIFT 15
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2 0x0C98
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_MASK 0x00000007UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_MASK 0x00000038UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_SHIFT 3
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_MASK 0x000001C0UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_SHIFT 6
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_MASK 0x00000E00UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_SHIFT 9
++#define EUR_CR_BIF_MEM_ARB_CONFIG 0x0CA0
++#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_MASK 0x0000000FUL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_MASK 0x00000FF0UL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT 4
++#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_MASK 0x00FFF000UL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT 12
++#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFUL
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
++#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_BANK_STATUS 0x0CB4
++#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001UL
++#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0
++#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002UL
++#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1
++#define EUR_CR_2D_BLIT_STATUS 0x0E04
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFUL
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
++#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000UL
++#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
++#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EUL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFUL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
++#define EUR_CR_2D_SOCIF 0x0E18
++#define EUR_CR_2D_SOCIF_FREESPACE_MASK 0x000000FFUL
++#define EUR_CR_2D_SOCIF_FREESPACE_SHIFT 0
++#define EUR_CR_2D_ALPHA 0x0E1C
++#define EUR_CR_2D_ALPHA_COMPONENT_ONE_MASK 0x0000FF00UL
++#define EUR_CR_2D_ALPHA_COMPONENT_ONE_SHIFT 8
++#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_MASK 0x000000FFUL
++#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_SHIFT 0
++#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
++#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x01FFFFFFUL
++#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
++#define EUR_CR_USE_CODE_BASE_DM_MASK 0x06000000UL
++#define EUR_CR_USE_CODE_BASE_DM_SHIFT 25
++#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
++#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
++
++#define EUR_CR_MNE_CR_CTRL 0x0D00
++#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK 0x00008000UL
++#define EUR_CR_MNE_CR_CTRL_INVAL 0x0D20
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h
+new file mode 100644
+index 0000000..fbffbf0
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h
+@@ -0,0 +1,82 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXDEFS_H_
++#define _SGXDEFS_H_
++
++#include "sgxerrata.h"
++#include "sgxfeaturedefs.h"
++
++#if defined(SGX520)
++#include "sgx520defs.h"
++#else
++#if defined(SGX530)
++#include "sgx530defs.h"
++#else
++#if defined(SGX535)
++#include "sgx535defs.h"
++#else
++#if defined(SGX535_V1_1)
++#include "sgx535defs.h"
++#else
++#if defined(SGX540)
++#include "sgx540defs.h"
++#else
++#if defined(SGX541)
++#include "sgx541defs.h"
++#else
++#if defined(SGX543)
++#include "sgx543defs.h"
++#else
++#if defined(SGX545)
++#include "sgx545defs.h"
++#else
++#if defined(SGX531)
++#include "sgx531defs.h"
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++
++#if defined(SGX_FEATURE_MP)
++#if defined(SGX541)
++#if SGX_CORE_REV == 100
++#include "sgx541_100mpdefs.h"
++#else
++#include "sgx541mpdefs.h"
++#endif
++#else
++#include "sgxmpdefs.h"
++#endif
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h
+new file mode 100644
+index 0000000..fe3e619
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h
+@@ -0,0 +1,308 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXERRATA_KM_H_
++#define _SGXERRATA_KM_H_
++
++
++#if defined(SGX520) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX520 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX530) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 103
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 110
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 111
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 120
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 125
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX530 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++	#endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX531) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 101
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX531 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if (defined(SGX535) || defined(SGX535_V1_1)) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 111
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 1111
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 112
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 113
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23944
++ #define FIX_HW_BRN_23410
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_23944
++ #define FIX_HW_BRN_23410
++ #else
++ #if SGX_CORE_REV == 126
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX535 Core Revision unspecified"
++
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX540) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 101
++ #define FIX_HW_BRN_25499
++ #define FIX_HW_BRN_25503
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 110
++ #define FIX_HW_BRN_25503
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 120
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX540 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX541) && !defined(SGX_CORE_DEFINED)
++ #if defined(SGX_FEATURE_MP)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #define FIX_HW_BRN_27270
++ #define FIX_HW_BRN_28011
++ #define FIX_HW_BRN_27510
++
++ #else
++ #if SGX_CORE_REV == 101
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX541 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++ #else
++ #error "sgxerrata.h: SGX541 only supports MP configs (SGX_FEATURE_MP)"
++ #endif
++#endif
++
++#if defined(SGX543) && !defined(SGX_CORE_DEFINED)
++ #if defined(SGX_FEATURE_MP)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX543 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++ #else
++ #error "sgxerrata.h: SGX543 only supports MP configs (SGX_FEATURE_MP)"
++ #endif
++#endif
++
++#if defined(SGX545) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_27266
++ #define FIX_HW_BRN_27456
++ #else
++ #if SGX_CORE_REV == 109
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX545 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if !defined(SGX_CORE_DEFINED)
++#if defined (__GNUC__)
++ #warning "sgxerrata.h: SGX Core Version unspecified"
++#else
++ #pragma message("sgxerrata.h: SGX Core Version unspecified")
++#endif
++#endif
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+new file mode 100644
+index 0000000..782f613
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+@@ -0,0 +1,163 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SGX520)
++ #define SGX_CORE_FRIENDLY_NAME "SGX520"
++ #define SGX_CORE_ID SGX_CORE_ID_520
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX530)
++ #define SGX_CORE_FRIENDLY_NAME "SGX530"
++ #define SGX_CORE_ID SGX_CORE_ID_530
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX535)
++ #define SGX_CORE_FRIENDLY_NAME "SGX535"
++ #define SGX_CORE_ID SGX_CORE_ID_535
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
++ #define SGX_FEATURE_2D_HARDWARE
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SUPPORT_SGX_GENERAL_MAPPING_HEAP
++#else
++#if defined(SGX540)
++ #define SGX_CORE_FRIENDLY_NAME "SGX540"
++ #define SGX_CORE_ID SGX_CORE_ID_540
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX541)
++ #define SGX_CORE_FRIENDLY_NAME "SGX541"
++ #define SGX_CORE_ID SGX_CORE_ID_541
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_SPM_MODE_0
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX543)
++ #define SGX_CORE_FRIENDLY_NAME "SGX543"
++ #define SGX_CORE_ID SGX_CORE_ID_543
++ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
++ #define SGX_FEATURE_USE_UNLIMITED_PHASES
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MONOLITHIC_UKERNEL
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++ #define SGX_FEATURE_DATA_BREAKPOINTS
++#else
++#if defined(SGX531)
++ #define SGX_CORE_FRIENDLY_NAME "SGX531"
++ #define SGX_CORE_ID SGX_CORE_ID_531
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX545)
++ #define SGX_CORE_FRIENDLY_NAME "SGX545"
++ #define SGX_CORE_ID SGX_CORE_ID_545
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
++ #define SGX_FEATURE_USE_UNLIMITED_PHASES
++ #define SGX_FEATURE_DXT_TEXTURES
++ #define SGX_FEATURE_VOLUME_TEXTURES
++ #define SGX_FEATURE_HOST_ALLOC_FROM_DPM
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
++ #define SGX_FEATURE_NUM_USE_PIPES (4)
++ #define SGX_FEATURE_TEXTURESTRIDE_EXTENSION
++ #define SGX_FEATURE_PDS_DATA_INTERLEAVE_2DWORDS
++ #define SGX_FEATURE_MONOLITHIC_UKERNEL
++ #define SGX_FEATURE_ZLS_EXTERNALZ
++ #define SGX_FEATURE_VDM_CONTEXT_SWITCH_REV_2
++ #define SGX_FEATURE_ISP_CONTEXT_SWITCH_REV_2
++ #define SGX_FEATURE_NUM_PDS_PIPES (2)
++ #define SGX_FEATURE_NATIVE_BACKWARD_BLIT
++ #define SGX_FEATURE_MAX_TA_RENDER_TARGETS (512)
++ #define SGX_FEATURE_SPM_MODE_0
++ #define SGX_FEATURE_SECONDARY_REQUIRES_USE_KICK
++ #define SGX_FEATURE_DCU
++
++
++ #define SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++
++#if defined(FIX_HW_BRN_22693)
++#undef SGX_FEATURE_AUTOCLOCKGATING
++#endif
++
++#if defined(FIX_HW_BRN_27266)
++#undef SGX_FEATURE_36BIT_MMU
++#endif
++
++#if defined(FIX_HW_BRN_27456)
++#undef SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
++#endif
++
++#if defined(FIX_HW_BRN_22934) \
++ || defined(FIX_HW_BRN_25499)
++#undef SGX_FEATURE_MULTI_EVENT_KICK
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ #if defined(SGX_FEATURE_36BIT_MMU)
++ #error SGX_FEATURE_SYSTEM_CACHE is incompatible with SGX_FEATURE_36BIT_MMU
++ #endif
++ #if defined(FIX_HW_BRN_26620) && !defined(SGX_FEATURE_MULTI_EVENT_KICK)
++ #define SGX_BYPASS_SYSTEM_CACHE
++ #endif
++#endif
++
++#if defined(SGX_FEATURE_MP)
++#if !defined(SGX_FEATURE_MP_CORE_COUNT)
++#error SGX_FEATURE_MP_CORE_COUNT must be defined when SGX_FEATURE_MP is defined
++#endif
++#else
++#define SGX_FEATURE_MP_CORE_COUNT (1)
++#endif
++
++#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && !defined(SUPPORT_SGX_PRIORITY_SCHEDULING)
++#define SUPPORT_SGX_PRIORITY_SCHEDULING
++#endif
++
++#include "img_types.h"
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h
+new file mode 100644
+index 0000000..309de47
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h
+@@ -0,0 +1,79 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGXMMU_KM_H__)
++#define __SGXMMU_KM_H__
++
++#define SGX_MMU_PAGE_SHIFT (12)
++#define SGX_MMU_PAGE_SIZE (1UL<<SGX_MMU_PAGE_SHIFT)
++#define SGX_MMU_PAGE_MASK (SGX_MMU_PAGE_SIZE - 1UL)
++
++#define SGX_MMU_PD_SHIFT (10)
++#define SGX_MMU_PD_SIZE (1UL<<SGX_MMU_PD_SHIFT)
++#define SGX_MMU_PD_MASK (0xFFC00000UL)
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFFF00UL)
++ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (4)
++#else
++ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFF000UL)
++ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (0)
++#endif
++#define SGX_MMU_PDE_VALID (0x00000001UL)
++#define SGX_MMU_PDE_PAGE_SIZE_4K (0x00000000UL)
++#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
++ #define SGX_MMU_PDE_PAGE_SIZE_16K (0x00000002UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_64K (0x00000004UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_256K (0x00000006UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_1M (0x00000008UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_4M (0x0000000AUL)
++ #define SGX_MMU_PDE_PAGE_SIZE_MASK (0x0000000EUL)
++#else
++ #define SGX_MMU_PDE_WRITEONLY (0x00000002UL)
++ #define SGX_MMU_PDE_READONLY (0x00000004UL)
++ #define SGX_MMU_PDE_CACHECONSISTENT (0x00000008UL)
++ #define SGX_MMU_PDE_EDMPROTECT (0x00000010UL)
++#endif
++
++#define SGX_MMU_PT_SHIFT (10)
++#define SGX_MMU_PT_SIZE (1UL<<SGX_MMU_PT_SHIFT)
++#define SGX_MMU_PT_MASK (0x003FF000UL)
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFFF00UL)
++ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (4)
++#else
++ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFF000UL)
++ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (0)
++#endif
++#define SGX_MMU_PTE_VALID (0x00000001UL)
++#define SGX_MMU_PTE_WRITEONLY (0x00000002UL)
++#define SGX_MMU_PTE_READONLY (0x00000004UL)
++#define SGX_MMU_PTE_CACHECONSISTENT (0x00000008UL)
++#define SGX_MMU_PTE_EDMPROTECT (0x00000010UL)
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/buffer_manager.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/buffer_manager.h
+new file mode 100644
+index 0000000..a47086d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/buffer_manager.h
+@@ -0,0 +1,213 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _BUFFER_MANAGER_H_
++#define _BUFFER_MANAGER_H_
++
++#include "img_types.h"
++#include "ra.h"
++#include "perproc.h"
++
++#if defined(__cplusplus)
++extern "C"{
++#endif
++
++typedef struct _BM_HEAP_ BM_HEAP;
++
++struct _BM_MAPPING_
++{
++ enum
++ {
++ hm_wrapped = 1,
++ hm_wrapped_scatter,
++ hm_wrapped_virtaddr,
++ hm_wrapped_scatter_virtaddr,
++ hm_env,
++ hm_contiguous
++ } eCpuMemoryOrigin;
++
++ BM_HEAP *pBMHeap;
++ RA_ARENA *pArena;
++
++ IMG_CPU_VIRTADDR CpuVAddr;
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_VIRTADDR DevVAddr;
++ IMG_SYS_PHYADDR *psSysAddr;
++ IMG_SIZE_T uSize;
++ IMG_HANDLE hOSMemHandle;
++ IMG_UINT32 ui32Flags;
++};
++
++typedef struct _BM_BUF_
++{
++ IMG_CPU_VIRTADDR *CpuVAddr;
++ IMG_VOID *hOSMemHandle;
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_VIRTADDR DevVAddr;
++
++ BM_MAPPING *pMapping;
++ IMG_UINT32 ui32RefCount;
++} BM_BUF;
++
++struct _BM_HEAP_
++{
++ IMG_UINT32 ui32Attribs;
++ BM_CONTEXT *pBMContext;
++ RA_ARENA *pImportArena;
++ RA_ARENA *pLocalDevMemArena;
++ RA_ARENA *pVMArena;
++ DEV_ARENA_DESCRIPTOR sDevArena;
++ MMU_HEAP *pMMUHeap;
++
++ struct _BM_HEAP_ *psNext;
++ struct _BM_HEAP_ **ppsThis;
++};
++
++struct _BM_CONTEXT_
++{
++ MMU_CONTEXT *psMMUContext;
++
++
++ BM_HEAP *psBMHeap;
++
++
++ BM_HEAP *psBMSharedHeap;
++
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ HASH_TABLE *pBufferHash;
++
++
++ IMG_HANDLE hResItem;
++
++ IMG_UINT32 ui32RefCount;
++
++
++
++ struct _BM_CONTEXT_ *psNext;
++ struct _BM_CONTEXT_ **ppsThis;
++};
++
++
++
++typedef IMG_VOID *BM_HANDLE;
++
++#define BP_POOL_MASK 0x7
++
++#define BP_CONTIGUOUS (1 << 3)
++#define BP_PARAMBUFFER (1 << 4)
++
++#define BM_MAX_DEVMEM_ARENAS 2
++
++IMG_HANDLE
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_PHYADDR *psPDDevPAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_BOOL *pbCreated);
++
++
++PVRSRV_ERROR
++BM_DestroyContext (IMG_HANDLE hBMContext,
++ IMG_BOOL *pbCreated);
++
++
++IMG_HANDLE
++BM_CreateHeap (IMG_HANDLE hBMContext,
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo);
++
++IMG_VOID
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap);
++
++
++IMG_BOOL
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_BOOL
++BM_Alloc (IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 *pui32Flags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_HANDLE *phBuf);
++
++IMG_BOOL
++BM_Wrap ( IMG_HANDLE hDevMemHeap,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Offset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 *pui32Flags,
++ BM_HANDLE *phBuf);
++
++IMG_VOID
++BM_Free (BM_HANDLE hBuf,
++ IMG_UINT32 ui32Flags);
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf);
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf);
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf);
++
++IMG_HANDLE
++BM_HandleToOSMemHandle (BM_HANDLE hBuf);
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++ IMG_UINT32 *pTotalBytes,
++ IMG_UINT32 *pAvailableBytes);
++
++
++IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_DEV_VIRTADDR sDevVPageAddr,
++ IMG_DEV_PHYADDR *psDevPAddr);
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap);
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext);
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap);
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext);
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/device.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/device.h
+new file mode 100644
+index 0000000..90c8c7a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/device.h
+@@ -0,0 +1,278 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __DEVICE_H__
++#define __DEVICE_H__
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#include "ra.h"
++#include "resman.h"
++
++typedef struct _BM_CONTEXT_ BM_CONTEXT;
++
++typedef struct _MMU_HEAP_ MMU_HEAP;
++typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
++
++#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0))
++#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3))
++
++typedef IMG_UINT32 DEVICE_MEMORY_HEAP_TYPE;
++#define DEVICE_MEMORY_HEAP_PERCONTEXT 0
++#define DEVICE_MEMORY_HEAP_KERNEL 1
++#define DEVICE_MEMORY_HEAP_SHARED 2
++#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED 3
++
++#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY 1
++#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV 2
++
++typedef struct _DEVICE_MEMORY_HEAP_INFO_
++{
++
++ IMG_UINT32 ui32HeapID;
++
++
++ IMG_CHAR *pszName;
++
++
++ IMG_CHAR *pszBSName;
++
++
++ IMG_DEV_VIRTADDR sDevVAddrBase;
++
++
++ IMG_UINT32 ui32HeapSize;
++
++
++ IMG_UINT32 ui32Attribs;
++
++
++ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++
++ IMG_HANDLE hDevMemHeap;
++
++
++ RA_ARENA *psLocalDevMemArena;
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++} DEVICE_MEMORY_HEAP_INFO;
++
++typedef struct _DEVICE_MEMORY_INFO_
++{
++
++ IMG_UINT32 ui32AddressSpaceSizeLog2;
++
++
++
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_UINT32 ui32HeapCount;
++
++
++ IMG_UINT32 ui32SyncHeapID;
++
++
++ IMG_UINT32 ui32MappingHeapID;
++
++
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ BM_CONTEXT *pBMKernelContext;
++
++
++ BM_CONTEXT *pBMContext;
++
++} DEVICE_MEMORY_INFO;
++
++
++typedef struct DEV_ARENA_DESCRIPTOR_TAG
++{
++ IMG_UINT32 ui32HeapID;
++
++ IMG_CHAR *pszName;
++
++ IMG_DEV_VIRTADDR BaseDevVAddr;
++
++ IMG_UINT32 ui32Size;
++
++ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo;
++
++} DEV_ARENA_DESCRIPTOR;
++
++typedef struct _SYS_DATA_TAG_ *PSYS_DATA;
++
++typedef struct _PVRSRV_DEVICE_NODE_
++{
++ PVRSRV_DEVICE_IDENTIFIER sDevId;
++ IMG_UINT32 ui32RefCount;
++
++
++
++
++ PVRSRV_ERROR (*pfnInitDevice) (IMG_VOID*);
++
++ PVRSRV_ERROR (*pfnDeInitDevice) (IMG_VOID*);
++
++
++ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
++
++
++ PVRSRV_ERROR (*pfnMMUInitialise)(struct _PVRSRV_DEVICE_NODE_*, MMU_CONTEXT**, IMG_DEV_PHYADDR*);
++ IMG_VOID (*pfnMMUFinalise)(MMU_CONTEXT*);
++ IMG_VOID (*pfnMMUInsertHeap)(MMU_CONTEXT*, MMU_HEAP*);
++ MMU_HEAP* (*pfnMMUCreate)(MMU_CONTEXT*,DEV_ARENA_DESCRIPTOR*,RA_ARENA**);
++ IMG_VOID (*pfnMMUDelete)(MMU_HEAP*);
++ IMG_BOOL (*pfnMMUAlloc)(MMU_HEAP*pMMU,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++ IMG_VOID (*pfnMMUFree)(MMU_HEAP*,IMG_DEV_VIRTADDR,IMG_UINT32);
++ IMG_VOID (*pfnMMUEnable)(MMU_HEAP*);
++ IMG_VOID (*pfnMMUDisable)(MMU_HEAP*);
++ IMG_VOID (*pfnMMUMapPages)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR devVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID (*pfnMMUMapShadow)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR *pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID (*pfnMMUUnmapPages)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR dev_vaddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_VOID (*pfnMMUMapScatter)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_DEV_PHYADDR (*pfnMMUGetPhysPageAddr)(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++ IMG_DEV_PHYADDR (*pfnMMUGetPDDevPAddr)(MMU_CONTEXT *pMMUContext);
++
++
++ IMG_BOOL (*pfnDeviceISR)(IMG_VOID*);
++
++ IMG_VOID *pvISRData;
++
++ IMG_UINT32 ui32SOCInterruptBit;
++
++ IMG_VOID (*pfnDeviceMISR)(IMG_VOID*);
++
++
++ IMG_VOID (*pfnDeviceCommandComplete)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
++
++ IMG_BOOL bReProcessDeviceCommandComplete;
++
++
++ DEVICE_MEMORY_INFO sDevMemoryInfo;
++
++
++ IMG_VOID *pvDevice;
++ IMG_UINT32 ui32pvDeviceSize;
++
++
++ PRESMAN_CONTEXT hResManContext;
++
++
++ PSYS_DATA psSysData;
++
++
++ RA_ARENA *psLocalDevMemArena;
++
++ IMG_UINT32 ui32Flags;
++
++ struct _PVRSRV_DEVICE_NODE_ *psNext;
++ struct _PVRSRV_DEVICE_NODE_ **ppsThis;
++} PVRSRV_DEVICE_NODE;
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
++ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++ IMG_UINT32 ui32SOCInterruptBit,
++ IMG_UINT32 *pui32DeviceIndex );
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
++
++#if !defined(USE_CODE)
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++
++#endif
++
++
++#if defined (USING_ISR_INTERRUPTS)
++PVRSRV_ERROR IMG_CALLCONV PollForInterruptKM(IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++#endif
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData);
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData);
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData);
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/handle.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/handle.h
+new file mode 100644
+index 0000000..fda74f1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/handle.h
+@@ -0,0 +1,382 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __HANDLE_H__
++#define __HANDLE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "hash.h"
++#include "resman.h"
++
++typedef enum
++{
++ PVRSRV_HANDLE_TYPE_NONE = 0,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_TYPE_MMAP_INFO,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER
++} PVRSRV_HANDLE_TYPE;
++
++typedef enum
++{
++
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 0x01,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x02,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x04
++} PVRSRV_HANDLE_ALLOC_FLAG;
++
++struct _PVRSRV_HANDLE_BASE_;
++typedef struct _PVRSRV_HANDLE_BASE_ PVRSRV_HANDLE_BASE;
++
++#ifdef PVR_SECURE_HANDLES
++extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++#define KERNEL_HANDLE_BASE (gpsKernelHandleBase)
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag);
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle);
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize);
++
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase);
++
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle);
++
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase);
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID);
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID);
++
++#else
++
++#define KERNEL_HANDLE_BASE IMG_NULL
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(eFlag);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(eFlag);
++ PVR_UNREFERENCED_PARAMETER(hParent);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandleAnyType)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *peType = PVRSRV_HANDLE_TYPE_NONE;
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(hAncestor);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetParentHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++
++ *phParent = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupAndReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVNewHandleBatch)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(ui32BatchSize);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVCommitHandleBatch)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVReleaseHandleBatch)
++#endif
++static INLINE
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVSetMaxHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(ui32MaxHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetMaxHandle)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return 0;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVEnableHandlePurging)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVPurgeHandles)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
++{
++ *ppsBase = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFreeHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleDeInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++ return PVRSRV_OK;
++}
++
++#endif
++
++#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \
++ (IMG_VOID)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag)
++
++#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent) \
++ (IMG_VOID)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, eFlag, hParent)
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/hash.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/hash.h
+new file mode 100644
+index 0000000..d45f4a9
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/hash.h
+@@ -0,0 +1,73 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HASH_H_
++#define _HASH_H_
++
++#include "img_types.h"
++#include "osfunc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++typedef struct _HASH_TABLE_ HASH_TABLE;
++
++IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++
++IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
++
++IMG_VOID HASH_Delete (HASH_TABLE *pHash);
++
++IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v);
++
++IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v);
++
++IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++#ifdef HASH_TRACE
++IMG_VOID HASH_Dump (HASH_TABLE *pHash);
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/lists.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/lists.h
+new file mode 100644
+index 0000000..76d5af2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/lists.h
+@@ -0,0 +1,176 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LISTS_UTILS__
++#define __LISTS_UTILS__
++
++#include <stdarg.h>
++#include "img_types.h"
++
++#define DECLARE_LIST_FOR_EACH(TYPE) \
++IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
++IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))\
++{\
++ while(psHead)\
++ {\
++ pfnCallBack(psHead);\
++ psHead = psHead->psNext;\
++ }\
++}
++
++
++#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
++IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
++IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
++{\
++ va_list ap;\
++ while(psHead)\
++ {\
++ va_start(ap, pfnCallBack);\
++ pfnCallBack(psHead, ap);\
++ psHead = psHead->psNext;\
++ va_end(ap);\
++ }\
++}
++
++
++#define DECLARE_LIST_ANY(TYPE) \
++IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY(TYPE) \
++IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))\
++{ \
++ IMG_VOID *pResult;\
++ TYPE *psNextNode;\
++ pResult = IMG_NULL;\
++ psNextNode = psHead;\
++ while(psHead && !pResult)\
++ {\
++ psNextNode = psNextNode->psNext;\
++ pResult = pfnCallBack(psHead);\
++ psHead = psNextNode;\
++ }\
++ return pResult;\
++}
++
++
++#define DECLARE_LIST_ANY_VA(TYPE) \
++IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA(TYPE) \
++IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ IMG_VOID* pResult = IMG_NULL;\
++ while(psHead && !pResult)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ pResult = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return pResult;\
++}
++
++#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
++{ \
++ RTYPE result;\
++ TYPE *psNextNode;\
++ result = CONTINUE;\
++ psNextNode = psHead;\
++ while(psHead && result == CONTINUE)\
++ {\
++ psNextNode = psNextNode->psNext;\
++ result = pfnCallBack(psHead);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ RTYPE result = CONTINUE;\
++ while(psHead && result == CONTINUE)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ result = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++#define DECLARE_LIST_REMOVE(TYPE) \
++IMG_VOID List_##TYPE##_Remove(TYPE *psNode)
++
++#define IMPLEMENT_LIST_REMOVE(TYPE) \
++IMG_VOID List_##TYPE##_Remove(TYPE *psNode)\
++{\
++ (*psNode->ppsThis)=psNode->psNext;\
++ if(psNode->psNext)\
++ {\
++ psNode->psNext->ppsThis = psNode->ppsThis;\
++ }\
++}
++
++#define DECLARE_LIST_INSERT(TYPE) \
++IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
++
++#define IMPLEMENT_LIST_INSERT(TYPE) \
++IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
++{\
++ psNewNode->ppsThis = ppsHead;\
++ psNewNode->psNext = *ppsHead;\
++ *ppsHead = psNewNode;\
++ if(psNewNode->psNext)\
++ {\
++ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
++ }\
++}
++
++
++#define IS_LAST_ELEMENT(x) ((x)->psNext == IMG_NULL)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/metrics.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/metrics.h
+new file mode 100644
+index 0000000..2632f8d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/metrics.h
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _METRICS_
++#define _METRICS_
++
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#if defined(DEBUG) || defined(TIMING)
++
++
++typedef struct
++{
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32Stop;
++ IMG_UINT32 ui32Total;
++ IMG_UINT32 ui32Count;
++} Temporal_Data;
++
++extern Temporal_Data asTimers[];
++
++extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID);
++extern IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo);
++extern IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID);
++
++
++#define PVRSRV_TIMER_DUMMY 0
++
++#define PVRSRV_TIMER_EXAMPLE_1 1
++#define PVRSRV_TIMER_EXAMPLE_2 2
++
++
++#define PVRSRV_NUM_TIMERS (PVRSRV_TIMER_EXAMPLE_2 + 1)
++
++#define PVRSRV_TIME_START(X) { \
++ asTimers[X].ui32Count += 1; \
++ asTimers[X].ui32Count |= 0x80000000L; \
++ asTimers[X].ui32Start = PVRSRVTimeNow(); \
++ asTimers[X].ui32Stop = 0; \
++ }
++
++#define PVRSRV_TIME_SUSPEND(X) { \
++ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++ }
++
++#define PVRSRV_TIME_RESUME(X) { \
++ asTimers[X].ui32Start = PVRSRVTimeNow(); \
++ }
++
++#define PVRSRV_TIME_STOP(X) { \
++ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++ asTimers[X].ui32Total += asTimers[X].ui32Stop; \
++ asTimers[X].ui32Count &= 0x7FFFFFFFL; \
++ }
++
++#define PVRSRV_TIME_RESET(X) { \
++ asTimers[X].ui32Start = 0; \
++ asTimers[X].ui32Stop = 0; \
++ asTimers[X].ui32Total = 0; \
++ asTimers[X].ui32Count = 0; \
++ }
++
++
++#if defined(__sh__)
++
++#define TST_REG ((volatile IMG_UINT8 *) (psDevInfo->pvSOCRegsBaseKM))
++
++#define TCOR_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+28))
++#define TCNT_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+32))
++#define TCR_2 ((volatile IMG_UINT16 *)(psDevInfo->pvSOCRegsBaseKM+36))
++
++#define TIMER_DIVISOR 4
++
++#endif
++
++
++
++
++
++#else
++
++
++
++#define PVRSRV_TIME_START(X)
++#define PVRSRV_TIME_SUSPEND(X)
++#define PVRSRV_TIME_RESUME(X)
++#define PVRSRV_TIME_STOP(X)
++#define PVRSRV_TIME_RESET(X)
++
++#define PVRSRVSetupMetricTimers(X)
++#define PVRSRVOutputMetricTotals()
++
++
++
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osfunc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osfunc.h
+new file mode 100644
+index 0000000..7686c69
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osfunc.h
+@@ -0,0 +1,487 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++#ifndef __OSFUNC_H__
++#define __OSFUNC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/hardirq.h>
++#include <linux/string.h>
++#endif
++
++
++
++ #define PVRSRV_PAGEABLE_SELECT PVRSRV_OS_PAGEABLE_HEAP
++
++#define KERNEL_ID 0xffffffffL
++#define POWER_MANAGER_ID 0xfffffffeL
++#define ISR_ID 0xfffffffdL
++#define TIMER_ID 0xfffffffcL
++
++
++#define HOST_PAGESIZE OSGetPageSize
++#define HOST_PAGEMASK (~(HOST_PAGESIZE()-1))
++#define HOST_PAGEALIGN(addr) (((addr)+HOST_PAGESIZE()-1)&HOST_PAGEMASK)
++
++#define PVRSRV_OS_HEAP_MASK 0xf
++#define PVRSRV_OS_PAGEABLE_HEAP 0x1
++#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2
++
++
++IMG_UINT32 OSClockus(IMG_VOID);
++IMG_SIZE_T OSGetPageSize(IMG_VOID);
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++ IMG_UINT32 ui32Irq,
++ IMG_CHAR *pszISRName,
++ IMG_VOID *pvDeviceNode);
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq);
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData);
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID* pvLinAddr);
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
++IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle);
++IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnReservePhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++IMG_VOID OSFlushCPUCacheKM(IMG_VOID);
++IMG_VOID OSFlushCPUCacheRangeKM(IMG_VOID *pvRangeAddrStart,
++ IMG_VOID *pvRangeAddrEnd);
++#endif
++
++#if defined(__linux__)
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSRegisterDiscontigMem)
++#endif
++static INLINE PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pBasePAddr);
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(phOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSUnRegisterDiscontigMem)
++#endif
++static INLINE PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++#endif
++
++
++#if defined(__linux__)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle)
++{
++#if defined(__linux__)
++ *ppvCpuVAddr = IMG_NULL;
++ return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, ui32Bytes, ui32Flags, phOSMemHandle);
++#else
++ extern IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR SysPAddr);
++
++
++ return OSReservePhys(SysSysPAddrToCpuPAddr(pBasePAddr[0]), ui32Bytes, ui32Flags, ppvCpuVAddr, phOSMemHandle);
++#endif
++}
++
++static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++#if defined(__linux__)
++ OSUnRegisterDiscontigMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++#endif
++
++ return PVRSRV_OK;
++}
++#else
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pBasePAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(ppvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(phOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSUnReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++#endif
++
++PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle);
++
++
++
++#if defined(__linux__)
++PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINTPTR_T ui32ByteOffset,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet);
++PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSGetSubMemHandle)
++#endif
++static INLINE PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINTPTR_T ui32ByteOffset,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet)
++{
++ PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++ *phOSMemHandleRet = hOSMemHandle;
++ return PVRSRV_OK;
++}
++
++static INLINE PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ return PVRSRV_OK;
++}
++#endif
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID);
++IMG_UINT32 OSGetCurrentThreadID( IMG_VOID );
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
++
++PVRSRV_ERROR OSAllocPages_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_UINT32 ui32PageSize, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phPageAlloc);
++PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc);
++
++
++#ifdef PVRSRV_LOG_MEMORY_ALLOCS
++ #define OSAllocMem(flags, size, linAddr, blockAlloc, logStr) \
++ (PVR_TRACE(("OSAllocMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): " logStr " (size = 0x%lx)", size)), \
++ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
++
++ #define OSAllocPages(flags, size, pageSize, linAddr, pageAlloc) \
++ (PVR_TRACE(("OSAllocPages(" #flags ", " #size ", " #pageSize ", " #linAddr ", " #pageAlloc "): (size = 0x%lx)", size)), \
++ OSAllocPages_Impl(flags, size, pageSize, linAddr, pageAlloc))
++
++ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
++ (PVR_TRACE(("OSFreeMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): (pointer = 0x%X)", linAddr)), \
++ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
++#else
++ #define OSAllocMem(flags, size, linAddr, blockAlloc, logString) \
++ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
++
++ #define OSAllocPages OSAllocPages_Impl
++
++ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
++ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
++#endif
++
++#ifdef PVRSRV_DEBUG_OS_MEMORY
++
++ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID *ppvCpuVAddr,
++ IMG_HANDLE *phBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line);
++
++ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID pvCpuVAddr,
++ IMG_HANDLE hBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line);
++
++
++ typedef struct
++ {
++ IMG_UINT8 sGuardRegionBefore[8];
++ IMG_CHAR sFileName[128];
++ IMG_UINT32 uLineNo;
++ IMG_SIZE_T uSize;
++ IMG_SIZE_T uSizeParityCheck;
++ enum valid_tag
++ { isFree = 0x277260FF,
++ isAllocated = 0x260511AA
++ } eValid;
++ } OSMEM_DEBUG_INFO;
++
++ #define TEST_BUFFER_PADDING_STATUS (sizeof(OSMEM_DEBUG_INFO))
++ #define TEST_BUFFER_PADDING_AFTER (8)
++ #define TEST_BUFFER_PADDING (TEST_BUFFER_PADDING_STATUS + TEST_BUFFER_PADDING_AFTER)
++#else
++ #define OSAllocMem_Debug_Wrapper OSAllocMem_Debug_Linux_Memory_Allocations
++ #define OSFreeMem_Debug_Wrapper OSFreeMem_Debug_Linux_Memory_Allocations
++#endif
++
++#if defined(__linux__) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line);
++ PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line);
++
++ #define OSAllocMem_Debug_Linux_Memory_Allocations OSAllocMem_Impl
++ #define OSFreeMem_Debug_Linux_Memory_Allocations OSFreeMem_Impl
++#else
++ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc);
++ PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc);
++
++ #define OSAllocMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
++ OSAllocMem_Impl(flags, size, addr, blockAlloc)
++ #define OSFreeMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
++ OSFreeMem_Impl(flags, size, addr, blockAlloc)
++#endif
++
++
++
++#if defined(__linux__)
++IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_SIZE_T ui32ByteOffset);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSMemHandleToCpuPAddr)
++#endif
++static INLINE IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_HANDLE hOSMemHandle, IMG_SIZE_T ui32ByteOffset)
++{
++ IMG_CPU_PHYADDR sCpuPAddr;
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++ sCpuPAddr.uiAddr = 0;
++ return sCpuPAddr;
++}
++#endif
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData);
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData);
++IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc);
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_SIZE_T ui32Size, const IMG_CHAR *pszFormat, ...);
++#define OSStringLength(pszString) strlen(pszString)
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
++ PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE *phOSEvent);
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE hOSEventKM);
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr);
++
++IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM,IMG_SIZE_T ui32Size,IMG_HANDLE *phMemBlock);
++IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, IMG_SYS_PHYADDR sRegAddr, IMG_UINT32 ulSize, IMG_PVOID *ppvProcess);
++IMG_VOID OSUnmapHWRegsFromUserSpace(IMG_HANDLE hDevCookie, IMG_PVOID pvUserAddr, IMG_PVOID pvProcess);
++
++IMG_VOID UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_SIZE_T ui32Size, IMG_HANDLE hMemBlock);
++
++PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie,
++ IMG_SYS_PHYADDR sCPUPhysAddr,
++ IMG_SIZE_T uiSizeInBytes,
++ IMG_UINT32 ui32CacheFlags,
++ IMG_PVOID *ppvUserAddr,
++ IMG_SIZE_T *puiActualSize,
++ IMG_HANDLE hMappingHandle);
++
++PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie,
++ IMG_PVOID pvUserAddr,
++ IMG_PVOID pvProcess);
++
++PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource);
++PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource);
++IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus);
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID);
++IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg);
++IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value);
++
++#ifndef OSReadHWReg
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++#endif
++#ifndef OSWriteHWReg
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++#endif
++
++typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer);
++
++PVRSRV_ERROR OSGetSysMemSize(IMG_SIZE_T *pui32Bytes);
++
++typedef enum _HOST_PCI_INIT_FLAGS_
++{
++ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001,
++ HOST_PCI_INIT_FLAG_MSI = 0x00000002,
++ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
++} HOST_PCI_INIT_FLAGS;
++
++struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
++IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData);
++
++IMG_VOID OSPanic(IMG_VOID);
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID);
++
++typedef enum _img_verify_test
++{
++ PVR_VERIFY_WRITE = 0,
++ PVR_VERIFY_READ
++} IMG_VERIFY_TEST;
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_SIZE_T ui32Bytes);
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
++PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
++
++#if defined(__linux__)
++PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround);
++PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSAcquirePhysPageAddr)
++#endif
++static INLINE PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCPUVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(psSysPAddr);
++ PVR_UNREFERENCED_PARAMETER(phOSWrapMem);
++ PVR_UNREFERENCED_PARAMETER(bWrapWorkaround);
++ return PVRSRV_OK;
++}
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReleasePhysPageAddr)
++#endif
++static INLINE PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
++{
++ PVR_UNREFERENCED_PARAMETER(hOSWrapMem);
++ return PVRSRV_OK;
++}
++#endif
++
++#if defined(__linux__) && defined(__KERNEL__)
++#define OS_SUPPORTS_IN_LISR
++static inline IMG_BOOL OSInLISR(IMG_VOID unref__ *pvSysData)
++{
++ return in_irq();
++}
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osperproc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osperproc.h
+new file mode 100644
+index 0000000..80a912f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osperproc.h
+@@ -0,0 +1,76 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __OSPERPROC_H__
++#define __OSPERPROC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__)
++PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData);
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessPrivateDataInit)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
++{
++ PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessPrivateDataDeInit)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
++{
++ PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessSetHandleOptions)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psHandleBase);
++
++ return PVRSRV_OK;
++}
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_km.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_km.h
+new file mode 100644
+index 0000000..c780e22
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_km.h
+@@ -0,0 +1,451 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PDUMP_KM_H_
++#define _PDUMP_KM_H_
++
++#if (defined(LINUX) && (defined(SUPPORT_SGX) || defined(SUPPORT_MSVDX)))
++
++#define SGX_SUPPORT_COMMON_PDUMP
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP)
++#include <pdump_osfunc.h>
++#endif
++#endif
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define PDUMP_FLAGS_NEVER 0x08000000UL
++#define PDUMP_FLAGS_TOOUT2MEM 0x10000000UL
++#define PDUMP_FLAGS_LASTFRAME 0x20000000UL
++#define PDUMP_FLAGS_RESETLFBUFFER 0x40000000UL
++#define PDUMP_FLAGS_CONTINUOUS 0x80000000UL
++
++#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0
++#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0
++
++#define PDUMP_STREAM_PARAM2 0
++#define PDUMP_STREAM_SCRIPT2 1
++#define PDUMP_STREAM_DRIVERINFO 2
++#define PDUMP_NUM_STREAMS 3
++
++
++#ifndef PDUMP
++#define MAKEUNIQUETAG(hMemInfo) (0)
++#endif
++
++#ifdef PDUMP
++
++#define MAKEUNIQUETAG(hMemInfo) (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)hMemInfo)->sMemBlk.hBuffer))->pMapping)
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ PDUMP_POLL_OPERATOR eOperator,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psProcData,
++ IMG_PVOID pvAltLinAddr,
++ IMG_PVOID pvLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_DEV_PHYADDR *pPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_DEV_VIRTADDR sDevAddr,
++ IMG_UINT32 ui32Start,
++ IMG_UINT32 ui32Length,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_BOOL bInitialisePages,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2);
++ IMG_VOID PDumpInitCommon(IMG_VOID);
++ IMG_VOID PDumpDeInitCommon(IMG_VOID);
++ IMG_VOID PDumpInit(IMG_VOID);
++ IMG_VOID PDumpDeInit(IMG_VOID);
++ PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID);
++ PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID);
++ IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame);
++ IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
++ IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags);
++
++ PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags);
++ IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ IMG_BOOL PDumpIsSuspended(IMG_VOID);
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP) || !defined(SUPPORT_VGX)
++
++ PVRSRV_ERROR PDumpRegKM(IMG_UINT32 dwReg,
++ IMG_UINT32 dwData);
++ PVRSRV_ERROR PDumpComment(IMG_CHAR* pszFormat, ...);
++ PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
++ IMG_CHAR* pszFormat,
++ ...);
++
++ PVRSRV_ERROR PDumpPDReg(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32dwData,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++#else
++ IMG_VOID PDumpRegKM(IMG_UINT32 dwReg,
++ IMG_UINT32 dwData);
++ IMG_VOID PDumpComment(IMG_CHAR* pszFormat, ...);
++ IMG_VOID PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
++ IMG_CHAR* pszFormat,
++ ...);
++
++
++ IMG_VOID PDumpPDReg(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32dwData,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++#endif
++
++ IMG_VOID PDumpMsvdxRegRead(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 dwRegOffset);
++
++ IMG_VOID PDumpMsvdxRegWrite(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 dwRegOffset,
++ const IMG_UINT32 dwData);
++
++ PVRSRV_ERROR PDumpMsvdxRegPol(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 ui32Offset,
++ const IMG_UINT32 ui32CheckFuncIdExt,
++ const IMG_UINT32 ui32RequValue,
++ const IMG_UINT32 ui32Enable,
++ const IMG_UINT32 ui32PollCount,
++ const IMG_UINT32 ui32TimeOut);
++
++ PVRSRV_ERROR PDumpMsvdxWriteRef(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 ui32VLROffset,
++ const IMG_UINT32 ui32Physical );
++
++ IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID);
++ IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID);
++
++ IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_PUINT32 pui32PhysPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 *pui32MMUContextID,
++ IMG_UINT32 ui32MMUType,
++ IMG_HANDLE hUniqueTag1,
++ IMG_VOID *pvPDCPUAddr);
++ PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 ui32MMUContextID,
++ IMG_UINT32 ui32MMUType);
++
++ PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2);
++
++ IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame);
++
++
++#if defined(LINUX)
++#define COMMON_PDUMP_OS_SUPPORT
++#endif
++
++#if defined (COMMON_PDUMP_OS_SUPPORT) && !defined(SUPPORT_VGX)
++
++ PVRSRV_ERROR PDumpTASignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDumpCounterRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDumpRegRead(const IMG_UINT32 dwRegOffset, IMG_UINT32 ui32Flags);
++
++ PVRSRV_ERROR PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
++
++ PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
++
++ PVRSRV_ERROR PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved);
++ PVRSRV_ERROR PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpHWPerfCBKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++#else
++ IMG_VOID PDumpTASignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++ IMG_VOID PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++ IMG_VOID PDumpCounterRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ IMG_VOID PDumpRegRead(const IMG_UINT32 dwRegOffset, IMG_UINT32 ui32Flags);
++ IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
++
++ IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
++ IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks);
++
++
++ IMG_VOID PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved);
++ IMG_VOID PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT IMG_VOID PDumpHWPerfCBKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ IMG_VOID PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++#endif
++
++ IMG_VOID PDumpVGXMemToFile(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 uiAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_VOID PDumpSuspendKM(IMG_VOID);
++ IMG_VOID PDumpResumeKM(IMG_VOID);
++
++ #define PDUMPMEMPOL PDumpMemPolKM
++ #define PDUMPMEM PDumpMemKM
++ #define PDUMPMEM2 PDumpMem2KM
++ #define PDUMPMEMUM PDumpMemUM
++ #define PDUMPINIT PDumpInitCommon
++ #define PDUMPDEINIT PDumpDeInitCommon
++ #define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM
++ #define PDUMPTESTFRAME PDumpIsCaptureFrameKM
++ #define PDUMPTESTNEXTFRAME PDumpTestNextFrame
++ #define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM
++ #define PDUMPREG PDumpRegKM
++ #define PDUMPCOMMENT PDumpComment
++ #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
++ #define PDUMPREGPOL PDumpRegPolKM
++ #define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM
++ #define PDUMPMALLOCPAGES PDumpMallocPages
++ #define PDUMPMALLOCPAGETABLE PDumpMallocPageTable
++ #define PDUMPSETMMUCONTEXT PDumpSetMMUContext
++ #define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext
++ #define PDUMPFREEPAGES PDumpFreePages
++ #define PDUMPFREEPAGETABLE PDumpFreePageTable
++ #define PDUMPPDREG PDumpPDReg
++ #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags
++ #define PDUMPCBP PDumpCBP
++ #define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys
++ #define PDUMPENDINITPHASE PDumpStopInitPhaseKM
++ #define PDUMPMSVDXREGWRITE PDumpMsvdxRegWrite
++ #define PDUMPMSVDXREGREAD PDumpMsvdxRegRead
++ #define PDUMPMSVDXPOL PDumpMsvdxRegPol
++ #define PDUMPMSVDXWRITEREF PDumpMsvdxWriteRef
++ #define PDUMPBITMAPKM PDumpBitmapKM
++ #define PDUMPDRIVERINFO PDumpDriverInfoKM
++ #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags
++ #define PDUMPIDL PDumpIDL
++ #define PDUMPSUSPEND PDumpSuspendKM
++ #define PDUMPRESUME PDumpResumeKM
++
++#else
++ #if ((defined(LINUX) || defined(GCC_IA32)) || defined(GCC_ARM))
++ #define PDUMPMEMPOL(args...)
++ #define PDUMPMEM(args...)
++ #define PDUMPMEM2(args...)
++ #define PDUMPMEMUM(args...)
++ #define PDUMPINIT(args...)
++ #define PDUMPDEINIT(args...)
++ #define PDUMPISLASTFRAME(args...)
++ #define PDUMPTESTFRAME(args...)
++ #define PDUMPTESTNEXTFRAME(args...)
++ #define PDUMPREGWITHFLAGS(args...)
++ #define PDUMPREG(args...)
++ #define PDUMPCOMMENT(args...)
++ #define PDUMPREGPOL(args...)
++ #define PDUMPREGPOLWITHFLAGS(args...)
++ #define PDUMPMALLOCPAGES(args...)
++ #define PDUMPMALLOCPAGETABLE(args...)
++ #define PDUMPSETMMUCONTEXT(args...)
++ #define PDUMPCLEARMMUCONTEXT(args...)
++ #define PDUMPFREEPAGES(args...)
++ #define PDUMPFREEPAGETABLE(args...)
++ #define PDUMPPDREG(args...)
++ #define PDUMPPDREGWITHFLAGS(args...)
++ #define PDUMPSYNC(args...)
++ #define PDUMPCOPYTOMEM(args...)
++ #define PDUMPWRITE(args...)
++ #define PDUMPCBP(args...)
++ #define PDUMPCOMMENTWITHFLAGS(args...)
++ #define PDUMPMALLOCPAGESPHYS(args...)
++ #define PDUMPENDINITPHASE(args...)
++ #define PDUMPMSVDXREG(args...)
++ #define PDUMPMSVDXREGWRITE(args...)
++ #define PDUMPMSVDXREGREAD(args...)
++ #define PDUMPMSVDXPOLEQ(args...)
++ #define PDUMPMSVDXPOL(args...)
++ #define PDUMPBITMAPKM(args...)
++ #define PDUMPDRIVERINFO(args...)
++ #define PDUMPIDLWITHFLAGS(args...)
++ #define PDUMPIDL(args...)
++ #define PDUMPSUSPEND(args...)
++ #define PDUMPRESUME(args...)
++ #define PDUMPMSVDXWRITEREF(args...)
++ #else
++ #error Compiler not specified
++ #endif
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_osfunc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_osfunc.h
+new file mode 100644
+index 0000000..7c6db05
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_osfunc.h
+@@ -0,0 +1,137 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PDUMP_OSFUNC_H__
++#define __PDUMP_OSFUNC_H__
++
++#include <stdarg.h>
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++#define MAX_PDUMP_STRING_LENGTH (256)
++#define PDUMP_GET_SCRIPT_STRING() \
++ IMG_HANDLE hScript; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_MSG_STRING() \
++ IMG_HANDLE hMsg; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetMessageString(&hMsg, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_FILE_STRING() \
++ IMG_CHAR *pszFileName; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
++ IMG_HANDLE hScript; \
++ IMG_CHAR *pszFileName; \
++ IMG_UINT32 ui32MaxLenScript; \
++ IMG_UINT32 ui32MaxLenFileName; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
++ if(eError != PVRSRV_OK) return eError; \
++ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
++ if(eError != PVRSRV_OK) return eError;
++
++
++
++ PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen);
++
++
++ PVRSRV_ERROR PDumpOSGetMessageString(IMG_HANDLE *phMsg, IMG_UINT32 *pui32MaxLen);
++
++
++ PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen);
++
++
++
++
++#define PDUMP_va_list va_list
++#define PDUMP_va_start va_start
++#define PDUMP_va_end va_end
++
++
++
++IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream);
++
++IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream);
++
++IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID);
++
++IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags);
++
++IMG_BOOL PDumpOSIsSuspended(IMG_VOID);
++
++IMG_BOOL PDumpOSJTInitialised(IMG_VOID);
++
++IMG_BOOL PDumpOSWriteString(IMG_HANDLE hDbgStream,
++ IMG_UINT8 *psui8Data,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags);
++
++IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags);
++
++PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...);
++
++IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...);
++
++PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...);
++
++PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs);
++
++IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
++
++IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
++
++IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT8 *pui8LinAddr,
++ IMG_UINT32 ui32PageSize,
++ IMG_DEV_PHYADDR *psDevPAddr);
++
++IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_PUINT8 pui8LinAddr,
++ IMG_UINT32 *pui32PageOffset);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/perproc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/perproc.h
+new file mode 100644
+index 0000000..233bb59
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/perproc.h
+@@ -0,0 +1,110 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PERPROC_H__
++#define __PERPROC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "resman.h"
++
++#include "handle.h"
++
++typedef struct _PVRSRV_PER_PROCESS_DATA_
++{
++ IMG_UINT32 ui32PID;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_CONTEXT hResManContext;
++ IMG_HANDLE hPerProcData;
++ PVRSRV_HANDLE_BASE *psHandleBase;
++#if defined (PVR_SECURE_HANDLES)
++
++ IMG_BOOL bHandlesBatched;
++#endif
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_BOOL bInitProcess;
++
++
++ IMG_HANDLE hOsPrivateData;
++} PVRSRV_PER_PROCESS_DATA;
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID);
++IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID);
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID);
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindPerProcessData)
++#endif
++static INLINE
++PVRSRV_PER_PROCESS_DATA *PVRSRVFindPerProcessData(IMG_VOID)
++{
++ return PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVProcessPrivateData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ return (psPerProc != IMG_NULL) ? psPerProc->hOsPrivateData : IMG_NULL;
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVPerProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVPerProcessPrivateData(IMG_UINT32 ui32PID)
++{
++ return PVRSRVProcessPrivateData(PVRSRVPerProcessData(ui32PID));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindPerProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVFindPerProcessPrivateData(IMG_VOID)
++{
++ return PVRSRVProcessPrivateData(PVRSRVFindPerProcessData());
++}
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/power.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/power.h
+new file mode 100644
+index 0000000..cd8d737
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/power.h
+@@ -0,0 +1,133 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef POWER_H
++#define POWER_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++
++typedef struct _PVRSRV_POWER_DEV_TAG_
++{
++ PFN_PRE_POWER pfnPrePower;
++ PFN_POST_POWER pfnPostPower;
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange;
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
++ struct _PVRSRV_POWER_DEV_TAG_ *psNext;
++ struct _PVRSRV_POWER_DEV_TAG_ **ppsThis;
++
++} PVRSRV_POWER_DEV;
++
++typedef enum _PVRSRV_INIT_SERVER_STATE_
++{
++ PVRSRV_INIT_SERVER_Unspecified = -1,
++ PVRSRV_INIT_SERVER_RUNNING = 0,
++ PVRSRV_INIT_SERVER_RAN = 1,
++ PVRSRV_INIT_SERVER_SUCCESSFUL = 2,
++ PVRSRV_INIT_SERVER_NUM = 3,
++ PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE;
++
++IMG_IMPORT
++IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState);
++
++
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bSystemPowerEvent);
++IMG_IMPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM (PVRSRV_SYS_POWER_STATE ePVRState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex,
++ PFN_PRE_POWER pfnPrePower,
++ PFN_POST_POWER pfnPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex);
++
++IMG_IMPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo);
++
++IMG_IMPORT
++IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo);
++
++
++/*
++ * PVRSRVPowerOnSystemWithDevice
++ *
++ * Description: Power on the System if it is off, but instead of powering all
++ * of the devices to their "default" state, only turn on the specified
++ * device index.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerOnSystemWithDevice(IMG_UINT32 ui32DeviceIndex,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex);
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/queue.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/queue.h
+new file mode 100644
+index 0000000..0646137
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/queue.h
+@@ -0,0 +1,119 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef QUEUE_H
++#define QUEUE_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define UPDATE_QUEUE_ROFF(psQueue, ui32Size) \
++ psQueue->ui32ReadOffset = (psQueue->ui32ReadOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++ typedef struct _COMMAND_COMPLETE_DATA_
++ {
++ IMG_BOOL bInUse;
++
++ IMG_UINT32 ui32DstSyncCount;
++ IMG_UINT32 ui32SrcSyncCount;
++ PVRSRV_SYNC_OBJECT *psDstSync;
++ PVRSRV_SYNC_OBJECT *psSrcSync;
++ IMG_UINT32 ui32AllocSize;
++ }COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA;
++
++#if !defined(USE_CODE)
++IMG_VOID QueueDumpDebugInfo(IMG_VOID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVProcessQueues (IMG_UINT32 ui32CallerID,
++ IMG_BOOL bFlush);
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/types.h>
++#include <linux/seq_file.h>
++off_t
++QueuePrintQueues (IMG_CHAR * buffer, size_t size, off_t off);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off);
++void ProcSeqShowQueue(struct seq_file *sfile,void* el);
++#endif
++
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND **ppsCommand,
++ IMG_UINT32 ui32DevIndex,
++ IMG_UINT16 CommandType,
++ IMG_UINT32 ui32DstSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ IMG_UINT32 ui32SrcSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ IMG_SIZE_T ui32DataByteSize );
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++ IMG_SIZE_T ui32ParamSize,
++ IMG_VOID **ppvSpace);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND *psCommand);
++
++IMG_IMPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR);
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ PFN_CMD_PROC *ppfnCmdProcList,
++ IMG_UINT32 ui32MaxSyncsPerCmd[][2],
++ IMG_UINT32 ui32CmdCount);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ IMG_UINT32 ui32CmdCount);
++
++#endif
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/ra.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/ra.h
+new file mode 100644
+index 0000000..3cb7e78
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/ra.h
+@@ -0,0 +1,155 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _RA_H_
++#define _RA_H_
++
++#include "img_types.h"
++#include "hash.h"
++#include "osfunc.h"
++
++typedef struct _RA_ARENA_ RA_ARENA;
++typedef struct _BM_MAPPING_ BM_MAPPING;
++
++
++
++#define RA_STATS
++
++
++struct _RA_STATISTICS_
++{
++
++ IMG_SIZE_T uSpanCount;
++
++
++ IMG_SIZE_T uLiveSegmentCount;
++
++
++ IMG_SIZE_T uFreeSegmentCount;
++
++
++ IMG_SIZE_T uTotalResourceCount;
++
++
++ IMG_SIZE_T uFreeResourceCount;
++
++
++ IMG_SIZE_T uCumulativeAllocs;
++
++
++ IMG_SIZE_T uCumulativeFrees;
++
++
++ IMG_SIZE_T uImportCount;
++
++
++ IMG_SIZE_T uExportCount;
++};
++typedef struct _RA_STATISTICS_ RA_STATISTICS;
++
++struct _RA_SEGMENT_DETAILS_
++{
++ IMG_SIZE_T uiSize;
++ IMG_CPU_PHYADDR sCpuPhyAddr;
++ IMG_HANDLE hSegment;
++};
++typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS;
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++ IMG_UINTPTR_T base,
++ IMG_SIZE_T uSize,
++ BM_MAPPING *psMapping,
++ IMG_SIZE_T uQuantum,
++ IMG_BOOL (*imp_alloc)(IMG_VOID *_h,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase),
++ IMG_VOID (*imp_free) (IMG_VOID *,
++ IMG_UINTPTR_T,
++ BM_MAPPING *),
++ IMG_VOID (*backingstore_free) (IMG_VOID *,
++ IMG_SIZE_T,
++ IMG_SIZE_T,
++ IMG_HANDLE),
++ IMG_VOID *import_handle);
++
++IMG_VOID
++RA_Delete (RA_ARENA *pArena);
++
++IMG_BOOL
++RA_TestDelete (RA_ARENA *pArena);
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize);
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *pBase);
++
++IMG_VOID
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore);
++
++
++#ifdef RA_STATS
++
++#define CHECK_SPACE(total) \
++{ \
++ if(total<100) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++}
++
++#define UPDATE_SPACE(str, count, total) \
++{ \
++ if(count == -1) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++ else \
++ { \
++ str += count; \
++ total -= count; \
++ } \
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails);
++
++
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++ IMG_CHAR **ppszStr,
++ IMG_UINT32 *pui32StrLen);
++
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/resman.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/resman.h
+new file mode 100644
+index 0000000..c5571f7
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/resman.h
+@@ -0,0 +1,113 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __RESMAN_H__
++#define __RESMAN_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++enum {
++
++ RESMAN_TYPE_SHARED_PB_DESC = 1,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ RESMAN_TYPE_TRANSFER_CONTEXT,
++
++
++
++
++
++ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++
++
++ RESMAN_TYPE_BUFFERCLASS_DEVICE,
++
++
++ RESMAN_TYPE_OS_USERMODE_MAPPING,
++
++
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_WRAP,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ RESMAN_TYPE_EVENT_OBJECT,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ RESMAN_TYPE_MODIFY_SYNC_OPS,
++
++
++ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION
++};
++
++#define RESMAN_CRITERIA_ALL 0x00000000
++#define RESMAN_CRITERIA_RESTYPE 0x00000001
++#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002
++#define RESMAN_CRITERIA_UI32_PARAM 0x00000004
++
++typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++
++typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM;
++typedef struct _RESMAN_CONTEXT_ *PRESMAN_CONTEXT;
++
++PVRSRV_ERROR ResManInit(IMG_VOID);
++IMG_VOID ResManDeInit(IMG_VOID);
++
++PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT hResManContext,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ RESMAN_FREE_FN pfnFreeResource);
++
++PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM psResItem);
++
++PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT hResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param);
++
++PVRSRV_ERROR ResManDissociateRes(PRESMAN_ITEM psResItem,
++ PRESMAN_CONTEXT psNewResManContext);
++
++PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT hResManContext,
++ PRESMAN_ITEM psItem);
++
++PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc,
++ PRESMAN_CONTEXT *phResManContext);
++IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT hResManContext,
++ IMG_BOOL bKernelContext);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/services_headers.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/services_headers.h
+new file mode 100644
+index 0000000..eb00dbb
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/services_headers.h
+@@ -0,0 +1,49 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SERVICES_HEADERS_H
++#define SERVICES_HEADERS_H
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "power.h"
++#include "resman.h"
++#include "queue.h"
++#include "srvkm.h"
++#include "kerneldisplay.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "metrics.h"
++#include "osfunc.h"
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/srvkm.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/srvkm.h
+new file mode 100644
+index 0000000..a344253
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/srvkm.h
+@@ -0,0 +1,69 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SRVKM_H
++#define SRVKM_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++ #ifdef PVR_DISABLE_LOGGING
++ #define PVR_LOG(X)
++ #else
++ #define PVR_LOG(X) PVRSRVReleasePrintf X
++ #endif
++
++ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat,
++ ...);
++
++ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID);
++ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID);
++
++ IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State);
++
++ PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_SIZE_T *puiBufSize, IMG_BOOL bSave);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
++{\
++ IMG_UINT32 uiOffset, uiStart, uiCurrent, uiNotLastLoop; \
++ for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, uiNotLastLoop = 1;\
++ ((uiCurrent - uiStart + uiOffset) < TIMEOUT) || uiNotLastLoop--; \
++ uiCurrent = OSClockus(), \
++ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \
++ uiStart = uiCurrent < uiStart ? 0 : uiStart)
++
++#define END_LOOP_UNTIL_TIMEOUT() \
++}
++
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/include/syscommon.h b/drivers/gpu/drm/mrst/pvr/services4/system/include/syscommon.h
+new file mode 100644
+index 0000000..20b83c1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/include/syscommon.h
+@@ -0,0 +1,217 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SYSCOMMON_H
++#define _SYSCOMMON_H
++
++#include "sysconfig.h"
++#include "sysinfo.h"
++#include "servicesint.h"
++#include "queue.h"
++#include "power.h"
++#include "resman.h"
++#include "ra.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#if defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__)
++#include <asm/io.h>
++#endif
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef struct _SYS_DEVICE_ID_TAG
++{
++ IMG_UINT32 uiID;
++ IMG_BOOL bInUse;
++
++} SYS_DEVICE_ID;
++
++
++#define SYS_MAX_LOCAL_DEVMEM_ARENAS 4
++
++typedef struct _SYS_DATA_TAG_
++{
++ IMG_UINT32 ui32NumDevices;
++ SYS_DEVICE_ID sDeviceID[SYS_DEVICE_COUNT];
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++ PVRSRV_POWER_DEV *psPowerDeviceList;
++ PVRSRV_RESOURCE sPowerStateChangeResource;
++ PVRSRV_SYS_POWER_STATE eCurrentPowerState;
++ PVRSRV_SYS_POWER_STATE eFailedPowerState;
++ IMG_UINT32 ui32CurrentOSPowerState;
++ PVRSRV_QUEUE_INFO *psQueueList;
++ PVRSRV_KERNEL_SYNC_INFO *psSharedSyncInfoList;
++ IMG_PVOID pvEnvSpecificData;
++ IMG_PVOID pvSysSpecificData;
++ PVRSRV_RESOURCE sQProcessResource;
++ IMG_VOID *pvSOCRegsBase;
++ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
++ IMG_UINT32 *pvSOCTimerRegisterKM;
++ IMG_VOID *pvSOCClockGateRegsBase;
++ IMG_UINT32 ui32SOCClockGateRegsSize;
++ PFN_CMD_PROC *ppfnCmdProcList[SYS_DEVICE_COUNT];
++
++
++
++ PCOMMAND_COMPLETE_DATA *ppsCmdCompleteData[SYS_DEVICE_COUNT];
++
++
++ IMG_BOOL bReProcessQueues;
++
++ RA_ARENA *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS];
++
++ IMG_CHAR *pszVersionString;
++ PVRSRV_EVENTOBJECT *psGlobalEventObject;
++
++ IMG_BOOL bFlushAll;
++
++} SYS_DATA;
++
++
++
++PVRSRV_ERROR SysInitialise(IMG_VOID);
++PVRSRV_ERROR SysFinalise(IMG_VOID);
++
++PVRSRV_ERROR SysDeinitialise(SYS_DATA *psSysData);
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap);
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData,
++ PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits);
++
++PVRSRV_ERROR SysResetDevice(IMG_UINT32 ui32DeviceIndex);
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData);
++IMG_VOID SysPowerLockUnwrap(SYS_DATA *psSysData);
++#endif
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize);
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR cpu_paddr);
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR SysPAddr);
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr);
++#if defined(PVR_LMA)
++IMG_BOOL SysVerifyCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR CpuPAddr);
++IMG_BOOL SysVerifySysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++#endif
++
++extern SYS_DATA* gpsSysData;
++
++#if !defined(USE_CODE)
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysAcquireData)
++#endif
++static INLINE PVRSRV_ERROR SysAcquireData(SYS_DATA **ppsSysData)
++{
++
++ *ppsSysData = gpsSysData;
++
++
++
++
++
++ if (!gpsSysData)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysInitialiseCommon)
++#endif
++static INLINE PVRSRV_ERROR SysInitialiseCommon(SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++
++ eError = PVRSRVInit(psSysData);
++
++ return eError;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysDeinitialiseCommon)
++#endif
++static INLINE IMG_VOID SysDeinitialiseCommon(SYS_DATA *psSysData)
++{
++
++ PVRSRVDeInit(psSysData);
++
++ OSDestroyResource(&psSysData->sPowerStateChangeResource);
++}
++#endif
++
++
++#if !(defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__))
++#define SysReadHWReg(p, o) OSReadHWReg(p, o)
++#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v)
++#else
++static inline IMG_UINT32 SysReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++ return (IMG_UINT32) readl(pvLinRegBaseAddr + ui32Offset);
++}
++
++static inline IMG_VOID SysWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++ writel(ui32Value, pvLinRegBaseAddr + ui32Offset);
++}
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/oemfuncs.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/oemfuncs.h
+new file mode 100644
+index 0000000..0d3b6d7
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/oemfuncs.h
+@@ -0,0 +1,72 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define OEM_EXCHANGE_POWER_STATE (1<<0)
++#define OEM_DEVICE_MEMORY_POWER (1<<1)
++#define OEM_DISPLAY_POWER (1<<2)
++#define OEM_GET_EXT_FUNCS (1<<3)
++
++typedef struct OEM_ACCESS_INFO_TAG
++{
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32FBPhysBaseAddress;
++ IMG_UINT32 ui32FBMemAvailable;
++ IMG_UINT32 ui32SysPhysBaseAddress;
++ IMG_UINT32 ui32SysSize;
++ IMG_UINT32 ui32DevIRQ;
++} OEM_ACCESS_INFO, *POEM_ACCESS_INFO;
++
++typedef IMG_UINT32 (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32 Ioctl,
++ IMG_BYTE *pInBuf,
++ IMG_UINT32 InBufLen,
++ IMG_BYTE *pOutBuf,
++ IMG_UINT32 OutBufLen,
++ IMG_UINT32 *pdwBytesTransferred);
++
++
++typedef PVRSRV_ERROR (*PFN_SRV_READREGSTRING)(PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++typedef struct PVRSRV_DC_OEM_JTABLE_TAG
++{
++ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch;
++ PFN_SRV_READREGSTRING pfnOEMReadRegistryString;
++ PFN_SRV_READREGSTRING pfnOEMWriteRegistryString;
++
++} PVRSRV_DC_OEM_JTABLE;
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.c
+new file mode 100644
+index 0000000..b7fa0c4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.c
+@@ -0,0 +1,479 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#include "ospm_power.h"
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "servicesext.h"
++#include "power.h"
++#include "services.h"
++#include "osfunc.h"
++#include <linux/mutex.h>
++
++extern IMG_UINT32 gui32SGXDeviceID;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++extern IMG_UINT32 gui32MRSTMSVDXDeviceID;
++extern IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++struct drm_device *gpDrmDevice = NULL;
++static struct mutex g_ospm_mutex;
++static bool gbSuspendInProgress = false;
++static bool gbResumeInProgress = false;
++static int g_hw_power_status_mask;
++static atomic_t g_display_access_count;
++static atomic_t g_graphics_access_count;
++static atomic_t g_videoenc_access_count;
++static atomic_t g_videodec_access_count;
++
++/*
++ * ospm_power_init
++ *
++ * Description: Initialize this ospm power management module
++ */
++void ospm_power_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++
++ pci_write_config_dword(pci_root, 0xD0, 0xd0047800);
++ pci_read_config_dword(pci_root, 0xD4, &dev_priv->ospm_base);
++ dev_priv->ospm_base &= 0xffff;
++
++ dev_priv->apm_reg = MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA);
++ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
++
++ gpDrmDevice = dev;
++ mutex_init(&g_ospm_mutex);
++ g_hw_power_status_mask = OSPM_ALL_ISLANDS;
++ atomic_set(&g_display_access_count, 0);
++ atomic_set(&g_graphics_access_count, 0);
++ atomic_set(&g_videoenc_access_count, 0);
++ atomic_set(&g_videodec_access_count, 0);
++
++
++#ifdef OSPM_STAT
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_on_time = 0;
++ dev_priv->gfx_off_time = 0;
++#endif
++}
++
++/*
++ * ospm_power_uninit
++ *
++ * Description: Uninitialize this ospm power management module
++ */
++void ospm_power_uninit(void)
++{
++ mutex_destroy(&g_ospm_mutex);
++}
++
++/*
++ * ospm_power_suspend
++ *
++ * Description: OSPM is telling our driver to suspend so save state
++ * and power down all hardware.
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++ int ret = 0;
++ bool bDisplayOff = false;
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_graphics_access_count) ||
++ atomic_read(&g_videoenc_access_count) ||
++ atomic_read(&g_videodec_access_count) ||
++ atomic_read(&g_display_access_count))
++ ret = -EBUSY;
++ //SGX will be powered off when idle due to D0i3 support. If we don't wait
++ //for D0i3, then we hit cases where user mode driver gets stuck waiting
++ //for command completion when SGX is powered off.
++ else if (ospm_power_is_hw_on(OSPM_GRAPHICS_ISLAND))
++ ret = -EBUSY;
++ else if (psb_check_msvdx_idle(dev))
++ ret = -EBUSY;
++ else if (IS_MRST(dev) && !dev_priv->topaz_disabled && lnc_check_topaz_idle(dev))
++ ret = -EBUSY;
++
++ gbSuspendInProgress = true;
++
++ if (!ret) {
++ PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++ bDisplayOff = true;
++ } else if (!atomic_read(&g_display_access_count)) {
++ //At least power down the display
++ PVRSRVSetDevicePowerStateKM(gui32MRSTDisplayDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ KERNEL_ID,
++ IMG_FALSE);
++ bDisplayOff = true;
++ }
++
++ if (bDisplayOff) {
++ //Set dpms status to off so that an "xset dpms force on" from the
++ //OSPM Framework (or elsewhere) actually executes
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ connector->dpms = DRM_MODE_DPMS_OFF;
++ }
++ }
++
++ gbSuspendInProgress = false;
++
++ mutex_unlock(&g_ospm_mutex);
++ return ret;
++}
++
++/*
++ * ospm_power_resume
++ *
++ * Description: OSPM is telling our driver to resume so restore state
++ * and power up necessary hardware.
++ */
++int ospm_power_resume(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++
++ mutex_lock(&g_ospm_mutex);
++ gbResumeInProgress = true;
++ PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0);
++
++ //Set dpms status to on. We should probably only do this for
++ //connectors that were on prior to the suspend, but for Moorestown
++ //we only have one connector so just brute force it.
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ connector->dpms = DRM_MODE_DPMS_ON;
++ }
++
++ gbResumeInProgress = false;
++ mutex_unlock(&g_ospm_mutex);
++ return 0;
++}
++
++
++/*
++ * ospm_power_island_down
++ *
++ * Description: Cut power to the specified island(s) (powergating)
++ */
++void ospm_power_island_down(int hw_islands)
++{
++ u32 pwr_cnt = 0;
++ u32 pwr_mask = 0;
++ u32 pwr_sts;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ g_hw_power_status_mask &= ~hw_islands;
++
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_ON) {
++ dev_priv->gfx_on_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_OFF;
++ dev_priv->gfx_off_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++ if (pwr_cnt) {
++ pwr_cnt |= inl(dev_priv->apm_base);
++ outl(pwr_cnt, dev_priv->apm_base);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(PSB_PWRGT_DISPLAY_MASK, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++}
++
++/*
++ * ospm_power_island_up
++ *
++ * Description: Restore power to the specified island(s) (powergating)
++ */
++void ospm_power_island_up(int hw_islands)
++{
++ u32 pwr_cnt;
++ u32 pwr_sts;
++ u32 pwr_mask;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ if (IS_MRST(gpDrmDevice) &&
++ (hw_islands & (OSPM_GRAPHICS_ISLAND | OSPM_VIDEO_ENC_ISLAND |
++ OSPM_VIDEO_DEC_ISLAND))) {
++ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
++ pwr_mask = 0;
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_OFF) {
++ dev_priv->gfx_off_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_on_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++
++ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
++ pwr_cnt &= ~PSB_PWRGT_DISPLAY_MASK;
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ g_hw_power_status_mask |= hw_islands;
++}
++
++/*
++ * ospm_power_using_hw_begin
++ *
++ * Description: Notify PowerMgmt module that you will be accessing the
++ * specified island's hw so don't power it off. If the island is off,
++ * this function will behave differently depending on the type param.
++ *
++ * OSPM_UHB_FORCE_POWER_ON:
++ * Power on the specified island.
++ * OSPM_UHB_IGNORE_POWER_OFF:
++ * Increment the access counters. The caller is expected to power on
++ * the island if necessary.
++ * OSPM_UHB_ONLY_IF_ON:
++ * Return false and the caller is expected to not access the hw.
++ *
++ * NOTE *** If this is called from and interrupt handler or other atomic
++ * context, then it will return false if we are in the middle of a
++ * power state transition and the caller will be expected to handle that
++ * even if type is OSPM_UHB_FORCE_POWER_ON.
++ */
++bool ospm_power_using_hw_begin(int hw_island, UHBUsage usage)
++{
++ bool ret = false;
++ bool b_island_is_off = false;
++ bool b_atomic = (in_interrupt() || in_atomic());
++ bool b_force_on = (usage == OSPM_UHB_FORCE_POWER_ON);
++ bool b_ignore_off = (usage == OSPM_UHB_IGNORE_POWER_OFF);
++ IMG_UINT32 deviceID = 0;
++
++ if (!b_atomic)
++ mutex_lock(&g_ospm_mutex);
++ else if ((gbSuspendInProgress || gbResumeInProgress) && b_force_on)
++ goto FailExit;
++
++ b_island_is_off = hw_island & (OSPM_ALL_ISLANDS & ~g_hw_power_status_mask);
++
++ if (b_island_is_off && !b_force_on && !b_ignore_off)
++ goto FailExit;
++
++ if (b_island_is_off && b_force_on) {
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ deviceID = gui32SGXDeviceID;
++ break;
++ case OSPM_DISPLAY_ISLAND:
++ deviceID = gui32MRSTDisplayDeviceID;
++ break;
++ case OSPM_VIDEO_DEC_ISLAND:
++ deviceID = gui32MRSTMSVDXDeviceID;
++ break;
++ case OSPM_VIDEO_ENC_ISLAND:
++ deviceID = gui32MRSTTOPAZDeviceID;
++ break;
++ }
++
++ if (PVRSRVPowerOnSystemWithDevice(deviceID, b_atomic ? ISR_ID : KERNEL_ID, IMG_FALSE) != PVRSRV_OK)
++ goto FailExit;
++ }
++
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_inc(&g_graphics_access_count);
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_inc(&g_videoenc_access_count);
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_inc(&g_videodec_access_count);
++ case OSPM_DISPLAY_ISLAND:
++ atomic_inc(&g_display_access_count);
++ }
++
++ ret = true;
++FailExit:
++
++ if (!b_atomic)
++ mutex_unlock(&g_ospm_mutex);
++
++ return ret;
++}
++
++
++/*
++ * ospm_power_using_hw_end
++ *
++ * Description: Notify PowerMgmt module that you are done accessing the
++ * specified island's hw so feel free to power it off. Note that this
++ * function doesn't actually power off the islands.
++ */
++void ospm_power_using_hw_end(int hw_island)
++{
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_dec(&g_graphics_access_count);
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_dec(&g_videoenc_access_count);
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_dec(&g_videodec_access_count);
++ case OSPM_DISPLAY_ISLAND:
++ atomic_dec(&g_display_access_count);
++ }
++
++ WARN_ON(atomic_read(&g_graphics_access_count) < 0);
++ WARN_ON(atomic_read(&g_videoenc_access_count) < 0);
++ WARN_ON(atomic_read(&g_videodec_access_count) < 0);
++ WARN_ON(atomic_read(&g_display_access_count) < 0);
++}
++
++/*
++ * ospm_power_is_hw_on
++ *
++ * Description: do an instantaneous check for if the specified islands
++ * are on. Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall. Otherwise, use
++ * ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands)
++{
++ return ((g_hw_power_status_mask & hw_islands) == hw_islands);
++}
++
++void ospm_apm_power_down_msvdx(struct drm_device *dev)
++{
++ uint32_t ui32_reg_value = 0;
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_videodec_access_count))
++ goto out;
++ if (psb_check_msvdx_idle(dev))
++ goto out;
++
++ /* FIXME: workaround for HSD3469585
++ * re-enable DRAM Self Refresh Mode
++ * by setting DUNIT.DPMC0
++ */
++ ui32_reg_value = MSG_READ32(0x1, 0x4);
++ MSG_WRITE32(0x1, 0x4, (ui32_reg_value | (0x1 << 7)));
++
++ gbSuspendInProgress = true;
++ PVRSRVSetDevicePowerStateKM(gui32MRSTMSVDXDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ISR_ID,
++ IMG_FALSE);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
++
++void ospm_apm_power_down_topaz(struct drm_device *dev)
++{
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_videoenc_access_count))
++ goto out;
++ if (lnc_check_topaz_idle(dev))
++ goto out;
++
++ gbSuspendInProgress = true;
++ PVRSRVSetDevicePowerStateKM(gui32MRSTTOPAZDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ISR_ID,
++ IMG_FALSE);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.h
+new file mode 100644
+index 0000000..835bfae
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.h
+@@ -0,0 +1,79 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _OSPM_POWER_H_
++#define _OSPM_POWER_H_
++
++#include <linux/pci.h>
++#include <drm/drmP.h>
++
++#define OSPM_GRAPHICS_ISLAND 0x1
++#define OSPM_VIDEO_ENC_ISLAND 0x2
++#define OSPM_VIDEO_DEC_ISLAND 0x4
++#define OSPM_DISPLAY_ISLAND 0x8
++#define OSPM_ALL_ISLANDS 0xf
++
++
++typedef enum _UHBUsage
++{
++ OSPM_UHB_ONLY_IF_ON = 0,
++ OSPM_UHB_FORCE_POWER_ON,
++ OSPM_UHB_IGNORE_POWER_OFF,
++} UHBUsage;
++
++
++void ospm_power_init(struct drm_device *dev);
++void ospm_power_uninit(void);
++
++/*
++ * OSPM will call these functions
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state);
++int ospm_power_resume(struct pci_dev *pdev);
++
++/*
++ * These are the functions the driver should use to wrap all hw access
++ * (i.e. register reads and writes)
++ */
++bool ospm_power_using_hw_begin(int hw_island, UHBUsage type);
++void ospm_power_using_hw_end(int hw_island);
++
++/*
++ * Power up/down different hw component rails/islands
++ */
++void ospm_power_island_down(int hw_islands);
++void ospm_power_island_up(int hw_islands);
++
++/*
++ * Use this function to do an instantaneous check for if the hw is on.
++ * Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall and you need to
++ * prevent a deadlock situation. Otherwise use ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands);
++
++/* Use these functions to power down video HW for D0i3 purpose */
++void ospm_apm_power_down_msvdx(struct drm_device *dev);
++void ospm_apm_power_down_topaz(struct drm_device *dev);
++
++#endif /*_OSPM_POWER_H_*/
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c
+new file mode 100644
+index 0000000..6c56df5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c
+@@ -0,0 +1,135 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "pvr_drm_shared.h"
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++#include "linkage.h"
++#include "mmap.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#include "sys_pvr_drm_import.h"
++
++#include "sys_pvr_drm_export.h"
++
++int
++SYSPVRInit(void)
++{
++ PVRDPFInit();
++
++ return 0;
++}
++
++
++int
++SYSPVRLoad(struct drm_device *dev, unsigned long flags)
++{
++ return PVRSRVDrmLoad(dev, flags);
++}
++
++int
++SYSPVROpen(struct drm_device *dev, struct drm_file *pFile)
++{
++ return PVRSRVDrmOpen(dev, pFile);
++}
++
++int
++SYSPVRUnload(struct drm_device *dev)
++{
++ return PVRSRVDrmUnload(dev);
++}
++
++void
++SYSPVRPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVDrmPostClose(dev, file);
++}
++
++int
++SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRSRV_BridgeDispatchKM(dev, arg, pFile);
++}
++
++int
++SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMIsMaster(dev, arg, pFile);
++}
++
++int
++SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMUnprivCmd(dev, arg, pFile);
++}
++
++int
++SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ int ret;
++
++ ret = PVRMMap(pFile, ps_vma);
++ if (ret == -ENOENT)
++ {
++ ret = drm_mmap(pFile, ps_vma);
++ }
++
++ return ret;
++}
++
++int
++SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++#if defined(PDUMP)
++ return dbgdrv_ioctl(dev, arg, pFile);
++#else
++ return -EINVAL;
++#endif
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h
+new file mode 100644
+index 0000000..c73cea1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_EXPORT_H__)
++#define __SYS_PVR_DRM_EXPORT_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(__KERNEL__)
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#define PVR_DRM_SRVKM_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
++
++#define PVR_DRM_DISP_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
++
++#define PVR_DRM_BC_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_BC_CMD)
++
++#define PVR_DRM_IS_MASTER_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
++
++#define PVR_DRM_UNPRIV_IOCTL \
++ DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, IMG_UINT32)
++
++#if defined(PDUMP)
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD, IOCTL_PACKAGE)
++#else
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD)
++#endif
++
++int SYSPVRInit(void);
++int SYSPVRLoad(struct drm_device *dev, unsigned long flags);
++int SYSPVROpen(struct drm_device *dev, struct drm_file *pFile);
++int SYSPVRUnload(struct drm_device *dev);
++void SYSPVRPostClose(struct drm_device *dev, struct drm_file *file);
++int SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++int SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev);
++
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h
+new file mode 100644
+index 0000000..1efeb75
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h
+@@ -0,0 +1,45 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_IMPORT_H__)
++#define __SYS_PVR_DRM_IMPORT_H__
++
++#if defined(__KERNEL__)
++#include "psb_drm.h"
++#endif
++
++#define DRM_PSB_PLACEMENT_OFFSET 0x13
++
++#if 0
++#define DRM_PVR_RESERVED1 0x0D
++#define DRM_PVR_RESERVED2 0x0E
++#define DRM_PVR_RESERVED3 0x0F
++#define DRM_PVR_RESERVED4 0x10
++#define DRM_PVR_RESERVED5 0x11
++#define DRM_PVR_RESERVED6 0x12
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.c
+new file mode 100644
+index 0000000..955f793
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.c
+@@ -0,0 +1,1022 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++#include "linux/pci.h"
++#endif
++#if defined(SUPPORT_DRI_DRM)
++#include "drm/drmP.h"
++#endif
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pdump_km.h"
++#include "syslocal.h"
++#include "env_data.h"
++#include "ospm_power.h"
++#include "psb_drv.h"
++#include "sysirq.h"
++#include "msvdx_power.h"
++#include "topaz_power.h"
++#include "sys_pvr_drm_export.h"
++
++/* Graphics MSI address and data region in PCIx */
++#define MRST_PCIx_MSI_ADDR_LOC 0x94
++#define MRST_PCIx_MSI_DATA_LOC 0x98
++
++#define SYS_SGX_CLOCK_SPEED (400000000)
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
++#define SYS_SGX_PDS_TIMER_FREQ (1000)
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (50)
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++#else
++#define DRI_DRM_STATIC static
++#endif
++
++SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
++SYS_DATA gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++IMG_UINT32 gui32SGXDeviceID;
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++IMG_UINT32 gui32MRSTMSVDXDeviceID;
++IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++extern struct drm_device *gpDrmDevice;
++
++#if !defined(NO_HARDWARE)
++IMG_CPU_VIRTADDR gsPoulsboRegsCPUVaddr;
++
++IMG_CPU_VIRTADDR gsPoulsboDisplayRegsCPUVaddr;
++#endif
++
++#ifdef LDM_PCI
++extern struct pci_dev *gpsPVRLDMDev;
++#endif
++
++#define POULSBO_ADDR_RANGE_INDEX (MMADR_INDEX - 4)
++#define POULSBO_HP_ADDR_RANGE_INDEX (GMADR_INDEX - 4)
++static PVRSRV_ERROR PCIInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#ifdef LDM_PCI
++ psSysSpecData->hSGXPCI = OSPCISetDev((IMG_VOID *)psSysSpecData->psPCIDev, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#else
++ psSysSpecData->hSGXPCI = OSPCIAcquireDev(SYS_SGX_DEV_VENDOR_ID, gpDrmDevice->pci_device, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#endif
++ if (!psSysSpecData->hSGXPCI)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Failed to acquire PCI device"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV);
++
++ PVR_TRACE(("PCI memory region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX)));
++ PVR_TRACE(("Host Port region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX)));
++
++
++ if (OSPCIAddrRangeLen(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) < (IS_MRST(gpDrmDevice)? POULSBO_MAX_OFFSET:PSB_POULSBO_MAX_OFFSET))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region isn't big enough"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE);
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Host Port region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE);
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PCIDeInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV))
++ {
++ OSPCIReleaseDev(psSysSpecData->hSGXPCI);
++ }
++}
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32BaseAddr = 0;
++ IMG_UINT32 ui32IRQ = 0;
++ IMG_UINT32 ui32HostPortAddr = 0;
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ ui32BaseAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ ui32HostPortAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ if (OSPCIIRQ(psSysSpecData->hSGXPCI, &ui32IRQ) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Couldn't get IRQ"));
++ return PVRSRV_ERROR_INVALID_DEVICE;
++ }
++
++ PVR_TRACE(("ui32BaseAddr: %p", ui32BaseAddr));
++ PVR_TRACE(("ui32HostPortAddr: %p", ui32HostPortAddr));
++ PVR_TRACE(("IRQ: %d", ui32IRQ));
++
++
++ gsSGXDeviceMap.ui32Flags = 0x0;
++ gsSGXDeviceMap.ui32IRQ = ui32IRQ;
++
++ if (IS_MRST(gpDrmDevice))
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + SGX_REGS_OFFSET;
++ else
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + PSB_SGX_REGS_OFFSET;
++
++ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.ui32RegsSize = SGX_REG_SIZE;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ gsSGXDeviceMap.ui32Flags = SGX_HOSTPORT_PRESENT;
++ gsSGXDeviceMap.sHPSysPBase.uiAddr = ui32HostPortAddr;
++ gsSGXDeviceMap.sHPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sHPSysPBase);
++
++ if (IS_MRST(gpDrmDevice))
++ gsSGXDeviceMap.ui32HPSize = SYS_SGX_HP_SIZE;
++ else
++ gsSGXDeviceMap.ui32HPSize = PSB_SYS_SGX_HP_SIZE;
++#endif
++
++#if defined(MRST_SLAVEPORT)
++
++ gsSGXDeviceMap.sSPSysPBase.uiAddr = ui32BaseAddr + MRST_SGX_SP_OFFSET;
++ gsSGXDeviceMap.sSPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sSPSysPBase);
++ gsSGXDeviceMap.ui32SPSize = SGX_SP_SIZE;
++#endif
++
++
++
++
++ gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
++ gsSGXDeviceMap.ui32LocalMemSize = 0;
++
++
++ {
++ IMG_SYS_PHYADDR sPoulsboRegsCpuPBase;
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_REGS_OFFSET;
++ gsPoulsboRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_DISPLAY_REGS_OFFSET;
++ gsPoulsboDisplayRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#define VERSION_STR_MAX_LEN_TEMPLATE "SGX revision = 000.000.000"
++static PVRSRV_ERROR SysCreateVersionString(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32MaxStrLen;
++ PVRSRV_ERROR eError;
++ IMG_INT32 i32Count;
++ IMG_CHAR *pszVersionString;
++ IMG_UINT32 ui32SGXRevision = 0;
++ IMG_VOID *pvSGXRegs;
++
++ pvSGXRegs = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (pvSGXRegs != IMG_NULL)
++ {
++ ui32SGXRevision = OSReadHWReg(pvSGXRegs, EUR_CR_CORE_REVISION);
++ OSUnMapPhysToLin(pvSGXRegs,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysCreateVersionString: Couldn't map SGX registers"));
++ }
++
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ (IMG_PVOID *)&pszVersionString,
++ IMG_NULL,
++ "Version String");
++ if(eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ i32Count = OSSNPrintf(pszVersionString, ui32MaxStrLen + 1,
++ "SGX revision = %u.%u.%u",
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
++ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
++ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++ );
++ if(i32Count == -1)
++ {
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ pszVersionString,
++ IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psSysData->pszVersionString = pszVersionString;
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID SysFreeVersionString(SYS_DATA *psSysData)
++{
++ if(psSysData->pszVersionString)
++ {
++ IMG_UINT32 ui32MaxStrLen;
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen+1,
++ psSysData->pszVersionString,
++ IMG_NULL);
++ psSysData->pszVersionString = IMG_NULL;
++ }
++}
++
++extern int drm_psb_ospm;
++
++PVRSRV_ERROR SysInitialise(IMG_VOID)
++{
++ IMG_UINT32 i = 0;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SGX_TIMING_INFORMATION* psTimingInfo;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ gpsSysData = &gsSysData;
++ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
++
++ gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++ gsSysSpecificData.ui32SysSpecificData = 0;
++#ifdef LDM_PCI
++
++ PVR_ASSERT(gpsPVRLDMDev != IMG_NULL);
++ gsSysSpecificData.psPCIDev = gpsPVRLDMDev;
++#endif
++
++ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++ psTimingInfo->bEnableActivePM = (drm_psb_ospm != 0);
++ printk(KERN_ERR "SGX APM is %s\n", (drm_psb_ospm != 0)? "enabled":"disabled");
++#else
++ psTimingInfo->bEnableActivePM = IMG_FALSE;
++#endif
++ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
++ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
++
++ eError = PCIInitDev(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++
++ for(i=0; i<SYS_DEVICE_COUNT; i++)
++ {
++ gpsSysData->sDeviceID[i].uiID = i;
++ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++ }
++
++ gpsSysData->psDeviceNodeList = IMG_NULL;
++ gpsSysData->psQueueList = IMG_NULL;
++
++ eError = SysInitialiseCommon(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ /* register MSVDX, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, MSVDXRegisterDevice,
++ DEVICE_MSVDX_INTERRUPT, &gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register MSVDXdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ if (IS_MRST(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ /* register TOPAZ, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, TOPAZRegisterDevice,
++ DEVICE_TOPAZ_INTERRUPT, &gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register TOPAZdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++
++ psDeviceNode = gpsSysData->psDeviceNodeList;
++
++ while(psDeviceNode)
++ {
++
++ switch(psDeviceNode->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ psDeviceNode->psLocalDevMemArena = IMG_NULL;
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++
++ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
++ {
++ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++#ifdef OEM_CUSTOMISE
++
++#endif
++ }
++
++ break;
++ }
++ case PVRSRV_DEVICE_TYPE_MSVDX:
++ /* nothing need to do here */
++ break;
++ case PVRSRV_DEVICE_TYPE_TOPAZ:
++ break;
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ }
++
++
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PDUMPINIT();
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PDUMP_INIT);
++
++
++ eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_SGX_INITIALISED);
++
++ eError = PVRSRVInitialiseDevice (gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ if (IS_MRST(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ eError = PVRSRVInitialiseDevice (gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++
++ if (!sysirq_init(gpDrmDevice))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysFinalise(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ eError = SysCreateVersionString(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ sysirq_uninit(gpDrmDevice);
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_SGX_INITIALISED))
++ {
++
++ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
++ return eError;
++ }
++ }
++
++ SysFreeVersionString(psSysData);
++
++ PCIDeInitDev(psSysData);
++
++ eError = OSDeInitEnvData(psSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
++ return eError;
++ }
++
++ SysDeinitialiseCommon(gpsSysData);
++
++
++#if !defined(NO_HARDWARE)
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PDUMP_INIT))
++ {
++ PDUMPDEINIT();
++ }
++
++ gpsSysData = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap)
++{
++ switch(eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++
++ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_PHYADDR CpuPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = CpuPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
++{
++ IMG_CPU_PHYADDR cpu_paddr;
++
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++ return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
++{
++ IMG_SYS_PHYADDR sys_paddr;
++
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++ return sys_paddr;
++}
++
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
++{
++ IMG_SYS_PHYADDR SysPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++ return SysPAddr;
++}
++
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ psDeviceNode->ui32SOCInterruptBit = DEVICE_DISP_INTERRUPT;
++}
++
++
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize)
++{
++ if (ulInSize || pvIn);
++
++ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
++ {
++ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*)pvOut;
++
++ psOEMJTable->pfnOEMReadRegistryString = IMG_NULL;
++ psOEMJTable->pfnOEMWriteRegistryString = IMG_NULL;
++
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++PVRSRV_ERROR SysMapInRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch(psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS))
++ {
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in SGX registers\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++ psDevInfo->ui32RegSize = gsSGXDeviceMap.ui32RegsSize;
++ psDevInfo->sRegsPhysBase = gsSGXDeviceMap.sRegsSysPBase;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP))
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sHPCpuPBase,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++ }
++ psDevInfo->ui32HPSize = gsSGXDeviceMap.ui32HPSize;
++ psDevInfo->sHPSysPAddr = gsSGXDeviceMap.sHPSysPBase;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysUnmapRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch (psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ if (psDevInfo->pvRegsBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++#endif
++
++ psDevInfo->pvRegsBaseKM = IMG_NULL;
++ psDevInfo->ui32RegSize = 0;
++ psDevInfo->sRegsPhysBase.uiAddr = 0;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++
++ psDevInfo->pvHostPortBaseKM = IMG_NULL;
++ }
++
++ psDevInfo->ui32HPSize = 0;
++ psDevInfo->sHPSysPAddr.uiAddr = 0;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError= PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((eNewPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (gpsSysData->eCurrentPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ drm_irq_uninstall(gpDrmDevice);
++
++ SysUnmapRegisters();
++
++ //Save some pci state that won't get saved properly by pci_save_state()
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0x5C, &gsSysSpecificData.saveBSM);
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0xFC, &gsSysSpecificData.saveVBT);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, &gsSysSpecificData.msi_addr);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, &gsSysSpecificData.msi_data);
++
++ eError = OSPCISuspendDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSPCISuspendDev failed (%d)", eError));
++ }
++ }
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((gpsSysData->eCurrentPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (eNewPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ eError = OSPCIResumeDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSPCIResumeDev failed (%d)", eError));
++ return eError;
++ }
++
++ //Restore some pci state that will not have gotten restored properly by pci_restore_state()
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0x5c, gsSysSpecificData.saveBSM);
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0xFC, gsSysSpecificData.saveVBT);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, gsSysSpecificData.msi_addr);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, gsSysSpecificData.msi_data);
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to locate devices"));
++ return eError;
++ }
++
++ eError = SysMapInRegisters();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to map in registers"));
++ return eError;
++ }
++
++ drm_irq_install(gpDrmDevice);
++ }
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Remove SGX power"));
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ ospm_power_island_down(OSPM_GRAPHICS_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTDisplayDeviceID)
++ {
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePostPowerState: Restore SGX power"));
++ ospm_power_island_up(OSPM_GRAPHICS_ISLAND);
++ sysirq_preinstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ sysirq_postinstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTDisplayDeviceID)
++ {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ sysirq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++ sysirq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ sysirq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++ sysirq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.h
+new file mode 100644
+index 0000000..0476e2c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.h
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++#include "syscommon.h"
++
++#define VS_PRODUCT_NAME "SGX Moorestown"
++
++#define SYS_NO_POWER_LOCK_TIMEOUT
++
++#define SGX_FEATURE_HOST_PORT
++
++#define SYS_SGX_USSE_COUNT (2)
++
++#define POULSBO_REGS_OFFSET 0x00000
++#define POULSBO_REG_SIZE 0x2100
++
++#define SGX_REGS_OFFSET 0x80000
++#define PSB_SGX_REGS_OFFSET 0x40000
++#define SGX_REG_SIZE 0x4000
++#define MSVDX_REGS_OFFSET 0x50000
++
++#ifdef SUPPORT_MSVDX
++#define POULSBO_MAX_OFFSET (MSVDX_REGS_OFFSET + MSVDX_REG_SIZE)
++#else
++#define POULSBO_MAX_OFFSET (SGX_REGS_OFFSET + SGX_REG_SIZE)
++#define PSB_POULSBO_MAX_OFFSET (PSB_SGX_REGS_OFFSET + SGX_REG_SIZE)
++#endif
++
++#define SYS_SGX_DEV_VENDOR_ID 0x8086
++#define PSB_SYS_SGX_DEV_DEVICE_ID_1 0x8108
++#define PSB_SYS_SGX_DEV_DEVICE_ID_2 0x8109
++
++#define SYS_SGX_DEVICE_IDS \
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0, 0, 0}
++
++
++#define MMADR_INDEX 4
++#define IOPORT_INDEX 5
++#define GMADR_INDEX 6
++#define MMUADR_INDEX 7
++#define FBADR_INDEX 23
++#define FBSIZE_INDEX 24
++
++#define DISPLAY_SURFACE_SIZE (4 * 1024 * 1024)
++
++#define DEVICE_SGX_INTERRUPT (1<<0)
++#define DEVICE_MSVDX_INTERRUPT (1<<1)
++#define DEVICE_DISP_INTERRUPT (1<<2)
++#define DEVICE_TOPAZ_INTERRUPT (1<<3)
++
++#define POULSBO_DISP_MASK (1<<17)
++#define POULSBO_THALIA_MASK (1<<18)
++#define POULSBO_MSVDX_MASK (1<<19)
++#define POULSBO_VSYNC_PIPEA_VBLANK_MASK (1<<7)
++#define POULSBO_VSYNC_PIPEA_EVENT_MASK (1<<6)
++#define POULSBO_VSYNC_PIPEB_VBLANK_MASK (1<<5)
++#define POULSBO_VSYNC_PIPEB_EVENT_MASK (1<<4)
++
++#define POULSBO_DISPLAY_REGS_OFFSET 0x70000
++#define POULSBO_DISPLAY_REG_SIZE 0x2000
++
++#define POULSBO_DISPLAY_A_CONFIG 0x00008
++#define POULSBO_DISPLAY_A_STATUS_SELECT 0x00024
++#define POULSBO_DISPLAY_B_CONFIG 0x01008
++#define POULSBO_DISPLAY_B_STATUS_SELECT 0x01024
++
++#define POULSBO_DISPLAY_PIPE_ENABLE (1<<31)
++#define POULSBO_DISPLAY_VSYNC_STS_EN (1<<25)
++#define POULSBO_DISPLAY_VSYNC_STS (1<<9)
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ #define SYS_SGX_HP_SIZE 0x8000000
++ #define PSB_SYS_SGX_HP_SIZE 0x4000000
++
++ #define SYS_SGX_HOSTPORT_BASE_DEVVADDR 0xD0000000
++ #if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030)
++
++
++
++ #define SYS_SGX_HOSTPORT_BRN23030_OFFSET 0x7C00000
++ #endif
++#endif
++
++
++typedef struct
++{
++ union
++ {
++#if !defined(VISTA)
++ IMG_UINT8 aui8PCISpace[256];
++ IMG_UINT16 aui16PCISpace[128];
++ IMG_UINT32 aui32PCISpace[64];
++#endif
++ struct
++ {
++ IMG_UINT16 ui16VenID;
++ IMG_UINT16 ui16DevID;
++ IMG_UINT16 ui16PCICmd;
++ IMG_UINT16 ui16PCIStatus;
++ }s;
++ }u;
++} PCICONFIG_SPACE, *PPCICONFIG_SPACE;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysinfo.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysinfo.h
+new file mode 100644
+index 0000000..97d02dd
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysinfo.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US (500000)
++#define WAIT_TRY_COUNT (10000)
++
++typedef enum _SYS_DEVICE_TYPE_
++{
++ SYS_DEVICE_SGX = 0,
++
++ SYS_DEVICE_FORCE_I16 = 0x7fff
++
++} SYS_DEVICE_TYPE;
++
++#define SYS_DEVICE_COUNT 4
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.c
+new file mode 100644
+index 0000000..d71196e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.c
+@@ -0,0 +1,565 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ *
++ **************************************************************************/
++
++#include "sysirq.h"
++#include "sysconfig.h"
++#include "psb_drv.h"
++#include "ospm_power.h"
++#include "lnc_topaz.h"
++#include "psb_msvdx.h"
++#include "psb_intel_reg.h"
++
++extern SYS_DATA* gpsSysData;
++extern struct drm_device *gpDrmDevice;
++
++void sysirq_preinstall_islands(struct drm_device *dev, int hw_islands);
++int sysirq_postinstall_islands(struct drm_device *dev, int hw_islands);
++static void sysirq_enable_pipestat(struct drm_psb_private *dev_priv, u32 mask);
++static void sysirq_disable_pipestat(struct drm_psb_private *dev_priv, u32 mask);
++
++bool sysirq_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ OSInstallMISR(gpsSysData);
++
++ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
++ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
++
++ dev_priv->vdc_irq_mask = 0;
++ dev_priv->pipestat[0] = 0;
++ dev_priv->pipestat[1] = 0;
++
++ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
++
++ if (drm_vblank_init(dev, PSB_NUM_PIPE) != 0)
++ return false;
++
++ if (drm_irq_install(dev) != 0)
++ return false;
++
++ dev->vblank_disable_allowed = 1;
++ dev_priv->vblanksEnabledForFlips = false;
++
++ return true;
++}
++
++void sysirq_uninit(struct drm_device *dev)
++{
++ drm_irq_uninstall(dev);
++ drm_vblank_cleanup(dev);
++ OSUninstallMISR(gpsSysData);
++}
++
++void sysirq_preinstall(struct drm_device *dev)
++{
++ sysirq_preinstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++void sysirq_preinstall_islands(struct drm_device *dev, int hw_islands)
++{
++#if defined (SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if ((hw_islands & OSPM_DISPLAY_ISLAND) && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ if (dev->vblank_enabled[0] || dev_priv->vblanksEnabledForFlips)
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++ if (dev_priv->psb_dpst_state)
++ dev_priv->vdc_irq_mask |= _PSB_DPST_PIPEA_FLAG;
++ }
++ if ((hw_islands & OSPM_GRAPHICS_ISLAND) && ospm_power_is_hw_on(OSPM_GRAPHICS_ISLAND))
++ dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG;
++ if ((hw_islands & OSPM_VIDEO_DEC_ISLAND) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
++ if (IS_MRST(dev) && (hw_islands & OSPM_VIDEO_ENC_ISLAND) && !dev_priv->topaz_disabled &&
++ ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG;
++
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++}
++
++
++int sysirq_postinstall(struct drm_device *dev)
++{
++ return sysirq_postinstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++int sysirq_postinstall_islands(struct drm_device *dev, int hw_islands)
++{
++#if defined (SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if ((hw_islands & OSPM_DISPLAY_ISLAND) && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ if (IS_POULSBO(dev))
++ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++
++ if (dev_priv->vdc_irq_mask & _PSB_VSYNC_PIPEA_FLAG) {
++ if (IS_MRST(dev))
++ sysirq_enable_pipestat(dev_priv,
++ PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ else
++ sysirq_enable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++
++ } else {
++ sysirq_disable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ }
++
++ if (dev_priv->vdc_irq_mask & _PSB_DPST_PIPEA_FLAG) {
++ printk(KERN_ALERT "TURNING ON DPST\n");
++ sysirq_turn_on_dpst(dev);
++ } else {
++ printk(KERN_ALERT "TURNING OFF DPST\n");
++ sysirq_turn_off_dpst(dev);
++ }
++ }
++
++ if (IS_MRST(dev) && (hw_islands & OSPM_VIDEO_ENC_ISLAND) && !dev_priv->topaz_disabled &&
++ ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ lnc_topaz_enableirq(dev);
++
++ if ((hw_islands & OSPM_VIDEO_DEC_ISLAND) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ psb_msvdx_enableirq(dev);
++
++ /*This register is safe even if display island is off*/
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++ return 0;
++}
++
++void sysirq_uninstall(struct drm_device *dev)
++{
++ sysirq_uninstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++void sysirq_uninstall_islands(struct drm_device *dev, int hw_islands)
++{
++#if defined (SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if ((hw_islands & OSPM_DISPLAY_ISLAND) && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ if (dev_priv->vdc_irq_mask & _PSB_VSYNC_PIPEA_FLAG)
++ sysirq_disable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ if (dev_priv->vdc_irq_mask & _PSB_DPST_PIPEA_FLAG)
++ sysirq_turn_off_dpst(dev);
++
++ dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
++ _PSB_IRQ_MSVDX_FLAG |
++ _LNC_IRQ_TOPAZ_FLAG;
++ }
++
++ if (hw_islands & OSPM_GRAPHICS_ISLAND)
++ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG;
++
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
++ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
++
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
++ dev_priv->vdc_irq_mask &= ~_LNC_IRQ_TOPAZ_FLAG;
++
++ /*These two registers are safe even if display island is off*/
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ wmb();
++
++ /*This register is safe even if display island is off*/
++ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
++
++ if (IS_MRST(dev) && (hw_islands & OSPM_VIDEO_ENC_ISLAND) && !dev_priv->topaz_disabled &&
++ ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ lnc_topaz_disableirq(dev);
++
++ if ((hw_islands & OSPM_VIDEO_DEC_ISLAND) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ psb_msvdx_disableirq(dev);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++}
++
++irqreturn_t sysirq_handler(DRM_IRQ_ARGS)
++{
++ bool bStatus = false;
++#if defined(SYS_USING_INTERRUPTS)
++ struct drm_device *dev = (struct drm_device *) arg;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ spin_lock(&dev_priv->irqmask_lock);
++
++ /* Now process all of the other interrupts */
++ bStatus = PVRSRVSystemLISR(gpsSysData);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)gpsSysData);
++ }
++
++ spin_unlock(&dev_priv->irqmask_lock);
++
++#endif
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++}
++
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA* psSysData, PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ IMG_UINT32 ui32Devices = 0;
++ IMG_UINT32 ui32Data, ui32DIMMask;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++ ui32Data = PSB_RVDC32(PSB_INT_IDENTITY_R);
++
++ if ((ui32Data & _PSB_IRQ_SGX_FLAG) && ospm_power_is_hw_on(OSPM_GRAPHICS_ISLAND))
++ {
++ ui32Devices |= DEVICE_SGX_INTERRUPT;
++ }
++
++ if ((ui32Data & _PSB_IRQ_MSVDX_FLAG) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {
++ ui32Devices |= DEVICE_MSVDX_INTERRUPT;
++ }
++
++ if ((ui32Data & _LNC_IRQ_TOPAZ_FLAG) && ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
++ ui32Devices |= DEVICE_TOPAZ_INTERRUPT;
++ }
++
++ ui32DIMMask = PSB_RVDC32(PSB_INT_ENABLE_R);
++ ui32DIMMask &= ~(_PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG | _LNC_IRQ_TOPAZ_FLAG);
++
++ if ((ui32Data & ui32DIMMask) && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND))
++ {
++ ui32Devices |= DEVICE_DISP_INTERRUPT;
++ }
++
++ return (ui32Devices);
++}
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++ IMG_UINT32 ui32Data;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++
++ ui32Data = PSB_RVDC32(PSB_INT_IDENTITY_R);
++ ui32Data &= dev_priv->vdc_irq_mask;
++ PSB_WVDC32(ui32Data, PSB_INT_IDENTITY_R);
++ ui32Data = PSB_RVDC32(PSB_INT_IDENTITY_R);
++}
++
++void sysirq_turn_on_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ PSB_WVDC32(BIT31, HISTOGRAM_LOGIC_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ PSB_WVDC32(BIT31, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE | PWM_PHASEIN_INT_ENABLE,
++ PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat | PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ PSB_WVDC32(pipea_stat | PIPE_DPST_EVENT_STATUS, PIPEASTAT);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,HISTOGRAM_INT_CONTROL);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE, PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++}
++
++int sysirq_enable_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ /* enable DPST */
++ dev_priv->vdc_irq_mask |= _PSB_DPST_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ sysirq_turn_on_dpst(dev);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ return 0;
++}
++
++void sysirq_turn_off_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat & ~PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE), PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++}
++
++int sysirq_disable_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ dev_priv->vdc_irq_mask &= ~_PSB_DPST_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat & ~PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE), PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++
++ return 0;
++}
++
++/* Called from drm generic code, passed 'crtc' which
++ * we use as a pipe index
++ */
++int sysirq_enable_vblank(struct drm_device *dev, int pipe)
++{
++#if defined(SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 pipeconf = 0;
++
++ if (pipe != 0)
++ return -EINVAL;
++
++ //Check if already enabled
++ if (dev_priv->vdc_irq_mask & _PSB_VSYNC_PIPEA_FLAG)
++ return 0;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ pipeconf = REG_READ(pipeconf_reg);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ if (!(pipeconf & PIPEACONF_ENABLE))
++ return -EINVAL;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ if (pipe == 0)
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ if (IS_MRST(dev)) {
++ sysirq_enable_pipestat(dev_priv,
++ PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ } else
++ sysirq_enable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++ return 0;
++}
++
++
++/* Called from drm generic code, passed 'crtc' which
++ * we use as a pipe index
++ */
++void sysirq_disable_vblank(struct drm_device *dev, int pipe)
++{
++#if defined(SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ //Don't disable if flips currently require vblanks to be enabled
++ if (dev_priv->vblanksEnabledForFlips)
++ return;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ if (pipe == 0)
++ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ sysirq_disable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++}
++
++
++static void
++sysirq_enable_pipestat(struct drm_psb_private *dev_priv, u32 mask)
++{
++ if ((dev_priv->pipestat[0] & mask) != mask) {
++ dev_priv->pipestat[0] |= mask;
++ /* Enable the interrupt, clear any pending status */
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ u32 writeVal = PSB_RVDC32(PIPEASTAT);
++ writeVal |= (mask | (mask >> 16));
++ PSB_WVDC32(writeVal, PIPEASTAT);
++ (void) PSB_RVDC32(PIPEASTAT);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++}
++
++static void
++sysirq_disable_pipestat(struct drm_psb_private *dev_priv, u32 mask)
++{
++ if ((dev_priv->pipestat[0] & mask) != 0) {
++ dev_priv->pipestat[0] &= ~mask;
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ u32 writeVal = PSB_RVDC32(PIPEASTAT);
++ writeVal &= ~mask;
++ PSB_WVDC32(writeVal, PIPEASTAT);
++ (void) PSB_RVDC32(PIPEASTAT);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++}
++
++
++/* Called from drm generic code, passed a 'crtc', which
++ * we use as a pipe index
++ */
++u32 sysirq_get_vblank_counter(struct drm_device *dev, int pipe)
++{
++ u32 count = 0;
++#if defined(SYS_USING_INTERRUPTS)
++ unsigned long high_frame;
++ unsigned long low_frame;
++ u32 high1, high2, low;
++
++ if (pipe != 0)
++ return 0;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON))
++ return 0;
++
++ high_frame = PIPEAFRAMEHIGH;
++ low_frame = PIPEAFRAMEPIXEL;
++
++ if (!(REG_READ(PIPEACONF) & PIPEACONF_ENABLE)) {
++ DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
++ goto sysirq_get_vblank_counter_exit;
++ }
++
++ /*
++ * High & low register fields aren't synchronized, so make sure
++ * we get a low value that's stable across two reads of the high
++ * register.
++ */
++ do {
++ high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++ PIPE_FRAME_HIGH_SHIFT);
++ low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
++ PIPE_FRAME_LOW_SHIFT);
++ high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++ PIPE_FRAME_HIGH_SHIFT);
++ } while (high1 != high2);
++
++ count = (high1 << 8) | low;
++
++sysirq_get_vblank_counter_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++ return count;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.h
+new file mode 100644
+index 0000000..fef16be
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.h
+@@ -0,0 +1,49 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _SYSIRQ_H_
++#define _SYSIRQ_H_
++
++#include <drm/drmP.h>
++
++bool sysirq_init(struct drm_device *dev);
++void sysirq_uninit(struct drm_device *dev);
++
++void sysirq_preinstall(struct drm_device *dev);
++int sysirq_postinstall(struct drm_device *dev);
++void sysirq_uninstall(struct drm_device *dev);
++irqreturn_t sysirq_handler(DRM_IRQ_ARGS);
++
++void sysirq_preinstall_islands(struct drm_device *dev, int hw_islands);
++int sysirq_postinstall_islands(struct drm_device *dev, int hw_islands);
++void sysirq_uninstall_islands(struct drm_device *dev, int hw_islands);
++
++int sysirq_enable_dpst(struct drm_device *dev);
++int sysirq_disable_dpst(struct drm_device *dev);
++void sysirq_turn_on_dpst(struct drm_device *dev);
++void sysirq_turn_off_dpst(struct drm_device *dev);
++int sysirq_enable_vblank(struct drm_device *dev, int pipe);
++void sysirq_disable_vblank(struct drm_device *dev, int pipe);
++u32 sysirq_get_vblank_counter(struct drm_device *dev, int pipe);
++
++#endif //_SYSIRQ_H_
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/syslocal.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/syslocal.h
+new file mode 100644
+index 0000000..8e97cab
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/syslocal.h
+@@ -0,0 +1,82 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++#define SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV 0x00000001
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE 0x00000002
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE 0x00000004
++#if defined(NO_HARDWARE)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_SGX_REGS 0x00000008
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_MSVDX_REGS 0x00000020
++#endif
++#endif
++#define SYS_SPECIFIC_DATA_SGX_INITIALISED 0x00000040
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_MSVDX_INITIALISED 0x00000080
++#endif
++#define SYS_SPECIFIC_DATA_MISR_INSTALLED 0x00000100
++#define SYS_SPECIFIC_DATA_LISR_INSTALLED 0x00000200
++#define SYS_SPECIFIC_DATA_PDUMP_INIT 0x00000400
++#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00000800
++
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS 0x00001000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP 0x00004000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_MSVDX_REGS 0x00008000
++#define SYS_SPECIFIC_DATA_PM_IRQ_DISABLE 0x00010000
++#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00020000
++
++#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
++
++#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
++
++#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
++
++
++typedef struct _SYS_SPECIFIC_DATA_TAG_
++{
++
++ IMG_UINT32 ui32SysSpecificData;
++#ifdef __linux__
++ PVRSRV_PCI_DEV_HANDLE hSGXPCI;
++#endif
++#ifdef LDM_PCI
++ struct pci_dev *psPCIDev;
++#endif
++ /* MSI reg save */
++ uint32_t msi_addr;
++ uint32_t msi_data;
++
++ uint32_t saveBSM;
++ uint32_t saveVBT;
++} SYS_SPECIFIC_DATA;
++
++
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysutils.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysutils.c
+new file mode 100644
+index 0000000..b89a1da
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysutils.c
+@@ -0,0 +1,30 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "syslocal.h"
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/client/linuxsrv.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/client/linuxsrv.h
+new file mode 100644
+index 0000000..adfcd75
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/client/linuxsrv.h
+@@ -0,0 +1,48 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _LINUXSRV_H__
++#define _LINUXSRV_H__
++
++typedef struct tagIOCTL_PACKAGE
++{
++ IMG_UINT32 ui32Cmd;
++ IMG_UINT32 ui32Size;
++ IMG_VOID *pInBuffer;
++ IMG_UINT32 ui32InBufferSize;
++ IMG_VOID *pOutBuffer;
++ IMG_UINT32 ui32OutBufferSize;
++} IOCTL_PACKAGE;
++
++IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,
++ IMG_UINT32 ui32ControlCode,
++ IMG_VOID *pInBuffer,
++ IMG_UINT32 ui32InBufferSize,
++ IMG_VOID *pOutBuffer,
++ IMG_UINT32 ui32OutBufferSize,
++ IMG_UINT32 *pui32BytesReturned);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c
+new file mode 100644
+index 0000000..b769273
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c
+@@ -0,0 +1,2075 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#ifdef LINUX
++#include <linux/string.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++#include "hostfunc.h"
++
++
++
++
++#define LAST_FRAME_BUF_SIZE 1024
++
++typedef struct _DBG_LASTFRAME_BUFFER_ {
++ PDBG_STREAM psStream;
++ IMG_UINT8 ui8Buffer[LAST_FRAME_BUF_SIZE];
++ IMG_UINT32 ui32BufLen;
++ struct _DBG_LASTFRAME_BUFFER_ *psNext;
++} *PDBG_LASTFRAME_BUFFER;
++
++
++static PDBG_STREAM g_psStreamList = 0;
++static PDBG_LASTFRAME_BUFFER g_psLFBufferList;
++
++static IMG_UINT32 g_ui32LOff = 0;
++static IMG_UINT32 g_ui32Line = 0;
++static IMG_UINT32 g_ui32MonoLines = 25;
++
++static IMG_BOOL g_bHotkeyMiddump = IMG_FALSE;
++static IMG_UINT32 g_ui32HotkeyMiddumpStart = 0xffffffff;
++static IMG_UINT32 g_ui32HotkeyMiddumpEnd = 0xffffffff;
++
++IMG_VOID * g_pvAPIMutex=IMG_NULL;
++
++extern IMG_UINT32 g_ui32HotKeyFrame;
++extern IMG_BOOL g_bHotKeyPressed;
++extern IMG_BOOL g_bHotKeyRegistered;
++
++IMG_BOOL gbDumpThisFrame = IMG_FALSE;
++
++
++IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
++IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
++PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream);
++
++DBGKM_SERVICE_TABLE g_sDBGKMServices =
++{
++ sizeof (DBGKM_SERVICE_TABLE),
++ ExtDBGDrivCreateStream,
++ ExtDBGDrivDestroyStream,
++ ExtDBGDrivFindStream,
++ ExtDBGDrivWriteString,
++ ExtDBGDrivReadString,
++ ExtDBGDrivWrite,
++ ExtDBGDrivRead,
++ ExtDBGDrivSetCaptureMode,
++ ExtDBGDrivSetOutputMode,
++ ExtDBGDrivSetDebugLevel,
++ ExtDBGDrivSetFrame,
++ ExtDBGDrivGetFrame,
++ ExtDBGDrivOverrideMode,
++ ExtDBGDrivDefaultMode,
++ ExtDBGDrivWrite2,
++ ExtDBGDrivWriteStringCM,
++ ExtDBGDrivWriteCM,
++ ExtDBGDrivSetMarker,
++ ExtDBGDrivGetMarker,
++ ExtDBGDrivStartInitPhase,
++ ExtDBGDrivStopInitPhase,
++ ExtDBGDrivIsCaptureFrame,
++ ExtDBGDrivWriteLF,
++ ExtDBGDrivReadLF,
++ ExtDBGDrivGetStreamOffset,
++ ExtDBGDrivSetStreamOffset,
++ ExtDBGDrivIsLastCaptureFrame,
++ ExtDBGDrivWaitForEvent
++};
++
++
++
++
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size)
++{
++ IMG_VOID * pvRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ pvRet=DBGDrivCreateStream(pszName, ui32CapMode, ui32OutMode, ui32Flags, ui32Size);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return pvRet;
++}
++
++void IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivDestroyStream(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
++{
++ IMG_VOID * pvRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ pvRet=DBGDrivFindStream(pszName, bResetStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return pvRet;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteString(psStream, pszString, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivReadString(psStream, pszString, ui32Limit);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWrite(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivRead(psStream, bReadInitBuffer, ui32OutBuffSize, pui8OutBuf);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetCaptureMode(psStream, ui32Mode, ui32Start, ui32End, ui32SampleRate);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetOutputMode(psStream, ui32OutMode);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetDebugLevel(psStream, ui32DebugLevel);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetFrame(psStream, ui32Frame);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivGetFrame(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
++{
++ IMG_BOOL bRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ bRet = DBGDrivIsLastCaptureFrame(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return bRet;
++}
++
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
++{
++ IMG_BOOL bRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ bRet = DBGDrivIsCaptureFrame(psStream, bCheckPreviousFrame);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return bRet;
++}
++
++void IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivOverrideMode(psStream, ui32Mode);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivDefaultMode(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteStringCM(psStream, pszString, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteCM(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetMarker(psStream, ui32Marker);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Marker;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Marker = DBGDrivGetMarker(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Marker;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivWriteLF(psStream, pui8InBuf, ui32InBuffSize, ui32Level, ui32Flags);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivReadLF(psStream, ui32OutBuffSize, pui8OutBuf);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivStartInitPhase(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivStopInitPhase(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivGetStreamOffset(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetStreamOffset(psStream, ui32StreamOffset);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
++{
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ DBGDrivWaitForEvent(eEvent);
++#else
++ PVR_UNREFERENCED_PARAMETER(eEvent);
++#endif
++}
++
++IMG_UINT32 AtoI(IMG_CHAR *szIn)
++{
++ IMG_INT iLen = 0;
++ IMG_UINT32 ui32Value = 0;
++ IMG_UINT32 ui32Digit=1;
++ IMG_UINT32 ui32Base=10;
++ IMG_INT iPos;
++ IMG_CHAR bc;
++
++
++ while (szIn[iLen] > 0)
++ {
++ iLen ++;
++ }
++
++
++ if (iLen == 0)
++ {
++ return (0);
++ }
++
++
++ iPos=0;
++ while (szIn[iPos] == '0')
++ {
++ iPos++;
++ }
++ if (szIn[iPos] == '\0')
++ {
++ return 0;
++ }
++ if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
++ {
++ ui32Base=16;
++ szIn[iPos]='0';
++ }
++
++
++ for (iPos = iLen - 1; iPos >= 0; iPos --)
++ {
++ bc = szIn[iPos];
++
++ if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16)
++ {
++ bc -= 'a' - 0xa;
++ }
++ else
++ if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16)
++ {
++ bc -= 'A' - 0xa;
++ }
++ else
++ if ((bc >= '0') && (bc <= '9'))
++ {
++ bc -= '0';
++ }
++ else
++ return (0);
++
++ ui32Value += (IMG_UINT32)bc * ui32Digit;
++
++ ui32Digit = ui32Digit * ui32Base;
++ }
++ return (ui32Value);
++}
++
++
++IMG_BOOL StreamValid(PDBG_STREAM psStream)
++{
++ PDBG_STREAM psThis;
++
++ psThis = g_psStreamList;
++
++ while (psThis)
++ {
++ if (psStream && (psThis == psStream))
++ {
++ return(IMG_TRUE);
++ }
++ else
++ {
++ psThis = psThis->psNext;
++ }
++ }
++
++ return(IMG_FALSE);
++}
++
++
++void Write(PDBG_STREAM psStream,IMG_UINT8 * pui8Data,IMG_UINT32 ui32InBuffSize)
++{
++
++
++ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
++ {
++ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
++ IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
++
++
++ HostMemCopy((IMG_VOID *)(psStream->ui32Base + psStream->ui32WPtr),
++ (IMG_VOID *) pui8Data,
++ ui32B1);
++
++
++ HostMemCopy((IMG_VOID *)psStream->ui32Base,
++ (IMG_VOID *)((IMG_UINT32) pui8Data + ui32B1),
++ ui32B2);
++
++
++ psStream->ui32WPtr = ui32B2;
++ }
++ else
++ {
++ HostMemCopy((IMG_VOID *)(psStream->ui32Base + psStream->ui32WPtr),
++ (IMG_VOID *) pui8Data,
++ ui32InBuffSize);
++
++ psStream->ui32WPtr += ui32InBuffSize;
++
++ if (psStream->ui32WPtr == psStream->ui32Size)
++ {
++ psStream->ui32WPtr = 0;
++ }
++ }
++ psStream->ui32DataWritten += ui32InBuffSize;
++}
++
++
++void MonoOut(IMG_CHAR * pszString,IMG_BOOL bNewLine)
++{
++ IMG_UINT32 i;
++ IMG_CHAR * pScreen;
++
++ pScreen = (IMG_CHAR *) DBGDRIV_MONOBASE;
++
++ pScreen += g_ui32Line * 160;
++
++
++
++ i=0;
++ do
++ {
++ pScreen[g_ui32LOff + (i*2)] = pszString[i];
++ pScreen[g_ui32LOff + (i*2)+1] = 127;
++ i++;
++ }
++ while ((pszString[i] != 0) && (i < 4096));
++
++ g_ui32LOff += i * 2;
++
++ if (bNewLine)
++ {
++ g_ui32LOff = 0;
++ g_ui32Line++;
++ }
++
++
++
++ if (g_ui32Line == g_ui32MonoLines)
++ {
++ g_ui32Line = g_ui32MonoLines - 1;
++
++ HostMemCopy((IMG_VOID *)DBGDRIV_MONOBASE,(IMG_VOID *)(DBGDRIV_MONOBASE + 160),160 * (g_ui32MonoLines - 1));
++
++ HostMemSet((IMG_VOID *)(DBGDRIV_MONOBASE + (160 * (g_ui32MonoLines - 1))),0,160);
++ }
++}
++
++
++
++void AppendName(IMG_CHAR * pszOut,IMG_CHAR * pszBase,IMG_CHAR * pszName)
++{
++ IMG_UINT32 i;
++ IMG_UINT32 ui32Off;
++
++ i = 0;
++
++ while (pszBase[i] != 0)
++ {
++ pszOut[i] = pszBase[i];
++ i++;
++ }
++
++ ui32Off = i;
++ i = 0;
++
++ while (pszName[i] != 0)
++ {
++ pszOut[ui32Off+i] = pszName[i];
++ i++;
++ }
++
++ pszOut[ui32Off+i] = pszName[i];
++}
++
++
++IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
++ IMG_UINT32 ui32CapMode,
++ IMG_UINT32 ui32OutMode,
++ IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size)
++{
++ PDBG_STREAM psStream;
++ PDBG_STREAM psInitStream;
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ IMG_UINT32 ui32Off;
++ IMG_VOID * pvBase;
++
++
++
++
++ psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
++
++ if (psStream)
++ {
++ return ((IMG_VOID *) psStream);
++ }
++
++
++
++ psStream = HostNonPageablePageAlloc(1);
++ psInitStream = HostNonPageablePageAlloc(1);
++ psLFBuffer = HostNonPageablePageAlloc(1);
++ if (
++ (!psStream) ||
++ (!psInitStream) ||
++ (!psLFBuffer)
++ )
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\n\r"));
++ return((IMG_VOID *) 0);
++ }
++
++
++ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvBase = HostNonPageablePageAlloc(ui32Size);
++ }
++ else
++ {
++ pvBase = HostPageablePageAlloc(ui32Size);
++ }
++
++ if (!pvBase)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r"));
++ HostNonPageablePageFree(psStream);
++ return((IMG_VOID *) 0);
++ }
++
++
++
++ psStream->psNext = 0;
++ psStream->ui32Flags = ui32Flags;
++ psStream->ui32Base = (IMG_UINT32)pvBase;
++ psStream->ui32Size = ui32Size * 4096UL;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = 0;
++ psStream->ui32DataWritten = 0;
++ psStream->ui32CapMode = ui32CapMode;
++ psStream->ui32OutMode = ui32OutMode;
++ psStream->ui32DebugLevel = DEBUG_LEVEL_0;
++ psStream->ui32DefaultMode = ui32CapMode;
++ psStream->ui32Start = 0;
++ psStream->ui32End = 0;
++ psStream->ui32Current = 0;
++ psStream->ui32SampleRate = 1;
++ psStream->ui32Access = 0;
++ psStream->ui32Timeout = 0;
++ psStream->ui32Marker = 0;
++ psStream->bInitPhaseComplete = IMG_FALSE;
++
++
++ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvBase = HostNonPageablePageAlloc(ui32Size);
++ }
++ else
++ {
++ pvBase = HostPageablePageAlloc(ui32Size);
++ }
++
++ if (!pvBase)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r"));
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ HostNonPageablePageFree(psStream);
++ return((IMG_VOID *) 0);
++ }
++
++ psInitStream->psNext = 0;
++ psInitStream->ui32Flags = ui32Flags;
++ psInitStream->ui32Base = (IMG_UINT32)pvBase;
++ psInitStream->ui32Size = ui32Size * 4096UL;
++ psInitStream->ui32RPtr = 0;
++ psInitStream->ui32WPtr = 0;
++ psInitStream->ui32DataWritten = 0;
++ psInitStream->ui32CapMode = ui32CapMode;
++ psInitStream->ui32OutMode = ui32OutMode;
++ psInitStream->ui32DebugLevel = DEBUG_LEVEL_0;
++ psInitStream->ui32DefaultMode = ui32CapMode;
++ psInitStream->ui32Start = 0;
++ psInitStream->ui32End = 0;
++ psInitStream->ui32Current = 0;
++ psInitStream->ui32SampleRate = 1;
++ psInitStream->ui32Access = 0;
++ psInitStream->ui32Timeout = 0;
++ psInitStream->ui32Marker = 0;
++ psInitStream->bInitPhaseComplete = IMG_FALSE;
++
++ psStream->psInitStream = psInitStream;
++
++
++ psLFBuffer->psStream = psStream;
++ psLFBuffer->ui32BufLen = 0UL;
++
++ g_bHotkeyMiddump = IMG_FALSE;
++ g_ui32HotkeyMiddumpStart = 0xffffffffUL;
++ g_ui32HotkeyMiddumpEnd = 0xffffffffUL;
++
++
++
++ ui32Off = 0;
++
++ do
++ {
++ psStream->szName[ui32Off] = pszName[ui32Off];
++
++ ui32Off++;
++ }
++ while ((pszName[ui32Off] != 0) && (ui32Off < (4096UL - sizeof(DBG_STREAM))));
++
++ psStream->szName[ui32Off] = pszName[ui32Off];
++
++
++
++ psStream->psNext = g_psStreamList;
++ g_psStreamList = psStream;
++
++ psLFBuffer->psNext = g_psLFBufferList;
++ g_psLFBufferList = psLFBuffer;
++
++
++ return((IMG_VOID *) psStream);
++}
++
++void IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream)
++{
++ PDBG_STREAM psStreamThis;
++ PDBG_STREAM psStreamPrev;
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ PDBG_LASTFRAME_BUFFER psLFThis;
++ PDBG_LASTFRAME_BUFFER psLFPrev;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++
++
++ psStreamThis = g_psStreamList;
++ psStreamPrev = 0;
++
++ while (psStreamThis)
++ {
++ if (psStreamThis == psStream)
++ {
++ if (psStreamPrev)
++ {
++ psStreamPrev->psNext = psStreamThis->psNext;
++ }
++ else
++ {
++ g_psStreamList = psStreamThis->psNext;
++ }
++
++ psStreamThis = 0;
++ }
++ else
++ {
++ psStreamPrev = psStreamThis;
++ psStreamThis = psStreamThis->psNext;
++ }
++ }
++
++ psLFThis = g_psLFBufferList;
++ psLFPrev = 0;
++
++ while (psLFThis)
++ {
++ if (psLFThis == psLFBuffer)
++ {
++ if (psLFPrev)
++ {
++ psLFPrev->psNext = psLFThis->psNext;
++ }
++ else
++ {
++ g_psLFBufferList = psLFThis->psNext;
++ }
++
++ psLFThis = 0;
++ }
++ else
++ {
++ psLFPrev = psLFThis;
++ psLFThis = psLFThis->psNext;
++ }
++ }
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
++ {
++ DeactivateHotKeys();
++ }
++
++
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ HostNonPageablePageFree((IMG_VOID *)psStream->psInitStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ HostPageablePageFree((IMG_VOID *)psStream->psInitStream->ui32Base);
++ }
++
++ HostNonPageablePageFree(psStream->psInitStream);
++ HostNonPageablePageFree(psStream);
++ HostNonPageablePageFree(psLFBuffer);
++
++ if (g_psStreamList == 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
++ }
++
++ return;
++}
++
++IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
++{
++ PDBG_STREAM psStream;
++ PDBG_STREAM psThis;
++ IMG_UINT32 ui32Off;
++ IMG_BOOL bAreSame;
++
++ psStream = 0;
++
++
++
++ for (psThis = g_psStreamList; psThis != IMG_NULL; psThis = psThis->psNext)
++ {
++ bAreSame = IMG_TRUE;
++ ui32Off = 0;
++
++ if (strlen(psThis->szName) == strlen(pszName))
++ {
++ while ((psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && (ui32Off < 128) && bAreSame)
++ {
++ if (psThis->szName[ui32Off] != pszName[ui32Off])
++ {
++ bAreSame = IMG_FALSE;
++ }
++
++ ui32Off++;
++ }
++ }
++ else
++ {
++ bAreSame = IMG_FALSE;
++ }
++
++ if (bAreSame)
++ {
++ psStream = psThis;
++ break;
++ }
++ }
++
++ if(bResetStream && psStream)
++ {
++ static IMG_CHAR szComment[] = "-- Init phase terminated\r\n";
++ psStream->psInitStream->ui32RPtr = 0;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = 0;
++ psStream->ui32DataWritten = psStream->psInitStream->ui32DataWritten;
++ if (psStream->bInitPhaseComplete == IMG_FALSE)
++ {
++ if (psStream->ui32Flags & DEBUG_FLAGS_TEXTSTREAM)
++ {
++ DBGDrivWrite2(psStream, (IMG_UINT8 *)szComment, sizeof(szComment) - 1, 0x01);
++ }
++ psStream->bInitPhaseComplete = IMG_TRUE;
++ }
++ }
++
++ return((IMG_VOID *) psStream);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0);
++ }
++ }
++ else
++ {
++ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ {
++ return(0);
++ }
++ }
++ }
++
++ return(DBGDrivWriteString(psStream,pszString,ui32Level));
++
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Len;
++ IMG_UINT32 ui32Space;
++ IMG_UINT32 ui32WPtr;
++ IMG_UINT8 * pui8Buffer;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC) == 0)
++ {
++ if (psStream->ui32OutMode & DEBUG_OUTMODE_STANDARDDBG)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"%s: %s\r\n",psStream->szName, pszString));
++ }
++
++
++
++ if (psStream->ui32OutMode & DEBUG_OUTMODE_MONO)
++ {
++ MonoOut(psStream->szName,IMG_FALSE);
++ MonoOut(": ",IMG_FALSE);
++ MonoOut(pszString,IMG_TRUE);
++ }
++ }
++
++
++
++ if (
++ !(
++ ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) != 0) ||
++ ((psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC) != 0)
++ )
++ )
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++ if(ui32Space > 0)
++ {
++ ui32Space--;
++ }
++
++ ui32Len = 0;
++ ui32WPtr = psStream->ui32WPtr;
++ pui8Buffer = (IMG_UINT8 *) psStream->ui32Base;
++
++ while((pszString[ui32Len] != 0) && (ui32Len < ui32Space))
++ {
++ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
++ ui32Len++;
++ ui32WPtr++;
++ if (ui32WPtr == psStream->ui32Size)
++ {
++ ui32WPtr = 0;
++ }
++ }
++
++ if (ui32Len < ui32Space)
++ {
++
++ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
++ ui32Len++;
++ ui32WPtr++;
++ if (ui32WPtr == psStream->ui32Size)
++ {
++ ui32WPtr = 0;
++ }
++
++
++ psStream->ui32WPtr = ui32WPtr;
++ psStream->ui32DataWritten+= ui32Len;
++ } else
++ {
++ ui32Len = 0;
++ }
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32Len)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++
++ return(ui32Len);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
++{
++ IMG_UINT32 ui32OutLen;
++ IMG_UINT32 ui32Len;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT8 *pui8Buff;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++
++
++ pui8Buff = (IMG_UINT8 *) psStream->ui32Base;
++ ui32Offset = psStream->ui32RPtr;
++
++ if (psStream->ui32RPtr == psStream->ui32WPtr)
++ {
++ return(0);
++ }
++
++
++
++ ui32Len = 0;
++ while((pui8Buff[ui32Offset] != 0) && (ui32Offset != psStream->ui32WPtr))
++ {
++ ui32Offset++;
++ ui32Len++;
++
++
++
++ if (ui32Offset == psStream->ui32Size)
++ {
++ ui32Offset = 0;
++ }
++ }
++
++ ui32OutLen = ui32Len + 1;
++
++
++
++ if (ui32Len > ui32Limit)
++ {
++ return(0);
++ }
++
++
++
++ ui32Offset = psStream->ui32RPtr;
++ ui32Len = 0;
++
++ while ((pui8Buff[ui32Offset] != 0) && (ui32Len < ui32Limit))
++ {
++ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
++ ui32Offset++;
++ ui32Len++;
++
++
++
++ if (ui32Offset == psStream->ui32Size)
++ {
++ ui32Offset = 0;
++ }
++ }
++
++ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
++
++ psStream->ui32RPtr = ui32Offset + 1;
++
++ if (psStream->ui32RPtr == psStream->ui32Size)
++ {
++ psStream->ui32RPtr = 0;
++ }
++
++ return(ui32OutLen);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Space;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psMainStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psMainStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psMainStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else if (psMainStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psMainStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ return(0xFFFFFFFFUL);
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
++ {
++ return(0);
++ }
++
++ if (ui32Space < 8)
++ {
++ return(0);
++ }
++
++
++
++ if (ui32Space <= (ui32InBuffSize + 4))
++ {
++ ui32InBuffSize = ui32Space - 8;
++ }
++
++
++
++ Write(psStream,(IMG_UINT8 *) &ui32InBuffSize,4);
++ Write(psStream,pui8InBuf,ui32InBuffSize);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32InBuffSize)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else
++ {
++ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ }
++
++ return(DBGDrivWrite2(psStream,pui8InBuf,ui32InBuffSize,ui32Level));
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Space;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psMainStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
++ {
++ return(0);
++ }
++
++
++
++ if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
++ {
++
++
++
++ if (ui32Space < 32)
++ {
++ return(0);
++ }
++ }
++ else
++ {
++ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
++ {
++ IMG_UINT32 ui32NewBufSize;
++
++
++
++ ui32NewBufSize = 2 * psStream->ui32Size;
++
++ if (ui32InBuffSize > psStream->ui32Size)
++ {
++ ui32NewBufSize += ui32InBuffSize;
++ }
++
++
++
++ if (!ExpandStreamBuffer(psStream,ui32NewBufSize))
++ {
++ if (ui32Space < 32)
++ {
++ return(0);
++ }
++ }
++
++
++
++ ui32Space = SpaceInStream(psStream);
++ }
++ }
++
++
++
++ if (ui32Space <= (ui32InBuffSize + 4))
++ {
++ ui32InBuffSize = ui32Space - 4;
++ }
++
++
++
++ Write(psStream,pui8InBuf,ui32InBuffSize);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32InBuffSize)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Data;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0);
++ }
++
++ if(bReadInitBuffer)
++ {
++ psStream = psMainStream->psInitStream;
++ }
++ else
++ {
++ psStream = psMainStream;
++ }
++
++ if (psStream->ui32RPtr == psStream->ui32WPtr)
++ {
++ return(0);
++ }
++
++
++
++ if (psStream->ui32RPtr <= psStream->ui32WPtr)
++ {
++ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
++ }
++ else
++ {
++ ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
++ }
++
++
++
++ if (ui32Data > ui32OutBuffSize)
++ {
++ ui32Data = ui32OutBuffSize;
++ }
++
++
++
++ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
++ {
++ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
++ IMG_UINT32 ui32B2 = ui32Data - ui32B1;
++
++
++ HostMemCopy((IMG_VOID *) pui8OutBuf,
++ (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr),
++ ui32B1);
++
++
++ HostMemCopy((IMG_VOID *)((IMG_UINT32) pui8OutBuf + ui32B1),
++ (IMG_VOID *)psStream->ui32Base,
++ ui32B2);
++
++
++ psStream->ui32RPtr = ui32B2;
++ }
++ else
++ {
++ HostMemCopy((IMG_VOID *) pui8OutBuf,
++ (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr),
++ ui32Data);
++
++
++ psStream->ui32RPtr += ui32Data;
++
++
++ if (psStream->ui32RPtr == psStream->ui32Size)
++ {
++ psStream->ui32RPtr = 0;
++ }
++ }
++
++ return(ui32Data);
++}
++
++void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = ui32Mode;
++ psStream->ui32DefaultMode = ui32Mode;
++ psStream->ui32Start = ui32Start;
++ psStream->ui32End = ui32End;
++ psStream->ui32SampleRate = ui32SampleRate;
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
++ {
++ ActivateHotKeys(psStream);
++ }
++}
++
++void IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32OutMode = ui32OutMode;
++}
++
++void IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32DebugLevel = ui32DebugLevel;
++}
++
++void IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32Current = ui32Frame;
++
++ if ((ui32Frame >= psStream->ui32Start) &&
++ (ui32Frame <= psStream->ui32End) &&
++ (((ui32Frame - psStream->ui32Start) % psStream->ui32SampleRate) == 0))
++ {
++ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
++ }
++ else
++ {
++ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
++ }
++
++ if (g_bHotkeyMiddump)
++ {
++ if ((ui32Frame >= g_ui32HotkeyMiddumpStart) &&
++ (ui32Frame <= g_ui32HotkeyMiddumpEnd) &&
++ (((ui32Frame - g_ui32HotkeyMiddumpStart) % psStream->ui32SampleRate) == 0))
++ {
++ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
++ }
++ else
++ {
++ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
++ if (psStream->ui32Current > g_ui32HotkeyMiddumpEnd)
++ {
++ g_bHotkeyMiddump = IMG_FALSE;
++ }
++ }
++ }
++
++
++ if (g_bHotKeyRegistered)
++ {
++ g_bHotKeyRegistered = IMG_FALSE;
++
++ PVR_DPF((PVR_DBG_MESSAGE,"Hotkey pressed (%08x)!\n",psStream));
++
++ if (!g_bHotKeyPressed)
++ {
++
++
++ g_ui32HotKeyFrame = psStream->ui32Current + 2;
++
++
++
++ g_bHotKeyPressed = IMG_TRUE;
++ }
++
++
++
++ if (((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
++ ((psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY) != 0))
++ {
++ if (!g_bHotkeyMiddump)
++ {
++
++ g_ui32HotkeyMiddumpStart = g_ui32HotKeyFrame + 1;
++ g_ui32HotkeyMiddumpEnd = 0xffffffff;
++ g_bHotkeyMiddump = IMG_TRUE;
++ PVR_DPF((PVR_DBG_MESSAGE,"Sampling every %d frame(s)\n", psStream->ui32SampleRate));
++ }
++ else
++ {
++
++ g_ui32HotkeyMiddumpEnd = g_ui32HotKeyFrame;
++ PVR_DPF((PVR_DBG_MESSAGE,"Turning off sampling\n"));
++ }
++ }
++
++ }
++
++
++
++ if (psStream->ui32Current > g_ui32HotKeyFrame)
++ {
++ g_bHotKeyPressed = IMG_FALSE;
++ }
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++ return(psStream->ui32Current);
++}
++
++IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32NextFrame;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return IMG_FALSE;
++ }
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ ui32NextFrame = psStream->ui32Current + psStream->ui32SampleRate;
++ if (ui32NextFrame > psStream->ui32End)
++ {
++ return IMG_TRUE;
++ }
++ }
++ return IMG_FALSE;
++}
++
++IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
++{
++ IMG_UINT32 ui32FrameShift = bCheckPreviousFrame ? 1UL : 0UL;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return IMG_FALSE;
++ }
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++
++ if (g_bHotkeyMiddump)
++ {
++ if ((psStream->ui32Current >= (g_ui32HotkeyMiddumpStart - ui32FrameShift)) &&
++ (psStream->ui32Current <= (g_ui32HotkeyMiddumpEnd - ui32FrameShift)) &&
++ ((((psStream->ui32Current + ui32FrameShift) - g_ui32HotkeyMiddumpStart) % psStream->ui32SampleRate) == 0))
++ {
++ return IMG_TRUE;
++ }
++ }
++ else
++ {
++ if ((psStream->ui32Current >= (psStream->ui32Start - ui32FrameShift)) &&
++ (psStream->ui32Current <= (psStream->ui32End - ui32FrameShift)) &&
++ ((((psStream->ui32Current + ui32FrameShift) - psStream->ui32Start) % psStream->ui32SampleRate) == 0))
++ {
++ return IMG_TRUE;
++ }
++ }
++ }
++ else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current == (g_ui32HotKeyFrame-ui32FrameShift)) && (g_bHotKeyPressed))
++ {
++ return IMG_TRUE;
++ }
++ }
++ return IMG_FALSE;
++}
++
++void IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = ui32Mode;
++}
++
++void IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = psStream->ui32DefaultMode;
++}
++
++void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32Marker = ui32Marker;
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return 0;
++ }
++
++ return psStream->ui32Marker;
++}
++
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psMainStream)
++{
++ PDBG_STREAM psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return 0;
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++ return psStream->ui32DataWritten;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psMainStream, IMG_UINT32 ui32StreamOffset)
++{
++ PDBG_STREAM psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return;
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++ psStream->ui32DataWritten = ui32StreamOffset;
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetServiceTable(void)
++{
++ return((IMG_UINT32) &g_sDBGKMServices);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ return(0xFFFFFFFFUL);
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++ if (ui32Flags & WRITELF_FLAGS_RESETBUF)
++ {
++
++
++ ui32InBuffSize = (ui32InBuffSize > LAST_FRAME_BUF_SIZE) ? LAST_FRAME_BUF_SIZE : ui32InBuffSize;
++ HostMemCopy((IMG_VOID *)psLFBuffer->ui8Buffer, (IMG_VOID *)pui8InBuf, ui32InBuffSize);
++ psLFBuffer->ui32BufLen = ui32InBuffSize;
++ }
++ else
++ {
++
++
++ ui32InBuffSize = ((psLFBuffer->ui32BufLen + ui32InBuffSize) > LAST_FRAME_BUF_SIZE) ? (LAST_FRAME_BUF_SIZE - psLFBuffer->ui32BufLen) : ui32InBuffSize;
++ HostMemCopy((IMG_VOID *)(&psLFBuffer->ui8Buffer[psLFBuffer->ui32BufLen]), (IMG_VOID *)pui8InBuf, ui32InBuffSize);
++ psLFBuffer->ui32BufLen += ui32InBuffSize;
++ }
++
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ IMG_UINT32 ui32Data;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++
++
++ ui32Data = (ui32OutBuffSize < psLFBuffer->ui32BufLen) ? ui32OutBuffSize : psLFBuffer->ui32BufLen;
++
++
++
++ HostMemCopy((IMG_VOID *)pui8OutBuf, (IMG_VOID *)psLFBuffer->ui8Buffer, ui32Data);
++
++ return ui32Data;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream)
++{
++ psStream->bInitPhaseComplete = IMG_FALSE;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream)
++{
++ psStream->bInitPhaseComplete = IMG_TRUE;
++}
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
++{
++ HostWaitForEvent(eEvent);
++}
++#endif
++
++IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
++{
++ IMG_VOID * pvNewBuf;
++ IMG_UINT32 ui32NewSizeInPages;
++ IMG_UINT32 ui32NewWOffset;
++ IMG_UINT32 ui32SpaceInOldBuf;
++
++
++
++ if (psStream->ui32Size >= ui32NewSize)
++ {
++ return IMG_FALSE;
++ }
++
++
++
++ ui32SpaceInOldBuf = SpaceInStream(psStream);
++
++
++
++ ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / 4096UL;
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
++ }
++ else
++ {
++ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
++ }
++
++ if (pvNewBuf == IMG_NULL)
++ {
++ return IMG_FALSE;
++ }
++
++
++
++
++ if (psStream->ui32RPtr <= psStream->ui32WPtr)
++ {
++
++
++ HostMemCopy((IMG_VOID *)pvNewBuf, (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr), psStream->ui32WPtr - psStream->ui32RPtr);
++ }
++ else
++ {
++ IMG_UINT32 ui32FirstCopySize;
++
++
++
++ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
++
++ HostMemCopy((IMG_VOID *)pvNewBuf, (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr), ui32FirstCopySize);
++
++
++
++ HostMemCopy((IMG_VOID *)((IMG_UINT32)pvNewBuf + ui32FirstCopySize), (IMG_VOID *)psStream->ui32Base, psStream->ui32WPtr);
++ }
++
++
++
++ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
++
++
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++
++
++
++ psStream->ui32Base = (IMG_UINT32)pvNewBuf;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = ui32NewWOffset;
++ psStream->ui32Size = ui32NewSizeInPages * 4096;
++
++ return IMG_TRUE;
++}
++
++IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Space;
++
++ if (psStream->ui32RPtr > psStream->ui32WPtr)
++ {
++ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
++ }
++ else
++ {
++ ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
++ }
++
++ return ui32Space;
++}
++
++
++void DestroyAllStreams(void)
++{
++ while (g_psStreamList != IMG_NULL)
++ {
++ DBGDrivDestroyStream(g_psStreamList);
++ }
++ return;
++}
++
++PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++
++ psLFBuffer = g_psLFBufferList;
++
++ while (psLFBuffer)
++ {
++ if (psLFBuffer->psStream == psStream)
++ {
++ break;
++ }
++
++ psLFBuffer = psLFBuffer->psNext;
++ }
++
++ return psLFBuffer;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h
+new file mode 100644
+index 0000000..1c9b1c5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h
+@@ -0,0 +1,116 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRIV_
++#define _DBGDRIV_
++
++#define BUFFER_SIZE 64*PAGESIZE
++
++#define DBGDRIV_VERSION 0x100
++#define MAX_PROCESSES 2
++#define BLOCK_USED 0x01
++#define BLOCK_LOCKED 0x02
++#define DBGDRIV_MONOBASE 0x000B0000
++
++
++extern IMG_VOID * g_pvAPIMutex;
++
++IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
++ IMG_UINT32 ui32CapMode,
++ IMG_UINT32 ui32OutMode,
++ IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Pages);
++IMG_VOID IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream);
++IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR *pszName, IMG_BOOL bResetStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream, IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Limit);
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize, IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32Stop, IMG_UINT32 ui32SampleRate);
++IMG_VOID IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream, IMG_UINT32 ui32OutMode);
++IMG_VOID IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream, IMG_UINT32 ui32DebugLevel);
++IMG_VOID IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode);
++IMG_VOID IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream, IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++IMG_VOID IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
++
++IMG_VOID DestroyAllStreams(IMG_VOID);
++
++IMG_UINT32 AtoI(IMG_CHAR *szIn);
++
++IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size);
++IMG_VOID HostMemCopy(IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_UINT32 ui32Size);
++IMG_BOOL StreamValid(PDBG_STREAM psStream);
++IMG_VOID Write(PDBG_STREAM psStream,IMG_UINT8 *pui8Data, IMG_UINT32 ui32InBuffSize);
++IMG_VOID MonoOut(IMG_CHAR *pszString, IMG_BOOL bNewLine);
++
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size);
++IMG_VOID IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream);
++IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR *pszName, IMG_BOOL bResetStream);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Limit);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32SampleRate);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream, IMG_UINT32 ui32OutMode);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream, IMG_UINT32 ui32DebugLevel);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode);
++IMG_VOID IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h
+new file mode 100644
+index 0000000..3a29db6
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h
+@@ -0,0 +1,58 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HOSTFUNC_
++#define _HOSTFUNC_
++
++#define HOST_PAGESIZE (4096)
++#define DBG_MEMORY_INITIALIZER (0xe2)
++
++IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
++
++IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
++IMG_VOID HostPageablePageFree(IMG_VOID * pvBase);
++IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
++IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase);
++
++IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID * *ppvMdl);
++IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess);
++
++IMG_VOID HostCreateRegDeclStreams(IMG_VOID);
++
++IMG_VOID * HostCreateMutex(IMG_VOID);
++IMG_VOID HostAquireMutex(IMG_VOID * pvMutex);
++IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex);
++IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++IMG_INT32 HostCreateEventObjects(IMG_VOID);
++IMG_VOID HostWaitForEvent(DBG_EVENT eEvent);
++IMG_VOID HostSignalEvent(DBG_EVENT eEvent);
++IMG_VOID HostDestroyEventObjects(IMG_VOID);
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c
+new file mode 100644
+index 0000000..1997ad0
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c
+@@ -0,0 +1,135 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#if !defined(LINUX)
++#include <ntddk.h>
++#include <windef.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++#include "hostfunc.h"
++
++
++
++
++
++IMG_UINT32 g_ui32HotKeyFrame = 0xFFFFFFFF;
++IMG_BOOL g_bHotKeyPressed = IMG_FALSE;
++IMG_BOOL g_bHotKeyRegistered = IMG_FALSE;
++
++PRIVATEHOTKEYDATA g_PrivateHotKeyData;
++
++
++IMG_VOID ReadInHotKeys(IMG_VOID)
++{
++ g_PrivateHotKeyData.ui32ScanCode = 0x58;
++ g_PrivateHotKeyData.ui32ShiftState = 0x0;
++
++
++
++#if 0
++ if (_RegOpenKey(HKEY_LOCAL_MACHINE,pszRegPath,&hKey) == ERROR_SUCCESS)
++ {
++
++
++ QueryReg(hKey,"ui32ScanCode",&g_PrivateHotKeyData.ui32ScanCode);
++ QueryReg(hKey,"ui32ShiftState",&g_PrivateHotKeyData.ui32ShiftState);
++ }
++#else
++ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ScanCode" , &g_PrivateHotKeyData.ui32ScanCode);
++ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ShiftState", &g_PrivateHotKeyData.ui32ShiftState);
++#endif
++}
++
++IMG_VOID RegisterKeyPressed(IMG_UINT32 dwui32ScanCode, PHOTKEYINFO pInfo)
++{
++ PDBG_STREAM psStream;
++
++ PVR_UNREFERENCED_PARAMETER(pInfo);
++
++ if (dwui32ScanCode == g_PrivateHotKeyData.ui32ScanCode)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PDUMP Hotkey pressed !\n"));
++
++ psStream = (PDBG_STREAM) g_PrivateHotKeyData.sHotKeyInfo.pvStream;
++
++ if (!g_bHotKeyPressed)
++ {
++
++
++ g_ui32HotKeyFrame = psStream->ui32Current + 2;
++
++
++
++ g_bHotKeyPressed = IMG_TRUE;
++ }
++ }
++}
++
++IMG_VOID ActivateHotKeys(PDBG_STREAM psStream)
++{
++
++
++ ReadInHotKeys();
++
++
++
++ if (!g_PrivateHotKeyData.sHotKeyInfo.hHotKey)
++ {
++ if (g_PrivateHotKeyData.ui32ScanCode != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"Activate HotKey for PDUMP.\n"));
++
++
++
++ g_PrivateHotKeyData.sHotKeyInfo.pvStream = psStream;
++
++ DefineHotKey(g_PrivateHotKeyData.ui32ScanCode, g_PrivateHotKeyData.ui32ShiftState, &g_PrivateHotKeyData.sHotKeyInfo);
++ }
++ else
++ {
++ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
++ }
++ }
++}
++
++IMG_VOID DeactivateHotKeys(IMG_VOID)
++{
++ if (g_PrivateHotKeyData.sHotKeyInfo.hHotKey != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"Deactivate HotKey.\n"));
++
++ RemoveHotKey(g_PrivateHotKeyData.sHotKeyInfo.hHotKey);
++ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
++ }
++}
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h
+new file mode 100644
+index 0000000..d9c9458
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HOTKEY_
++#define _HOTKEY_
++
++
++typedef struct _hotkeyinfo
++{
++ IMG_UINT8 ui8ScanCode;
++ IMG_UINT8 ui8Type;
++ IMG_UINT8 ui8Flag;
++ IMG_UINT8 ui8Filler1;
++ IMG_UINT32 ui32ShiftState;
++ IMG_UINT32 ui32HotKeyProc;
++ IMG_VOID *pvStream;
++ IMG_UINT32 hHotKey;
++} HOTKEYINFO, *PHOTKEYINFO;
++
++typedef struct _privatehotkeydata
++{
++ IMG_UINT32 ui32ScanCode;
++ IMG_UINT32 ui32ShiftState;
++ HOTKEYINFO sHotKeyInfo;
++} PRIVATEHOTKEYDATA, *PPRIVATEHOTKEYDATA;
++
++
++IMG_VOID ReadInHotKeys (IMG_VOID);
++IMG_VOID ActivateHotKeys(PDBG_STREAM psStream);
++IMG_VOID DeactivateHotKeys(IMG_VOID);
++
++IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey);
++IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo);
++IMG_VOID RegisterKeyPressed (IMG_UINT32 ui32ScanCode, PHOTKEYINFO psInfo);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c
+new file mode 100644
+index 0000000..a624635
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c
+@@ -0,0 +1,371 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#ifdef LINUX
++#include <asm/uaccess.h>
++#endif
++
++#include "img_types.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++
++
++IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_CREATESTREAM psIn;
++ IMG_VOID * *ppvOut;
++ #ifdef LINUX
++ static IMG_CHAR name[32];
++ #endif
++
++ psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
++ ppvOut = (IMG_VOID * *) pvOutBuffer;
++
++ #ifdef LINUX
++
++ if(copy_from_user(name, psIn->pszName, 32) != 0)
++ {
++ return IMG_FALSE;
++ }
++
++ *ppvOut = ExtDBGDrivCreateStream(name, psIn->ui32CapMode, psIn->ui32OutMode, 0, psIn->ui32Pages);
++
++ #else
++ *ppvOut = ExtDBGDrivCreateStream(psIn->pszName, psIn->ui32CapMode, psIn->ui32OutMode, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages);
++ #endif
++
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++
++ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
++
++ ExtDBGDrivDestroyStream(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_FINDSTREAM psParams;
++ IMG_UINT32 * pui32Stream;
++
++ psParams = (PDBG_IN_FINDSTREAM)pvInBuffer;
++ pui32Stream = (IMG_UINT32 *)pvOutBuffer;
++
++ *pui32Stream = (IMG_UINT32)ExtDBGDrivFindStream(psParams->pszName, psParams->bResetStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITESTRING psParams;
++ IMG_UINT32 * pui32OutLen;
++
++ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivWriteString((PDBG_STREAM) psParams->pvStream,psParams->pszString,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITESTRING psParams;
++ IMG_UINT32 * pui32OutLen;
++
++ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivWriteStringCM((PDBG_STREAM) psParams->pvStream,psParams->pszString,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32OutLen;
++ PDBG_IN_READSTRING psParams;
++
++ psParams = (PDBG_IN_READSTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivReadString(psParams->pvStream,psParams->pszString,psParams->ui32StringLen);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWrite((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWrite2((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWriteCM((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivRead(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_READ psInParams;
++
++ psInParams = (PDBG_IN_READ) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivRead((PDBG_STREAM) psInParams->pvStream,psInParams->bReadInitBuffer, psInParams->ui32OutBufferSize,psInParams->pui8OutBuffer);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGMODE psParams;
++
++ psParams = (PDBG_IN_SETDEBUGMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetCaptureMode((PDBG_STREAM) psParams->pvStream,
++ psParams->ui32Mode,
++ psParams->ui32Start,
++ psParams->ui32End,
++ psParams->ui32SampleRate);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGOUTMODE psParams;
++
++ psParams = (PDBG_IN_SETDEBUGOUTMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetOutputMode((PDBG_STREAM) psParams->pvStream,psParams->ui32Mode);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGLEVEL psParams;
++
++ psParams = (PDBG_IN_SETDEBUGLEVEL) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetDebugLevel((PDBG_STREAM) psParams->pvStream,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETFRAME psParams;
++
++ psParams = (PDBG_IN_SETFRAME) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetFrame((PDBG_STREAM) psParams->pvStream,psParams->ui32Frame);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++ IMG_UINT32 * pui32Current;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivGetFrame(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_ISCAPTUREFRAME psParams;
++ IMG_UINT32 * pui32Current;
++
++ psParams = (PDBG_IN_ISCAPTUREFRAME) pvInBuffer;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivIsCaptureFrame((PDBG_STREAM) psParams->pvStream, psParams->bCheckPreviousFrame);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_OVERRIDEMODE psParams;
++
++ psParams = (PDBG_IN_OVERRIDEMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
++
++ ExtDBGDrivOverrideMode((PDBG_STREAM) psParams->pvStream,psParams->ui32Mode);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivDefaultMode(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETMARKER psParams;
++
++ psParams = (PDBG_IN_SETMARKER) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetMarker((PDBG_STREAM) psParams->pvStream, psParams->ui32Marker);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++ IMG_UINT32 * pui32Current;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivGetMarker(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32Out;
++
++ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
++ pui32Out = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Out = DBGDrivGetServiceTable();
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITE_LF psInParams;
++ IMG_UINT32 * pui32BytesCopied;
++
++ psInParams = (PDBG_IN_WRITE_LF) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWriteLF(psInParams->pvStream,
++ psInParams->pui8InBuffer,
++ psInParams->ui32BufferSize,
++ psInParams->ui32Level,
++ psInParams->ui32Flags);
++
++ return IMG_TRUE;
++}
++
++IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_READ psInParams;
++
++ psInParams = (PDBG_IN_READ) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivReadLF((PDBG_STREAM) psInParams->pvStream,psInParams->ui32OutBufferSize,psInParams->pui8OutBuffer);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivWaitForEvent(eEvent);
++
++ return(IMG_TRUE);
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h
+new file mode 100644
+index 0000000..061be9a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _IOCTL_
++#define _IOCTL_
++
++
++IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivRead(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID*, IMG_VOID *);
++
++IMG_UINT32 (*g_DBGDrivProc[])(IMG_VOID *, IMG_VOID *) =
++{
++ DBGDIOCDrivCreateStream,
++ DBGDIOCDrivDestroyStream,
++ DBGDIOCDrivGetStream,
++ DBGDIOCDrivWriteString,
++ DBGDIOCDrivReadString,
++ DBGDIOCDrivWrite,
++ DBGDIOCDrivRead,
++ DBGDIOCDrivSetCaptureMode,
++ DBGDIOCDrivSetOutMode,
++ DBGDIOCDrivSetDebugLevel,
++ DBGDIOCDrivSetFrame,
++ DBGDIOCDrivGetFrame,
++ DBGDIOCDrivOverrideMode,
++ DBGDIOCDrivDefaultMode,
++ DBGDIOCDrivGetServiceTable,
++ DBGDIOCDrivWrite2,
++ DBGDIOCDrivWriteStringCM,
++ DBGDIOCDrivWriteCM,
++ DBGDIOCDrivSetMarker,
++ DBGDIOCDrivGetMarker,
++ DBGDIOCDrivIsCaptureFrame,
++ DBGDIOCDrivWriteLF,
++ DBGDIOCDrivReadLF,
++ DBGDIOCDrivWaitForEvent
++};
++
++#define MAX_DBGVXD_W32_API (sizeof(g_DBGDrivProc)/sizeof(IMG_UINT32))
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c
+new file mode 100644
+index 0000000..3ccec84
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c
+@@ -0,0 +1,302 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <asm/page.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++#include <linux/hardirq.h>
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/jiffies.h>
++#include <linux/delay.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "dbgdriv/common/hostfunc.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#define PVR_STRING_TERMINATOR '\0'
++#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') )
++
++void PVRSRVDebugPrintf (
++ IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR* pszFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR* pszFormat,
++ ...
++ )
++{
++ IMG_BOOL bTrace, bDebug;
++#if !defined(__sh__)
++ IMG_CHAR *pszLeafName;
++
++ pszLeafName = (char *)strrchr (pszFileName, '\\');
++
++ if (pszLeafName)
++ {
++ pszFileName = pszLeafName;
++ }
++#endif
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug)
++ {
++ va_list vaArgs;
++ static char szBuffer[256];
++
++ va_start (vaArgs, pszFormat);
++
++
++ if (bDebug)
++ {
++ switch(ui32DebugLevel)
++ {
++ case DBGPRIV_FATAL:
++ {
++ strncpy (szBuffer, "PVR_K:(Fatal): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strncpy (szBuffer, "PVR_K:(Error): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strncpy (szBuffer, "PVR_K:(Warning): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strncpy (szBuffer, "PVR_K:(Message): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strncpy (szBuffer, "PVR_K:(Verbose): ", sizeof(szBuffer));
++ break;
++ }
++ default:
++ {
++ strncpy (szBuffer, "PVR_K:(Unknown message level)", sizeof(szBuffer));
++ break;
++ }
++ }
++ }
++ else
++ {
++ strncpy (szBuffer, "PVR_K: ", sizeof(szBuffer));
++ }
++
++ vsnprintf (&szBuffer[strlen(szBuffer)], sizeof(szBuffer), pszFormat, vaArgs);
++
++
++
++ if (!bTrace)
++ {
++ snprintf (&szBuffer[strlen(szBuffer)], sizeof(szBuffer), " [%d, %s]", (int)ui32Line, pszFileName);
++ }
++
++ printk(KERN_INFO "%s\r\n", szBuffer);
++
++ va_end (vaArgs);
++ }
++}
++#endif
++
++IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++ memset(pvDest, (int) ui8Value, (size_t) ui32Size);
++}
++
++IMG_VOID HostMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++ unsigned char *src,*dst;
++ int i;
++
++ src=(unsigned char *)pvSrc;
++ dst=(unsigned char *)pvDst;
++ for(i=0;i<ui32Size;i++)
++ {
++ dst[i]=src[i];
++ }
++#else
++ memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
++{
++
++ return 0;
++}
++
++IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
++{
++ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
++}
++
++IMG_VOID HostPageablePageFree(IMG_VOID * pvBase)
++{
++ vfree(pvBase);
++}
++
++IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
++{
++ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
++}
++
++IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase)
++{
++ vfree(pvBase);
++}
++
++IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID **ppvMdl)
++{
++
++ return IMG_NULL;
++}
++
++IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess)
++{
++
++}
++
++IMG_VOID HostCreateRegDeclStreams(IMG_VOID)
++{
++
++}
++
++IMG_VOID * HostCreateMutex(IMG_VOID)
++{
++ struct semaphore *psSem;
++
++ psSem = kmalloc(sizeof(*psSem), GFP_KERNEL);
++ if (psSem)
++ {
++ init_MUTEX(psSem);
++ }
++
++ return psSem;
++}
++
++IMG_VOID HostAquireMutex(IMG_VOID * pvMutex)
++{
++ BUG_ON(in_interrupt());
++
++#if defined(PVR_DEBUG_DBGDRV_DETECT_HOST_MUTEX_COLLISIONS)
++ if (down_trylock((struct semaphore *)pvMutex))
++ {
++ printk(KERN_INFO "HostAquireMutex: Waiting for mutex\n");
++ down((struct semaphore *)pvMutex);
++ }
++#else
++ down((struct semaphore *)pvMutex);
++#endif
++}
++
++IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex)
++{
++ up((struct semaphore *)pvMutex);
++}
++
++IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex)
++{
++ if (pvMutex)
++ {
++ kfree(pvMutex);
++ }
++}
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++
++#define EVENT_WAIT_TIMEOUT_MS 500
++#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
++
++static int iStreamData;
++static wait_queue_head_t sStreamDataEvent;
++
++IMG_INT32 HostCreateEventObjects(IMG_VOID)
++{
++ init_waitqueue_head(&sStreamDataEvent);
++
++ return 0;
++}
++
++IMG_VOID HostWaitForEvent(DBG_EVENT eEvent)
++{
++ switch(eEvent)
++ {
++ case DBG_EVENT_STREAM_DATA:
++
++ wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
++ iStreamData = 0;
++ break;
++ default:
++
++ msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
++ break;
++ }
++}
++
++IMG_VOID HostSignalEvent(DBG_EVENT eEvent)
++{
++ switch(eEvent)
++ {
++ case DBG_EVENT_STREAM_DATA:
++ iStreamData = 1;
++ wake_up_interruptible(&sStreamDataEvent);
++ break;
++ default:
++ break;
++ }
++}
++
++IMG_VOID HostDestroyEventObjects(IMG_VOID)
++{
++}
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile
+new file mode 100644
+index 0000000..5fb9b1e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile
+@@ -0,0 +1,35 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
++
++MODULE = dbgdrv
++
++INCLUDES =
++
++SOURCES =
++
++include $(EURASIAROOT)/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c
+new file mode 100644
+index 0000000..b57cc43
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c
+@@ -0,0 +1,298 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/kdev_t.h>
++#include <linux/pci.h>
++#include <linux/list.h>
++#include <linux/init.h>
++#include <linux/vmalloc.h>
++#include <linux/version.h>
++
++#if defined(LDM_PLATFORM) && !defined(SUPPORT_DRI_DRM)
++#include <linux/platform_device.h>
++#endif
++
++#if defined(LDM_PCI) && !defined(SUPPORT_DRI_DRM)
++#include <linux/pci.h>
++#endif
++
++#include <asm/uaccess.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include "drmP.h"
++#include "drm.h"
++#endif
++
++#include "img_types.h"
++#include "client/linuxsrv.h"
++#include "dbgdriv/common/ioctl.h"
++#include "dbgdrvif.h"
++#include "dbgdriv/common/dbgdriv.h"
++#include "dbgdriv/common/hostfunc.h"
++#include "pvr_debug.h"
++#include "pvrmodule.h"
++
++#if defined(SUPPORT_DRI_DRM)
++
++#include "pvr_drm_shared.h"
++#include "pvr_drm.h"
++
++#else
++
++#define DRVNAME "dbgdrv"
++MODULE_SUPPORTED_DEVICE(DRVNAME);
++
++#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
++static struct class *psDbgDrvClass;
++#endif
++
++static int AssignedMajorNumber = 0;
++
++long dbgdrv_ioctl(struct file *, unsigned int, unsigned long);
++
++static int dbgdrv_open(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++ return 0;
++}
++
++static int dbgdrv_release(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++ return 0;
++}
++
++static int dbgdrv_mmap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ return 0;
++}
++
++static struct file_operations dbgdrv_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = dbgdrv_ioctl,
++ .open = dbgdrv_open,
++ .release = dbgdrv_release,
++ .mmap = dbgdrv_mmap,
++};
++
++#endif
++
++void DBGDrvGetServiceTable(void **fn_table)
++{
++ extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
++
++ *fn_table = &g_sDBGKMServices;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++void dbgdrv_cleanup(void)
++#else
++void cleanup_module(void)
++#endif
++{
++#if !defined(SUPPORT_DRI_DRM)
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++ device_destroy(psDbgDrvClass, MKDEV(AssignedMajorNumber, 0));
++ class_destroy(psDbgDrvClass);
++#endif
++ unregister_chrdev(AssignedMajorNumber, DRVNAME);
++#endif
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ HostDestroyEventObjects();
++#endif
++ HostDestroyMutex(g_pvAPIMutex);
++ return;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT dbgdrv_init(void)
++#else
++int init_module(void)
++#endif
++{
++#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
++ struct device *psDev;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ int err = -EBUSY;
++#endif
++
++
++ if ((g_pvAPIMutex=HostCreateMutex()) == IMG_NULL)
++ {
++ return -ENOMEM;
++ }
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++
++ (void) HostCreateEventObjects();
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ AssignedMajorNumber =
++ register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops);
++
++ if (AssignedMajorNumber <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR," unable to get major\n"));
++ goto ErrDestroyEventObjects;
++ }
++
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++
++ psDbgDrvClass = class_create(THIS_MODULE, DRVNAME);
++ if (IS_ERR(psDbgDrvClass))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create class (%ld)",
++ __func__, PTR_ERR(psDbgDrvClass)));
++ goto ErrUnregisterCharDev;
++ }
++
++ psDev = device_create(psDbgDrvClass, NULL, MKDEV(AssignedMajorNumber, 0),
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ NULL,
++#endif
++ DRVNAME);
++ if (IS_ERR(psDev))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create device (%ld)",
++ __func__, PTR_ERR(psDev)));
++ goto ErrDestroyClass;
++ }
++#endif
++#endif
++
++ return 0;
++
++#if !defined(SUPPORT_DRI_DRM)
++ErrDestroyEventObjects:
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ HostDestroyEventObjects();
++#endif
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++ErrUnregisterCharDev:
++ unregister_chrdev(AssignedMajorNumber, DRVNAME);
++ErrDestroyClass:
++ class_destroy(psDbgDrvClass);
++#endif
++ return err;
++#endif
++}
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++#else
++long dbgdrv_ioctl(struct file *file, unsigned int ioctlCmd, unsigned long arg)
++#endif
++{
++ IOCTL_PACKAGE *pIP = (IOCTL_PACKAGE *) arg;
++ char *buffer, *in, *out;
++ unsigned int cmd;
++
++ if((pIP->ui32InBufferSize > (PAGE_SIZE >> 1) ) || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1)))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Sizes of the buffers are too large, cannot do ioctl\n"));
++ return -1;
++ }
++
++ buffer = (char *) HostPageablePageAlloc(1);
++ if(!buffer)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate buffer, cannot do ioctl\n"));
++ return -EFAULT;
++ }
++
++ in = buffer;
++ out = buffer + (PAGE_SIZE >>1);
++
++ if(copy_from_user(in, pIP->pInBuffer, pIP->ui32InBufferSize) != 0)
++ {
++ goto init_failed;
++ }
++
++ cmd = ((pIP->ui32Cmd >> 2) & 0xFFF) - 0x801;
++
++ if(pIP->ui32Cmd == DEBUG_SERVICE_READ)
++ {
++ IMG_CHAR *ui8Tmp;
++ IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
++ DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
++
++ ui8Tmp = vmalloc(psReadInParams->ui32OutBufferSize);
++
++ if(!ui8Tmp)
++ {
++ goto init_failed;
++ }
++
++ *pui32BytesCopied = ExtDBGDrivRead((DBG_STREAM *)psReadInParams->pvStream,
++ psReadInParams->bReadInitBuffer,
++ psReadInParams->ui32OutBufferSize,
++ ui8Tmp);
++
++ if(copy_to_user(psReadInParams->pui8OutBuffer,
++ ui8Tmp,
++ *pui32BytesCopied) != 0)
++ {
++ vfree(ui8Tmp);
++ goto init_failed;
++ }
++
++ vfree(ui8Tmp);
++ }
++ else
++ {
++ (g_DBGDrivProc[cmd])(in, out);
++ }
++
++ if(copy_to_user(pIP->pOutBuffer, out, pIP->ui32OutBufferSize) != 0)
++ {
++ goto init_failed;
++ }
++
++ HostPageablePageFree((IMG_VOID *)buffer);
++ return 0;
++
++init_failed:
++ HostPageablePageFree((IMG_VOID *)buffer);
++ return -EFAULT;
++}
++
++
++void RemoveHotKey(unsigned hHotKey)
++{
++
++}
++
++void DefineHotKey(unsigned ScanCode, unsigned ShiftState, void *pInfo)
++{
++
++}
++
++EXPORT_SYMBOL(DBGDrvGetServiceTable);
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+new file mode 100644
+index 0000000..105197f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+@@ -0,0 +1,40 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++ifeq ($(SUPPORT_DRI_DRM),1)
++DBGDRV_SOURCES_ROOT = $(KBUILDROOT)/../tools/intern/debug/dbgdriv
++else
++DBGDRV_SOURCES_ROOT = ../..
++endif
++
++INCLUDES += -I$(EURASIAROOT)/include4 \
++ -I$(EURASIAROOT)/tools/intern/debug
++
++SOURCES += $(DBGDRV_SOURCES_ROOT)/linux/main.c \
++ $(DBGDRV_SOURCES_ROOT)/common/dbgdriv.c \
++ $(DBGDRV_SOURCES_ROOT)/common/ioctl.c \
++ $(DBGDRV_SOURCES_ROOT)/linux/hostfunc.c \
++ $(DBGDRV_SOURCES_ROOT)/common/hotkey.c
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index ffac157..e8673fd 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -1131,6 +1131,8 @@ extern int drm_init(struct drm_driver *driver);
+ extern void drm_exit(struct drm_driver *driver);
+ extern long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
++extern long drm_unlocked_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg);
+ extern long drm_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+ extern int drm_lastclose(struct drm_device *dev);
+@@ -1558,5 +1560,25 @@ static __inline void drm_free_large(void *ptr)
+ }
+ /*@}*/
+
++enum drm_global_types {
++ DRM_GLOBAL_TTM_MEM = 0,
++ DRM_GLOBAL_TTM_BO,
++ DRM_GLOBAL_TTM_OBJECT,
++ DRM_GLOBAL_NUM
++};
++
++struct drm_global_reference {
++ enum drm_global_types global_type;
++ size_t size;
++ void *object;
++ int (*init) (struct drm_global_reference *);
++ void (*release) (struct drm_global_reference *);
++};
++
++extern void drm_global_init(void);
++extern void drm_global_release(void);
++extern int drm_global_item_ref(struct drm_global_reference *ref);
++extern void drm_global_item_unref(struct drm_global_reference *ref);
++
+ #endif /* __KERNEL__ */
+ #endif
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index c5ba163..e107b17 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -124,6 +124,7 @@ struct drm_mode_crtc {
+ #define DRM_MODE_ENCODER_TMDS 2
+ #define DRM_MODE_ENCODER_LVDS 3
+ #define DRM_MODE_ENCODER_TVDAC 4
++#define DRM_MODE_ENCODER_MIPI 5
+
+ struct drm_mode_get_encoder {
+ __u32 encoder_id;
+@@ -161,6 +162,7 @@ struct drm_mode_get_encoder {
+ #define DRM_MODE_CONNECTOR_HDMIB 12
+ #define DRM_MODE_CONNECTOR_TV 13
+ #define DRM_MODE_CONNECTOR_eDP 14
++#define DRM_MODE_CONNECTOR_MIPI 15
+
+ struct drm_mode_get_connector {
+
+diff --git a/include/linux/backlight.h b/include/linux/backlight.h
+index 8c4f884..05ff433 100644
+--- a/include/linux/backlight.h
++++ b/include/linux/backlight.h
+@@ -92,6 +92,9 @@ struct backlight_device {
+ struct notifier_block fb_notif;
+
+ struct device dev;
++
++ /* Private Backlight Data */
++ void *priv;
+ };
+
+ static inline void backlight_update_status(struct backlight_device *bd)
+--
+1.6.2.5
+