path: root/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch
author    Saul Wold <Saul.Wold@intel.com>    2010-09-24 15:36:24 -0700
committer Saul Wold <Saul.Wold@intel.com>    2010-09-24 16:43:21 -0700
commit    239a368d5715d8f5b7733f9400339c2350c49369 (patch)
tree      2953f12b45e590d9e14b6f72f8e4ee7188e41508 /meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch
parent    c5b9525263dac6844d152e40acf8cee4d27b60bc (diff)
netbook: Correct netbook build by moving netbook configuration from moblin to meta
Signed-off-by: Saul Wold <Saul.Wold@intel.com>
Diffstat (limited to 'meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch')
-rw-r--r--    meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch    11841
1 file changed, 0 insertions, 11841 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch
deleted file mode 100644
index 418a38d2e..000000000
--- a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch
+++ /dev/null
@@ -1,11841 +0,0 @@
-From ccdae3998beb883307f0ea6aedcfa856f8714137 Mon Sep 17 00:00:00 2001
-From: Alan Olsen <alan.r.olsen@intel.com>
-Date: Mon, 26 Apr 2010 10:50:19 -0700
-Subject: [PATCH] Post Beta nand driver.
-
-Contains the fixes for nand corruption with the watchdog driver.
-
-New features compared to the MRST NAND driver Post Alpha2 2.0:
-1. Enable the CDMA feature of the NAND controller
-
-How to use this driver:
-The same as before. That is, to enable this driver,
-you can set
-CONFIG_MRST_NAND=y
-CONFIG_MRST_NAND_HW=y
-
-Signed-off-by: Gao Yunpeng <yunpeng.gao@intel.com>
-
-Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
-
-Patch-mainline: 2.6.34?
----
- drivers/block/Kconfig | 2 +
- drivers/block/Makefile | 2 +
- drivers/block/spectra/Kconfig | 27 +
- drivers/block/spectra/Makefile | 7 +
- drivers/block/spectra/README | 29 +
- drivers/block/spectra/ffsdefs.h | 58 +
- drivers/block/spectra/ffsport.c | 847 ++++++
- drivers/block/spectra/ffsport.h | 84 +
- drivers/block/spectra/flash.c | 4731 +++++++++++++++++++++++++++++++
- drivers/block/spectra/flash.h | 198 ++
- drivers/block/spectra/lld.c | 258 ++
- drivers/block/spectra/lld.h | 111 +
- drivers/block/spectra/lld_cdma.c | 910 ++++++
- drivers/block/spectra/lld_cdma.h | 123 +
- drivers/block/spectra/lld_emu.c | 780 +++++
- drivers/block/spectra/lld_emu.h | 51 +
- drivers/block/spectra/lld_nand.c | 2601 +++++++++++++++++
- drivers/block/spectra/lld_nand.h | 131 +
- drivers/block/spectra/nand_regs.h | 619 ++++
- drivers/block/spectra/spectraswconfig.h | 81 +
- 20 files changed, 11650 insertions(+), 0 deletions(-)
- create mode 100644 drivers/block/spectra/Kconfig
- create mode 100644 drivers/block/spectra/Makefile
- create mode 100644 drivers/block/spectra/README
- create mode 100644 drivers/block/spectra/ffsdefs.h
- create mode 100644 drivers/block/spectra/ffsport.c
- create mode 100644 drivers/block/spectra/ffsport.h
- create mode 100644 drivers/block/spectra/flash.c
- create mode 100644 drivers/block/spectra/flash.h
- create mode 100644 drivers/block/spectra/lld.c
- create mode 100644 drivers/block/spectra/lld.h
- create mode 100644 drivers/block/spectra/lld_cdma.c
- create mode 100644 drivers/block/spectra/lld_cdma.h
- create mode 100644 drivers/block/spectra/lld_emu.c
- create mode 100644 drivers/block/spectra/lld_emu.h
- create mode 100644 drivers/block/spectra/lld_nand.c
- create mode 100644 drivers/block/spectra/lld_nand.h
- create mode 100644 drivers/block/spectra/nand_regs.h
- create mode 100644 drivers/block/spectra/spectraswconfig.h
-
-diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
-index 77bfce5..d62b95d 100644
---- a/drivers/block/Kconfig
-+++ b/drivers/block/Kconfig
-@@ -488,4 +488,6 @@ config BLK_DEV_HD
-
- If unsure, say N.
-
-+source "drivers/block/spectra/Kconfig"
-+
- endif # BLK_DEV
-diff --git a/drivers/block/Makefile b/drivers/block/Makefile
-index aff5ac9..568ba65 100644
---- a/drivers/block/Makefile
-+++ b/drivers/block/Makefile
-@@ -38,4 +38,6 @@ obj-$(CONFIG_BLK_DEV_HD) += hd.o
- obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
- obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
-
-+obj-$(CONFIG_MRST_NAND) += spectra/
-+
- swim_mod-objs := swim.o swim_asm.o
-diff --git a/drivers/block/spectra/Kconfig b/drivers/block/spectra/Kconfig
-new file mode 100644
-index 0000000..fbece10
---- /dev/null
-+++ b/drivers/block/spectra/Kconfig
-@@ -0,0 +1,27 @@
-+
-+menuconfig MRST_NAND
-+ tristate "Moorestown NAND Flash controller"
-+ depends on BLOCK
-+ default n
-+ ---help---
-+ Enable the driver for the NAND Flash controller in the Intel Moorestown
-+ Platform.
-+
-+choice
-+ prompt "Compile for"
-+ depends on MRST_NAND
-+ default MRST_NAND_HW
-+
-+config MRST_NAND_HW
-+ bool "Actual hardware mode"
-+ help
-+ Driver communicates with the actual hardware's register interface
-+ in DMA mode.
-+
-+config MRST_NAND_EMU
-+ bool "RAM emulator testing"
-+ help
-+ Driver emulates flash on a RAM buffer and/or disk file. Useful for testing the behavior of the FTL layer.
-+
-+endchoice
-+
-diff --git a/drivers/block/spectra/Makefile b/drivers/block/spectra/Makefile
-new file mode 100644
-index 0000000..261891c
---- /dev/null
-+++ b/drivers/block/spectra/Makefile
-@@ -0,0 +1,7 @@
-+#
-+# Makefile of Intel Moorestown NAND controller driver
-+#
-+
-+obj-$(CONFIG_MRST_NAND) += spectra.o
-+spectra-objs := ffsport.o flash.o lld.o lld_emu.o lld_nand.o lld_cdma.o
-+
-diff --git a/drivers/block/spectra/README b/drivers/block/spectra/README
-new file mode 100644
-index 0000000..ecba559
---- /dev/null
-+++ b/drivers/block/spectra/README
-@@ -0,0 +1,29 @@
-+This is a driver for the NAND controller of the Intel Moorestown platform.
-+
-+This driver is a standalone Linux block device driver; it acts as if it were a normal hard disk.
-+It includes three layers:
-+ block layer interface - file ffsport.c
-+ Flash Translation Layer (FTL) - file flash.c (implements the NAND Flash Translation Layer, including address mapping, garbage collection, wear-leveling and so on)
-+ Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (which implements the actual controller hardware register access)
-+
-+This driver can be built as a module or built-in.
-+
-+Dependency:
-+This driver depends on the IA Firmware of the Intel Moorestown platform.
-+It needs the IA Firmware to create the block table for the first time.
-+To validate this driver code without the IA Firmware, you can change the
-+macro AUTO_FORMAT_FLASH from 0 to 1 in file spectraswconfig.h. The
-+driver will then erase the whole NAND flash and create a new block table.
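
As a rough sketch of what that change looks like (illustrative only; the real spectraswconfig.h is not reproduced in this hunk, so the surrounding context is assumed):

/* spectraswconfig.h -- hypothetical excerpt for illustration only */
#define AUTO_FORMAT_FLASH 1 /* 0 (default): rely on the IA Firmware for the
                               block table; 1: erase the whole NAND and build
                               a fresh block table at driver init */
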
-+
-+TODO:
-+ - Enable Command DMA feature support
-+ - lower the memory footprint
-+ - Remove most of the unnecessary global variables
-+ - Change all the uppercase variable / function names to lowercase
-+ - Fix some other misc bugs
-+
-+Please send patches to:
-+ Greg Kroah-Hartman <gregkh@suse.de>
-+
-+And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
-+
-diff --git a/drivers/block/spectra/ffsdefs.h b/drivers/block/spectra/ffsdefs.h
-new file mode 100644
-index 0000000..a9e9cd2
---- /dev/null
-+++ b/drivers/block/spectra/ffsdefs.h
-@@ -0,0 +1,58 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#ifndef _FFSDEFS_
-+#define _FFSDEFS_
-+
-+#define CLEAR 0 /*use this to clear a field instead of "fail"*/
-+#define SET 1 /*use this to set a field instead of "pass"*/
-+#define FAIL 1 /*failed flag*/
-+#define PASS 0 /*success flag*/
-+#define ERR -1 /*error flag*/
-+
-+#define ERASE_CMD 10
-+#define WRITE_MAIN_CMD 11
-+#define READ_MAIN_CMD 12
-+#define WRITE_SPARE_CMD 13
-+#define READ_SPARE_CMD 14
-+#define WRITE_MAIN_SPARE_CMD 15
-+#define READ_MAIN_SPARE_CMD 16
-+#define MEMCOPY_CMD 17
-+#define DUMMY_CMD 99
-+
-+#define EVENT_PASS 0x00
-+#define EVENT_CORRECTABLE_DATA_ERROR_FIXED 0x01
-+#define EVENT_UNCORRECTABLE_DATA_ERROR 0x02
-+#define EVENT_TIME_OUT 0x03
-+#define EVENT_PROGRAM_FAILURE 0x04
-+#define EVENT_ERASE_FAILURE 0x05
-+#define EVENT_MEMCOPY_FAILURE 0x06
-+#define EVENT_FAIL 0x07
-+
-+#define EVENT_NONE 0x22
-+#define EVENT_DMA_CMD_COMP 0x77
-+#define EVENT_ECC_TRANSACTION_DONE 0x88
-+#define EVENT_DMA_CMD_FAIL 0x99
-+
-+#define CMD_PASS 0
-+#define CMD_FAIL 1
-+#define CMD_ABORT 2
-+#define CMD_NOT_DONE 3
-+
-+#endif /* _FFSDEFS_ */
-diff --git a/drivers/block/spectra/ffsport.c b/drivers/block/spectra/ffsport.c
-new file mode 100644
-index 0000000..0b3d49d
---- /dev/null
-+++ b/drivers/block/spectra/ffsport.c
-@@ -0,0 +1,847 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#include "ffsport.h"
-+#include "flash.h"
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/blkdev.h>
-+#include <linux/wait.h>
-+#include <linux/mutex.h>
-+#include <linux/kthread.h>
-+#include <linux/log2.h>
-+#include <linux/init.h>
-+
-+/**** Helper functions used for Div, Remainder operation on u64 ****/
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_Calc_Used_Bits
-+* Inputs: Power of 2 number
-+* Outputs: Number of Used Bits
-+* 0, if the argument is 0
-+* Description: Calculate the number of bits used by a given power of 2 number
-+* Number can be up to 32 bits
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_Calc_Used_Bits(u32 n)
-+{
-+ int tot_bits = 0;
-+
-+ if (n >= 1 << 16) {
-+ n >>= 16;
-+ tot_bits += 16;
-+ }
-+
-+ if (n >= 1 << 8) {
-+ n >>= 8;
-+ tot_bits += 8;
-+ }
-+
-+ if (n >= 1 << 4) {
-+ n >>= 4;
-+ tot_bits += 4;
-+ }
-+
-+ if (n >= 1 << 2) {
-+ n >>= 2;
-+ tot_bits += 2;
-+ }
-+
-+ if (n >= 1 << 1)
-+ tot_bits += 1;
-+
-+ return ((n == 0) ? (0) : tot_bits);
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_u64_Div
-+* Inputs: Number of u64
-+* A power of 2 number as the divisor
-+* Outputs: Quotient of the division operation
-+* Description: It divides the address by the divisor using a bit shift operation
-+* (essentially without explicitly using "/").
-+* Divisor is a power of 2 number and the dividend is a u64
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u64 GLOB_u64_Div(u64 addr, u32 divisor)
-+{
-+ return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_u64_Remainder
-+* Inputs: Number of u64
-+* Divisor Type (1 -PageAddress, 2- BlockAddress)
-+* Outputs: Remainder of the Division operation
-+* Description: It calculates the remainder of a number (of u64) by
-+* divisor (a power of 2 number) using bit shifting and multiply
-+* operations (essentially without explicitly using "/").
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
-+{
-+ u64 result = 0;
-+
-+ if (divisor_type == 1) { /* Remainder -- Page */
-+ result = (addr >> DeviceInfo.nBitsInPageDataSize);
-+ result = result * DeviceInfo.wPageDataSize;
-+ } else if (divisor_type == 2) { /* Remainder -- Block */
-+ result = (addr >> DeviceInfo.nBitsInBlockDataSize);
-+ result = result * DeviceInfo.wBlockDataSize;
-+ }
-+
-+ result = addr - result;
-+
-+ return result;
-+}
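
To make the shift arithmetic above concrete, here is a small standalone sketch (userspace, illustration only; the 2048-byte page size is an assumed example value, and calc_used_bits() simply reproduces the result of GLOB_Calc_Used_Bits() for powers of two):

#include <stdio.h>
#include <stdint.h>

/* Returns floor(log2(n)); matches GLOB_Calc_Used_Bits() for powers of two. */
static int calc_used_bits(uint32_t n)
{
	int bits = 0;

	while (n > 1) {
		n >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	uint64_t addr = 0x12345;       /* arbitrary example flash address */
	uint32_t page_size = 2048;     /* assumed wPageDataSize */
	int bits = calc_used_bits(page_size);    /* 11 */
	uint64_t page = addr >> bits;            /* what GLOB_u64_Div() returns: 0x24 */
	uint64_t rem = addr - (page << bits);    /* page-type remainder: 0x345 */

	printf("bits=%d page=0x%llx remainder=0x%llx\n",
	       bits, (unsigned long long)page, (unsigned long long)rem);
	return 0;
}
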
-+
-+#define NUM_DEVICES 1
-+#define PARTITIONS 8
-+
-+#define GLOB_SBD_NAME "nd"
-+#define GLOB_SBD_IRQ_NUM (29)
-+#define GLOB_VERSION "driver version 20091110"
-+
-+#define GLOB_SBD_IOCTL_GC (0x7701)
-+#define GLOB_SBD_IOCTL_WL (0x7702)
-+#define GLOB_SBD_IOCTL_FORMAT (0x7703)
-+#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
-+#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
-+#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
-+#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
-+#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
-+#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
-+#define GLOB_SBD_IOCTL_READ_DATA (0x770A)
-+
-+static u32 reserved_mb_for_os_image = 0;
-+
-+int nand_debug_level;
-+module_param(nand_debug_level, int, 0644);
-+MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
-+
-+MODULE_LICENSE("GPL");
-+
-+struct spectra_nand_dev {
-+ struct pci_dev *dev;
-+ u64 size;
-+ u16 users;
-+ spinlock_t qlock;
-+ void __iomem *ioaddr; /* Mapped address */
-+ struct request_queue *queue;
-+ struct task_struct *thread;
-+ struct gendisk *gd;
-+ u8 *tmp_buf;
-+};
-+
-+
-+static int GLOB_SBD_majornum;
-+
-+static char *GLOB_version = GLOB_VERSION;
-+
-+static struct spectra_nand_dev nand_device[NUM_DEVICES];
-+
-+static struct mutex spectra_lock;
-+
-+static int res_blks_os = 1;
-+
-+struct spectra_indentfy_dev_tag IdentifyDeviceData;
-+
-+static int force_flush_cache(void)
-+{
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (ERR == GLOB_FTL_Flush_Cache()) {
-+ printk(KERN_ERR "Fail to Flush FTL Cache!\n");
-+ return -EFAULT;
-+ }
-+#if CMD_DMA
-+ if (glob_ftl_execute_cmds())
-+ return -EIO;
-+ else
-+ return 0;
-+#endif
-+ return 0;
-+}
-+
-+struct ioctl_rw_page_info {
-+ u8 *data;
-+ unsigned int page;
-+};
-+
-+static int ioctl_read_page_data(unsigned long arg)
-+{
-+ u8 *buf;
-+ struct ioctl_rw_page_info info;
-+ int result = PASS;
-+
-+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-+ return -EFAULT;
-+
-+ buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
-+ if (!buf) {
-+ printk(KERN_ERR "ioctl_read_page_data: "
-+ "failed to allocate memory\n");
-+ return -ENOMEM;
-+ }
-+
-+ mutex_lock(&spectra_lock);
-+ result = GLOB_FTL_Page_Read(buf,
-+ (u64)info.page * IdentifyDeviceData.PageDataSize);
-+ mutex_unlock(&spectra_lock);
-+
-+ if (copy_to_user((void __user *)info.data, buf,
-+ IdentifyDeviceData.PageDataSize)) {
-+ printk(KERN_ERR "ioctl_read_page_data: "
-+ "failed to copy user data\n");
-+ kfree(buf);
-+ return -EFAULT;
-+ }
-+
-+ kfree(buf);
-+ return result;
-+}
-+
-+static int ioctl_write_page_data(unsigned long arg)
-+{
-+ u8 *buf;
-+ struct ioctl_rw_page_info info;
-+ int result = PASS;
-+
-+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-+ return -EFAULT;
-+
-+ buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
-+ if (!buf) {
-+ printk(KERN_ERR "ioctl_write_page_data: "
-+ "failed to allocate memory\n");
-+ return -ENOMEM;
-+ }
-+
-+ if (copy_from_user(buf, (void __user *)info.data,
-+ IdentifyDeviceData.PageDataSize)) {
-+ printk(KERN_ERR "ioctl_write_page_data: "
-+ "failed to copy user data\n");
-+ kfree(buf);
-+ return -EFAULT;
-+ }
-+
-+ mutex_lock(&spectra_lock);
-+ result = GLOB_FTL_Page_Write(buf,
-+ (u64)info.page * IdentifyDeviceData.PageDataSize);
-+ mutex_unlock(&spectra_lock);
-+
-+ kfree(buf);
-+ return result;
-+}
-+
-+/* Return how many blocks should be reserved for bad block replacement */
-+static int get_res_blk_num_bad_blk(void)
-+{
-+ return IdentifyDeviceData.wDataBlockNum / 10;
-+}
-+
-+/* Return how many blocks should be reserved for OS image */
-+static int get_res_blk_num_os(void)
-+{
-+ u32 res_blks, blk_size;
-+
-+ blk_size = IdentifyDeviceData.PageDataSize *
-+ IdentifyDeviceData.PagesPerBlock;
-+
-+ res_blks = (reserved_mb_for_os_image * 1024 * 1024) / blk_size;
-+
-+ if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
-+ res_blks = 1; /* Reserved 1 block for block table */
-+
-+ return res_blks;
-+}
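
For a sense of the numbers, a standalone sketch of this reservation math (illustration only; the 2 KB page / 64 pages-per-block geometry and the res_nand=16 boot parameter are assumed example values):

#include <stdio.h>

int main(void)
{
	unsigned int page_data_size = 2048;   /* assumed PageDataSize */
	unsigned int pages_per_block = 64;    /* assumed PagesPerBlock */
	unsigned int reserved_mb = 16;        /* as if booted with res_nand=16 */

	unsigned int blk_size = page_data_size * pages_per_block;        /* 131072 bytes */
	unsigned int res_blks = (reserved_mb * 1024 * 1024) / blk_size;  /* 128 blocks */

	printf("block size = %u bytes, blocks reserved for OS image = %u\n",
	       blk_size, res_blks);
	return 0;
}
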
-+
-+static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
-+{
-+ rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
-+ /* rq->timeout = 5 * HZ; */
-+ rq->cmd[0] = REQ_LB_OP_FLUSH;
-+}
-+
-+/* Transfer a full request. */
-+static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
-+{
-+ u64 start_addr, addr;
-+ u32 logical_start_sect, hd_start_sect;
-+ u32 nsect, hd_sects;
-+ u32 rsect, tsect = 0;
-+ char *buf;
-+ u32 ratio = IdentifyDeviceData.PageDataSize >> 9;
-+
-+ start_addr = (u64)(blk_rq_pos(req)) << 9;
-+ /* Add a big enough offset to prevent the OS Image from
-+ * being accessed or damaged by file system */
-+ start_addr += IdentifyDeviceData.PageDataSize *
-+ IdentifyDeviceData.PagesPerBlock *
-+ res_blks_os;
-+
-+ if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-+ req->cmd[0] == REQ_LB_OP_FLUSH) {
-+ if (force_flush_cache()) /* Fail to flush cache */
-+ return -EIO;
-+ else
-+ return 0;
-+ }
-+
-+ if (!blk_fs_request(req))
-+ return -EIO;
-+
-+ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
-+ printk(KERN_ERR "Spectra error: request over the NAND "
-+ "capacity!sector %d, current_nr_sectors %d, "
-+ "while capacity is %d\n",
-+ (int)blk_rq_pos(req),
-+ blk_rq_cur_sectors(req),
-+ (int)get_capacity(tr->gd));
-+ return -EIO;
-+ }
-+
-+ logical_start_sect = start_addr >> 9;
-+ hd_start_sect = logical_start_sect / ratio;
-+ rsect = logical_start_sect - hd_start_sect * ratio;
-+
-+ addr = (u64)hd_start_sect * ratio * 512;
-+ buf = req->buffer;
-+ nsect = blk_rq_cur_sectors(req);
-+
-+ if (rsect)
-+ tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
-+
-+ switch (rq_data_dir(req)) {
-+ case READ:
-+ /* Read the first NAND page */
-+ if (rsect) {
-+ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
-+ printk(KERN_ERR "Error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+ return -EIO;
-+ }
-+ memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
-+ addr += IdentifyDeviceData.PageDataSize;
-+ buf += tsect << 9;
-+ nsect -= tsect;
-+ }
-+
-+ /* Read the other NAND pages */
-+ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
-+ if (GLOB_FTL_Page_Read(buf, addr)) {
-+ printk(KERN_ERR "Error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+ return -EIO;
-+ }
-+ addr += IdentifyDeviceData.PageDataSize;
-+ buf += IdentifyDeviceData.PageDataSize;
-+ }
-+
-+ /* Read the last NAND pages */
-+ if (nsect % ratio) {
-+ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
-+ printk(KERN_ERR "Error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+ return -EIO;
-+ }
-+ memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
-+ }
-+#if CMD_DMA
-+ if (glob_ftl_execute_cmds())
-+ return -EIO;
-+ else
-+ return 0;
-+#endif
-+ return 0;
-+
-+ case WRITE:
-+ /* Write the first NAND page */
-+ if (rsect) {
-+ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
-+ printk(KERN_ERR "Error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+ return -EIO;
-+ }
-+ memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
-+ if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
-+ printk(KERN_ERR "Error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+ return -EIO;
-+ }
-+ addr += IdentifyDeviceData.PageDataSize;
-+ buf += tsect << 9;
-+ nsect -= tsect;
-+ }
-+
-+ /* Write the other NAND pages */
-+ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
-+ if (GLOB_FTL_Page_Write(buf, addr)) {
-+ printk(KERN_ERR "Error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+ return -EIO;
-+ }
-+ addr += IdentifyDeviceData.PageDataSize;
-+ buf += IdentifyDeviceData.PageDataSize;
-+ }
-+
-+ /* Write the last NAND pages */
-+ if (nsect % ratio) {
-+ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
-+ printk(KERN_ERR "Error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+ return -EIO;
-+ }
-+ memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
-+ if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
-+ printk(KERN_ERR "Error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+ return -EIO;
-+ }
-+ }
-+#if CMD_DMA
-+ if (glob_ftl_execute_cmds())
-+ return -EIO;
-+ else
-+ return 0;
-+#endif
-+ return 0;
-+
-+ default:
-+ printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
-+ return -EIO;
-+ }
-+}
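
The head / middle / tail split above is easier to follow with concrete numbers. A standalone sketch (illustration only; a 2048-byte NAND page, i.e. ratio == 4 sectors per page, and a 10-sector request starting at sector 5 are assumed):

#include <stdio.h>

int main(void)
{
	unsigned int ratio = 2048 >> 9;   /* 4 x 512-byte sectors per NAND page */
	unsigned int start_sect = 5, nsect = 10;

	unsigned int hd_start_sect = start_sect / ratio;          /* first NAND page: 1 */
	unsigned int rsect = start_sect - hd_start_sect * ratio;  /* offset in that page: 1 */
	unsigned int tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect; /* head: 3 */

	nsect -= tsect;
	printf("head sectors: %u (partial first page %u)\n", tsect, hd_start_sect);
	printf("full pages:   %u\n", nsect / ratio);             /* 1 */
	printf("tail sectors: %u (partial last page)\n", nsect % ratio); /* 3 */
	return 0;
}
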
-+
-+/* This function is copied from drivers/mtd/mtd_blkdevs.c */
-+static int spectra_trans_thread(void *arg)
-+{
-+ struct spectra_nand_dev *tr = arg;
-+ struct request_queue *rq = tr->queue;
-+ struct request *req = NULL;
-+
-+ /* we might get involved when memory gets low, so use PF_MEMALLOC */
-+ current->flags |= PF_MEMALLOC;
-+
-+ spin_lock_irq(rq->queue_lock);
-+ while (!kthread_should_stop()) {
-+ int res;
-+
-+ if (!req) {
-+ req = blk_fetch_request(rq);
-+ if (!req) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ spin_unlock_irq(rq->queue_lock);
-+ schedule();
-+ spin_lock_irq(rq->queue_lock);
-+ continue;
-+ }
-+ }
-+
-+ spin_unlock_irq(rq->queue_lock);
-+
-+ mutex_lock(&spectra_lock);
-+ res = do_transfer(tr, req);
-+ mutex_unlock(&spectra_lock);
-+
-+ spin_lock_irq(rq->queue_lock);
-+
-+ if (!__blk_end_request_cur(req, res))
-+ req = NULL;
-+ }
-+
-+ if (req)
-+ __blk_end_request_all(req, -EIO);
-+
-+ spin_unlock_irq(rq->queue_lock);
-+
-+ return 0;
-+}
-+
-+
-+/* Request function that "handles clustering". */
-+static void GLOB_SBD_request(struct request_queue *rq)
-+{
-+ struct spectra_nand_dev *pdev = rq->queuedata;
-+ wake_up_process(pdev->thread);
-+}
-+
-+static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
-+
-+{
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+ return 0;
-+}
-+
-+static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
-+{
-+ int ret;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ mutex_lock(&spectra_lock);
-+ ret = force_flush_cache();
-+ mutex_unlock(&spectra_lock);
-+
-+ return 0;
-+}
-+
-+static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-+{
-+ geo->heads = 4;
-+ geo->sectors = 16;
-+ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "heads: %d, sectors: %d, cylinders: %d\n",
-+ geo->heads, geo->sectors, geo->cylinders);
-+
-+ return 0;
-+}
-+
-+int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
-+ unsigned int cmd, unsigned long arg)
-+{
-+ int ret;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ switch (cmd) {
-+ case GLOB_SBD_IOCTL_GC:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Spectra IOCTL: Garbage Collection "
-+ "being performed\n");
-+ if (PASS != GLOB_FTL_Garbage_Collection())
-+ return -EFAULT;
-+ return 0;
-+
-+ case GLOB_SBD_IOCTL_WL:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Spectra IOCTL: Static Wear Leveling "
-+ "being performed\n");
-+ if (PASS != GLOB_FTL_Wear_Leveling())
-+ return -EFAULT;
-+ return 0;
-+
-+ case GLOB_SBD_IOCTL_FORMAT:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
-+ "being performed\n");
-+ if (PASS != GLOB_FTL_Flash_Format())
-+ return -EFAULT;
-+ return 0;
-+
-+ case GLOB_SBD_IOCTL_FLUSH_CACHE:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
-+ "being performed\n");
-+ mutex_lock(&spectra_lock);
-+ ret = force_flush_cache();
-+ mutex_unlock(&spectra_lock);
-+ return ret;
-+
-+ case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
-+ "Copy block table\n");
-+ if (copy_to_user((void __user *)arg,
-+ get_blk_table_start_addr(),
-+ get_blk_table_len()))
-+ return -EFAULT;
-+ return 0;
-+
-+ case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
-+ "Copy wear leveling table\n");
-+ if (copy_to_user((void __user *)arg,
-+ get_wear_leveling_table_start_addr(),
-+ get_wear_leveling_table_len()))
-+ return -EFAULT;
-+ return 0;
-+
-+ case GLOB_SBD_IOCTL_GET_NAND_INFO:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
-+ "Get NAND info\n");
-+ if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
-+ sizeof(IdentifyDeviceData)))
-+ return -EFAULT;
-+ return 0;
-+
-+ case GLOB_SBD_IOCTL_WRITE_DATA:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
-+ "Write one page data\n");
-+ return ioctl_write_page_data(arg);
-+
-+ case GLOB_SBD_IOCTL_READ_DATA:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
-+ "Read one page data\n");
-+ return ioctl_read_page_data(arg);
-+ }
-+
-+ return -ENOTTY;
-+}
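
A hypothetical userspace sketch of driving the GET_NAND_INFO ioctl (illustration only: the /dev/nda node name and the exact field layout of struct spectra_indentfy_dev_tag are assumptions; the real definition comes from the driver headers):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define GLOB_SBD_IOCTL_GET_NAND_INFO 0x7708   /* matches the define above */

/* Assumed, simplified mirror of struct spectra_indentfy_dev_tag; the real
 * layout must be taken from the driver's flash.h. */
struct nand_info {
	uint32_t NumBlocks;
	uint16_t PagesPerBlock;
	uint16_t PageDataSize;
	uint16_t wECCBytesPerSector;
	uint32_t wDataBlockNum;
};

int main(void)
{
	struct nand_info info;
	int fd = open("/dev/nda", O_RDONLY);   /* GLOB_SBD_NAME "nd" + 'a' */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, GLOB_SBD_IOCTL_GET_NAND_INFO, &info) < 0) {
		perror("GLOB_SBD_IOCTL_GET_NAND_INFO");
		close(fd);
		return 1;
	}
	printf("blocks=%u pages/block=%u page=%u bytes\n",
	       info.NumBlocks, info.PagesPerBlock, info.PageDataSize);
	close(fd);
	return 0;
}
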
-+
-+static struct block_device_operations GLOB_SBD_ops = {
-+ .owner = THIS_MODULE,
-+ .open = GLOB_SBD_open,
-+ .release = GLOB_SBD_release,
-+ .locked_ioctl = GLOB_SBD_ioctl,
-+ .getgeo = GLOB_SBD_getgeo,
-+};
-+
-+static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
-+{
-+ int res_blks;
-+ u32 sects;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ memset(dev, 0, sizeof(struct spectra_nand_dev));
-+
-+ nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
-+ "for OS image, %d blocks for bad block replacement.\n",
-+ get_res_blk_num_os(),
-+ get_res_blk_num_bad_blk());
-+
-+ res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
-+
-+ dev->size = (u64)IdentifyDeviceData.PageDataSize *
-+ IdentifyDeviceData.PagesPerBlock *
-+ (IdentifyDeviceData.wDataBlockNum - res_blks);
-+
-+ res_blks_os = get_res_blk_num_os();
-+
-+ spin_lock_init(&dev->qlock);
-+
-+ dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
-+ if (!dev->tmp_buf) {
-+ printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
-+ __FILE__, __LINE__);
-+ goto out_vfree;
-+ }
-+
-+ dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
-+ if (dev->queue == NULL) {
-+ printk(KERN_ERR
-+ "Spectra: Request queue could not be initialized."
-+ " Aborting\n ");
-+ goto out_vfree;
-+ }
-+ dev->queue->queuedata = dev;
-+
-+ /* As the Linux block layer doesn't support >4KB hardware sectors, */
-+ /* here we force-report a 512 byte hardware sector size to the kernel */
-+ blk_queue_logical_block_size(dev->queue, 512);
-+
-+ blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
-+ SBD_prepare_flush);
-+
-+ dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
-+ if (IS_ERR(dev->thread)) {
-+ blk_cleanup_queue(dev->queue);
-+ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-+ return PTR_ERR(dev->thread);
-+ }
-+
-+ dev->gd = alloc_disk(PARTITIONS);
-+ if (!dev->gd) {
-+ printk(KERN_ERR
-+ "Spectra: Could not allocate disk. Aborting \n ");
-+ goto out_vfree;
-+ }
-+ dev->gd->major = GLOB_SBD_majornum;
-+ dev->gd->first_minor = which * PARTITIONS;
-+ dev->gd->fops = &GLOB_SBD_ops;
-+ dev->gd->queue = dev->queue;
-+ dev->gd->private_data = dev;
-+ snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
-+
-+ sects = dev->size >> 9;
-+ nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
-+ set_capacity(dev->gd, sects);
-+
-+ add_disk(dev->gd);
-+
-+ return 0;
-+out_vfree:
-+ return -ENOMEM;
-+}
-+
-+/*
-+static ssize_t show_nand_block_num(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "%d\n",
-+ (int)IdentifyDeviceData.wDataBlockNum);
-+}
-+
-+static ssize_t show_nand_pages_per_block(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "%d\n",
-+ (int)IdentifyDeviceData.PagesPerBlock);
-+}
-+
-+static ssize_t show_nand_page_size(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, "%d\n",
-+ (int)IdentifyDeviceData.PageDataSize);
-+}
-+
-+static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
-+static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
-+static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
-+
-+static void create_sysfs_entry(struct device *dev)
-+{
-+ if (device_create_file(dev, &dev_attr_nand_block_num))
-+ printk(KERN_ERR "Spectra: "
-+ "failed to create sysfs entry nand_block_num.\n");
-+ if (device_create_file(dev, &dev_attr_nand_pages_per_block))
-+ printk(KERN_ERR "Spectra: "
-+ "failed to create sysfs entry nand_pages_per_block.\n");
-+ if (device_create_file(dev, &dev_attr_nand_page_size))
-+ printk(KERN_ERR "Spectra: "
-+ "failed to create sysfs entry nand_page_size.\n");
-+}
-+*/
-+
-+static int GLOB_SBD_init(void)
-+{
-+ int i;
-+
-+ /* Set debug output level (0~3) here. 3 is most verbose */
-+ nand_debug_level = 0;
-+
-+ printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
-+
-+ mutex_init(&spectra_lock);
-+
-+ GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
-+ if (GLOB_SBD_majornum <= 0) {
-+ printk(KERN_ERR "Unable to get the major %d for Spectra",
-+ GLOB_SBD_majornum);
-+ return -EBUSY;
-+ }
-+
-+ if (PASS != GLOB_FTL_Flash_Init()) {
-+ printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
-+ "Aborting\n");
-+ goto out_flash_register;
-+ }
-+
-+ /* create_sysfs_entry(&dev->dev); */
-+
-+ if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
-+ printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
-+ "Aborting\n");
-+ goto out_flash_register;
-+ } else {
-+ nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
-+ "Num blocks=%d, pagesperblock=%d, "
-+ "pagedatasize=%d, ECCBytesPerSector=%d\n",
-+ (int)IdentifyDeviceData.NumBlocks,
-+ (int)IdentifyDeviceData.PagesPerBlock,
-+ (int)IdentifyDeviceData.PageDataSize,
-+ (int)IdentifyDeviceData.wECCBytesPerSector);
-+ }
-+
-+ printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
-+ if (GLOB_FTL_Init() != PASS) {
-+ printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
-+ "Aborting\n");
-+ goto out_ftl_flash_register;
-+ }
-+ printk(KERN_ALERT "Spectra: block table has been found.\n");
-+
-+ for (i = 0; i < NUM_DEVICES; i++)
-+ if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
-+ goto out_ftl_flash_register;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Spectra: module loaded with major number %d\n",
-+ GLOB_SBD_majornum);
-+
-+ return 0;
-+
-+out_ftl_flash_register:
-+ GLOB_FTL_Cache_Release();
-+out_flash_register:
-+ GLOB_FTL_Flash_Release();
-+ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-+ printk(KERN_ERR "Spectra: Module load failed.\n");
-+
-+ return -ENOMEM;
-+}
-+
-+static void __exit GLOB_SBD_exit(void)
-+{
-+ int i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < NUM_DEVICES; i++) {
-+ struct spectra_nand_dev *dev = &nand_device[i];
-+ if (dev->gd) {
-+ del_gendisk(dev->gd);
-+ put_disk(dev->gd);
-+ }
-+ if (dev->queue)
-+ blk_cleanup_queue(dev->queue);
-+ kfree(dev->tmp_buf);
-+ }
-+
-+ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-+
-+ mutex_lock(&spectra_lock);
-+ force_flush_cache();
-+ mutex_unlock(&spectra_lock);
-+
-+ GLOB_FTL_Cache_Release();
-+
-+ GLOB_FTL_Flash_Release();
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Spectra FTL module (major number %d) unloaded.\n",
-+ GLOB_SBD_majornum);
-+}
-+
-+static int __init setup_reserve_space_for_os_image(char *cmdline)
-+{
-+ unsigned long value;
-+ int error;
-+
-+ printk(KERN_ALERT "Spectra - cmdline: %s\n", cmdline);
-+ if (!cmdline)
-+ return -EINVAL;
-+
-+ error = strict_strtoul((const char *)cmdline, 10, &value);
-+ if (error)
-+ return -EINVAL;
-+
-+ reserved_mb_for_os_image = value;
-+
-+ return 0;
-+}
-+
-+early_param("res_nand", setup_reserve_space_for_os_image);
-+
-+module_init(GLOB_SBD_init);
-+module_exit(GLOB_SBD_exit);
-diff --git a/drivers/block/spectra/ffsport.h b/drivers/block/spectra/ffsport.h
-new file mode 100644
-index 0000000..6c5d90c
---- /dev/null
-+++ b/drivers/block/spectra/ffsport.h
-@@ -0,0 +1,84 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#ifndef _FFSPORT_
-+#define _FFSPORT_
-+
-+#include "ffsdefs.h"
-+
-+#if defined __GNUC__
-+#define PACKED
-+#define PACKED_GNU __attribute__ ((packed))
-+#define UNALIGNED
-+#endif
-+
-+#include <linux/semaphore.h>
-+#include <linux/string.h> /* for strcpy(), stricmp(), etc */
-+#include <linux/mm.h> /* for kmalloc(), kfree() */
-+#include <linux/vmalloc.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/init.h>
-+
-+#include <linux/kernel.h> /* printk() */
-+#include <linux/fs.h> /* everything... */
-+#include <linux/errno.h> /* error codes */
-+#include <linux/types.h> /* size_t */
-+#include <linux/genhd.h>
-+#include <linux/blkdev.h>
-+#include <linux/hdreg.h>
-+#include <linux/pci.h>
-+#include "flash.h"
-+
-+#define VERBOSE 1
-+
-+#define NAND_DBG_WARN 1
-+#define NAND_DBG_DEBUG 2
-+#define NAND_DBG_TRACE 3
-+
-+extern int nand_debug_level;
-+
-+#ifdef VERBOSE
-+#define nand_dbg_print(level, args...) \
-+ do { \
-+ if (level <= nand_debug_level) \
-+ printk(KERN_ALERT args); \
-+ } while (0)
-+#else
-+#define nand_dbg_print(level, args...)
-+#endif
-+
-+#ifdef SUPPORT_BIG_ENDIAN
-+#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
-+ (u16)((u16)(w) >> 8))
-+
-+#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
-+ (((u32)(dw) << 8) & 0x00ff0000) | \
-+ (((u32)(dw) >> 8) & 0x0000ff00) | \
-+ ((u32)(dw) >> 24))
-+#else
-+#define INVERTUINT16(w) w
-+#define INVERTUINT32(dw) dw
-+#endif
-+
-+extern int GLOB_Calc_Used_Bits(u32 n);
-+extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
-+extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
-+
-+#endif /* _FFSPORT_ */
-diff --git a/drivers/block/spectra/flash.c b/drivers/block/spectra/flash.c
-new file mode 100644
-index 0000000..134aa51
---- /dev/null
-+++ b/drivers/block/spectra/flash.c
-@@ -0,0 +1,4731 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/slab.h>
-+
-+#include "flash.h"
-+#include "ffsdefs.h"
-+#include "lld.h"
-+#include "lld_nand.h"
-+#if CMD_DMA
-+#include "lld_cdma.h"
-+#endif
-+
-+#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
-+#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
-+ DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
-+
-+#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
-+ BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
-+
-+#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
-+
-+#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
-+ BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
-+
-+#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
-+
-+#if DEBUG_BNDRY
-+void debug_boundary_lineno_error(int chnl, int limit, int no,
-+ int lineno, char *filename)
-+{
-+ if (chnl >= limit)
-+ printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
-+ "at %s:%d. Other info:%d. Aborting...\n",
-+ chnl, limit, filename, lineno, no);
-+}
-+/* static int globalmemsize; */
-+#endif
-+
-+static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
-+static int FTL_Cache_Read(u64 dwPageAddr);
-+static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
-+ u16 cache_blk);
-+static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
-+ u8 cache_blk, u16 flag);
-+static int FTL_Cache_Write(void);
-+static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
-+static void FTL_Calculate_LRU(void);
-+static u32 FTL_Get_Block_Index(u32 wBlockNum);
-+
-+static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
-+ u8 BT_Tag, u16 *Page);
-+static int FTL_Read_Block_Table(void);
-+static int FTL_Write_Block_Table(int wForce);
-+static int FTL_Write_Block_Table_Data(void);
-+static int FTL_Check_Block_Table(int wOldTable);
-+static int FTL_Static_Wear_Leveling(void);
-+static u32 FTL_Replace_Block_Table(void);
-+static int FTL_Write_IN_Progress_Block_Table_Page(void);
-+
-+static u32 FTL_Get_Page_Num(u64 length);
-+static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
-+
-+static u32 FTL_Replace_OneBlock(u32 wBlockNum,
-+ u32 wReplaceNum);
-+static u32 FTL_Replace_LWBlock(u32 wBlockNum,
-+ int *pGarbageCollect);
-+static u32 FTL_Replace_MWBlock(void);
-+static int FTL_Replace_Block(u64 blk_addr);
-+static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
-+
-+static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
-+
-+struct device_info_tag DeviceInfo;
-+struct flash_cache_tag Cache;
-+static struct spectra_l2_cache_info cache_l2;
-+
-+static u8 *cache_l2_page_buf;
-+static u8 *cache_l2_blk_buf;
-+
-+u8 *g_pBlockTable;
-+u8 *g_pWearCounter;
-+u16 *g_pReadCounter;
-+u32 *g_pBTBlocks;
-+static u16 g_wBlockTableOffset;
-+static u32 g_wBlockTableIndex;
-+static u8 g_cBlockTableStatus;
-+
-+static u8 *g_pTempBuf;
-+static u8 *flag_check_blk_table;
-+static u8 *tmp_buf_search_bt_in_block;
-+static u8 *spare_buf_search_bt_in_block;
-+static u8 *spare_buf_bt_search_bt_in_block;
-+static u8 *tmp_buf1_read_blk_table;
-+static u8 *tmp_buf2_read_blk_table;
-+static u8 *flags_static_wear_leveling;
-+static u8 *tmp_buf_write_blk_table_data;
-+static u8 *tmp_buf_read_disturbance;
-+
-+u8 *buf_read_page_main_spare;
-+u8 *buf_write_page_main_spare;
-+u8 *buf_read_page_spare;
-+u8 *buf_get_bad_block;
-+
-+#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
-+struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
-+struct flash_cache_tag cache_start_copy;
-+#endif
-+
-+int g_wNumFreeBlocks;
-+u8 g_SBDCmdIndex;
-+
-+static u8 *g_pIPF;
-+static u8 bt_flag = FIRST_BT_ID;
-+static u8 bt_block_changed;
-+
-+static u16 cache_block_to_write;
-+static u8 last_erased = FIRST_BT_ID;
-+
-+static u8 GC_Called;
-+static u8 BT_GC_Called;
-+
-+#if CMD_DMA
-+#define COPY_BACK_BUF_NUM 10
-+
-+static u8 ftl_cmd_cnt; /* Init value is 0 */
-+u8 *g_pBTDelta;
-+u8 *g_pBTDelta_Free;
-+u8 *g_pBTStartingCopy;
-+u8 *g_pWearCounterCopy;
-+u16 *g_pReadCounterCopy;
-+u8 *g_pBlockTableCopies;
-+u8 *g_pNextBlockTable;
-+static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
-+static int cp_back_buf_idx;
-+
-+static u8 *g_temp_buf;
-+
-+#pragma pack(push, 1)
-+#pragma pack(1)
-+struct BTableChangesDelta {
-+ u8 ftl_cmd_cnt;
-+ u8 ValidFields;
-+ u16 g_wBlockTableOffset;
-+ u32 g_wBlockTableIndex;
-+ u32 BT_Index;
-+ u32 BT_Entry_Value;
-+ u32 WC_Index;
-+ u8 WC_Entry_Value;
-+ u32 RC_Index;
-+ u16 RC_Entry_Value;
-+};
-+
-+#pragma pack(pop)
-+
-+struct BTableChangesDelta *p_BTableChangesDelta;
-+#endif
-+
-+
-+#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
-+#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
-+
-+#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
-+ sizeof(u32))
-+#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
-+ sizeof(u8))
-+#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
-+ sizeof(u16))
-+#if SUPPORT_LARGE_BLOCKNUM
-+#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
-+ sizeof(u8) * 3)
-+#else
-+#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
-+ sizeof(u16))
-+#endif
-+#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
-+ FTL_Get_WearCounter_Table_Mem_Size_Bytes
-+#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
-+ FTL_Get_ReadCounter_Table_Mem_Size_Bytes
-+
-+static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
-+{
-+ u32 byte_num;
-+
-+ if (DeviceInfo.MLCDevice) {
-+ byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
-+ DeviceInfo.wDataBlockNum * sizeof(u8) +
-+ DeviceInfo.wDataBlockNum * sizeof(u16);
-+ } else {
-+ byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
-+ DeviceInfo.wDataBlockNum * sizeof(u8);
-+ }
-+
-+ byte_num += 4 * sizeof(u8);
-+
-+ return byte_num;
-+}
-+
-+static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
-+{
-+ return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
-+}
-+
-+static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
-+ u32 sizeTxed)
-+{
-+ u32 wBytesCopied, blk_tbl_size, wBytes;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
-+ for (wBytes = 0;
-+ (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
-+ wBytes++) {
-+#if SUPPORT_LARGE_BLOCKNUM
-+ flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
-+ >> (((wBytes + sizeTxed) % 3) ?
-+ ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
-+#else
-+ flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
-+ >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
-+#endif
-+ }
-+
-+ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-+ blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
-+ wBytesCopied = wBytes;
-+ wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
-+ (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
-+ memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
-+
-+ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-+
-+ if (DeviceInfo.MLCDevice) {
-+ blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
-+ wBytesCopied += wBytes;
-+ for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
-+ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
-+ flashBuf[wBytes + wBytesCopied] =
-+ (g_pReadCounter[(wBytes + sizeTxed) / 2] >>
-+ (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
-+ }
-+
-+ return wBytesCopied + wBytes;
-+}
-+
-+static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
-+ u32 sizeToTx, u32 sizeTxed)
-+{
-+ u32 wBytesCopied, blk_tbl_size, wBytes;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
-+ for (wBytes = 0; (wBytes < sizeToTx) &&
-+ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
-+#if SUPPORT_LARGE_BLOCKNUM
-+ if (!((wBytes + sizeTxed) % 3))
-+ pbt[(wBytes + sizeTxed) / 3] = 0;
-+ pbt[(wBytes + sizeTxed) / 3] |=
-+ (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
-+ ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
-+#else
-+ if (!((wBytes + sizeTxed) % 2))
-+ pbt[(wBytes + sizeTxed) / 2] = 0;
-+ pbt[(wBytes + sizeTxed) / 2] |=
-+ (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
-+ 0 : 8));
-+#endif
-+ }
-+
-+ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-+ blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
-+ wBytesCopied = wBytes;
-+ wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
-+ (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
-+ memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
-+ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-+
-+ if (DeviceInfo.MLCDevice) {
-+ wBytesCopied += wBytes;
-+ blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
-+ for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
-+ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
-+ if (((wBytes + sizeTxed) % 2))
-+ g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
-+ g_pReadCounter[(wBytes + sizeTxed) / 2] |=
-+ (flashBuf[wBytes] <<
-+ (((wBytes + sizeTxed) % 2) ? 0 : 8));
-+ }
-+ }
-+
-+ return wBytesCopied+wBytes;
-+}
-+
-+static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
-+{
-+ int i;
-+
-+ for (i = 0; i < BTSIG_BYTES; i++)
-+ buf[BTSIG_OFFSET + i] =
-+ ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
-+ (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
-+
-+ return PASS;
-+}
-+
-+static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
-+{
-+ static u8 tag[BTSIG_BYTES >> 1];
-+ int i, j, k, tagi, tagtemp, status;
-+
-+ *tagarray = (u8 *)tag;
-+ tagi = 0;
-+
-+ for (i = 0; i < (BTSIG_BYTES - 1); i++) {
-+ for (j = i + 1; (j < BTSIG_BYTES) &&
-+ (tagi < (BTSIG_BYTES >> 1)); j++) {
-+ tagtemp = buf[BTSIG_OFFSET + j] -
-+ buf[BTSIG_OFFSET + i];
-+ if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
-+ tagtemp = (buf[BTSIG_OFFSET + i] +
-+ (1 + LAST_BT_ID - FIRST_BT_ID) -
-+ (i * BTSIG_DELTA)) %
-+ (1 + LAST_BT_ID - FIRST_BT_ID);
-+ status = FAIL;
-+ for (k = 0; k < tagi; k++) {
-+ if (tagtemp == tag[k])
-+ status = PASS;
-+ }
-+
-+ if (status == FAIL) {
-+ tag[tagi++] = tagtemp;
-+ i = (j == (i + 1)) ? i + 1 : i;
-+ j = (j == (i + 1)) ? i + 1 : i;
-+ }
-+ }
-+ }
-+ }
-+
-+ return tagi;
-+}
-+
-+
-+static int FTL_Execute_SPL_Recovery(void)
-+{
-+ u32 j, block, blks;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ int ret;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
-+ for (j = 0; j <= blks; j++) {
-+ block = (pbt[j]);
-+ if (((block & BAD_BLOCK) != BAD_BLOCK) &&
-+ ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
-+ ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
-+ if (FAIL == ret) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, new Bad Block %d "
-+ "generated!\n",
-+ __FILE__, __LINE__, __func__,
-+ (int)(block & ~BAD_BLOCK));
-+ MARK_BLOCK_AS_BAD(pbt[j]);
-+ }
-+ }
-+ }
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_IdentifyDevice
-+* Inputs: pointer to identify data structure
-+* Outputs: PASS / FAIL
-+* Description: the identify data structure is filled in with
-+* information for the block driver.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
-+ dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
-+ dev_data->PageDataSize = DeviceInfo.wPageDataSize;
-+ dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
-+ dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
-+
-+ return PASS;
-+}
-+
-+/* ..... */
-+static int allocate_memory(void)
-+{
-+ u32 block_table_size, page_size, block_size, mem_size;
-+ u32 total_bytes = 0;
-+ int i;
-+#if CMD_DMA
-+ int j;
-+#endif
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ page_size = DeviceInfo.wPageSize;
-+ block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
-+
-+ block_table_size = DeviceInfo.wDataBlockNum *
-+ (sizeof(u32) + sizeof(u8) + sizeof(u16));
-+ block_table_size += (DeviceInfo.wPageDataSize -
-+ (block_table_size % DeviceInfo.wPageDataSize)) %
-+ DeviceInfo.wPageDataSize;
-+
-+ /* Malloc memory for block tables */
-+ g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
-+ if (!g_pBlockTable)
-+ goto block_table_fail;
-+ memset(g_pBlockTable, 0, block_table_size);
-+ total_bytes += block_table_size;
-+
-+ g_pWearCounter = (u8 *)(g_pBlockTable +
-+ DeviceInfo.wDataBlockNum * sizeof(u32));
-+
-+ if (DeviceInfo.MLCDevice)
-+ g_pReadCounter = (u16 *)(g_pBlockTable +
-+ DeviceInfo.wDataBlockNum *
-+ (sizeof(u32) + sizeof(u8)));
-+
-+ /* Malloc memory and init for cache items */
-+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
-+ Cache.array[i].address = NAND_CACHE_INIT_ADDR;
-+ Cache.array[i].use_cnt = 0;
-+ Cache.array[i].changed = CLEAR;
-+ Cache.array[i].buf = kmalloc(Cache.cache_item_size,
-+ GFP_ATOMIC);
-+ if (!Cache.array[i].buf)
-+ goto cache_item_fail;
-+ memset(Cache.array[i].buf, 0, Cache.cache_item_size);
-+ total_bytes += Cache.cache_item_size;
-+ }
-+
-+ /* Malloc memory for IPF */
-+ g_pIPF = kmalloc(page_size, GFP_ATOMIC);
-+ if (!g_pIPF)
-+ goto ipf_fail;
-+ memset(g_pIPF, 0, page_size);
-+ total_bytes += page_size;
-+
-+ /* Malloc memory for data merging during Level2 Cache flush */
-+ cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
-+ if (!cache_l2_page_buf)
-+ goto cache_l2_page_buf_fail;
-+ memset(cache_l2_page_buf, 0xff, page_size);
-+ total_bytes += page_size;
-+
-+ cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
-+ if (!cache_l2_blk_buf)
-+ goto cache_l2_blk_buf_fail;
-+ memset(cache_l2_blk_buf, 0xff, block_size);
-+ total_bytes += block_size;
-+
-+ /* Malloc memory for temp buffer */
-+ g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
-+ if (!g_pTempBuf)
-+ goto Temp_buf_fail;
-+ memset(g_pTempBuf, 0, Cache.cache_item_size);
-+ total_bytes += Cache.cache_item_size;
-+
-+ /* Malloc memory for block table blocks */
-+ mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
-+ g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
-+ if (!g_pBTBlocks)
-+ goto bt_blocks_fail;
-+ memset(g_pBTBlocks, 0xff, mem_size);
-+ total_bytes += mem_size;
-+
-+ /* Malloc memory for function FTL_Check_Block_Table */
-+ flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
-+ if (!flag_check_blk_table)
-+ goto flag_check_blk_table_fail;
-+ total_bytes += DeviceInfo.wDataBlockNum;
-+
-+ /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
-+ tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
-+ if (!tmp_buf_search_bt_in_block)
-+ goto tmp_buf_search_bt_in_block_fail;
-+ memset(tmp_buf_search_bt_in_block, 0xff, page_size);
-+ total_bytes += page_size;
-+
-+ mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
-+ spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
-+ if (!spare_buf_search_bt_in_block)
-+ goto spare_buf_search_bt_in_block_fail;
-+ memset(spare_buf_search_bt_in_block, 0xff, mem_size);
-+ total_bytes += mem_size;
-+
-+ spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
-+ if (!spare_buf_bt_search_bt_in_block)
-+ goto spare_buf_bt_search_bt_in_block_fail;
-+ memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
-+ total_bytes += mem_size;
-+
-+ /* Malloc memory for function FTL_Read_Block_Table */
-+ tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
-+ if (!tmp_buf1_read_blk_table)
-+ goto tmp_buf1_read_blk_table_fail;
-+ memset(tmp_buf1_read_blk_table, 0xff, page_size);
-+ total_bytes += page_size;
-+
-+ tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
-+ if (!tmp_buf2_read_blk_table)
-+ goto tmp_buf2_read_blk_table_fail;
-+ memset(tmp_buf2_read_blk_table, 0xff, page_size);
-+ total_bytes += page_size;
-+
-+ /* Malloc memory for function FTL_Static_Wear_Leveling */
-+ flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
-+ GFP_ATOMIC);
-+ if (!flags_static_wear_leveling)
-+ goto flags_static_wear_leveling_fail;
-+ total_bytes += DeviceInfo.wDataBlockNum;
-+
-+ /* Malloc memory for function FTL_Write_Block_Table_Data */
-+ if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
-+ mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
-+ 2 * DeviceInfo.wPageSize;
-+ else
-+ mem_size = DeviceInfo.wPageSize;
-+ tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
-+ if (!tmp_buf_write_blk_table_data)
-+ goto tmp_buf_write_blk_table_data_fail;
-+ memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
-+ total_bytes += mem_size;
-+
-+ /* Malloc memory for function FTL_Read_Disturbance */
-+ tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
-+ if (!tmp_buf_read_disturbance)
-+ goto tmp_buf_read_disturbance_fail;
-+ memset(tmp_buf_read_disturbance, 0xff, block_size);
-+ total_bytes += block_size;
-+
-+ /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
-+ buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
-+ if (!buf_read_page_main_spare)
-+ goto buf_read_page_main_spare_fail;
-+ total_bytes += DeviceInfo.wPageSize;
-+
-+ /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
-+ buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
-+ if (!buf_write_page_main_spare)
-+ goto buf_write_page_main_spare_fail;
-+ total_bytes += DeviceInfo.wPageSize;
-+
-+ /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
-+ buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
-+ if (!buf_read_page_spare)
-+ goto buf_read_page_spare_fail;
-+ memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
-+ total_bytes += DeviceInfo.wPageSpareSize;
-+
-+ /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
-+ buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
-+ if (!buf_get_bad_block)
-+ goto buf_get_bad_block_fail;
-+ memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
-+ total_bytes += DeviceInfo.wPageSpareSize;
-+
-+#if CMD_DMA
-+ g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
-+ if (!g_temp_buf)
-+ goto temp_buf_fail;
-+ memset(g_temp_buf, 0xff, block_size);
-+ total_bytes += block_size;
-+
-+ /* Malloc memory for copy of block table used in CDMA mode */
-+ g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
-+ if (!g_pBTStartingCopy)
-+ goto bt_starting_copy;
-+ memset(g_pBTStartingCopy, 0, block_table_size);
-+ total_bytes += block_table_size;
-+
-+ g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
-+ DeviceInfo.wDataBlockNum * sizeof(u32));
-+
-+ if (DeviceInfo.MLCDevice)
-+ g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
-+ DeviceInfo.wDataBlockNum *
-+ (sizeof(u32) + sizeof(u8)));
-+
-+ /* Malloc memory for block table copies */
-+ mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
-+ 5 * DeviceInfo.wDataBlockNum * sizeof(u8);
-+ if (DeviceInfo.MLCDevice)
-+ mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
-+ g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
-+ if (!g_pBlockTableCopies)
-+ goto blk_table_copies_fail;
-+ memset(g_pBlockTableCopies, 0, mem_size);
-+ total_bytes += mem_size;
-+ g_pNextBlockTable = g_pBlockTableCopies;
-+
-+ /* Malloc memory for Block Table Delta */
-+ mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
-+ g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
-+ if (!g_pBTDelta)
-+ goto bt_delta_fail;
-+ memset(g_pBTDelta, 0, mem_size);
-+ total_bytes += mem_size;
-+ g_pBTDelta_Free = g_pBTDelta;
-+
-+ /* Malloc memory for Copy Back Buffers */
-+ for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
-+ cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
-+ if (!cp_back_buf_copies[j])
-+ goto cp_back_buf_copies_fail;
-+ memset(cp_back_buf_copies[j], 0, block_size);
-+ total_bytes += block_size;
-+ }
-+ cp_back_buf_idx = 0;
-+
-+ /* Malloc memory for pending commands list */
-+ mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
-+ info.pcmds = kzalloc(mem_size, GFP_KERNEL);
-+ if (!info.pcmds)
-+ goto pending_cmds_buf_fail;
-+ total_bytes += mem_size;
-+
-+ /* Malloc memory for CDMA descriptor table */
-+ mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
-+ info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
-+ if (!info.cdma_desc_buf)
-+ goto cdma_desc_buf_fail;
-+ total_bytes += mem_size;
-+
-+ /* Malloc memory for Memcpy descriptor table */
-+ mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
-+ info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
-+ if (!info.memcp_desc_buf)
-+ goto memcp_desc_buf_fail;
-+ total_bytes += mem_size;
-+#endif
-+
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Total memory allocated in FTL layer: %d\n", total_bytes);
-+
-+ return PASS;
-+
-+#if CMD_DMA
-+memcp_desc_buf_fail:
-+ kfree(info.cdma_desc_buf);
-+cdma_desc_buf_fail:
-+ kfree(info.pcmds);
-+pending_cmds_buf_fail:
-+cp_back_buf_copies_fail:
-+ j--;
-+ for (; j >= 0; j--)
-+ kfree(cp_back_buf_copies[j]);
-+ kfree(g_pBTDelta);
-+bt_delta_fail:
-+ kfree(g_pBlockTableCopies);
-+blk_table_copies_fail:
-+ kfree(g_pBTStartingCopy);
-+bt_starting_copy:
-+ kfree(g_temp_buf);
-+temp_buf_fail:
-+ kfree(buf_get_bad_block);
-+#endif
-+
-+buf_get_bad_block_fail:
-+ kfree(buf_read_page_spare);
-+buf_read_page_spare_fail:
-+ kfree(buf_write_page_main_spare);
-+buf_write_page_main_spare_fail:
-+ kfree(buf_read_page_main_spare);
-+buf_read_page_main_spare_fail:
-+ kfree(tmp_buf_read_disturbance);
-+tmp_buf_read_disturbance_fail:
-+ kfree(tmp_buf_write_blk_table_data);
-+tmp_buf_write_blk_table_data_fail:
-+ kfree(flags_static_wear_leveling);
-+flags_static_wear_leveling_fail:
-+ kfree(tmp_buf2_read_blk_table);
-+tmp_buf2_read_blk_table_fail:
-+ kfree(tmp_buf1_read_blk_table);
-+tmp_buf1_read_blk_table_fail:
-+ kfree(spare_buf_bt_search_bt_in_block);
-+spare_buf_bt_search_bt_in_block_fail:
-+ kfree(spare_buf_search_bt_in_block);
-+spare_buf_search_bt_in_block_fail:
-+ kfree(tmp_buf_search_bt_in_block);
-+tmp_buf_search_bt_in_block_fail:
-+ kfree(flag_check_blk_table);
-+flag_check_blk_table_fail:
-+ kfree(g_pBTBlocks);
-+bt_blocks_fail:
-+ kfree(g_pTempBuf);
-+Temp_buf_fail:
-+ kfree(cache_l2_blk_buf);
-+cache_l2_blk_buf_fail:
-+ kfree(cache_l2_page_buf);
-+cache_l2_page_buf_fail:
-+ kfree(g_pIPF);
-+ipf_fail:
-+cache_item_fail:
-+ i--;
-+ for (; i >= 0; i--)
-+ kfree(Cache.array[i].buf);
-+ kfree(g_pBlockTable);
-+block_table_fail:
-+ printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
-+ __FILE__, __LINE__);
-+
-+ return -ENOMEM;
-+}
-+
-+/* Free all the memory allocated by allocate_memory() */
-+static int free_memory(void)
-+{
-+ int i;
-+
-+#if CMD_DMA
-+ kfree(info.memcp_desc_buf);
-+ kfree(info.cdma_desc_buf);
-+ kfree(info.pcmds);
-+ for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
-+ kfree(cp_back_buf_copies[i]);
-+ kfree(g_pBTDelta);
-+ kfree(g_pBlockTableCopies);
-+ kfree(g_pBTStartingCopy);
-+ kfree(g_temp_buf);
-+ kfree(buf_get_bad_block);
-+#endif
-+ kfree(buf_read_page_spare);
-+ kfree(buf_write_page_main_spare);
-+ kfree(buf_read_page_main_spare);
-+ kfree(tmp_buf_read_disturbance);
-+ kfree(tmp_buf_write_blk_table_data);
-+ kfree(flags_static_wear_leveling);
-+ kfree(tmp_buf2_read_blk_table);
-+ kfree(tmp_buf1_read_blk_table);
-+ kfree(spare_buf_bt_search_bt_in_block);
-+ kfree(spare_buf_search_bt_in_block);
-+ kfree(tmp_buf_search_bt_in_block);
-+ kfree(flag_check_blk_table);
-+ kfree(g_pBTBlocks);
-+ kfree(g_pTempBuf);
-+ kfree(g_pIPF);
-+ for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
-+ kfree(Cache.array[i].buf);
-+ kfree(g_pBlockTable);
-+
-+ return 0;
-+}
-+
-+static void dump_cache_l2_table(void)
-+{
-+ struct list_head *p;
-+ struct spectra_l2_cache_list *pnd;
-+ int n, i;
-+
-+ n = 0;
-+ list_for_each(p, &cache_l2.table.list) {
-+ pnd = list_entry(p, struct spectra_l2_cache_list, list);
-+ nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
-+/*
-+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
-+ if (pnd->pages_array[i] != MAX_U32_VALUE)
-+ nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
-+ }
-+*/
-+ n++;
-+ }
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Init
-+* Inputs: none
-+* Outputs: PASS=0 / FAIL=1
-+* Description: allocates the memory for the cache array and other
-+* important data structures,
-+* clears the cache array,
-+* and reads the block table from flash into RAM
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Init(void)
-+{
-+ int i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ Cache.pages_per_item = 1;
-+ Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
-+
-+ if (allocate_memory() != PASS)
-+ return FAIL;
-+
-+#if CMD_DMA
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ memcpy((void *)&cache_start_copy, (void *)&Cache,
-+ sizeof(struct flash_cache_tag));
-+ memset((void *)&int_cache, -1,
-+ sizeof(struct flash_cache_delta_list_tag) *
-+ (MAX_CHANS + MAX_DESCS));
-+#endif
-+ ftl_cmd_cnt = 0;
-+#endif
-+
-+ if (FTL_Read_Block_Table() != PASS)
-+ return FAIL;
-+
-+ /* Init the Level2 Cache data structure */
-+ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
-+ cache_l2.blk_array[i] = MAX_U32_VALUE;
-+ cache_l2.cur_blk_idx = 0;
-+ cache_l2.cur_page_num = 0;
-+ INIT_LIST_HEAD(&cache_l2.table.list);
-+ cache_l2.table.logical_blk_num = MAX_U32_VALUE;
-+
-+ dump_cache_l2_table();
-+
-+ return 0;
-+}
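-+
-+#if 0
-+/*
-+ * Illustrative bring-up sketch only: the assumed pairing of the init and
-+ * release entry points defined in this file. The real calls are issued
-+ * from the block-driver glue (presumably ffsport.c), not from here, and
-+ * example_ftl_bring_up() itself is not part of the driver.
-+ */
-+static int example_ftl_bring_up(void)
-+{
-+	if (PASS != GLOB_FTL_Flash_Init())	/* reset controller, READ ID */
-+		return FAIL;
-+	if (PASS != GLOB_FTL_Init())		/* caches + block table */
-+		return FAIL;
-+
-+	/* ... normal FTL read/write traffic runs here ... */
-+
-+	GLOB_FTL_Cache_Release();	/* frees everything GLOB_FTL_Init took */
-+	return GLOB_FTL_Flash_Release();
-+}
-+#endif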
-+
-+
-+#if CMD_DMA
-+#if 0
-+static void save_blk_table_changes(u16 idx)
-+{
-+ u8 ftl_cmd;
-+ u32 *pbt = (u32 *)g_pBTStartingCopy;
-+
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ u16 id;
-+ u8 cache_blks;
-+
-+ id = idx - MAX_CHANS;
-+ if (int_cache[id].item != -1) {
-+ cache_blks = int_cache[id].item;
-+ cache_start_copy.array[cache_blks].address =
-+ int_cache[id].cache.address;
-+ cache_start_copy.array[cache_blks].changed =
-+ int_cache[id].cache.changed;
-+ }
-+#endif
-+
-+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-+
-+ while (ftl_cmd <= PendingCMD[idx].Tag) {
-+ if (p_BTableChangesDelta->ValidFields == 0x01) {
-+ g_wBlockTableOffset =
-+ p_BTableChangesDelta->g_wBlockTableOffset;
-+ } else if (p_BTableChangesDelta->ValidFields == 0x0C) {
-+ pbt[p_BTableChangesDelta->BT_Index] =
-+ p_BTableChangesDelta->BT_Entry_Value;
-+ debug_boundary_error(((
-+ p_BTableChangesDelta->BT_Index)),
-+ DeviceInfo.wDataBlockNum, 0);
-+ } else if (p_BTableChangesDelta->ValidFields == 0x03) {
-+ g_wBlockTableOffset =
-+ p_BTableChangesDelta->g_wBlockTableOffset;
-+ g_wBlockTableIndex =
-+ p_BTableChangesDelta->g_wBlockTableIndex;
-+ } else if (p_BTableChangesDelta->ValidFields == 0x30) {
-+ g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
-+ p_BTableChangesDelta->WC_Entry_Value;
-+ } else if ((DeviceInfo.MLCDevice) &&
-+ (p_BTableChangesDelta->ValidFields == 0xC0)) {
-+ g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
-+ p_BTableChangesDelta->RC_Entry_Value;
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "In event status setting read counter "
-+ "GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
-+ ftl_cmd,
-+ p_BTableChangesDelta->RC_Entry_Value,
-+ (unsigned int)p_BTableChangesDelta->RC_Index);
-+ } else {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "This should never occur \n");
-+ }
-+ p_BTableChangesDelta += 1;
-+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-+ }
-+}
-+
-+static void discard_cmds(u16 n)
-+{
-+ u32 *pbt = (u32 *)g_pBTStartingCopy;
-+ u8 ftl_cmd;
-+ unsigned long k;
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ u8 cache_blks;
-+ u16 id;
-+#endif
-+
-+ if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
-+ (PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
-+ for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
-+ if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
-+ MARK_BLK_AS_DISCARD(pbt[k]);
-+ }
-+ }
-+
-+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-+ while (ftl_cmd <= PendingCMD[n].Tag) {
-+ p_BTableChangesDelta += 1;
-+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-+ }
-+
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ id = n - MAX_CHANS;
-+
-+ if (int_cache[id].item != -1) {
-+ cache_blks = int_cache[id].item;
-+ if (PendingCMD[n].CMD == MEMCOPY_CMD) {
-+ if ((cache_start_copy.array[cache_blks].buf <=
-+ PendingCMD[n].DataDestAddr) &&
-+ ((cache_start_copy.array[cache_blks].buf +
-+ Cache.cache_item_size) >
-+ PendingCMD[n].DataDestAddr)) {
-+ cache_start_copy.array[cache_blks].address =
-+ NAND_CACHE_INIT_ADDR;
-+ cache_start_copy.array[cache_blks].use_cnt =
-+ 0;
-+ cache_start_copy.array[cache_blks].changed =
-+ CLEAR;
-+ }
-+ } else {
-+ cache_start_copy.array[cache_blks].address =
-+ int_cache[id].cache.address;
-+ cache_start_copy.array[cache_blks].changed =
-+ int_cache[id].cache.changed;
-+ }
-+ }
-+#endif
-+}
-+
-+static void process_cmd_pass(int *first_failed_cmd, u16 idx)
-+{
-+ if (0 == *first_failed_cmd)
-+ save_blk_table_changes(idx);
-+ else
-+ discard_cmds(idx);
-+}
-+
-+static void process_cmd_fail_abort(int *first_failed_cmd,
-+ u16 idx, int event)
-+{
-+ u32 *pbt = (u32 *)g_pBTStartingCopy;
-+ u8 ftl_cmd;
-+ unsigned long i;
-+ int erase_fail, program_fail;
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ u8 cache_blks;
-+ u16 id;
-+#endif
-+
-+ if (0 == *first_failed_cmd)
-+ *first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
-+
-+	nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occurred "
-+		"while executing %u Command %u accessing Block %u\n",
-+ (unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
-+ PendingCMD[idx].CMD,
-+ (unsigned int)PendingCMD[idx].Block);
-+
-+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-+ while (ftl_cmd <= PendingCMD[idx].Tag) {
-+ p_BTableChangesDelta += 1;
-+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-+ }
-+
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ id = idx - MAX_CHANS;
-+
-+ if (int_cache[id].item != -1) {
-+ cache_blks = int_cache[id].item;
-+ if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
-+ cache_start_copy.array[cache_blks].address =
-+ int_cache[id].cache.address;
-+ cache_start_copy.array[cache_blks].changed = SET;
-+ } else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
-+ cache_start_copy.array[cache_blks].address =
-+ NAND_CACHE_INIT_ADDR;
-+ cache_start_copy.array[cache_blks].use_cnt = 0;
-+ cache_start_copy.array[cache_blks].changed =
-+ CLEAR;
-+ } else if (PendingCMD[idx].CMD == ERASE_CMD) {
-+ /* ? */
-+ } else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
-+ /* ? */
-+ }
-+ }
-+#endif
-+
-+ erase_fail = (event == EVENT_ERASE_FAILURE) &&
-+ (PendingCMD[idx].CMD == ERASE_CMD);
-+
-+ program_fail = (event == EVENT_PROGRAM_FAILURE) &&
-+ ((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
-+ (PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
-+
-+ if (erase_fail || program_fail) {
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ if (PendingCMD[idx].Block ==
-+ (pbt[i] & (~BAD_BLOCK)))
-+ MARK_BLOCK_AS_BAD(pbt[i]);
-+ }
-+ }
-+}
-+
-+static void process_cmd(int *first_failed_cmd, u16 idx, int event)
-+{
-+ u8 ftl_cmd;
-+ int cmd_match = 0;
-+
-+ if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
-+ cmd_match = 1;
-+
-+ if (PendingCMD[idx].Status == CMD_PASS) {
-+ process_cmd_pass(first_failed_cmd, idx);
-+ } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
-+ (PendingCMD[idx].Status == CMD_ABORT)) {
-+ process_cmd_fail_abort(first_failed_cmd, idx, event);
-+ } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
-+ PendingCMD[idx].Tag) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ " Command no. %hu is not executed\n",
-+ (unsigned int)PendingCMD[idx].Tag);
-+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-+ while (ftl_cmd <= PendingCMD[idx].Tag) {
-+ p_BTableChangesDelta += 1;
-+ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-+ }
-+ }
-+}
-+#endif
-+
-+static void process_cmd(int *first_failed_cmd, u16 idx, int event)
-+{
-+	printk(KERN_ERR "Temporary workaround function. "
-+		"Should not be called!\n");
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Event_Status
-+* Inputs: none
-+* Outputs: Event Code
-+* Description: It is called by SBD after a hardware interrupt signals
-+* completion of the command chain
-+* It does the following:
-+* get event status from LLD
-+* analyze command chain status
-+* determine last command executed
-+* analyze results
-+* rebuild the block table in case of uncorrectable error
-+* return event code
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Event_Status(int *first_failed_cmd)
-+{
-+ int event_code = PASS;
-+ u16 i_P;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ *first_failed_cmd = 0;
-+
-+ event_code = GLOB_LLD_Event_Status();
-+
-+ switch (event_code) {
-+ case EVENT_PASS:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
-+ break;
-+ case EVENT_UNCORRECTABLE_DATA_ERROR:
-+ nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
-+ break;
-+ case EVENT_PROGRAM_FAILURE:
-+ case EVENT_ERASE_FAILURE:
-+ nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
-+ "Event code: 0x%x\n", event_code);
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta;
-+ for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
-+ i_P++)
-+ process_cmd(first_failed_cmd, i_P, event_code);
-+ memcpy(g_pBlockTable, g_pBTStartingCopy,
-+ DeviceInfo.wDataBlockNum * sizeof(u32));
-+ memcpy(g_pWearCounter, g_pWearCounterCopy,
-+ DeviceInfo.wDataBlockNum * sizeof(u8));
-+ if (DeviceInfo.MLCDevice)
-+ memcpy(g_pReadCounter, g_pReadCounterCopy,
-+ DeviceInfo.wDataBlockNum * sizeof(u16));
-+
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ memcpy((void *)&Cache, (void *)&cache_start_copy,
-+ sizeof(struct flash_cache_tag));
-+ memset((void *)&int_cache, -1,
-+ sizeof(struct flash_cache_delta_list_tag) *
-+ (MAX_DESCS + MAX_CHANS));
-+#endif
-+ break;
-+ default:
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Handling unexpected event code - 0x%x\n",
-+ event_code);
-+ event_code = ERR;
-+ break;
-+ }
-+
-+ memcpy(g_pBTStartingCopy, g_pBlockTable,
-+ DeviceInfo.wDataBlockNum * sizeof(u32));
-+ memcpy(g_pWearCounterCopy, g_pWearCounter,
-+ DeviceInfo.wDataBlockNum * sizeof(u8));
-+ if (DeviceInfo.MLCDevice)
-+ memcpy(g_pReadCounterCopy, g_pReadCounter,
-+ DeviceInfo.wDataBlockNum * sizeof(u16));
-+
-+ g_pBTDelta_Free = g_pBTDelta;
-+ ftl_cmd_cnt = 0;
-+ g_pNextBlockTable = g_pBlockTableCopies;
-+ cp_back_buf_idx = 0;
-+
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ memcpy((void *)&cache_start_copy, (void *)&Cache,
-+ sizeof(struct flash_cache_tag));
-+ memset((void *)&int_cache, -1,
-+ sizeof(struct flash_cache_delta_list_tag) *
-+ (MAX_DESCS + MAX_CHANS));
-+#endif
-+
-+ return event_code;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: glob_ftl_execute_cmds
-+* Inputs: none
-+* Outputs: none
-+* Description: pass thru to LLD
-+***************************************************************/
-+u16 glob_ftl_execute_cmds(void)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE,
-+ "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
-+ (unsigned int)ftl_cmd_cnt);
-+ g_SBDCmdIndex = 0;
-+ return glob_lld_execute_cmds();
-+}
-+
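-+#if 0
-+/*
-+ * Illustrative only, not part of the driver: the assumed CDMA usage
-+ * pattern. In CMD_DMA mode the FTL calls merely append descriptors;
-+ * nothing reaches the NAND until glob_ftl_execute_cmds() kicks the chain,
-+ * and the outcome is collected with GLOB_FTL_Event_Status() from the
-+ * command-completion interrupt path.
-+ */
-+static void example_cdma_round_trip(void)
-+{
-+	int first_failed_cmd = 0;
-+
-+	/* ... queue FTL read/write operations here ... */
-+
-+	glob_ftl_execute_cmds();	/* start the descriptor chain */
-+
-+	/* later, from the command-completion interrupt path: */
-+	if (GLOB_FTL_Event_Status(&first_failed_cmd) != EVENT_PASS)
-+		printk(KERN_ERR "CDMA chain failed, first failed cmd: %d\n",
-+			first_failed_cmd);
-+}
-+#endif
-+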
-+#endif
-+
-+#if !CMD_DMA
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Read_Immediate
-+* Inputs: pointer to data
-+* address of data
-+* Outputs: PASS / FAIL
-+* Description: Reads one page of data into RAM directly from flash without
-+* using or disturbing the cache. It is assumed this function is called
-+* with CMD-DMA disabled.
-+*****************************************************************/
-+int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
-+{
-+ int wResult = FAIL;
-+ u32 Block;
-+ u16 Page;
-+ u32 phy_blk;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ Block = BLK_FROM_ADDR(addr);
-+ Page = PAGE_FROM_ADDR(addr, Block);
-+
-+ if (!IS_SPARE_BLOCK(Block))
-+ return FAIL;
-+
-+ phy_blk = pbt[Block];
-+ wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
-+
-+ if (DeviceInfo.MLCDevice) {
-+ g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
-+ if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
-+ >= MAX_READ_COUNTER)
-+ FTL_Read_Disturbance(phy_blk);
-+ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+ }
-+
-+ return wResult;
-+}
-+#endif
-+
-+#ifdef SUPPORT_BIG_ENDIAN
-+/*********************************************************************
-+* Function: FTL_Invert_Block_Table
-+* Inputs: none
-+* Outputs: none
-+* Description: Re-format the block table in ram based on BIG_ENDIAN and
-+* LARGE_BLOCKNUM if necessary
-+**********************************************************************/
-+static void FTL_Invert_Block_Table(void)
-+{
-+ u32 i;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+#ifdef SUPPORT_LARGE_BLOCKNUM
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ pbt[i] = INVERTUINT32(pbt[i]);
-+ g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
-+ }
-+#else
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ pbt[i] = INVERTUINT16(pbt[i]);
-+ g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
-+ }
-+#endif
-+}
-+#endif
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Flash_Init
-+* Inputs: none
-+* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
-+* Description: The flash controller is initialized
-+* The flash device is reset
-+* Perform a flash READ ID command to confirm that a
-+* valid device is attached and active.
-+* The DeviceInfo structure gets filled in
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Flash_Init(void)
-+{
-+ int status = FAIL;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ g_SBDCmdIndex = 0;
-+
-+ GLOB_LLD_Flash_Init();
-+
-+ status = GLOB_LLD_Read_Device_ID();
-+
-+ return status;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Flash_Release
-+* Inputs: none
-+* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
-+* Description: The flash controller is released
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Flash_Release(void)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ return GLOB_LLD_Flash_Release();
-+}
-+
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Cache_Release
-+* Inputs: none
-+* Outputs: none
-+* Description: releases all the memory allocated in GLOB_FTL_Init
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+void GLOB_FTL_Cache_Release(void)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ free_memory();
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_If_Hit
-+* Inputs: Page Address
-+* Outputs: Block number/UNHIT BLOCK
-+* Description: Determines if the addressed page is in cache
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static u16 FTL_Cache_If_Hit(u64 page_addr)
-+{
-+ u16 item;
-+ u64 addr;
-+ int i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ item = UNHIT_CACHE_ITEM;
-+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
-+ addr = Cache.array[i].address;
-+ if ((page_addr >= addr) &&
-+ (page_addr < (addr + Cache.cache_item_size))) {
-+ item = i;
-+ break;
-+ }
-+ }
-+
-+ return item;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Calculate_LRU
-+* Inputs: None
-+* Outputs: None
-+* Description: Calculate the least recently used block in the cache and
-+* record its index in the LRU field.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static void FTL_Calculate_LRU(void)
-+{
-+ u16 i, bCurrentLRU, bTempCount;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ bCurrentLRU = 0;
-+ bTempCount = MAX_WORD_VALUE;
-+
-+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
-+ if (Cache.array[i].use_cnt < bTempCount) {
-+ bCurrentLRU = i;
-+ bTempCount = Cache.array[i].use_cnt;
-+ }
-+ }
-+
-+ Cache.LRU = bCurrentLRU;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_Read_Page
-+* Inputs: pointer to read buffer, logical address and cache item number
-+* Outputs: None
-+* Description: Read the page from the cached block addressed by block number
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
-+{
-+ u8 *start_addr;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ start_addr = Cache.array[cache_item].buf;
-+ start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
-+ DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
-+
-+#if CMD_DMA
-+ GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
-+ DeviceInfo.wPageDataSize, 0);
-+ ftl_cmd_cnt++;
-+#else
-+ memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
-+#endif
-+
-+ if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
-+ Cache.array[cache_item].use_cnt++;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_Read_All
-+* Inputs: pointer to read buffer, block address
-+* Outputs: PASS=0 / FAIL=1
-+* Description: It reads all pages of one cache item from flash into the buffer
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
-+{
-+ int wResult = PASS;
-+ u32 Block;
-+ u32 lba;
-+ u16 Page;
-+ u16 PageCount;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 i;
-+
-+ Block = BLK_FROM_ADDR(phy_addr);
-+ Page = PAGE_FROM_ADDR(phy_addr, Block);
-+ PageCount = Cache.pages_per_item;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "%s, Line %d, Function: %s, Block: 0x%x\n",
-+ __FILE__, __LINE__, __func__, Block);
-+
-+ lba = 0xffffffff;
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ if ((pbt[i] & (~BAD_BLOCK)) == Block) {
-+ lba = i;
-+ if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
-+ IS_DISCARDED_BLOCK(i)) {
-+ /* Add by yunpeng -2008.12.3 */
-+#if CMD_DMA
-+ GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
-+ PageCount * DeviceInfo.wPageDataSize, 0);
-+ ftl_cmd_cnt++;
-+#else
-+ memset(pData, 0xFF,
-+ PageCount * DeviceInfo.wPageDataSize);
-+#endif
-+ return wResult;
-+ } else {
-+ continue; /* break ?? */
-+ }
-+ }
-+ }
-+
-+ if (0xffffffff == lba)
-+ printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
-+
-+#if CMD_DMA
-+ wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
-+ PageCount, LLD_CMD_FLAG_MODE_CDMA);
-+ if (DeviceInfo.MLCDevice) {
-+ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Read Counter modified in ftl_cmd_cnt %u"
-+			" Block %u Counter %u\n",
-+ ftl_cmd_cnt, (unsigned int)Block,
-+ g_pReadCounter[Block -
-+ DeviceInfo.wSpectraStartBlock]);
-+
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-+ p_BTableChangesDelta->RC_Index =
-+ Block - DeviceInfo.wSpectraStartBlock;
-+ p_BTableChangesDelta->RC_Entry_Value =
-+ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
-+ p_BTableChangesDelta->ValidFields = 0xC0;
-+
-+ ftl_cmd_cnt++;
-+
-+ if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
-+ MAX_READ_COUNTER)
-+ FTL_Read_Disturbance(Block);
-+ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+ } else {
-+ ftl_cmd_cnt++;
-+ }
-+#else
-+ wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
-+ if (wResult == FAIL)
-+ return wResult;
-+
-+ if (DeviceInfo.MLCDevice) {
-+ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
-+ if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
-+ MAX_READ_COUNTER)
-+ FTL_Read_Disturbance(Block);
-+ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+ }
-+#endif
-+ return wResult;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_Write_All
-+* Inputs: pointer to cache in sys memory
-+* address of free block in flash
-+* Outputs: PASS=0 / FAIL=1
-+* Description: writes all the pages of the block in cache to flash
-+*
-+* NOTE: need to make sure this works ok when cache is limited
-+* to a partial block. This is where copy-back would be
-+* activated. This would require knowing which pages in the
-+* cached block are clean/dirty. Right now we only know if
-+* the whole block is clean/dirty.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
-+{
-+ u16 wResult = PASS;
-+ u32 Block;
-+ u16 Page;
-+ u16 PageCount;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+	nand_dbg_print(NAND_DBG_DEBUG, "This block %d is going to be written "
-+		"to %d\n", cache_block_to_write,
-+ (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
-+
-+ Block = BLK_FROM_ADDR(blk_addr);
-+ Page = PAGE_FROM_ADDR(blk_addr, Block);
-+ PageCount = Cache.pages_per_item;
-+
-+#if CMD_DMA
-+ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
-+ Block, Page, PageCount)) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, new Bad Block %d generated! "
-+ "Need Bad Block replacing.\n",
-+ __FILE__, __LINE__, __func__, Block);
-+ wResult = FAIL;
-+ }
-+ ftl_cmd_cnt++;
-+#else
-+ if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
-+ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
-+ " Line %d, Function %s, new Bad Block %d generated!"
-+ "Need Bad Block replacing.\n",
-+ __FILE__, __LINE__, __func__, Block);
-+ wResult = FAIL;
-+ }
-+#endif
-+ return wResult;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_Update_Block
-+* Inputs: pointer to buffer, page address, block address
-+* Outputs: PASS=0 / FAIL=1
-+* Description: It writes the block to the new address, merging the updated
-+* page with the remaining pages taken from the cache or from flash
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Cache_Update_Block(u8 *pData,
-+ u64 old_page_addr, u64 blk_addr)
-+{
-+ int i, j;
-+ u8 *buf = pData;
-+ int wResult = PASS;
-+ int wFoundInCache;
-+ u64 page_addr;
-+ u64 addr;
-+ u64 old_blk_addr;
-+ u16 page_offset;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ old_blk_addr = (u64)(old_page_addr >>
-+ DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
-+ page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
-+ DeviceInfo.nBitsInPageDataSize);
-+
-+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
-+ page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
-+ if (i != page_offset) {
-+ wFoundInCache = FAIL;
-+ for (j = 0; j < CACHE_ITEM_NUM; j++) {
-+ addr = Cache.array[j].address;
-+ addr = FTL_Get_Physical_Block_Addr(addr) +
-+ GLOB_u64_Remainder(addr, 2);
-+ if ((addr >= page_addr) && addr <
-+ (page_addr + Cache.cache_item_size)) {
-+ wFoundInCache = PASS;
-+ buf = Cache.array[j].buf;
-+ Cache.array[j].changed = SET;
-+#if CMD_DMA
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ int_cache[ftl_cmd_cnt].item = j;
-+ int_cache[ftl_cmd_cnt].cache.address =
-+ Cache.array[j].address;
-+ int_cache[ftl_cmd_cnt].cache.changed =
-+ Cache.array[j].changed;
-+#endif
-+#endif
-+ break;
-+ }
-+ }
-+ if (FAIL == wFoundInCache) {
-+ if (ERR == FTL_Cache_Read_All(g_pTempBuf,
-+ page_addr)) {
-+ wResult = FAIL;
-+ break;
-+ }
-+ buf = g_pTempBuf;
-+ }
-+ } else {
-+ buf = pData;
-+ }
-+
-+ if (FAIL == FTL_Cache_Write_All(buf,
-+ blk_addr + (page_addr - old_blk_addr))) {
-+ wResult = FAIL;
-+ break;
-+ }
-+ }
-+
-+ return wResult;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Copy_Block
-+* Inputs: source block address
-+* Destination block address
-+* Outputs: PASS=0 / FAIL=1
-+* Description: used only for static wear leveling to move the block
-+* containing static data to new (more worn) blocks
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
-+{
-+ int i, r1, r2, wResult = PASS;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
-+ r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
-+ i * DeviceInfo.wPageDataSize);
-+ r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
-+ i * DeviceInfo.wPageDataSize);
-+ if ((ERR == r1) || (FAIL == r2)) {
-+ wResult = FAIL;
-+ break;
-+ }
-+ }
-+
-+ return wResult;
-+}
-+
-+/* Search the block table for the least worn spare block and return its index */
-+static u32 find_least_worn_blk_for_l2_cache(void)
-+{
-+ int i;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u8 least_wear_cnt = MAX_BYTE_VALUE;
-+ u32 least_wear_blk_idx = MAX_U32_VALUE;
-+ u32 phy_idx;
-+
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ if (IS_SPARE_BLOCK(i)) {
-+ phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
-+ if (phy_idx > DeviceInfo.wSpectraEndBlock)
-+ printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
-+					"Too big physical block number (%d)\n", phy_idx);
-+			if (g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
-+ least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
-+ least_wear_blk_idx = i;
-+ }
-+ }
-+ }
-+
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "find_least_worn_blk_for_l2_cache: "
-+		"found block %d with the least wear count (%d)\n",
-+ least_wear_blk_idx, least_wear_cnt);
-+
-+ return least_wear_blk_idx;
-+}
-+
-+
-+
-+/* Get blocks for Level2 Cache */
-+static int get_l2_cache_blks(void)
-+{
-+ int n;
-+ u32 blk;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
-+ blk = find_least_worn_blk_for_l2_cache();
-+ if (blk > DeviceInfo.wDataBlockNum) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "find_least_worn_blk_for_l2_cache: "
-+				"Not enough free NAND blocks (n: %d) for L2 Cache!\n", n);
-+ return FAIL;
-+ }
-+		/* Tag the free block as discarded in the block table */
-+ pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
-+ /* Add the free block to the L2 Cache block array */
-+ cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
-+ }
-+
-+ return PASS;
-+}
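-+
-+#if 0
-+/*
-+ * Illustrative helpers only; the driver open-codes these idioms. A block
-+ * table entry carries the physical block number plus status-flag bits
-+ * (BAD_BLOCK, SPARE_BLOCK, DISCARD_BLOCK, defined in the driver headers),
-+ * so the functions above recover the physical number with "& ~BAD_BLOCK"
-+ * and retag a block by rewriting the flag bits, as sketched here.
-+ */
-+static inline u32 example_phys_blk(u32 bt_entry)
-+{
-+	return bt_entry & ~BAD_BLOCK;	/* strip the status flags */
-+}
-+
-+static inline void example_tag_discarded(u32 *bt_entry)
-+{
-+	*bt_entry = (*bt_entry & ~BAD_BLOCK) | DISCARD_BLOCK;
-+}
-+#endif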
-+
-+static int erase_l2_cache_blocks(void)
-+{
-+ int i, ret = PASS;
-+ u32 pblk, lblk;
-+ u64 addr;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
-+ pblk = cache_l2.blk_array[i];
-+
-+ /* If the L2 cache block is invalid, then just skip it */
-+ if (MAX_U32_VALUE == pblk)
-+ continue;
-+
-+ BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
-+
-+		addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
-+		/* Get logical block number of this L2 cache block */
-+		lblk = FTL_Get_Block_Index(pblk);
-+		BUG_ON(BAD_BLOCK == lblk);
-+		if (PASS == GLOB_FTL_Block_Erase(addr)) {
-+			/* Tag it as free in the block table */
-+			pbt[lblk] &= (u32)(~DISCARD_BLOCK);
-+			pbt[lblk] |= (u32)(SPARE_BLOCK);
-+		} else {
-+			/* Erase failed, so mark the block as bad */
-+			MARK_BLOCK_AS_BAD(pbt[lblk]);
-+			ret = ERR;
-+		}
-+ }
-+
-+ return ret;
-+}
-+
-+/*
-+ * Merge the valid data pages in the L2 cache blocks back into NAND.
-+ */
-+static int flush_l2_cache(void)
-+{
-+ struct list_head *p;
-+ struct spectra_l2_cache_list *pnd, *tmp_pnd;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 phy_blk, l2_blk;
-+ u64 addr;
-+ u16 l2_page;
-+ int i, ret = PASS;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (list_empty(&cache_l2.table.list)) /* No data to flush */
-+ return ret;
-+
-+ //dump_cache_l2_table();
-+
-+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+
-+ list_for_each(p, &cache_l2.table.list) {
-+ pnd = list_entry(p, struct spectra_l2_cache_list, list);
-+ if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
-+ IS_BAD_BLOCK(pnd->logical_blk_num) ||
-+ IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
-+ memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
-+ } else {
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
-+ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
-+ ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
-+ phy_blk, 0, DeviceInfo.wPagesPerBlock);
-+ if (ret == FAIL) {
-+ printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
-+ }
-+ }
-+
-+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
-+ if (pnd->pages_array[i] != MAX_U32_VALUE) {
-+ l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
-+ l2_page = pnd->pages_array[i] & 0xffff;
-+ ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
-+ if (ret == FAIL) {
-+ printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
-+ }
-+ memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
-+ }
-+ }
-+
-+ /* Find a free block and tag the original block as discarded */
-+ addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
-+ ret = FTL_Replace_Block(addr);
-+ if (ret == FAIL) {
-+ printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
-+ }
-+
-+ /* Write back the updated data into NAND */
-+ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
-+ if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Program NAND block %d fail in %s, Line %d\n",
-+ phy_blk, __FILE__, __LINE__);
-+			/*
-+			 * This may not really be a bad block. So just tag it
-+			 * as discarded. Then it has a chance to be erased
-+			 * during garbage collection. If it is really bad, the
-+			 * erase will fail and it will be marked as bad then.
-+			 * Otherwise it will be marked as free and can be
-+			 * used again.
-+			 */
-+ MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
-+ /* Find another free block and write it again */
-+ FTL_Replace_Block(addr);
-+ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
-+ if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
-+ printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
-+ "Some data will be lost!\n", phy_blk);
-+ MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
-+ }
-+ } else {
-+ /* tag the new free block as used block */
-+ pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
-+ }
-+ }
-+
-+ /* Destroy the L2 Cache table and free the memory of all nodes */
-+ list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
-+ list_del(&pnd->list);
-+ kfree(pnd);
-+ }
-+
-+ /* Erase discard L2 cache blocks */
-+ if (erase_l2_cache_blocks() != PASS)
-+ nand_dbg_print(NAND_DBG_WARN,
-+ " Erase L2 cache blocks error in %s, Line %d\n",
-+ __FILE__, __LINE__);
-+
-+ /* Init the Level2 Cache data structure */
-+ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
-+ cache_l2.blk_array[i] = MAX_U32_VALUE;
-+ cache_l2.cur_blk_idx = 0;
-+ cache_l2.cur_page_num = 0;
-+ INIT_LIST_HEAD(&cache_l2.table.list);
-+ cache_l2.table.logical_blk_num = MAX_U32_VALUE;
-+
-+ return ret;
-+}
-+
-+/*
-+ * Write back a changed victim cache item to the Level2 Cache
-+ * and update the L2 Cache table to map the change.
-+ * If the L2 Cache is full, then start the L2 Cache flush.
-+ */
-+static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
-+{
-+ u32 logical_blk_num;
-+ u16 logical_page_num;
-+ struct list_head *p;
-+ struct spectra_l2_cache_list *pnd, *pnd_new;
-+ u32 node_size;
-+ int i, found;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ /*
-+	 * If the Level2 Cache table is empty, then it means either:
-+	 * 1. This is the first time the function is called after FTL_init
-+	 * or
-+	 * 2. The Level2 Cache has just been flushed
-+	 *
-+	 * So, 'steal' some free blocks from NAND for L2 Cache use
-+	 * by just marking them as discarded in the block table
-+ */
-+ if (list_empty(&cache_l2.table.list)) {
-+ BUG_ON(cache_l2.cur_blk_idx != 0);
-+	BUG_ON(cache_l2.cur_page_num != 0);
-+ BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
-+ if (FAIL == get_l2_cache_blks()) {
-+ GLOB_FTL_Garbage_Collection();
-+ if (FAIL == get_l2_cache_blks()) {
-+ printk(KERN_ALERT "Fail to get L2 cache blks!\n");
-+ return FAIL;
-+ }
-+ }
-+ }
-+
-+ logical_blk_num = BLK_FROM_ADDR(logical_addr);
-+ logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
-+ BUG_ON(logical_blk_num == MAX_U32_VALUE);
-+
-+ /* Write the cache item data into the current position of L2 Cache */
-+#if CMD_DMA
-+ /*
-+ * TODO
-+ */
-+#else
-+ if (FAIL == GLOB_LLD_Write_Page_Main(buf,
-+ cache_l2.blk_array[cache_l2.cur_blk_idx],
-+ cache_l2.cur_page_num, 1)) {
-+ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
-+ "%s, Line %d, new Bad Block %d generated!\n",
-+ __FILE__, __LINE__,
-+ cache_l2.blk_array[cache_l2.cur_blk_idx]);
-+
-+ /* TODO: tag the current block as bad and try again */
-+
-+ return FAIL;
-+ }
-+#endif
-+
-+ /*
-+ * Update the L2 Cache table.
-+ *
-+	 * First search the table to see whether the logical block
-+	 * has been mapped. If not, then kmalloc a new node for the
-+	 * logical block, fill data, and then insert it into the list.
-+ * Otherwise, just update the mapped node directly.
-+ */
-+ found = 0;
-+ list_for_each(p, &cache_l2.table.list) {
-+ pnd = list_entry(p, struct spectra_l2_cache_list, list);
-+ if (pnd->logical_blk_num == logical_blk_num) {
-+ pnd->pages_array[logical_page_num] =
-+ (cache_l2.cur_blk_idx << 16) |
-+ cache_l2.cur_page_num;
-+ found = 1;
-+ break;
-+ }
-+ }
-+ if (!found) { /* Create new node for the logical block here */
-+
-+ /* The logical pages to physical pages map array is
-+ * located at the end of struct spectra_l2_cache_list.
-+ */
-+ node_size = sizeof(struct spectra_l2_cache_list) +
-+ sizeof(u32) * DeviceInfo.wPagesPerBlock;
-+		pnd_new = kmalloc(node_size, GFP_ATOMIC);
-+		if (!pnd_new) {
-+			printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
-+				__FILE__, __LINE__);
-+			/*
-+			 * TODO: Need to flush all the L2 cache into NAND ASAP
-+			 * since no memory is available here
-+			 */
-+			return FAIL;
-+		}
-+ pnd_new->logical_blk_num = logical_blk_num;
-+ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
-+ pnd_new->pages_array[i] = MAX_U32_VALUE;
-+ pnd_new->pages_array[logical_page_num] =
-+ (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
-+ list_add(&pnd_new->list, &cache_l2.table.list);
-+ }
-+
-+	/* Advance the current position pointer of the L2 Cache */
-+ cache_l2.cur_page_num++;
-+ if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
-+ cache_l2.cur_blk_idx++;
-+ if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
-+ /* The L2 Cache is full. Need to flush it now */
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "L2 Cache is full, will start to flush it\n");
-+ flush_l2_cache();
-+ } else {
-+ cache_l2.cur_page_num = 0;
-+ }
-+ }
-+
-+ return PASS;
-+}
-+
-+/*
-+ * Search the Level2 Cache table to find the cache item.
-+ * If found, read the data from the NAND page of the L2 Cache;
-+ * otherwise, return FAIL.
-+ */
-+static int search_l2_cache(u8 *buf, u64 logical_addr)
-+{
-+ u32 logical_blk_num;
-+ u16 logical_page_num;
-+ struct list_head *p;
-+ struct spectra_l2_cache_list *pnd;
-+ u32 tmp = MAX_U32_VALUE;
-+ u32 phy_blk;
-+ u16 phy_page;
-+ int ret = FAIL;
-+
-+ logical_blk_num = BLK_FROM_ADDR(logical_addr);
-+ logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
-+
-+ list_for_each(p, &cache_l2.table.list) {
-+ pnd = list_entry(p, struct spectra_l2_cache_list, list);
-+ if (pnd->logical_blk_num == logical_blk_num) {
-+ tmp = pnd->pages_array[logical_page_num];
-+ break;
-+ }
-+ }
-+
-+ if (tmp != MAX_U32_VALUE) { /* Found valid map */
-+ phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
-+ phy_page = tmp & 0xFFFF;
-+#if CMD_DMA
-+ /* TODO */
-+#else
-+ ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
-+#endif
-+ }
-+
-+ return ret;
-+}
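-+
-+#if 0
-+/*
-+ * Illustrative helpers only; the two functions above open-code these
-+ * shifts. Each pages_array entry packs an index into cache_l2.blk_array
-+ * in its upper 16 bits and the page number inside that L2 cache block in
-+ * its lower 16 bits; MAX_U32_VALUE marks an unmapped page.
-+ */
-+static inline u32 example_l2_map_pack(u32 blk_idx, u16 page_num)
-+{
-+	return (blk_idx << 16) | page_num;
-+}
-+
-+static inline void example_l2_map_unpack(u32 entry, u32 *phy_blk,
-+					u16 *phy_page)
-+{
-+	*phy_blk = cache_l2.blk_array[(entry >> 16) & 0xffff];
-+	*phy_page = entry & 0xffff;
-+}
-+#endif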
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_Write_Back
-+* Inputs: pointer to data cached in sys memory
-+* address of free block in flash
-+* Outputs: PASS=0 / FAIL=1
-+* Description: writes all the pages of Cache Block to flash
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
-+{
-+ int i, j, iErase;
-+ u64 old_page_addr, addr, phy_addr;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 lba;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
-+ GLOB_u64_Remainder(blk_addr, 2);
-+
-+ iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
-+
-+ pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
-+
-+#if CMD_DMA
-+ p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
-+ DeviceInfo.nBitsInBlockDataSize);
-+ p_BTableChangesDelta->BT_Entry_Value =
-+ pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+#endif
-+
-+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+
-+ for (i = 0; i < RETRY_TIMES; i++) {
-+ if (PASS == iErase) {
-+ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-+ if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
-+ lba = BLK_FROM_ADDR(blk_addr);
-+ MARK_BLOCK_AS_BAD(pbt[lba]);
-+ i = RETRY_TIMES;
-+ break;
-+ }
-+ }
-+
-+ for (j = 0; j < CACHE_ITEM_NUM; j++) {
-+ addr = Cache.array[j].address;
-+ if ((addr <= blk_addr) &&
-+ ((addr + Cache.cache_item_size) > blk_addr))
-+ cache_block_to_write = j;
-+ }
-+
-+ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-+ if (PASS == FTL_Cache_Update_Block(pData,
-+ old_page_addr, phy_addr)) {
-+ cache_block_to_write = UNHIT_CACHE_ITEM;
-+ break;
-+ } else {
-+ iErase = PASS;
-+ }
-+ }
-+
-+ if (i >= RETRY_TIMES) {
-+ if (ERR == FTL_Flash_Error_Handle(pData,
-+ old_page_addr, blk_addr))
-+ return ERR;
-+ else
-+ return FAIL;
-+ }
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_Write_Page
-+* Inputs: Pointer to buffer, page address, cache block number
-+* Outputs: PASS=0 / FAIL=1
-+* Description: It writes the page data into the specified cache block buffer
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
-+ u8 cache_blk, u16 flag)
-+{
-+ u8 *pDest;
-+ u64 addr;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ addr = Cache.array[cache_blk].address;
-+ pDest = Cache.array[cache_blk].buf;
-+
-+ pDest += (unsigned long)(page_addr - addr);
-+ Cache.array[cache_blk].changed = SET;
-+#if CMD_DMA
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ int_cache[ftl_cmd_cnt].item = cache_blk;
-+ int_cache[ftl_cmd_cnt].cache.address =
-+ Cache.array[cache_blk].address;
-+ int_cache[ftl_cmd_cnt].cache.changed =
-+ Cache.array[cache_blk].changed;
-+#endif
-+ GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
-+ ftl_cmd_cnt++;
-+#else
-+ memcpy(pDest, pData, DeviceInfo.wPageDataSize);
-+#endif
-+ if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
-+ Cache.array[cache_blk].use_cnt++;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_Write
-+* Inputs: none
-+* Outputs: PASS=0 / FAIL=1
-+* Description: It writes least frequently used Cache block to flash if it
-+* has been changed
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Cache_Write(void)
-+{
-+ int i, bResult = PASS;
-+ u16 bNO, least_count = 0xFFFF;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ FTL_Calculate_LRU();
-+
-+ bNO = Cache.LRU;
-+ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
-+ "Least used cache block is %d\n", bNO);
-+
-+ if (Cache.array[bNO].changed != SET)
-+ return bResult;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
-+ " Block %d containing logical block %d is dirty\n",
-+ bNO,
-+ (u32)(Cache.array[bNO].address >>
-+ DeviceInfo.nBitsInBlockDataSize));
-+#if CMD_DMA
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ int_cache[ftl_cmd_cnt].item = bNO;
-+ int_cache[ftl_cmd_cnt].cache.address =
-+ Cache.array[bNO].address;
-+ int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
-+#endif
-+#endif
-+ bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
-+ Cache.array[bNO].address);
-+ if (bResult != ERR)
-+ Cache.array[bNO].changed = CLEAR;
-+
-+ least_count = Cache.array[bNO].use_cnt;
-+
-+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
-+ if (i == bNO)
-+ continue;
-+ if (Cache.array[i].use_cnt > 0)
-+ Cache.array[i].use_cnt -= least_count;
-+ }
-+
-+ return bResult;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Cache_Read
-+* Inputs: Page address
-+* Outputs: PASS=0 / FAIL=1
-+* Description: It reads the block from device in Cache Block
-+* Set the LRU count to 1
-+* Mark the Cache Block as clean
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Cache_Read(u64 logical_addr)
-+{
-+ u64 item_addr, phy_addr;
-+ u16 num;
-+ int ret;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ num = Cache.LRU; /* The LRU cache item will be overwritten */
-+
-+ item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
-+ Cache.cache_item_size;
-+ Cache.array[num].address = item_addr;
-+ Cache.array[num].use_cnt = 1;
-+ Cache.array[num].changed = CLEAR;
-+
-+#if CMD_DMA
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ int_cache[ftl_cmd_cnt].item = num;
-+ int_cache[ftl_cmd_cnt].cache.address =
-+ Cache.array[num].address;
-+ int_cache[ftl_cmd_cnt].cache.changed =
-+ Cache.array[num].changed;
-+#endif
-+#endif
-+ /*
-+ * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
-+ * Otherwise, read it from NAND
-+ */
-+ ret = search_l2_cache(Cache.array[num].buf, logical_addr);
-+ if (PASS == ret) /* Hit in L2 Cache */
-+ return ret;
-+
-+ /* Compute the physical start address of NAND device according to */
-+ /* the logical start address of the cache item (LRU cache item) */
-+ phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
-+ GLOB_u64_Remainder(item_addr, 2);
-+
-+ return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
-+}
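-+
-+#if 0
-+/*
-+ * Minimal sketch, illustrative only: how the static cache helpers above
-+ * compose into a read path. Check for an L1 hit, otherwise write back the
-+ * dirty LRU item and reload it, then copy the page out of the cache
-+ * buffer. The real read entry point elsewhere in this file adds error
-+ * handling and partial-page logic around the same steps.
-+ */
-+static int example_ftl_cached_read(u8 *buf, u64 logical_addr)
-+{
-+	u16 item = FTL_Cache_If_Hit(logical_addr);
-+
-+	if (UNHIT_CACHE_ITEM == item) {
-+		/* Flush the dirty LRU item, then reload it from L2/NAND */
-+		if (ERR == FTL_Cache_Write())
-+			return ERR;
-+		if (PASS != FTL_Cache_Read(logical_addr))
-+			return FAIL;
-+		item = Cache.LRU;
-+	}
-+	FTL_Cache_Read_Page(buf, logical_addr, item);
-+
-+	return PASS;
-+}
-+#endif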
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Check_Block_Table
-+* Inputs: ?
-+* Outputs: PASS=0 / FAIL=1
-+* Description: It checks the correctness of each block table entry
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Check_Block_Table(int wOldTable)
-+{
-+ u32 i;
-+ int wResult = PASS;
-+ u32 blk_idx;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u8 *pFlag = flag_check_blk_table;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (NULL != pFlag) {
-+ memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
-+
-+ /*
-+ * 20081006/KBV - Changed to pFlag[i] reference
-+ * to avoid buffer overflow
-+ */
-+
-+ /*
-+ * 2008-10-20 Yunpeng Note: This change avoid
-+ * buffer overflow, but changed function of
-+ * the code, so it should be re-write later
-+ */
-+ if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
-+ PASS == pFlag[i]) {
-+ wResult = FAIL;
-+ break;
-+ } else {
-+ pFlag[i] = PASS;
-+ }
-+ }
-+ }
-+
-+ return wResult;
-+}
-+
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Write_Block_Table
-+* Inputs: flags
-+* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
-+* happen. -1 Error
-+* Description: It writes the block table
-+* The block table is always mapped to LBA 0, which in turn is
-+* mapped to any physical block
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Write_Block_Table(int wForce)
-+{
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ int wSuccess = PASS;
-+ u32 wTempBlockTableIndex;
-+ u16 bt_pages, new_bt_offset;
-+ u8 blockchangeoccured = 0;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-+
-+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
-+ return 0;
-+
-+ if (PASS == wForce) {
-+ g_wBlockTableOffset =
-+ (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-+ p_BTableChangesDelta->g_wBlockTableOffset =
-+ g_wBlockTableOffset;
-+ p_BTableChangesDelta->ValidFields = 0x01;
-+#endif
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Inside FTL_Write_Block_Table: block %d Page:%d\n",
-+ g_wBlockTableIndex, g_wBlockTableOffset);
-+
-+ do {
-+ new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
-+ if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
-+ (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
-+ (FAIL == wSuccess)) {
-+ wTempBlockTableIndex = FTL_Replace_Block_Table();
-+ if (BAD_BLOCK == wTempBlockTableIndex)
-+ return ERR;
-+ if (!blockchangeoccured) {
-+ bt_block_changed = 1;
-+ blockchangeoccured = 1;
-+ }
-+
-+ g_wBlockTableIndex = wTempBlockTableIndex;
-+ g_wBlockTableOffset = 0;
-+ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->g_wBlockTableOffset =
-+ g_wBlockTableOffset;
-+ p_BTableChangesDelta->g_wBlockTableIndex =
-+ g_wBlockTableIndex;
-+ p_BTableChangesDelta->ValidFields = 0x03;
-+
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free +=
-+ sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index =
-+ BLOCK_TABLE_INDEX;
-+ p_BTableChangesDelta->BT_Entry_Value =
-+ pbt[BLOCK_TABLE_INDEX];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+#endif
-+ }
-+
-+ wSuccess = FTL_Write_Block_Table_Data();
-+ if (FAIL == wSuccess)
-+ MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
-+ } while (FAIL == wSuccess);
-+
-+ g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
-+
-+ return 1;
-+}
-+
-+/******************************************************************
-+* Function: FTL_Format_Flash
-+* Inputs: none
-+* Outputs: PASS
-+* Description: The block table stores bad block info, including MDF+
-+* blocks gone bad over the ages. Therefore, if we have a
-+* block table in place, then use it to scan for bad blocks.
-+* If not, then scan for MDF.
-+* Now, a block table will only be found if spectra was already
-+* being used. For a fresh flash, we'll go through scanning for
-+* MDF. If spectra was being used, then there is a chance that
-+* the MDF has been corrupted. Spectra avoids writing to the
-+* first 2 bytes of the spare area to all pages in a block. This
-+* covers all known flash devices. However, since flash
-+* manufacturers have no standard of where the MDF is stored,
-+* this cannot guarantee that the MDF is protected for future
-+* devices too. The initial scanning for the block table assures
-+* this. It is ok even if the block table is outdated, as all
-+* we're looking for are bad block markers.
-+* Use this when mounting a file system or starting a
-+* new flash.
-+*
-+*********************************************************************/
-+static int FTL_Format_Flash(u8 valid_block_table)
-+{
-+ u32 i, j;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 tempNode;
-+ int ret;
-+
-+#if CMD_DMA
-+ u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
-+ if (ftl_cmd_cnt)
-+ return FAIL;
-+#endif
-+
-+ if (FAIL == FTL_Check_Block_Table(FAIL))
-+ valid_block_table = 0;
-+
-+ if (valid_block_table) {
-+ u8 switched = 1;
-+ u32 block, k;
-+
-+ k = DeviceInfo.wSpectraStartBlock;
-+ while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
-+ switched = 0;
-+ k++;
-+ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-+ j <= DeviceInfo.wSpectraEndBlock;
-+ j++, i++) {
-+ block = (pbt[i] & ~BAD_BLOCK) -
-+ DeviceInfo.wSpectraStartBlock;
-+ if (block != i) {
-+ switched = 1;
-+ tempNode = pbt[i];
-+ pbt[i] = pbt[block];
-+ pbt[block] = tempNode;
-+ }
-+ }
-+ }
-+ if ((k == DeviceInfo.wSpectraEndBlock) && switched)
-+ valid_block_table = 0;
-+ }
-+
-+ if (!valid_block_table) {
-+ memset(g_pBlockTable, 0,
-+ DeviceInfo.wDataBlockNum * sizeof(u32));
-+ memset(g_pWearCounter, 0,
-+ DeviceInfo.wDataBlockNum * sizeof(u8));
-+ if (DeviceInfo.MLCDevice)
-+ memset(g_pReadCounter, 0,
-+ DeviceInfo.wDataBlockNum * sizeof(u16));
-+#if CMD_DMA
-+ memset(g_pBTStartingCopy, 0,
-+ DeviceInfo.wDataBlockNum * sizeof(u32));
-+ memset(g_pWearCounterCopy, 0,
-+ DeviceInfo.wDataBlockNum * sizeof(u8));
-+ if (DeviceInfo.MLCDevice)
-+ memset(g_pReadCounterCopy, 0,
-+ DeviceInfo.wDataBlockNum * sizeof(u16));
-+#endif
-+ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-+ j <= DeviceInfo.wSpectraEndBlock;
-+ j++, i++) {
-+ if (GLOB_LLD_Get_Bad_Block((u32)j))
-+ pbt[i] = (u32)(BAD_BLOCK | j);
-+ }
-+ }
-+
-+ nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
-+
-+ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-+ j <= DeviceInfo.wSpectraEndBlock;
-+ j++, i++) {
-+ if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
-+ ret = GLOB_LLD_Erase_Block(j);
-+ if (FAIL == ret) {
-+ pbt[i] = (u32)(j);
-+ MARK_BLOCK_AS_BAD(pbt[i]);
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__, (int)j);
-+ } else {
-+ pbt[i] = (u32)(SPARE_BLOCK | j);
-+ }
-+ }
-+#if CMD_DMA
-+ pbtStartingCopy[i] = pbt[i];
-+#endif
-+ }
-+
-+ g_wBlockTableOffset = 0;
-+ for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
-+ DeviceInfo.wSpectraStartBlock))
-+ && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
-+ ;
-+ if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
-+ printk(KERN_ERR "All blocks bad!\n");
-+ return FAIL;
-+ } else {
-+ g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
-+ if (i != BLOCK_TABLE_INDEX) {
-+ tempNode = pbt[i];
-+ pbt[i] = pbt[BLOCK_TABLE_INDEX];
-+ pbt[BLOCK_TABLE_INDEX] = tempNode;
-+ }
-+ }
-+ pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-+
-+#if CMD_DMA
-+ pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-+#endif
-+
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ memset(g_pBTBlocks, 0xFF,
-+ (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
-+ g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
-+ FTL_Write_Block_Table(FAIL);
-+
-+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
-+ Cache.array[i].address = NAND_CACHE_INIT_ADDR;
-+ Cache.array[i].use_cnt = 0;
-+ Cache.array[i].changed = CLEAR;
-+ }
-+
-+#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
-+ memcpy((void *)&cache_start_copy, (void *)&Cache,
-+ sizeof(struct flash_cache_tag));
-+#endif
-+ return PASS;
-+}
-+
-+static int force_format_nand(void)
-+{
-+ u32 i;
-+
-+	/* Force erase the whole unprotected physical partition of NAND */
-+	printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
-+	printk(KERN_ALERT "From physical block %d to %d\n",
-+ DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
-+ for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
-+ if (GLOB_LLD_Erase_Block(i))
-+ printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
-+ }
-+ printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
-+ while(1);
-+
-+ return PASS;
-+}
-+
-+int GLOB_FTL_Flash_Format(void)
-+{
-+ //return FTL_Format_Flash(1);
-+ return force_format_nand();
-+
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Search_Block_Table_IN_Block
-+* Inputs: Block Number
-+* Pointer to page
-+* Outputs: PASS / FAIL
-+* Page containing the block table
-+* Description: It searches the block table in the block
-+* passed as an argument.
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
-+ u8 BT_Tag, u16 *Page)
-+{
-+ u16 i, j, k;
-+ u16 Result = PASS;
-+ u16 Last_IPF = 0;
-+ u8 BT_Found = 0;
-+ u8 *tagarray;
-+ u8 *tempbuf = tmp_buf_search_bt_in_block;
-+ u8 *pSpareBuf = spare_buf_search_bt_in_block;
-+ u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
-+ u8 bt_flag_last_page = 0xFF;
-+ u8 search_in_previous_pages = 0;
-+ u16 bt_pages;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Searching block table in %u block\n",
-+ (unsigned int)BT_Block);
-+
-+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-+
-+ for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
-+ i += (bt_pages + 1)) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Searching last IPF: %d\n", i);
-+ Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
-+ BT_Block, i, 1);
-+
-+ if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
-+ if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
-+ continue;
-+ } else {
-+ search_in_previous_pages = 1;
-+ Last_IPF = i;
-+ }
-+ }
-+
-+ if (!search_in_previous_pages) {
-+ if (i != bt_pages) {
-+ i -= (bt_pages + 1);
-+ Last_IPF = i;
-+ }
-+ }
-+
-+ if (0 == Last_IPF)
-+ break;
-+
-+ if (!search_in_previous_pages) {
-+ i = i + 1;
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Reading the spare area of Block %u Page %u",
-+ (unsigned int)BT_Block, i);
-+ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
-+ BT_Block, i, 1);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Reading the spare area of Block %u Page %u",
-+ (unsigned int)BT_Block, i + bt_pages - 1);
-+ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
-+ BT_Block, i + bt_pages - 1, 1);
-+
-+ k = 0;
-+ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
-+ if (j) {
-+ for (; k < j; k++) {
-+ if (tagarray[k] == BT_Tag)
-+ break;
-+ }
-+ }
-+
-+ if (k < j)
-+ bt_flag = tagarray[k];
-+ else
-+ Result = FAIL;
-+
-+ if (Result == PASS) {
-+ k = 0;
-+ j = FTL_Extract_Block_Table_Tag(
-+ pSpareBufBTLastPage, &tagarray);
-+ if (j) {
-+ for (; k < j; k++) {
-+ if (tagarray[k] == BT_Tag)
-+ break;
-+ }
-+ }
-+
-+ if (k < j)
-+ bt_flag_last_page = tagarray[k];
-+ else
-+ Result = FAIL;
-+
-+ if (Result == PASS) {
-+ if (bt_flag == bt_flag_last_page) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Block table is found"
-+ " in page after IPF "
-+ "at block %d "
-+ "page %d\n",
-+ (int)BT_Block, i);
-+ BT_Found = 1;
-+ *Page = i;
-+ g_cBlockTableStatus =
-+ CURRENT_BLOCK_TABLE;
-+ break;
-+ } else {
-+ Result = FAIL;
-+ }
-+ }
-+ }
-+ }
-+
-+ if (search_in_previous_pages)
-+ i = i - bt_pages;
-+ else
-+ i = i - (bt_pages + 1);
-+
-+ Result = PASS;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Reading the spare area of Block %d Page %d",
-+ (int)BT_Block, i);
-+
-+ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Reading the spare area of Block %u Page %u",
-+ (unsigned int)BT_Block, i + bt_pages - 1);
-+
-+ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
-+ BT_Block, i + bt_pages - 1, 1);
-+
-+ k = 0;
-+ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
-+ if (j) {
-+ for (; k < j; k++) {
-+ if (tagarray[k] == BT_Tag)
-+ break;
-+ }
-+ }
-+
-+ if (k < j)
-+ bt_flag = tagarray[k];
-+ else
-+ Result = FAIL;
-+
-+ if (Result == PASS) {
-+ k = 0;
-+ j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
-+ &tagarray);
-+ if (j) {
-+ for (; k < j; k++) {
-+ if (tagarray[k] == BT_Tag)
-+ break;
-+ }
-+ }
-+
-+ if (k < j) {
-+ bt_flag_last_page = tagarray[k];
-+ } else {
-+ Result = FAIL;
-+ break;
-+ }
-+
-+ if (Result == PASS) {
-+ if (bt_flag == bt_flag_last_page) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Block table is found "
-+ "in page prior to IPF "
-+ "at block %u page %d\n",
-+ (unsigned int)BT_Block, i);
-+ BT_Found = 1;
-+ *Page = i;
-+ g_cBlockTableStatus =
-+ IN_PROGRESS_BLOCK_TABLE;
-+ break;
-+ } else {
-+ Result = FAIL;
-+ break;
-+ }
-+ }
-+ }
-+ }
-+
-+ if (Result == FAIL) {
-+ if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
-+ BT_Found = 1;
-+ *Page = i - (bt_pages + 1);
-+ }
-+ if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
-+ goto func_return;
-+ }
-+
-+ if (Last_IPF == 0) {
-+ i = 0;
-+ Result = PASS;
-+ nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
-+ "Block %u Page %u", (unsigned int)BT_Block, i);
-+
-+ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Reading the spare area of Block %u Page %u",
-+ (unsigned int)BT_Block, i + bt_pages - 1);
-+ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
-+ BT_Block, i + bt_pages - 1, 1);
-+
-+ k = 0;
-+ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
-+ if (j) {
-+ for (; k < j; k++) {
-+ if (tagarray[k] == BT_Tag)
-+ break;
-+ }
-+ }
-+
-+ if (k < j)
-+ bt_flag = tagarray[k];
-+ else
-+ Result = FAIL;
-+
-+ if (Result == PASS) {
-+ k = 0;
-+ j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
-+ &tagarray);
-+ if (j) {
-+ for (; k < j; k++) {
-+ if (tagarray[k] == BT_Tag)
-+ break;
-+ }
-+ }
-+
-+ if (k < j)
-+ bt_flag_last_page = tagarray[k];
-+ else
-+ Result = FAIL;
-+
-+ if (Result == PASS) {
-+ if (bt_flag == bt_flag_last_page) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Block table is found "
-+ "in page after IPF at "
-+ "block %u page %u\n",
-+ (unsigned int)BT_Block,
-+ (unsigned int)i);
-+ BT_Found = 1;
-+ *Page = i;
-+ g_cBlockTableStatus =
-+ CURRENT_BLOCK_TABLE;
-+ goto func_return;
-+ } else {
-+ Result = FAIL;
-+ }
-+ }
-+ }
-+
-+ if (Result == FAIL)
-+ goto func_return;
-+ }
-+func_return:
-+ return Result;
-+}
-+
-+u8 *get_blk_table_start_addr(void)
-+{
-+ return g_pBlockTable;
-+}
-+
-+unsigned long get_blk_table_len(void)
-+{
-+ return DeviceInfo.wDataBlockNum * sizeof(u32);
-+}
-+
-+u8 *get_wear_leveling_table_start_addr(void)
-+{
-+ return g_pWearCounter;
-+}
-+
-+unsigned long get_wear_leveling_table_len(void)
-+{
-+ return DeviceInfo.wDataBlockNum * sizeof(u8);
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Read_Block_Table
-+* Inputs: none
-+* Outputs: PASS / FAIL
-+* Description: read the flash spare area and find a block containing the
-+* most recent block table (having the largest block_table_counter).
-+* Find the last written block table in this block.
-+* Check the correctness of the block table.
-+* If CDMA is enabled, this function is called in
-+* polling mode.
-+* We don't need to store changes in Block table in this
-+* function as it is called only at initialization
-+*
-+* Note: Currently this function is called at initialization
-+* before any read/erase/write command is issued to flash, so
-+* there is no need to wait for the CDMA list to complete for now
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Read_Block_Table(void)
-+{
-+ u16 i = 0;
-+ int k, j;
-+ u8 *tempBuf, *tagarray;
-+ int wResult = FAIL;
-+ int status = FAIL;
-+ u8 block_table_found = 0;
-+ int search_result;
-+ u32 Block;
-+ u16 Page = 0;
-+ u16 PageCount;
-+ u16 bt_pages;
-+ int wBytesCopied = 0, tempvar;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ tempBuf = tmp_buf1_read_blk_table;
-+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-+
-+ for (j = DeviceInfo.wSpectraStartBlock;
-+ j <= (int)DeviceInfo.wSpectraEndBlock;
-+ j++) {
-+ status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
-+ k = 0;
-+ i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
-+ if (i) {
-+ status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
-+ j, 0, 1);
-+ for (; k < i; k++) {
-+ if (tagarray[k] == tempBuf[3])
-+ break;
-+ }
-+ }
-+
-+ if (k < i)
-+ k = tagarray[k];
-+ else
-+ continue;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Block table is contained in Block %d %d\n",
-+ (unsigned int)j, (unsigned int)k);
-+
-+ if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
-+ g_pBTBlocks[k-FIRST_BT_ID] = j;
-+ block_table_found = 1;
-+ } else {
-+ printk(KERN_ERR "FTL_Read_Block_Table -"
-+ "This should never happens. "
-+ "Two block table have same counter %u!\n", k);
-+ }
-+ }
-+
-+ if (block_table_found) {
-+ if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
-+ g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
-+ j = LAST_BT_ID;
-+ while ((j > FIRST_BT_ID) &&
-+ (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
-+ j--;
-+ if (j == FIRST_BT_ID) {
-+ j = LAST_BT_ID;
-+ last_erased = LAST_BT_ID;
-+ } else {
-+ last_erased = (u8)j + 1;
-+ while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
-+ g_pBTBlocks[j - FIRST_BT_ID]))
-+ j--;
-+ }
-+ } else {
-+ j = FIRST_BT_ID;
-+ while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
-+ j++;
-+ last_erased = (u8)j;
-+ while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
-+ g_pBTBlocks[j - FIRST_BT_ID]))
-+ j++;
-+ if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
-+ j--;
-+ }
-+
-+ if (last_erased > j)
-+ j += (1 + LAST_BT_ID - FIRST_BT_ID);
-+
-+ for (; (j >= last_erased) && (FAIL == wResult); j--) {
-+ i = (j - FIRST_BT_ID) %
-+ (1 + LAST_BT_ID - FIRST_BT_ID);
-+ search_result =
-+ FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
-+ i + FIRST_BT_ID, &Page);
-+ if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
-+ block_table_found = 0;
-+
-+ while ((search_result == PASS) && (FAIL == wResult)) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "FTL_Read_Block_Table:"
-+ "Block: %u Page: %u "
-+ "contains block table\n",
-+ (unsigned int)g_pBTBlocks[i],
-+ (unsigned int)Page);
-+
-+ tempBuf = tmp_buf2_read_blk_table;
-+
-+ for (k = 0; k < bt_pages; k++) {
-+ Block = g_pBTBlocks[i];
-+ PageCount = 1;
-+
-+ status =
-+ GLOB_LLD_Read_Page_Main_Polling(
-+ tempBuf, Block, Page, PageCount);
-+
-+ tempvar = k ? 0 : 4;
-+
-+ wBytesCopied +=
-+ FTL_Copy_Block_Table_From_Flash(
-+ tempBuf + tempvar,
-+ DeviceInfo.wPageDataSize - tempvar,
-+ wBytesCopied);
-+
-+ Page++;
-+ }
-+
-+ wResult = FTL_Check_Block_Table(FAIL);
-+ if (FAIL == wResult) {
-+ block_table_found = 0;
-+ if (Page > bt_pages)
-+ Page -= ((bt_pages<<1) + 1);
-+ else
-+ search_result = FAIL;
-+ }
-+ }
-+ }
-+ }
-+
-+ if (PASS == wResult) {
-+ if (!block_table_found)
-+ FTL_Execute_SPL_Recovery();
-+
-+ if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
-+ g_wBlockTableOffset = (u16)Page + 1;
-+ else
-+ g_wBlockTableOffset = (u16)Page - bt_pages;
-+
-+ g_wBlockTableIndex = (u32)g_pBTBlocks[i];
-+
-+#if CMD_DMA
-+ if (DeviceInfo.MLCDevice)
-+ memcpy(g_pBTStartingCopy, g_pBlockTable,
-+ DeviceInfo.wDataBlockNum * sizeof(u32)
-+ + DeviceInfo.wDataBlockNum * sizeof(u8)
-+ + DeviceInfo.wDataBlockNum * sizeof(u16));
-+ else
-+ memcpy(g_pBTStartingCopy, g_pBlockTable,
-+ DeviceInfo.wDataBlockNum * sizeof(u32)
-+ + DeviceInfo.wDataBlockNum * sizeof(u8));
-+#endif
-+ }
-+
-+ if (FAIL == wResult)
-+ printk(KERN_ERR "Yunpeng - "
-+ "Can not find valid spectra block table!\n");
-+
-+#if AUTO_FORMAT_FLASH
-+ if (FAIL == wResult) {
-+ nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
-+ wResult = FTL_Format_Flash(0);
-+ }
-+#endif
-+
-+ return wResult;
-+}
-+
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Flash_Error_Handle
-+* Inputs: Pointer to data
-+* Page address
-+* Block address
-+* Outputs: PASS=0 / FAIL=1
-+* Description: It handles any error that occurred during a Spectra operation
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
-+ u64 blk_addr)
-+{
-+ u32 i;
-+ int j;
-+ u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
-+ u64 phy_addr;
-+ int wErase = FAIL;
-+ int wResult = FAIL;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (ERR == GLOB_FTL_Garbage_Collection())
-+ return ERR;
-+
-+ do {
-+ for (i = DeviceInfo.wSpectraEndBlock -
-+ DeviceInfo.wSpectraStartBlock;
-+ i > 0; i--) {
-+ if (IS_SPARE_BLOCK(i)) {
-+ tmp_node = (u32)(BAD_BLOCK |
-+ pbt[blk_node]);
-+ pbt[blk_node] = (u32)(pbt[i] &
-+ (~SPARE_BLOCK));
-+ pbt[i] = tmp_node;
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)
-+ g_pBTDelta_Free;
-+ g_pBTDelta_Free +=
-+ sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index =
-+ blk_node;
-+ p_BTableChangesDelta->BT_Entry_Value =
-+ pbt[blk_node];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)
-+ g_pBTDelta_Free;
-+ g_pBTDelta_Free +=
-+ sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = i;
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[i];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+#endif
-+ wResult = PASS;
-+ break;
-+ }
-+ }
-+
-+ if (FAIL == wResult) {
-+ if (FAIL == GLOB_FTL_Garbage_Collection())
-+ break;
-+ else
-+ continue;
-+ }
-+
-+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+
-+ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-+
-+ for (j = 0; j < RETRY_TIMES; j++) {
-+ if (PASS == wErase) {
-+ if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
-+ MARK_BLOCK_AS_BAD(pbt[blk_node]);
-+ break;
-+ }
-+ }
-+ if (PASS == FTL_Cache_Update_Block(pData,
-+ old_page_addr,
-+ phy_addr)) {
-+ wResult = PASS;
-+ break;
-+ } else {
-+ wResult = FAIL;
-+ wErase = PASS;
-+ }
-+ }
-+ } while (FAIL == wResult);
-+
-+ FTL_Write_Block_Table(FAIL);
-+
-+ return wResult;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Get_Page_Num
-+* Inputs: Size in bytes
-+* Outputs: Size in pages
-+* Description: It calculates the pages required for the length passed
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static u32 FTL_Get_Page_Num(u64 length)
-+{
-+ return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
-+ (GLOB_u64_Remainder(length, 1) > 0 ? 1 : 0));
-+}
-+
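For illustration, the page-count calculation above is a ceiling division by the page data size, done with a shift plus a remainder check. A minimal standalone sketch of the same idea, assuming a hypothetical 2 KB data page (nBitsInPageDataSize == 11); the constants and helper name are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical geometry for illustration: a 2048-byte data page. */
#define BITS_IN_PAGE_DATA_SIZE	11
#define PAGE_DATA_SIZE		(1u << BITS_IN_PAGE_DATA_SIZE)

/* Ceiling division of a byte length into pages, as FTL_Get_Page_Num() does. */
static uint32_t pages_for_length(uint64_t length)
{
	return (uint32_t)((length >> BITS_IN_PAGE_DATA_SIZE) +
			  ((length & (PAGE_DATA_SIZE - 1)) ? 1 : 0));
}

int main(void)
{
	printf("%u %u %u\n", pages_for_length(0),
	       pages_for_length(2048), pages_for_length(2049));	/* 0 1 2 */
	return 0;
}
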
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Get_Physical_Block_Addr
-+* Inputs: Block Address (byte format)
-+* Outputs: Physical address of the block.
-+* Description: It translates LBA to PBA by returning address stored
-+* at the LBA location in the block table
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
-+{
-+ u32 *pbt;
-+ u64 physical_addr;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ pbt = (u32 *)g_pBlockTable;
-+ physical_addr = (u64) DeviceInfo.wBlockDataSize *
-+ (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
-+
-+ return physical_addr;
-+}
-+
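For illustration, the lookup above is the whole LBA-to-PBA mapping: the logical block number indexes the block table, the status bit is masked off, and the physical block number is scaled back to a byte address. A compact sketch with assumed sizes and flag values (these are illustrative, not the driver's real constants):

#include <stdint.h>

/* Illustrative geometry and flag encoding (assumptions, not driver values). */
#define BLOCK_DATA_SIZE	(128u * 2048u)	/* bytes of data per block */
#define BAD_BLOCK	0x80000000u	/* status bit stored in a table entry */

/* Return the base byte address of the physical block that backs the
 * logical address, mirroring FTL_Get_Physical_Block_Addr(). */
static uint64_t logical_to_physical(const uint32_t *blk_table, uint64_t logical_addr)
{
	uint32_t logical_blk = (uint32_t)(logical_addr / BLOCK_DATA_SIZE);
	uint32_t phys_blk = blk_table[logical_blk] & ~BAD_BLOCK;

	return (uint64_t)phys_blk * BLOCK_DATA_SIZE;
}
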
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Get_Block_Index
-+* Inputs: Physical Block no.
-+* Outputs: Logical block no. /BAD_BLOCK
-+* Description: It returns the logical block no. for the PBA passed
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static u32 FTL_Get_Block_Index(u32 wBlockNum)
-+{
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
-+ if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
-+ return i;
-+
-+ return BAD_BLOCK;
-+}
-+
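The reverse mapping above has no inverse index; it is a linear scan over every block-table entry. A sketch of the same scan, where BAD_BLOCK is the assumed flag value from the previous sketch and doubles as the "not found" result, as in the driver:

#include <stdint.h>

#define BAD_BLOCK 0x80000000u	/* assumed flag value, also the "not found" result */

/* Find the logical index that maps to phys_blk, as FTL_Get_Block_Index() does. */
static uint32_t phys_to_logical(const uint32_t *blk_table, uint32_t nr_entries,
				uint32_t phys_blk)
{
	uint32_t i;

	for (i = 0; i < nr_entries; i++)
		if ((blk_table[i] & ~BAD_BLOCK) == phys_blk)
			return i;

	return BAD_BLOCK;
}
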
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Wear_Leveling
-+* Inputs: none
-+* Outputs: PASS=0
-+* Description: This is static wear leveling (done by explicit call)
-+* do complete static wear leveling
-+* do complete garbage collection
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Wear_Leveling(void)
-+{
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ FTL_Static_Wear_Leveling();
-+ GLOB_FTL_Garbage_Collection();
-+
-+ return PASS;
-+}
-+
-+static void find_least_most_worn(u8 *chg,
-+ u32 *least_idx, u8 *least_cnt,
-+ u32 *most_idx, u8 *most_cnt)
-+{
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 idx;
-+ u8 cnt;
-+ int i;
-+
-+ for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
-+ if (IS_BAD_BLOCK(i) || PASS == chg[i])
-+ continue;
-+
-+ idx = (u32) ((~BAD_BLOCK) & pbt[i]);
-+ cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
-+
-+ if (IS_SPARE_BLOCK(i)) {
-+ if (cnt > *most_cnt) {
-+ *most_cnt = cnt;
-+ *most_idx = idx;
-+ }
-+ }
-+
-+ if (IS_DATA_BLOCK(i)) {
-+ if (cnt < *least_cnt) {
-+ *least_cnt = cnt;
-+ *least_idx = idx;
-+ }
-+ }
-+
-+ if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
-+ debug_boundary_error(*most_idx,
-+ DeviceInfo.wDataBlockNum, 0);
-+ debug_boundary_error(*least_idx,
-+ DeviceInfo.wDataBlockNum, 0);
-+ continue;
-+ }
-+ }
-+}
-+
-+static int move_blks_for_wear_leveling(u8 *chg,
-+ u32 *least_idx, u32 *rep_blk_num, int *result)
-+{
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 rep_blk;
-+ int j, ret_cp_blk, ret_erase;
-+ int ret = PASS;
-+
-+ chg[*least_idx] = PASS;
-+ debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
-+
-+ rep_blk = FTL_Replace_MWBlock();
-+ if (rep_blk != BAD_BLOCK) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "More than two spare blocks exist so do it\n");
-+ nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
-+ rep_blk);
-+
-+ chg[rep_blk] = PASS;
-+
-+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+
-+ for (j = 0; j < RETRY_TIMES; j++) {
-+ ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
-+ DeviceInfo.wBlockDataSize,
-+ (u64)rep_blk * DeviceInfo.wBlockDataSize);
-+ if (FAIL == ret_cp_blk) {
-+ ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
-+ * DeviceInfo.wBlockDataSize);
-+ if (FAIL == ret_erase)
-+ MARK_BLOCK_AS_BAD(pbt[rep_blk]);
-+ } else {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "FTL_Copy_Block == OK\n");
-+ break;
-+ }
-+ }
-+
-+ if (j < RETRY_TIMES) {
-+ u32 tmp;
-+ u32 old_idx = FTL_Get_Block_Index(*least_idx);
-+ u32 rep_idx = FTL_Get_Block_Index(rep_blk);
-+ tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
-+ pbt[old_idx] = (u32)((~SPARE_BLOCK) &
-+ pbt[rep_idx]);
-+ pbt[rep_idx] = tmp;
-+#if CMD_DMA
-+ p_BTableChangesDelta = (struct BTableChangesDelta *)
-+ g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = old_idx;
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+
-+ p_BTableChangesDelta = (struct BTableChangesDelta *)
-+ g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = rep_idx;
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+#endif
-+ } else {
-+ pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
-+#if CMD_DMA
-+ p_BTableChangesDelta = (struct BTableChangesDelta *)
-+ g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index =
-+ FTL_Get_Block_Index(rep_blk);
-+ p_BTableChangesDelta->BT_Entry_Value =
-+ pbt[FTL_Get_Block_Index(rep_blk)];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+#endif
-+ *result = FAIL;
-+ ret = FAIL;
-+ }
-+
-+ if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
-+ ret = FAIL;
-+ } else {
-+ printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
-+ ret = FAIL;
-+ }
-+
-+ return ret;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Static_Wear_Leveling
-+* Inputs: none
-+* Outputs: PASS=0 / FAIL=1
-+* Description: This is static wear leveling (done by explicit call)
-+* search for most&least used
-+* if difference < GATE:
-+* update the block table with exhange
-+* mark block table in flash as IN_PROGRESS
-+* copy flash block
-+* the caller should handle GC clean up after calling this function
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int FTL_Static_Wear_Leveling(void)
-+{
-+ u8 most_worn_cnt;
-+ u8 least_worn_cnt;
-+ u32 most_worn_idx;
-+ u32 least_worn_idx;
-+ int result = PASS;
-+ int go_on = PASS;
-+ u32 replaced_blks = 0;
-+ u8 *chang_flag = flags_static_wear_leveling;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (!chang_flag)
-+ return FAIL;
-+
-+ memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
-+ while (go_on == PASS) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "starting static wear leveling\n");
-+ most_worn_cnt = 0;
-+ least_worn_cnt = 0xFF;
-+ least_worn_idx = BLOCK_TABLE_INDEX;
-+ most_worn_idx = BLOCK_TABLE_INDEX;
-+
-+ find_least_most_worn(chang_flag, &least_worn_idx,
-+ &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Used and least worn is block %u, whos count is %u\n",
-+ (unsigned int)least_worn_idx,
-+ (unsigned int)least_worn_cnt);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Free and most worn is block %u, whos count is %u\n",
-+ (unsigned int)most_worn_idx,
-+ (unsigned int)most_worn_cnt);
-+
-+ if ((most_worn_cnt > least_worn_cnt) &&
-+ (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
-+ go_on = move_blks_for_wear_leveling(chang_flag,
-+ &least_worn_idx, &replaced_blks, &result);
-+ else
-+ go_on = FAIL;
-+ }
-+
-+ return result;
-+}
-+
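The loop above keeps swapping blocks only while the wear spread justifies it: the most worn free block and the least worn data block are compared, and a move happens only when the difference exceeds the wear leveling gate. A tiny sketch of that decision (the gate value is an assumption for illustration):

#include <stdint.h>
#include <stdbool.h>

#define WEAR_LEVELING_GATE 10	/* assumed threshold, for illustration only */

/* Should the most worn spare block and least worn data block swap roles? */
static bool should_wear_level(uint8_t most_worn_cnt, uint8_t least_worn_cnt)
{
	return most_worn_cnt > least_worn_cnt &&
	       (uint8_t)(most_worn_cnt - least_worn_cnt) > WEAR_LEVELING_GATE;
}
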
-+#if CMD_DMA
-+static int do_garbage_collection(u32 discard_cnt)
-+{
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 pba;
-+ u8 bt_block_erased = 0;
-+ int i, cnt, ret = FAIL;
-+ u64 addr;
-+
-+ i = 0;
-+ while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
-+ ((ftl_cmd_cnt + 28) < 256)) {
-+ if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
-+ (pbt[i] & DISCARD_BLOCK)) {
-+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+
-+ addr = FTL_Get_Physical_Block_Addr((u64)i *
-+ DeviceInfo.wBlockDataSize);
-+ pba = BLK_FROM_ADDR(addr);
-+
-+ for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
-+ if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "GC will erase BT block %u\n",
-+ (unsigned int)pba);
-+ discard_cnt--;
-+ i++;
-+ bt_block_erased = 1;
-+ break;
-+ }
-+ }
-+
-+ if (bt_block_erased) {
-+ bt_block_erased = 0;
-+ continue;
-+ }
-+
-+ addr = FTL_Get_Physical_Block_Addr((u64)i *
-+ DeviceInfo.wBlockDataSize);
-+
-+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
-+ pbt[i] &= (u32)(~DISCARD_BLOCK);
-+ pbt[i] |= (u32)(SPARE_BLOCK);
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)
-+ g_pBTDelta_Free;
-+ g_pBTDelta_Free +=
-+ sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt - 1;
-+ p_BTableChangesDelta->BT_Index = i;
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[i];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+ discard_cnt--;
-+ ret = PASS;
-+ } else {
-+ MARK_BLOCK_AS_BAD(pbt[i]);
-+ }
-+ }
-+
-+ i++;
-+ }
-+
-+ return ret;
-+}
-+
-+#else
-+static int do_garbage_collection(u32 discard_cnt)
-+{
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 pba;
-+ u8 bt_block_erased = 0;
-+ int i, cnt, ret = FAIL;
-+ u64 addr;
-+
-+ i = 0;
-+ while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
-+ if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
-+ (pbt[i] & DISCARD_BLOCK)) {
-+ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+
-+ addr = FTL_Get_Physical_Block_Addr((u64)i *
-+ DeviceInfo.wBlockDataSize);
-+ pba = BLK_FROM_ADDR(addr);
-+
-+ for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
-+ if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "GC will erase BT block %d\n",
-+ pba);
-+ discard_cnt--;
-+ i++;
-+ bt_block_erased = 1;
-+ break;
-+ }
-+ }
-+
-+ if (bt_block_erased) {
-+ bt_block_erased = 0;
-+ continue;
-+ }
-+
-+ /* If the discard block is L2 cache block, then just skip it */
-+ for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
-+ if (cache_l2.blk_array[cnt] == pba) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "GC will erase L2 cache blk %d\n",
-+ pba);
-+ break;
-+ }
-+ }
-+ if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
-+ discard_cnt--;
-+ i++;
-+ continue;
-+ }
-+
-+ addr = FTL_Get_Physical_Block_Addr((u64)i *
-+ DeviceInfo.wBlockDataSize);
-+
-+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
-+ pbt[i] &= (u32)(~DISCARD_BLOCK);
-+ pbt[i] |= (u32)(SPARE_BLOCK);
-+ discard_cnt--;
-+ ret = PASS;
-+ } else {
-+ MARK_BLOCK_AS_BAD(pbt[i]);
-+ }
-+ }
-+
-+ i++;
-+ }
-+
-+ return ret;
-+}
-+#endif
-+
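Both variants of do_garbage_collection() apply the same per-entry transition: a discarded, non-bad block is erased, its DISCARD flag is cleared and SPARE is set, and an erase failure marks the entry bad instead. A sketch of just that bookkeeping, with assumed flag values and a stubbed erase helper:

#include <stdint.h>
#include <stdbool.h>

/* Assumed flag encodings in a block-table entry (illustrative values). */
#define BAD_BLOCK	0x80000000u
#define DISCARD_BLOCK	0x20000000u
#define SPARE_BLOCK	0x40000000u

/* Stand-in for GLOB_FTL_Block_Erase(); returns true on success. */
static bool erase_ok(uint32_t entry)
{
	(void)entry;
	return true;
}

/* Reclaim one table entry the way the garbage collector does. */
static void reclaim_entry(uint32_t *entry)
{
	if ((*entry & BAD_BLOCK) == BAD_BLOCK || !(*entry & DISCARD_BLOCK))
		return;		/* bad or not discarded: nothing to do */

	if (erase_ok(*entry)) {
		*entry &= ~DISCARD_BLOCK;
		*entry |= SPARE_BLOCK;	/* block is free again */
	} else {
		*entry |= BAD_BLOCK;	/* what MARK_BLOCK_AS_BAD() amounts to */
	}
}
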
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Garbage_Collection
-+* Inputs: none
-+* Outputs: PASS / FAIL (returns the number of un-erased blocks)
-+* Description: search the block table for all discarded blocks to erase
-+* for each discarded block:
-+* set the flash block to IN_PROGRESS
-+* erase the block
-+* update the block table
-+* write the block table to flash
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Garbage_Collection(void)
-+{
-+ u32 i;
-+ u32 wDiscard = 0;
-+ int wResult = FAIL;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (GC_Called) {
-+ printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
-+ "has been re-entered! Exit.\n");
-+ return PASS;
-+ }
-+
-+ GC_Called = 1;
-+
-+ GLOB_FTL_BT_Garbage_Collection();
-+
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ if (IS_DISCARDED_BLOCK(i))
-+ wDiscard++;
-+ }
-+
-+ if (wDiscard <= 0) {
-+ GC_Called = 0;
-+ return wResult;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Found %d discarded blocks\n", wDiscard);
-+
-+ FTL_Write_Block_Table(FAIL);
-+
-+ wResult = do_garbage_collection(wDiscard);
-+
-+ FTL_Write_Block_Table(FAIL);
-+
-+ GC_Called = 0;
-+
-+ return wResult;
-+}
-+
-+
-+#if CMD_DMA
-+static int do_bt_garbage_collection(void)
-+{
-+ u32 pba, lba;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
-+ u64 addr;
-+ int i, ret = FAIL;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (BT_GC_Called)
-+ return PASS;
-+
-+ BT_GC_Called = 1;
-+
-+ for (i = last_erased; (i <= LAST_BT_ID) &&
-+ (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
-+ FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
-+ ((ftl_cmd_cnt + 28)) < 256; i++) {
-+ pba = pBTBlocksNode[i - FIRST_BT_ID];
-+ lba = FTL_Get_Block_Index(pba);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "do_bt_garbage_collection: pba %d, lba %d\n",
-+ pba, lba);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Block Table Entry: %d", pbt[lba]);
-+
-+ if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
-+ (pbt[lba] & DISCARD_BLOCK)) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "do_bt_garbage_collection_cdma: "
-+ "Erasing Block tables present in block %d\n",
-+ pba);
-+ addr = FTL_Get_Physical_Block_Addr((u64)lba *
-+ DeviceInfo.wBlockDataSize);
-+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
-+ pbt[lba] &= (u32)(~DISCARD_BLOCK);
-+ pbt[lba] |= (u32)(SPARE_BLOCK);
-+
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)
-+ g_pBTDelta_Free;
-+ g_pBTDelta_Free +=
-+ sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt - 1;
-+ p_BTableChangesDelta->BT_Index = lba;
-+ p_BTableChangesDelta->BT_Entry_Value =
-+ pbt[lba];
-+
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+
-+ ret = PASS;
-+ pBTBlocksNode[last_erased - FIRST_BT_ID] =
-+ BTBLOCK_INVAL;
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "resetting bt entry at index %d "
-+ "value %d\n", i,
-+ pBTBlocksNode[i - FIRST_BT_ID]);
-+ if (last_erased == LAST_BT_ID)
-+ last_erased = FIRST_BT_ID;
-+ else
-+ last_erased++;
-+ } else {
-+ MARK_BLOCK_AS_BAD(pbt[lba]);
-+ }
-+ }
-+ }
-+
-+ BT_GC_Called = 0;
-+
-+ return ret;
-+}
-+
-+#else
-+static int do_bt_garbage_collection(void)
-+{
-+ u32 pba, lba;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
-+ u64 addr;
-+ int i, ret = FAIL;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (BT_GC_Called)
-+ return PASS;
-+
-+ BT_GC_Called = 1;
-+
-+ for (i = last_erased; (i <= LAST_BT_ID) &&
-+ (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
-+ FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
-+ pba = pBTBlocksNode[i - FIRST_BT_ID];
-+ lba = FTL_Get_Block_Index(pba);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
-+ pba, lba);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Block Table Entry: %d", pbt[lba]);
-+
-+ if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
-+ (pbt[lba] & DISCARD_BLOCK)) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "do_bt_garbage_collection: "
-+ "Erasing Block tables present in block %d\n",
-+ pba);
-+ addr = FTL_Get_Physical_Block_Addr((u64)lba *
-+ DeviceInfo.wBlockDataSize);
-+ if (PASS == GLOB_FTL_Block_Erase(addr)) {
-+ pbt[lba] &= (u32)(~DISCARD_BLOCK);
-+ pbt[lba] |= (u32)(SPARE_BLOCK);
-+ ret = PASS;
-+ pBTBlocksNode[last_erased - FIRST_BT_ID] =
-+ BTBLOCK_INVAL;
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "resetting bt entry at index %d "
-+ "value %d\n", i,
-+ pBTBlocksNode[i - FIRST_BT_ID]);
-+ if (last_erased == LAST_BT_ID)
-+ last_erased = FIRST_BT_ID;
-+ else
-+ last_erased++;
-+ } else {
-+ MARK_BLOCK_AS_BAD(pbt[lba]);
-+ }
-+ }
-+ }
-+
-+ BT_GC_Called = 0;
-+
-+ return ret;
-+}
-+
-+#endif
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_BT_Garbage_Collection
-+* Inputs: none
-+* Outputs: PASS / FAIL (returns the number of un-erased blocks)
-+* Description: Erases discarded blocks containing Block table
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_BT_Garbage_Collection(void)
-+{
-+ return do_bt_garbage_collection();
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Replace_OneBlock
-+* Inputs: Block number 1
-+* Block number 2
-+* Outputs: Replaced Block Number
-+* Description: Interchange block table entries at blk and rep_blk
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
-+{
-+ u32 tmp_blk;
-+ u32 replace_node = BAD_BLOCK;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (rep_blk != BAD_BLOCK) {
-+ if (IS_BAD_BLOCK(blk))
-+ tmp_blk = pbt[blk];
-+ else
-+ tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
-+
-+ replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
-+ pbt[blk] = replace_node;
-+ pbt[rep_blk] = tmp_blk;
-+
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = blk;
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
-+
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = rep_blk;
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+#endif
-+ }
-+
-+ return replace_node;
-+}
-+
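The swap above re-files the old entry as discarded (or leaves it marked bad), while the spare entry, stripped of its SPARE flag, takes over the logical slot. A compact sketch with the same assumed flag values as the earlier sketches:

#include <stdint.h>

#define BAD_BLOCK	0x80000000u
#define DISCARD_BLOCK	0x20000000u
#define SPARE_BLOCK	0x40000000u

/* Exchange pbt[blk] with the spare entry pbt[rep_blk]; return the value
 * now mapped at blk, mirroring FTL_Replace_OneBlock(). */
static uint32_t replace_one_block(uint32_t *pbt, uint32_t blk, uint32_t rep_blk)
{
	uint32_t old, fresh;

	if (pbt[blk] & BAD_BLOCK)
		old = pbt[blk];					/* keep the bad marking */
	else
		old = DISCARD_BLOCK | (pbt[blk] & ~SPARE_BLOCK);

	fresh = pbt[rep_blk] & ~SPARE_BLOCK;			/* becomes a data block */
	pbt[blk] = fresh;
	pbt[rep_blk] = old;

	return fresh;
}
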
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Write_Block_Table_Data
-+* Inputs: Block table size in pages
-+* Outputs: PASS=0 / FAIL=1
-+* Description: Write block table data in flash
-+* If first page and last page
-+* Write data+BT flag
-+* else
-+* Write data
-+* BT flag is a counter. Its value is incremented each time the block
-+* table is written to a new block
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Write_Block_Table_Data(void)
-+{
-+ u64 dwBlockTableAddr, pTempAddr;
-+ u32 Block;
-+ u16 Page, PageCount;
-+ u8 *tempBuf = tmp_buf_write_blk_table_data;
-+ int wBytesCopied;
-+ u16 bt_pages;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ dwBlockTableAddr =
-+ (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
-+ (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
-+ pTempAddr = dwBlockTableAddr;
-+
-+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
-+ "page= %d BlockTableIndex= %d "
-+ "BlockTableOffset=%d\n", bt_pages,
-+ g_wBlockTableIndex, g_wBlockTableOffset);
-+
-+ Block = BLK_FROM_ADDR(pTempAddr);
-+ Page = PAGE_FROM_ADDR(pTempAddr, Block);
-+ PageCount = 1;
-+
-+ if (bt_block_changed) {
-+ if (bt_flag == LAST_BT_ID) {
-+ bt_flag = FIRST_BT_ID;
-+ g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
-+ } else if (bt_flag < LAST_BT_ID) {
-+ bt_flag++;
-+ g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
-+ }
-+
-+ if ((bt_flag > (LAST_BT_ID-4)) &&
-+ g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
-+ BTBLOCK_INVAL) {
-+ bt_block_changed = 0;
-+ GLOB_FTL_BT_Garbage_Collection();
-+ }
-+
-+ bt_block_changed = 0;
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Block Table Counter is %u Block %u\n",
-+ bt_flag, (unsigned int)Block);
-+ }
-+
-+ memset(tempBuf, 0, 3);
-+ tempBuf[3] = bt_flag;
-+ wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
-+ DeviceInfo.wPageDataSize - 4, 0);
-+ memset(&tempBuf[wBytesCopied + 4], 0xff,
-+ DeviceInfo.wPageSize - (wBytesCopied + 4));
-+ FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
-+ bt_flag);
-+
-+#if CMD_DMA
-+ memcpy(g_pNextBlockTable, tempBuf,
-+ DeviceInfo.wPageSize * sizeof(u8));
-+ nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
-+ "Block %u Page %u\n", (unsigned int)Block, Page);
-+ if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
-+ Block, Page, 1,
-+ LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
-+ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
-+ "%s, Line %d, Function: %s, "
-+ "new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__, Block);
-+ goto func_return;
-+ }
-+
-+ ftl_cmd_cnt++;
-+ g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
-+#else
-+ if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, Function: %s, "
-+ "new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__, Block);
-+ goto func_return;
-+ }
-+#endif
-+
-+ if (bt_pages > 1) {
-+ PageCount = bt_pages - 1;
-+ if (PageCount > 1) {
-+ wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
-+ DeviceInfo.wPageDataSize * (PageCount - 1),
-+ wBytesCopied);
-+
-+#if CMD_DMA
-+ memcpy(g_pNextBlockTable, tempBuf,
-+ (PageCount - 1) * DeviceInfo.wPageDataSize);
-+ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
-+ g_pNextBlockTable, Block, Page + 1,
-+ PageCount - 1)) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, "
-+ "new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__,
-+ (int)Block);
-+ goto func_return;
-+ }
-+
-+ ftl_cmd_cnt++;
-+ g_pNextBlockTable += (PageCount - 1) *
-+ DeviceInfo.wPageDataSize * sizeof(u8);
-+#else
-+ if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
-+ Block, Page + 1, PageCount - 1)) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, "
-+ "new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__,
-+ (int)Block);
-+ goto func_return;
-+ }
-+#endif
-+ }
-+
-+ wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
-+ DeviceInfo.wPageDataSize, wBytesCopied);
-+ memset(&tempBuf[wBytesCopied], 0xff,
-+ DeviceInfo.wPageSize-wBytesCopied);
-+ FTL_Insert_Block_Table_Signature(
-+ &tempBuf[DeviceInfo.wPageDataSize], bt_flag);
-+#if CMD_DMA
-+ memcpy(g_pNextBlockTable, tempBuf,
-+ DeviceInfo.wPageSize * sizeof(u8));
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Writing the last Page of Block Table "
-+ "Block %u Page %u\n",
-+ (unsigned int)Block, Page + bt_pages - 1);
-+ if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
-+ g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
-+ LLD_CMD_FLAG_MODE_CDMA |
-+ LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__, Block);
-+ goto func_return;
-+ }
-+ ftl_cmd_cnt++;
-+#else
-+ if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
-+ Block, Page+bt_pages - 1, 1)) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, "
-+ "new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__, Block);
-+ goto func_return;
-+ }
-+#endif
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
-+
-+func_return:
-+ return PASS;
-+}
-+
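The "BT flag" written into byte 3 of the first block-table page is a small tag that wraps from LAST_BT_ID back to FIRST_BT_ID each time the table moves to a new block, which is how the newest copy is identified at mount time. A sketch of that counter (the ID bounds are assumptions for illustration):

#include <stdint.h>

#define FIRST_BT_ID	1u	/* assumed tag range, for illustration */
#define LAST_BT_ID	4u

/* Advance the block-table tag when the table starts a new block. */
static uint8_t next_bt_tag(uint8_t cur_tag)
{
	if (cur_tag >= LAST_BT_ID)
		return FIRST_BT_ID;	/* wrap around */
	return cur_tag + 1;
}
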
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Replace_Block_Table
-+* Inputs: None
-+* Outputs: PASS=0 / FAIL=1
-+* Description: Get a new block to write block table
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static u32 FTL_Replace_Block_Table(void)
-+{
-+ u32 blk;
-+ int gc;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
-+
-+ if ((BAD_BLOCK == blk) && (PASS == gc)) {
-+ GLOB_FTL_Garbage_Collection();
-+ blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
-+ }
-+ if (BAD_BLOCK == blk)
-+ printk(KERN_ERR "%s, %s: There is no spare block. "
-+ "It should never happen\n",
-+ __FILE__, __func__);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
-+
-+ return blk;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Replace_LWBlock
-+* Inputs: Block number
-+* Pointer to Garbage Collect flag
-+* Outputs:
-+* Description: Determine the least worn block by traversing the
-+* block table
-+* Set garbage collection to be called if the number of spare
-+* blocks is less than the Free Block Gate count
-+* Change Block table entry to map least worn block for current
-+* operation
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
-+{
-+ u32 i;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u8 wLeastWornCounter = 0xFF;
-+ u32 wLeastWornIndex = BAD_BLOCK;
-+ u32 wSpareBlockNum = 0;
-+ u32 wDiscardBlockNum = 0;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (IS_SPARE_BLOCK(wBlockNum)) {
-+ *pGarbageCollect = FAIL;
-+ pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+#endif
-+ return pbt[wBlockNum];
-+ }
-+
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ if (IS_DISCARDED_BLOCK(i))
-+ wDiscardBlockNum++;
-+
-+ if (IS_SPARE_BLOCK(i)) {
-+ u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
-+ if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
-+ printk(KERN_ERR "FTL_Replace_LWBlock: "
-+ "This should never occur!\n");
-+ if (g_pWearCounter[wPhysicalIndex -
-+ DeviceInfo.wSpectraStartBlock] <
-+ wLeastWornCounter) {
-+ wLeastWornCounter =
-+ g_pWearCounter[wPhysicalIndex -
-+ DeviceInfo.wSpectraStartBlock];
-+ wLeastWornIndex = i;
-+ }
-+ wSpareBlockNum++;
-+ }
-+ }
-+
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "FTL_Replace_LWBlock: Least Worn Counter %d\n",
-+ (int)wLeastWornCounter);
-+
-+ if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
-+ (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
-+ *pGarbageCollect = PASS;
-+ else
-+ *pGarbageCollect = FAIL;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
-+ " Blocks %u\n",
-+ (unsigned int)wDiscardBlockNum,
-+ (unsigned int)wSpareBlockNum);
-+
-+ return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Replace_MWBlock
-+* Inputs: None
-+* Outputs: most worn spare block no./BAD_BLOCK
-+* Description: It finds most worn spare block.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static u32 FTL_Replace_MWBlock(void)
-+{
-+ u32 i;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u8 wMostWornCounter = 0;
-+ u32 wMostWornIndex = BAD_BLOCK;
-+ u32 wSpareBlockNum = 0;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ if (IS_SPARE_BLOCK(i)) {
-+ u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
-+ if (g_pWearCounter[wPhysicalIndex -
-+ DeviceInfo.wSpectraStartBlock] >
-+ wMostWornCounter) {
-+ wMostWornCounter =
-+ g_pWearCounter[wPhysicalIndex -
-+ DeviceInfo.wSpectraStartBlock];
-+ wMostWornIndex = wPhysicalIndex;
-+ }
-+ wSpareBlockNum++;
-+ }
-+ }
-+
-+ if (wSpareBlockNum <= 2)
-+ return BAD_BLOCK;
-+
-+ return wMostWornIndex;
-+}
-+
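Picking the most worn spare block is a single pass over the table that tracks the highest wear counter among spares and gives up (returning BAD_BLOCK) unless more than two spares exist. A sketch under the same flag assumptions as before:

#include <stdint.h>

#define BAD_BLOCK	0x80000000u
#define SPARE_BLOCK	0x40000000u

/* Return the physical index of the most worn spare block, or BAD_BLOCK
 * when two or fewer spares remain, as FTL_Replace_MWBlock() does. */
static uint32_t most_worn_spare(const uint32_t *pbt, const uint8_t *wear,
				uint32_t nr_entries, uint32_t start_blk)
{
	uint8_t best_cnt = 0;
	uint32_t best_idx = BAD_BLOCK;
	uint32_t spares = 0;
	uint32_t i, phys;

	for (i = 0; i < nr_entries; i++) {
		if (!(pbt[i] & SPARE_BLOCK))
			continue;
		phys = pbt[i] & ~SPARE_BLOCK;
		if (wear[phys - start_blk] >= best_cnt) {
			best_cnt = wear[phys - start_blk];
			best_idx = phys;
		}
		spares++;
	}

	return (spares <= 2) ? BAD_BLOCK : best_idx;
}
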
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Replace_Block
-+* Inputs: Block Address
-+* Outputs: PASS=0 / FAIL=1
-+* Description: If block specified by blk_addr parameter is not free,
-+* replace it with the least worn block.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Replace_Block(u64 blk_addr)
-+{
-+ u32 current_blk = BLK_FROM_ADDR(blk_addr);
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ int wResult = PASS;
-+ int GarbageCollect = FAIL;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (IS_SPARE_BLOCK(current_blk)) {
-+ pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = current_blk;
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
-+ p_BTableChangesDelta->ValidFields = 0x0C ;
-+#endif
-+ return wResult;
-+ }
-+
-+ FTL_Replace_LWBlock(current_blk, &GarbageCollect);
-+
-+ if (PASS == GarbageCollect)
-+ wResult = GLOB_FTL_Garbage_Collection();
-+
-+ return wResult;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Is_BadBlock
-+* Inputs: block number to test
-+* Outputs: PASS (block is BAD) / FAIL (block is not bad)
-+* Description: test if this block number is flagged as bad
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
-+{
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (wBlockNum >= DeviceInfo.wSpectraStartBlock
-+ && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
-+ return PASS;
-+ else
-+ return FAIL;
-+}
-+
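The test above is just the BAD_BLOCK bit of the corresponding table entry, guarded by a range check. An equivalent one-liner under the same flag assumption:

#include <stdint.h>
#include <stdbool.h>

#define BAD_BLOCK 0x80000000u	/* assumed flag value, as in the sketches above */

static bool is_bad_block(const uint32_t *pbt, uint32_t blk, uint32_t start_blk)
{
	return blk >= start_blk && (pbt[blk] & BAD_BLOCK) == BAD_BLOCK;
}
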
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Flush_Cache
-+* Inputs: none
-+* Outputs: PASS=0 / FAIL=1
-+* Description: flush all the cache blocks to flash
-+* if a cache block is not dirty, don't do anything with it
-+* else, write the block and update the block table
-+* Note: This function should be called at shutdown/power down
-+* to write important data to the device
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Flush_Cache(void)
-+{
-+ int i, ret;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < CACHE_ITEM_NUM; i++) {
-+ if (SET == Cache.array[i].changed) {
-+#if CMD_DMA
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+ int_cache[ftl_cmd_cnt].item = i;
-+ int_cache[ftl_cmd_cnt].cache.address =
-+ Cache.array[i].address;
-+ int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
-+#endif
-+#endif
-+ ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
-+ if (PASS == ret) {
-+ Cache.array[i].changed = CLEAR;
-+ } else {
-+ printk(KERN_ALERT "Failed when write back to L2 cache!\n");
-+ /* TODO - How to handle this? */
-+ }
-+ }
-+ }
-+
-+ flush_l2_cache();
-+
-+ return FTL_Write_Block_Table(FAIL);
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Page_Read
-+* Inputs: pointer to data
-+* logical address of data (u64 is LBA * Bytes/Page)
-+* Outputs: PASS=0 / FAIL=1
-+* Description: reads a page of data into RAM from the cache
-+* if the data is not already in cache, read from flash to cache
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
-+{
-+ u16 cache_item;
-+ int res = PASS;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
-+ "page_addr: %llu\n", logical_addr);
-+
-+ cache_item = FTL_Cache_If_Hit(logical_addr);
-+
-+ if (UNHIT_CACHE_ITEM == cache_item) {
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "GLOB_FTL_Page_Read: Cache not hit\n");
-+ res = FTL_Cache_Write();
-+ if (ERR == FTL_Cache_Read(logical_addr))
-+ res = ERR;
-+ cache_item = Cache.LRU;
-+ }
-+
-+ FTL_Cache_Read_Page(data, logical_addr, cache_item);
-+
-+ return res;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Page_Write
-+* Inputs: pointer to data
-+* address of data (ADDRESSTYPE is LBA * Bytes/Page)
-+* Outputs: PASS=0 / FAIL=1
-+* Description: writes a page of data from RAM to the cache
-+* if the data is not already in cache, write back the
-+* least recently used block and read the addressed block
-+* from flash to cache
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
-+{
-+ u16 cache_blk;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ int wResult = PASS;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
-+ "dwPageAddr: %llu\n", dwPageAddr);
-+
-+ cache_blk = FTL_Cache_If_Hit(dwPageAddr);
-+
-+ if (UNHIT_CACHE_ITEM == cache_blk) {
-+ wResult = FTL_Cache_Write();
-+ if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
-+ wResult = FTL_Replace_Block(dwPageAddr);
-+ pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
-+ if (wResult == FAIL)
-+ return FAIL;
-+ }
-+ if (ERR == FTL_Cache_Read(dwPageAddr))
-+ wResult = ERR;
-+ cache_blk = Cache.LRU;
-+ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
-+ } else {
-+#if CMD_DMA
-+ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
-+ LLD_CMD_FLAG_ORDER_BEFORE_REST);
-+#else
-+ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
-+#endif
-+ }
-+
-+ return wResult;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: GLOB_FTL_Block_Erase
-+* Inputs: address of block to erase (now in byte format, should change to
-+* block format)
-+* Outputs: PASS=0 / FAIL=1
-+* Description: erases the specified block
-+* increments the erase count
-+* If the erase count reaches its upper limit, call the function to
-+* do the adjustment as per the relative erase count values
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int GLOB_FTL_Block_Erase(u64 blk_addr)
-+{
-+ int status;
-+ u32 BlkIdx;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
-+
-+ if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
-+ printk(KERN_ERR "GLOB_FTL_Block_Erase: "
-+ "This should never occur\n");
-+ return FAIL;
-+ }
-+
-+#if CMD_DMA
-+ status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
-+ if (status == FAIL)
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__, BlkIdx);
-+#else
-+ status = GLOB_LLD_Erase_Block(BlkIdx);
-+ if (status == FAIL) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__, BlkIdx);
-+ return status;
-+ }
-+#endif
-+
-+ if (DeviceInfo.MLCDevice) {
-+ g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
-+ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+ }
-+
-+ g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
-+
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-+ p_BTableChangesDelta->WC_Index =
-+ BlkIdx - DeviceInfo.wSpectraStartBlock;
-+ p_BTableChangesDelta->WC_Entry_Value =
-+ g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
-+ p_BTableChangesDelta->ValidFields = 0x30;
-+
-+ if (DeviceInfo.MLCDevice) {
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->RC_Index =
-+ BlkIdx - DeviceInfo.wSpectraStartBlock;
-+ p_BTableChangesDelta->RC_Entry_Value =
-+ g_pReadCounter[BlkIdx -
-+ DeviceInfo.wSpectraStartBlock];
-+ p_BTableChangesDelta->ValidFields = 0xC0;
-+ }
-+
-+ ftl_cmd_cnt++;
-+#endif
-+
-+ if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
-+ FTL_Adjust_Relative_Erase_Count(BlkIdx);
-+
-+ return status;
-+}
-+
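Besides issuing the erase, the function above bumps the block's wear counter and, once a counter hits 0xFE, kicks off the relative adjustment so the 8-bit counters do not overflow. A sketch of that bookkeeping (the 0xFE trigger mirrors the code above; the rest is illustrative):

#include <stdint.h>
#include <stdbool.h>

#define WEAR_ADJUST_TRIGGER 0xFE	/* value checked in GLOB_FTL_Block_Erase() */

/* Bump the wear counter of one erased block and report whether the
 * relative erase-count adjustment should run now. */
static bool bump_wear_counter(uint8_t *wear, uint32_t blk_idx, uint32_t start_blk)
{
	wear[blk_idx - start_blk]++;
	return wear[blk_idx - start_blk] == WEAR_ADJUST_TRIGGER;
}
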
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Adjust_Relative_Erase_Count
-+* Inputs: index to block that was just incremented and is at the max
-+* Outputs: PASS=0 / FAIL=1
-+* Description: If any erase count is at MAX, adjust the erase count of every
-+* block by subtracting the least worn
-+* counter from the counter value of every entry in the wear table
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
-+{
-+ u8 wLeastWornCounter = MAX_BYTE_VALUE;
-+ u8 wWearCounter;
-+ u32 i, wWearIndex;
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ int wResult = PASS;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
-+ if (IS_BAD_BLOCK(i))
-+ continue;
-+ wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
-+
-+ if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
-+ printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
-+ "This should never occur\n");
-+ wWearCounter = g_pWearCounter[wWearIndex -
-+ DeviceInfo.wSpectraStartBlock];
-+ if (wWearCounter < wLeastWornCounter)
-+ wLeastWornCounter = wWearCounter;
-+ }
-+
-+ if (wLeastWornCounter == 0) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Adjusting Wear Levelling Counters: Special Case\n");
-+ g_pWearCounter[Index_of_MAX -
-+ DeviceInfo.wSpectraStartBlock]--;
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-+ p_BTableChangesDelta->WC_Index =
-+ Index_of_MAX - DeviceInfo.wSpectraStartBlock;
-+ p_BTableChangesDelta->WC_Entry_Value =
-+ g_pWearCounter[Index_of_MAX -
-+ DeviceInfo.wSpectraStartBlock];
-+ p_BTableChangesDelta->ValidFields = 0x30;
-+#endif
-+ FTL_Static_Wear_Leveling();
-+ } else {
-+ for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
-+ if (!IS_BAD_BLOCK(i)) {
-+ wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
-+ g_pWearCounter[wWearIndex -
-+ DeviceInfo.wSpectraStartBlock] =
-+ (u8)(g_pWearCounter
-+ [wWearIndex -
-+ DeviceInfo.wSpectraStartBlock] -
-+ wLeastWornCounter);
-+#if CMD_DMA
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free +=
-+ sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->WC_Index = wWearIndex -
-+ DeviceInfo.wSpectraStartBlock;
-+ p_BTableChangesDelta->WC_Entry_Value =
-+ g_pWearCounter[wWearIndex -
-+ DeviceInfo.wSpectraStartBlock];
-+ p_BTableChangesDelta->ValidFields = 0x30;
-+#endif
-+ }
-+ }
-+
-+ return wResult;
-+}
-+
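The adjustment keeps the counters relative rather than absolute: the smallest wear count among non-bad blocks is found and subtracted from every counter, preserving the spread while pulling the values back into the 8-bit range. A standalone sketch of that normalization over a plain counter array:

#include <stdint.h>

/* Rebase wear counters so the smallest becomes zero, preserving the
 * relative wear between blocks, as FTL_Adjust_Relative_Erase_Count() does. */
static void normalize_wear(uint8_t *wear, uint32_t nr_blocks)
{
	uint8_t min = 0xFF;
	uint32_t i;

	for (i = 0; i < nr_blocks; i++)
		if (wear[i] < min)
			min = wear[i];

	if (min == 0)
		return;		/* the driver treats this as a special case */

	for (i = 0; i < nr_blocks; i++)
		wear[i] -= min;
}
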
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Write_IN_Progress_Block_Table_Page
-+* Inputs: None
-+* Outputs: None
-+* Description: It writes in-progress flag page to the page next to
-+* block table
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+static int FTL_Write_IN_Progress_Block_Table_Page(void)
-+{
-+ int wResult = PASS;
-+ u16 bt_pages;
-+ u16 dwIPFPageAddr;
-+#if !CMD_DMA
-+ u32 *pbt = (u32 *)g_pBlockTable;
-+ u32 wTempBlockTableIndex;
-+#endif
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-+
-+ dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
-+ "Block %d Page %d\n",
-+ g_wBlockTableIndex, dwIPFPageAddr);
-+
-+#if CMD_DMA
-+ wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
-+ g_wBlockTableIndex, dwIPFPageAddr, 1,
-+ LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
-+ if (wResult == FAIL) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__,
-+ g_wBlockTableIndex);
-+ }
-+ g_wBlockTableOffset = dwIPFPageAddr + 1;
-+ p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-+ p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
-+ p_BTableChangesDelta->ValidFields = 0x01;
-+ ftl_cmd_cnt++;
-+#else
-+ wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
-+ g_wBlockTableIndex, dwIPFPageAddr, 1);
-+ if (wResult == FAIL) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in %s, Line %d, "
-+ "Function: %s, new Bad Block %d generated!\n",
-+ __FILE__, __LINE__, __func__,
-+ (int)g_wBlockTableIndex);
-+ MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
-+ wTempBlockTableIndex = FTL_Replace_Block_Table();
-+ bt_block_changed = 1;
-+ if (BAD_BLOCK == wTempBlockTableIndex)
-+ return ERR;
-+ g_wBlockTableIndex = wTempBlockTableIndex;
-+ g_wBlockTableOffset = 0;
-+ /* Block table tag is '00', which means the block is in use */
-+ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
-+ return FAIL;
-+ }
-+ g_wBlockTableOffset = dwIPFPageAddr + 1;
-+#endif
-+ return wResult;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: FTL_Read_Disturbance
-+* Inputs: block address
-+* Outputs: PASS=0 / FAIL=1
-+* Description: used to handle read disturbance. Data in block that
-+* reaches its read limit is moved to a new block
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int FTL_Read_Disturbance(u32 blk_addr)
-+{
-+ int wResult = FAIL;
-+ u32 *pbt = (u32 *) g_pBlockTable;
-+ u32 dwOldBlockAddr = blk_addr;
-+ u32 wBlockNum;
-+ u32 i;
-+ u32 wLeastReadCounter = 0xFFFF;
-+ u32 wLeastReadIndex = BAD_BLOCK;
-+ u32 wSpareBlockNum = 0;
-+ u32 wTempNode;
-+ u32 wReplacedNode;
-+ u8 *g_pTempBuf;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+#if CMD_DMA
-+ g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
-+ cp_back_buf_idx++;
-+ if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
-+ printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
-+ "Maybe too many pending commands in your CDMA chain.\n");
-+ return FAIL;
-+ }
-+#else
-+ g_pTempBuf = tmp_buf_read_disturbance;
-+#endif
-+
-+ wBlockNum = FTL_Get_Block_Index(blk_addr);
-+
-+ do {
-+ /* This is a bug. Here 'i' should be the logical block number
-+ * and start from 1 (0 is reserved for the block table).
-+ * Have fixed it. - Yunpeng 2008.12.19
-+ */
-+ for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
-+ if (IS_SPARE_BLOCK(i)) {
-+ u32 wPhysicalIndex =
-+ (u32)((~SPARE_BLOCK) & pbt[i]);
-+ if (g_pReadCounter[wPhysicalIndex -
-+ DeviceInfo.wSpectraStartBlock] <
-+ wLeastReadCounter) {
-+ wLeastReadCounter =
-+ g_pReadCounter[wPhysicalIndex -
-+ DeviceInfo.wSpectraStartBlock];
-+ wLeastReadIndex = i;
-+ }
-+ wSpareBlockNum++;
-+ }
-+ }
-+
-+ if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
-+ wResult = GLOB_FTL_Garbage_Collection();
-+ if (PASS == wResult)
-+ continue;
-+ else
-+ break;
-+ } else {
-+ wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
-+ wReplacedNode = (u32)((~SPARE_BLOCK) &
-+ pbt[wLeastReadIndex]);
-+#if CMD_DMA
-+ pbt[wBlockNum] = wReplacedNode;
-+ pbt[wLeastReadIndex] = wTempNode;
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = wBlockNum;
-+ p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+
-+ p_BTableChangesDelta =
-+ (struct BTableChangesDelta *)g_pBTDelta_Free;
-+ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-+
-+ p_BTableChangesDelta->ftl_cmd_cnt =
-+ ftl_cmd_cnt;
-+ p_BTableChangesDelta->BT_Index = wLeastReadIndex;
-+ p_BTableChangesDelta->BT_Entry_Value =
-+ pbt[wLeastReadIndex];
-+ p_BTableChangesDelta->ValidFields = 0x0C;
-+
-+ wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
-+ dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
-+ LLD_CMD_FLAG_MODE_CDMA);
-+ if (wResult == FAIL)
-+ return wResult;
-+
-+ ftl_cmd_cnt++;
-+
-+ if (wResult != FAIL) {
-+ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
-+ g_pTempBuf, pbt[wBlockNum], 0,
-+ DeviceInfo.wPagesPerBlock)) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in "
-+ "%s, Line %d, Function: %s, "
-+ "new Bad Block %d "
-+ "generated!\n",
-+ __FILE__, __LINE__, __func__,
-+ (int)pbt[wBlockNum]);
-+ wResult = FAIL;
-+ MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
-+ }
-+ ftl_cmd_cnt++;
-+ }
-+#else
-+ wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
-+ dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
-+ if (wResult == FAIL)
-+ return wResult;
-+
-+ if (wResult != FAIL) {
-+			/* This is a bug. At this time, pbt[wBlockNum]
-+			is still the physical address of the
-+			discard block, and should not be written to.
-+			Have fixed it as below.
-+			-- Yunpeng 2008.12.19
-+			*/
-+ wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
-+ wReplacedNode, 0,
-+ DeviceInfo.wPagesPerBlock);
-+ if (wResult == FAIL) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Program fail in "
-+ "%s, Line %d, Function: %s, "
-+ "new Bad Block %d "
-+ "generated!\n",
-+ __FILE__, __LINE__, __func__,
-+ (int)wReplacedNode);
-+ MARK_BLOCK_AS_BAD(wReplacedNode);
-+ } else {
-+ pbt[wBlockNum] = wReplacedNode;
-+ pbt[wLeastReadIndex] = wTempNode;
-+ }
-+ }
-+
-+ if ((wResult == PASS) && (g_cBlockTableStatus !=
-+ IN_PROGRESS_BLOCK_TABLE)) {
-+ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-+ FTL_Write_IN_Progress_Block_Table_Page();
-+ }
-+#endif
-+ }
-+	} while (wResult != PASS);
-+
-+#if CMD_DMA
-+ /* ... */
-+#endif
-+
-+ return wResult;
-+}
-+
-diff --git a/drivers/block/spectra/flash.h b/drivers/block/spectra/flash.h
-new file mode 100644
-index 0000000..5ed0580
---- /dev/null
-+++ b/drivers/block/spectra/flash.h
-@@ -0,0 +1,198 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#ifndef _FLASH_INTERFACE_
-+#define _FLASH_INTERFACE_
-+
-+#include "ffsport.h"
-+#include "spectraswconfig.h"
-+
-+#define MAX_BYTE_VALUE 0xFF
-+#define MAX_WORD_VALUE 0xFFFF
-+#define MAX_U32_VALUE 0xFFFFFFFF
-+
-+#define MAX_BLOCKNODE_VALUE 0xFFFFFF
-+#define DISCARD_BLOCK 0x800000
-+#define SPARE_BLOCK 0x400000
-+#define BAD_BLOCK 0xC00000
-+
-+#define UNHIT_CACHE_ITEM 0xFFFF
-+
-+#define NAND_CACHE_INIT_ADDR 0xffffffffffffffffULL
-+
-+#define IN_PROGRESS_BLOCK_TABLE 0x00
-+#define CURRENT_BLOCK_TABLE 0x01
-+
-+#define BTSIG_OFFSET (0)
-+#define BTSIG_BYTES (5)
-+#define BTSIG_DELTA (3)
-+
-+#define MAX_READ_COUNTER 0x2710
-+
-+#define FIRST_BT_ID (1)
-+#define LAST_BT_ID (254)
-+#define BTBLOCK_INVAL (u32)(0xFFFFFFFF)
-+
-+struct device_info_tag {
-+ u16 wDeviceMaker;
-+ u16 wDeviceID;
-+ u32 wDeviceType;
-+ u32 wSpectraStartBlock;
-+ u32 wSpectraEndBlock;
-+ u32 wTotalBlocks;
-+ u16 wPagesPerBlock;
-+ u16 wPageSize;
-+ u16 wPageDataSize;
-+ u16 wPageSpareSize;
-+ u16 wNumPageSpareFlag;
-+ u16 wECCBytesPerSector;
-+ u32 wBlockSize;
-+ u32 wBlockDataSize;
-+ u32 wDataBlockNum;
-+ u8 bPlaneNum;
-+ u16 wDeviceMainAreaSize;
-+ u16 wDeviceSpareAreaSize;
-+ u16 wDevicesConnected;
-+ u16 wDeviceWidth;
-+ u16 wHWRevision;
-+ u16 wHWFeatures;
-+
-+ u16 wONFIDevFeatures;
-+ u16 wONFIOptCommands;
-+ u16 wONFITimingMode;
-+ u16 wONFIPgmCacheTimingMode;
-+
-+ u16 MLCDevice;
-+ u16 wSpareSkipBytes;
-+
-+ u8 nBitsInPageNumber;
-+ u8 nBitsInPageDataSize;
-+ u8 nBitsInBlockDataSize;
-+};
-+
-+extern struct device_info_tag DeviceInfo;
-+
-+/* Cache item format */
-+struct flash_cache_item_tag {
-+ u64 address;
-+ u16 use_cnt;
-+ u16 changed;
-+ u8 *buf;
-+};
-+
-+struct flash_cache_tag {
-+ u32 cache_item_size; /* Size in bytes of each cache item */
-+ u16 pages_per_item; /* How many NAND pages in each cache item */
-+ u16 LRU; /* No. of the least recently used cache item */
-+ struct flash_cache_item_tag array[CACHE_ITEM_NUM];
-+};
-+
-+/*
-+ * Data structure for each list node of the management table
-+ * used for the Level 2 Cache. Each node maps one logical NAND block.
-+ */
-+struct spectra_l2_cache_list {
-+ struct list_head list;
-+ u32 logical_blk_num; /* Logical block number */
-+ u32 pages_array[]; /* Page map array of this logical block.
-+ * Array index is the logical block number,
-+			 * and for every item of this array:
-+ * high 16 bit is index of the L2 cache block num,
-+ * low 16 bit is the phy page num
-+ * of the above L2 cache block.
-+ * This array will be kmalloc during run time.
-+ */
-+};
-+
-+struct spectra_l2_cache_info {
-+ u32 blk_array[BLK_NUM_FOR_L2_CACHE];
-+	u16 cur_blk_idx; /* Index into blk_array of the physical block currently in use */
-+	u16 cur_page_num; /* Current page number in use within that block */
-+ struct spectra_l2_cache_list table; /* First node of the table */
-+};
-+
-+#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE 1
-+
-+#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-+struct flash_cache_mod_item_tag {
-+ u64 address;
-+ u8 changed;
-+};
-+
-+struct flash_cache_delta_list_tag {
-+ u8 item; /* used cache item */
-+ struct flash_cache_mod_item_tag cache;
-+};
-+#endif
-+
-+extern struct flash_cache_tag Cache;
-+
-+extern u8 *buf_read_page_main_spare;
-+extern u8 *buf_write_page_main_spare;
-+extern u8 *buf_read_page_spare;
-+extern u8 *buf_get_bad_block;
-+extern u8 *cdma_desc_buf;
-+extern u8 *memcp_desc_buf;
-+
-+/* Structure used for the IdentifyDevice function */
-+struct spectra_indentfy_dev_tag {
-+ u32 NumBlocks;
-+ u16 PagesPerBlock;
-+ u16 PageDataSize;
-+ u16 wECCBytesPerSector;
-+ u32 wDataBlockNum;
-+};
-+
-+int GLOB_FTL_Flash_Init(void);
-+int GLOB_FTL_Flash_Release(void);
-+/*void GLOB_FTL_Erase_Flash(void);*/
-+int GLOB_FTL_Block_Erase(u64 block_addr);
-+int GLOB_FTL_Is_BadBlock(u32 block_num);
-+int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
-+int GLOB_FTL_Event_Status(int *);
-+u16 glob_ftl_execute_cmds(void);
-+
-+/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
-+int FTL_Read_Disturbance(u32 dwBlockAddr);
-+
-+/*Flash r/w based on cache*/
-+int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
-+int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
-+int GLOB_FTL_Wear_Leveling(void);
-+int GLOB_FTL_Flash_Format(void);
-+int GLOB_FTL_Init(void);
-+int GLOB_FTL_Flush_Cache(void);
-+int GLOB_FTL_Garbage_Collection(void);
-+int GLOB_FTL_BT_Garbage_Collection(void);
-+void GLOB_FTL_Cache_Release(void);
-+u8 *get_blk_table_start_addr(void);
-+u8 *get_wear_leveling_table_start_addr(void);
-+unsigned long get_blk_table_len(void);
-+unsigned long get_wear_leveling_table_len(void);
-+
-+#if DEBUG_BNDRY
-+void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno,
-+ char *filename);
-+#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
-+ limit, no, __LINE__, __FILE__)
-+#else
-+#define debug_boundary_error(chnl, limit, no) ;
-+#endif
-+
-+#endif /*_FLASH_INTERFACE_*/
-diff --git a/drivers/block/spectra/lld.c b/drivers/block/spectra/lld.c
-new file mode 100644
-index 0000000..3f411af
---- /dev/null
-+++ b/drivers/block/spectra/lld.c
-@@ -0,0 +1,258 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#include "spectraswconfig.h"
-+#include "ffsport.h"
-+#include "ffsdefs.h"
-+#include "lld.h"
-+#include "lld_nand.h"
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+#if FLASH_EMU /* vector all the LLD calls to the LLD_EMU code */
-+#include "lld_emu.h"
-+#include "lld_cdma.h"
-+
-+/* common functions: */
-+u16 GLOB_LLD_Flash_Reset(void)
-+{
-+ return emu_Flash_Reset();
-+}
-+
-+u16 GLOB_LLD_Read_Device_ID(void)
-+{
-+ return emu_Read_Device_ID();
-+}
-+
-+int GLOB_LLD_Flash_Release(void)
-+{
-+ return emu_Flash_Release();
-+}
-+
-+u16 GLOB_LLD_Flash_Init(void)
-+{
-+ return emu_Flash_Init();
-+}
-+
-+u16 GLOB_LLD_Erase_Block(u32 block_add)
-+{
-+ return emu_Erase_Block(block_add);
-+}
-+
-+u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
-+ u16 PageCount)
-+{
-+ return emu_Write_Page_Main(write_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
-+ u16 PageCount)
-+{
-+ return emu_Read_Page_Main(read_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
-+ u32 block, u16 page, u16 page_count)
-+{
-+ return emu_Read_Page_Main(read_data, block, page, page_count);
-+}
-+
-+u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
-+ u16 Page, u16 PageCount)
-+{
-+ return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
-+ u16 Page, u16 PageCount)
-+{
-+ return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
-+ u16 PageCount)
-+{
-+ return emu_Write_Page_Spare(write_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
-+ u16 PageCount)
-+{
-+ return emu_Read_Page_Spare(read_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Get_Bad_Block(u32 block)
-+{
-+ return emu_Get_Bad_Block(block);
-+}
-+
-+#endif /* FLASH_EMU */
-+
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+#if FLASH_NAND /* vector all the LLD calls to the NAND controller code */
-+#include "lld_nand.h"
-+#include "lld_cdma.h"
-+#include "flash.h"
-+
-+/* common functions for LLD_NAND */
-+void GLOB_LLD_ECC_Control(int enable)
-+{
-+ NAND_ECC_Ctrl(enable);
-+}
-+
-+/* common functions for LLD_NAND */
-+u16 GLOB_LLD_Flash_Reset(void)
-+{
-+ return NAND_Flash_Reset();
-+}
-+
-+u16 GLOB_LLD_Read_Device_ID(void)
-+{
-+ return NAND_Read_Device_ID();
-+}
-+
-+u16 GLOB_LLD_UnlockArrayAll(void)
-+{
-+ return NAND_UnlockArrayAll();
-+}
-+
-+u16 GLOB_LLD_Flash_Init(void)
-+{
-+ return NAND_Flash_Init();
-+}
-+
-+int GLOB_LLD_Flash_Release(void)
-+{
-+ return nand_release();
-+}
-+
-+u16 GLOB_LLD_Erase_Block(u32 block_add)
-+{
-+ return NAND_Erase_Block(block_add);
-+}
-+
-+
-+u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
-+ u16 PageCount)
-+{
-+ return NAND_Write_Page_Main(write_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 page,
-+ u16 page_count)
-+{
-+ if (page_count == 1) /* Using polling to improve read speed */
-+ return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
-+ else
-+ return NAND_Read_Page_Main(read_data, block, page, page_count);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
-+ u32 block, u16 page, u16 page_count)
-+{
-+ return NAND_Read_Page_Main_Polling(read_data,
-+ block, page, page_count);
-+}
-+
-+u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
-+ u16 Page, u16 PageCount)
-+{
-+ return NAND_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
-+ u16 PageCount)
-+{
-+ return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
-+ u16 page, u16 page_count)
-+{
-+ return NAND_Read_Page_Main_Spare(read_data, block, page, page_count);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
-+ u16 PageCount)
-+{
-+ return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
-+}
-+
-+u16 GLOB_LLD_Get_Bad_Block(u32 block)
-+{
-+ return NAND_Get_Bad_Block(block);
-+}
-+
-+u16 GLOB_LLD_Event_Status(void)
-+{
-+ return CDMA_Event_Status();
-+}
-+
-+u16 glob_lld_execute_cmds(void)
-+{
-+ return CDMA_Execute_CMDs();
-+}
-+
-+u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
-+ u32 ByteCount, u16 flag)
-+{
-+ /* Replace the hardware memcopy with software memcpy function */
-+ if (CDMA_Execute_CMDs())
-+ return FAIL;
-+ memcpy(dest, src, ByteCount);
-+ return PASS;
-+
-+ /* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
-+}
-+
-+u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
-+{
-+ return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
-+}
-+
-+u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
-+{
-+ return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
-+ u16 count, u16 flags)
-+{
-+ return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
-+}
-+
-+u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
-+ u16 count, u16 flags)
-+{
-+ return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
-+ data, block, page, count, flags);
-+}
-+
-+u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
-+ u32 block, u16 page, u16 count)
-+{
-+ return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
-+ LLD_CMD_FLAG_MODE_CDMA);
-+}
-+
-+#endif /* FLASH_NAND */
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+
-+/* end of LLD.c */
-diff --git a/drivers/block/spectra/lld.h b/drivers/block/spectra/lld.h
-new file mode 100644
-index 0000000..d3738e0
---- /dev/null
-+++ b/drivers/block/spectra/lld.h
-@@ -0,0 +1,111 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+
-+
-+#ifndef _LLD_
-+#define _LLD_
-+
-+#include "ffsport.h"
-+#include "spectraswconfig.h"
-+#include "flash.h"
-+
-+#define GOOD_BLOCK 0
-+#define DEFECTIVE_BLOCK 1
-+#define READ_ERROR 2
-+
-+#define CLK_X 5
-+#define CLK_MULTI 4
-+
-+/* Typedefs */
-+
-+/* prototypes: API for LLD */
-+/* Currently, Write_Page_Main
-+ * MemCopy
-+ * Read_Page_Main_Spare
-+ * do not have flag because they were not implemented prior to this
-+ * They are not being added to keep changes to a minimum for now.
-+ * Currently, they are not required (only reqd for Wr_P_M_S.)
-+ * Later on, these NEED to be changed.
-+ */
-+
-+extern void GLOB_LLD_ECC_Control(int enable);
-+
-+extern u16 GLOB_LLD_Flash_Reset(void);
-+
-+extern u16 GLOB_LLD_Read_Device_ID(void);
-+
-+extern u16 GLOB_LLD_UnlockArrayAll(void);
-+
-+extern u16 GLOB_LLD_Flash_Init(void);
-+
-+extern int GLOB_LLD_Flash_Release(void);
-+
-+extern u16 GLOB_LLD_Erase_Block(u32 block_add);
-+
-+extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
-+ u32 block, u16 Page, u16 PageCount);
-+
-+extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
-+ u32 block, u16 page, u16 page_count);
-+
-+extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
-+ u32 block, u16 page, u16 page_count);
-+
-+extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
-+ u32 block, u16 Page, u16 PageCount);
-+
-+extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
-+ u32 block, u16 Page, u16 PageCount);
-+
-+extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
-+ u32 block, u16 page, u16 page_count);
-+
-+extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
-+ u32 block, u16 Page, u16 PageCount);
-+
-+extern u16 GLOB_LLD_Get_Bad_Block(u32 block);
-+
-+extern u16 GLOB_LLD_Event_Status(void);
-+
-+extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag);
-+
-+extern u16 glob_lld_execute_cmds(void);
-+
-+extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);
-+
-+extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
-+ u32 block, u16 page, u16 count);
-+
-+extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
-+ u32 block, u16 page, u16 count, u16 flags);
-+
-+extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
-+ u32 block, u16 page, u16 count, u16 flags);
-+
-+extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
-+ u32 block, u16 page, u16 count);
-+
-+#define LLD_CMD_FLAG_ORDER_BEFORE_REST (0x1)
-+#define LLD_CMD_FLAG_MODE_CDMA (0x8)
-+
-+
-+#endif /*_LLD_ */
-+
-+
-diff --git a/drivers/block/spectra/lld_cdma.c b/drivers/block/spectra/lld_cdma.c
-new file mode 100644
-index 0000000..c6e7610
---- /dev/null
-+++ b/drivers/block/spectra/lld_cdma.c
-@@ -0,0 +1,910 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/slab.h>
-+
-+#include "spectraswconfig.h"
-+#include "lld.h"
-+#include "lld_nand.h"
-+#include "lld_cdma.h"
-+#include "lld_emu.h"
-+#include "flash.h"
-+#include "nand_regs.h"
-+
-+#define MAX_PENDING_CMDS 4
-+#define MODE_02 (0x2 << 26)
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: CDMA_Data_Cmd
-+* Inputs: cmd code (aligned for hw)
-+* data: pointer to source or destination
-+* block: block address
-+* page: page address
-+* num: num pages to transfer
-+* Outputs: PASS
-+* Description: This function takes the parameters and puts them
-+* into the "pending commands" array.
-+* It does not parse or validate the parameters.
-+*              The array index is the same as the tag.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
-+{
-+ u8 bank;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (0 == cmd)
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);
-+
-+	/* If a command for another bank comes in, then first execute the */
-+	/* pending commands of the current bank, then set the new */
-+	/* bank as the current bank */
-+ bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+ if (bank != info.flash_bank) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Will access new bank. old bank: %d, new bank: %d\n",
-+ info.flash_bank, bank);
-+ if (CDMA_Execute_CMDs()) {
-+ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
-+ return FAIL;
-+ }
-+ info.flash_bank = bank;
-+ }
-+
-+ info.pcmds[info.pcmds_num].CMD = cmd;
-+ info.pcmds[info.pcmds_num].DataAddr = data;
-+ info.pcmds[info.pcmds_num].Block = block;
-+ info.pcmds[info.pcmds_num].Page = page;
-+ info.pcmds[info.pcmds_num].PageCount = num;
-+ info.pcmds[info.pcmds_num].DataDestAddr = 0;
-+ info.pcmds[info.pcmds_num].DataSrcAddr = 0;
-+ info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
-+ info.pcmds[info.pcmds_num].Flags = flags;
-+ info.pcmds[info.pcmds_num].Status = 0xB0B;
-+
-+ switch (cmd) {
-+ case WRITE_MAIN_SPARE_CMD:
-+ Conv_Main_Spare_Data_Log2Phy_Format(data, num);
-+ break;
-+ case WRITE_SPARE_CMD:
-+ Conv_Spare_Data_Log2Phy_Format(data);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ info.pcmds_num++;
-+
-+ if (info.pcmds_num >= MAX_PENDING_CMDS) {
-+ if (CDMA_Execute_CMDs()) {
-+ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
-+ return FAIL;
-+ }
-+ }
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: CDMA_MemCopy_CMD
-+* Inputs: dest: pointer to destination
-+* src: pointer to source
-+* count: num bytes to transfer
-+* Outputs: PASS
-+* Description: This function takes the parameters and puts them
-+* into the "pending commands" array.
-+* It does not parse or validate the parameters.
-+*              The array index is the same as the tag.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
-+{
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
-+ info.pcmds[info.pcmds_num].DataAddr = 0;
-+ info.pcmds[info.pcmds_num].Block = 0;
-+ info.pcmds[info.pcmds_num].Page = 0;
-+ info.pcmds[info.pcmds_num].PageCount = 0;
-+ info.pcmds[info.pcmds_num].DataDestAddr = dest;
-+ info.pcmds[info.pcmds_num].DataSrcAddr = src;
-+ info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
-+ info.pcmds[info.pcmds_num].Flags = flags;
-+ info.pcmds[info.pcmds_num].Status = 0xB0B;
-+
-+ info.pcmds_num++;
-+
-+ if (info.pcmds_num >= MAX_PENDING_CMDS) {
-+ if (CDMA_Execute_CMDs()) {
-+ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
-+ return FAIL;
-+ }
-+ }
-+
-+ return PASS;
-+}
-+
-+#if 0
-+/* Prints the PendingCMDs array */
-+void print_pending_cmds(void)
-+{
-+ u16 i;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < info.pcmds_num; i++) {
-+ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
-+ switch (info.pcmds[i].CMD) {
-+ case ERASE_CMD:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Erase Command (0x%x)\n",
-+ info.pcmds[i].CMD);
-+ break;
-+ case WRITE_MAIN_CMD:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Write Main Command (0x%x)\n",
-+ info.pcmds[i].CMD);
-+ break;
-+ case WRITE_MAIN_SPARE_CMD:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Write Main Spare Command (0x%x)\n",
-+ info.pcmds[i].CMD);
-+ break;
-+ case READ_MAIN_SPARE_CMD:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Read Main Spare Command (0x%x)\n",
-+ info.pcmds[i].CMD);
-+ break;
-+ case READ_MAIN_CMD:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Read Main Command (0x%x)\n",
-+ info.pcmds[i].CMD);
-+ break;
-+ case MEMCOPY_CMD:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Memcopy Command (0x%x)\n",
-+ info.pcmds[i].CMD);
-+ break;
-+ case DUMMY_CMD:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Dummy Command (0x%x)\n",
-+ info.pcmds[i].CMD);
-+ break;
-+ default:
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Illegal Command (0x%x)\n",
-+ info.pcmds[i].CMD);
-+ break;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
-+ (u32)info.pcmds[i].DataAddr);
-+ nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
-+ info.pcmds[i].Block);
-+ nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
-+ info.pcmds[i].Page);
-+ nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
-+ info.pcmds[i].PageCount);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
-+ (u32)info.pcmds[i].DataDestAddr);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
-+ (u32)info.pcmds[i].DataSrcAddr);
-+ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
-+ info.pcmds[i].MemCopyByteCnt);
-+ nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
-+ info.pcmds[i].Flags);
-+ nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
-+ info.pcmds[i].Status);
-+ }
-+}
-+
-+/* Print the CDMA descriptors */
-+void print_cdma_descriptors(void)
-+{
-+ struct cdma_descriptor *pc;
-+ int i;
-+
-+ pc = (struct cdma_descriptor *)info.cdma_desc_buf;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");
-+
-+ for (i = 0; i < info.cdma_num; i++) {
-+ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
-+ pc[i].NxtPointerHi, pc[i].NxtPointerLo);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
-+ pc[i].FlashPointerHi, pc[i].FlashPointerLo);
-+ nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
-+ pc[i].CommandType);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
-+ pc[i].MemAddrHi, pc[i].MemAddrLo);
-+ nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
-+ pc[i].CommandFlags);
-+ nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
-+ pc[i].Channel, pc[i].Status);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
-+ pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Reserved12: 0x%x, Reserved13: 0x%x, "
-+ "Reserved14: 0x%x, pcmd: %d\n",
-+ pc[i].Reserved12, pc[i].Reserved13,
-+ pc[i].Reserved14, pc[i].pcmd);
-+ }
-+}
-+
-+/* Print the Memory copy descriptors */
-+static void print_memcp_descriptors(void)
-+{
-+ struct memcpy_descriptor *pm;
-+ int i;
-+
-+ pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");
-+
-+ for (i = 0; i < info.cdma_num; i++) {
-+ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
-+ pm[i].NxtPointerHi, pm[i].NxtPointerLo);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
-+ pm[i].SrcAddrHi, pm[i].SrcAddrLo);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
-+ pm[i].DestAddrHi, pm[i].DestAddrLo);
-+ nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
-+ pm[i].XferSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
-+ pm[i].MemCopyFlags);
-+ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
-+ pm[i].MemCopyStatus);
-+ nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
-+ pm[i].reserved9);
-+ nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
-+ pm[i].reserved10);
-+ nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
-+ pm[i].reserved11);
-+ nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
-+ pm[i].reserved12);
-+ nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
-+ pm[i].reserved13);
-+ nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
-+ pm[i].reserved14);
-+ nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
-+ pm[i].reserved15);
-+ }
-+}
-+#endif
-+
-+/* Reset cdma_descriptor chain to 0 */
-+static void reset_cdma_desc(int i)
-+{
-+ struct cdma_descriptor *ptr;
-+
-+ BUG_ON(i >= MAX_DESCS);
-+
-+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-+
-+ ptr[i].NxtPointerHi = 0;
-+ ptr[i].NxtPointerLo = 0;
-+ ptr[i].FlashPointerHi = 0;
-+ ptr[i].FlashPointerLo = 0;
-+ ptr[i].CommandType = 0;
-+ ptr[i].MemAddrHi = 0;
-+ ptr[i].MemAddrLo = 0;
-+ ptr[i].CommandFlags = 0;
-+ ptr[i].Channel = 0;
-+ ptr[i].Status = 0;
-+ ptr[i].MemCopyPointerHi = 0;
-+ ptr[i].MemCopyPointerLo = 0;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: CDMA_UpdateEventStatus
-+* Inputs: none
-+* Outputs: none
-+* Description: This function updates the event status of all the channels
-+* when an error condition is reported.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+void CDMA_UpdateEventStatus(void)
-+{
-+ int i, j, active_chan;
-+ struct cdma_descriptor *ptr;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-+
-+ for (j = 0; j < info.cdma_num; j++) {
-+ /* Check for the descriptor with failure */
-+ if ((ptr[j].Status & CMD_DMA_DESC_FAIL))
-+ break;
-+
-+ }
-+
-+ /* All the previous cmd's status for this channel must be good */
-+ for (i = 0; i < j; i++) {
-+ if (ptr[i].pcmd != 0xff)
-+ info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
-+ }
-+
-+ /* Abort the channel with type 0 reset command. It resets the */
-+ /* selected channel after the descriptor completes the flash */
-+ /* operation and status has been updated for the descriptor. */
-+ /* Memory Copy and Sync associated with this descriptor will */
-+ /* not be executed */
-+ active_chan = ioread32(FlashReg + CHNL_ACTIVE);
-+ if ((active_chan & (1 << info.flash_bank)) == (1 << info.flash_bank)) {
-+ iowrite32(MODE_02 | (0 << 4), FlashMem); /* Type 0 reset */
-+ iowrite32((0xF << 4) | info.flash_bank, FlashMem + 0x10);
-+	} else { /* Should not reach here */
-+ printk(KERN_ERR "Error! Used bank is not set in"
-+ " reg CHNL_ACTIVE\n");
-+ }
-+}
-+
-+static void cdma_trans(u16 chan)
-+{
-+ u32 addr;
-+
-+ addr = info.cdma_desc;
-+
-+ iowrite32(MODE_10 | (chan << 24), FlashMem);
-+ iowrite32((1 << 7) | chan, FlashMem + 0x10);
-+
-+ iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & (addr >> 16)) << 8),
-+ FlashMem);
-+ iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);
-+
-+ iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & addr) << 8), FlashMem);
-+ iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);
-+
-+ iowrite32(MODE_10 | (chan << 24), FlashMem);
-+ iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
-+* Inputs:    none (operates on the pending commands array)
-+* Outputs: PASS/FAIL
-+* Description: Build the SDMA chain(s) by making one CMD-DMA descriptor
-+* for each pending command, start the CDMA engine, and return.
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 CDMA_Execute_CMDs(void)
-+{
-+ int i, ret;
-+ u64 flash_add;
-+ u32 ptr;
-+ dma_addr_t map_addr, next_ptr;
-+ u16 status = PASS;
-+ u16 tmp_c;
-+ struct cdma_descriptor *pc;
-+ struct memcpy_descriptor *pm;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ /* No pending cmds to execute, just exit */
-+ if (0 == info.pcmds_num) {
-+ nand_dbg_print(NAND_DBG_TRACE,
-+ "No pending cmds to execute. Just exit.\n");
-+ return PASS;
-+ }
-+
-+ for (i = 0; i < MAX_DESCS; i++)
-+ reset_cdma_desc(i);
-+
-+ pc = (struct cdma_descriptor *)info.cdma_desc_buf;
-+ pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
-+
-+ info.cdma_desc = virt_to_bus(info.cdma_desc_buf);
-+ info.memcp_desc = virt_to_bus(info.memcp_desc_buf);
-+ next_ptr = info.cdma_desc;
-+ info.cdma_num = 0;
-+
-+ for (i = 0; i < info.pcmds_num; i++) {
-+ if (info.pcmds[i].Block >= DeviceInfo.wTotalBlocks) {
-+ info.pcmds[i].Status = CMD_NOT_DONE;
-+ continue;
-+ }
-+
-+ next_ptr += sizeof(struct cdma_descriptor);
-+ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
-+ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-+
-+ /* Use the Block offset within a bank */
-+ tmp_c = info.pcmds[i].Block /
-+ (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+ flash_add = (u64)(info.pcmds[i].Block - tmp_c *
-+ (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
-+ DeviceInfo.wBlockDataSize +
-+ (u64)(info.pcmds[i].Page) *
-+ DeviceInfo.wPageDataSize;
-+
-+ ptr = MODE_10 | (info.flash_bank << 24) |
-+ (u32)GLOB_u64_Div(flash_add,
-+ DeviceInfo.wPageDataSize);
-+ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
-+ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-+
-+ if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
-+ (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
-+ /* Descriptor to set Main+Spare Access Mode */
-+ pc[info.cdma_num].CommandType = 0x43;
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
-+ pc[info.cdma_num].MemAddrHi = 0;
-+ pc[info.cdma_num].MemAddrLo = 0;
-+ pc[info.cdma_num].Channel = 0;
-+ pc[info.cdma_num].Status = 0;
-+ pc[info.cdma_num].pcmd = i;
-+
-+ info.cdma_num++;
-+ BUG_ON(info.cdma_num >= MAX_DESCS);
-+
-+ reset_cdma_desc(info.cdma_num);
-+ next_ptr += sizeof(struct cdma_descriptor);
-+ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
-+ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-+ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
-+ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-+ }
-+
-+ switch (info.pcmds[i].CMD) {
-+ case ERASE_CMD:
-+ pc[info.cdma_num].CommandType = 1;
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
-+ pc[info.cdma_num].MemAddrHi = 0;
-+ pc[info.cdma_num].MemAddrLo = 0;
-+ break;
-+
-+ case WRITE_MAIN_CMD:
-+ pc[info.cdma_num].CommandType =
-+ 0x2100 | info.pcmds[i].PageCount;
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
-+ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
-+ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
-+ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
-+ break;
-+
-+ case READ_MAIN_CMD:
-+ pc[info.cdma_num].CommandType =
-+ 0x2000 | info.pcmds[i].PageCount;
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
-+ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
-+ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
-+ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
-+ break;
-+
-+ case WRITE_MAIN_SPARE_CMD:
-+ pc[info.cdma_num].CommandType =
-+ 0x2100 | info.pcmds[i].PageCount;
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
-+ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
-+ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
-+ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
-+ break;
-+
-+ case READ_MAIN_SPARE_CMD:
-+ pc[info.cdma_num].CommandType =
-+ 0x2000 | info.pcmds[i].PageCount;
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
-+ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
-+ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
-+ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
-+ break;
-+
-+ case MEMCOPY_CMD:
-+ pc[info.cdma_num].CommandType = 0xFFFF; /* NOP cmd */
-+ /* Set bit 11 to let the CDMA engine continue to */
-+ /* execute only after it has finished processing */
-+ /* the memcopy descriptor. */
-+ /* Also set bit 10 and bit 9 to 1 */
-+ pc[info.cdma_num].CommandFlags = 0x0E40;
-+ map_addr = info.memcp_desc + info.cdma_num *
-+ sizeof(struct memcpy_descriptor);
-+ pc[info.cdma_num].MemCopyPointerHi = map_addr >> 16;
-+ pc[info.cdma_num].MemCopyPointerLo = map_addr & 0xffff;
-+
-+ pm[info.cdma_num].NxtPointerHi = 0;
-+ pm[info.cdma_num].NxtPointerLo = 0;
-+
-+ map_addr = virt_to_bus(info.pcmds[i].DataSrcAddr);
-+ pm[info.cdma_num].SrcAddrHi = map_addr >> 16;
-+ pm[info.cdma_num].SrcAddrLo = map_addr & 0xffff;
-+ map_addr = virt_to_bus(info.pcmds[i].DataDestAddr);
-+ pm[info.cdma_num].DestAddrHi = map_addr >> 16;
-+ pm[info.cdma_num].DestAddrLo = map_addr & 0xffff;
-+
-+ pm[info.cdma_num].XferSize =
-+ info.pcmds[i].MemCopyByteCnt;
-+ pm[info.cdma_num].MemCopyFlags =
-+ (0 << 15 | 0 << 14 | 27 << 8 | 0x40);
-+ pm[info.cdma_num].MemCopyStatus = 0;
-+ break;
-+
-+ case DUMMY_CMD:
-+ default:
-+ pc[info.cdma_num].CommandType = 0XFFFF;
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
-+ pc[info.cdma_num].MemAddrHi = 0;
-+ pc[info.cdma_num].MemAddrLo = 0;
-+ break;
-+ }
-+
-+ pc[info.cdma_num].Channel = 0;
-+ pc[info.cdma_num].Status = 0;
-+ pc[info.cdma_num].pcmd = i;
-+
-+ info.cdma_num++;
-+ BUG_ON(info.cdma_num >= MAX_DESCS);
-+
-+ if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
-+ (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
-+ /* Descriptor to set back Main Area Access Mode */
-+ reset_cdma_desc(info.cdma_num);
-+ next_ptr += sizeof(struct cdma_descriptor);
-+ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
-+ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-+
-+ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
-+ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-+
-+ pc[info.cdma_num].CommandType = 0x42;
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
-+ pc[info.cdma_num].MemAddrHi = 0;
-+ pc[info.cdma_num].MemAddrLo = 0;
-+
-+ pc[info.cdma_num].Channel = 0;
-+ pc[info.cdma_num].Status = 0;
-+ pc[info.cdma_num].pcmd = i;
-+
-+ info.cdma_num++;
-+ BUG_ON(info.cdma_num >= MAX_DESCS);
-+ }
-+ }
-+
-+ /* Add a dummy descriptor at end of the CDMA chain */
-+ reset_cdma_desc(info.cdma_num);
-+ ptr = MODE_10 | (info.flash_bank << 24);
-+ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
-+ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-+ pc[info.cdma_num].CommandType = 0xFFFF; /* NOP command */
-+ /* Set Command Flags for the last CDMA descriptor: */
-+ /* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
-+ pc[info.cdma_num].CommandFlags =
-+ (0 << 10) | (0 << 9) | (1 << 8) | 0x40;
-+ pc[info.cdma_num].pcmd = 0xff; /* Set it to an illegal value */
-+ info.cdma_num++;
-+ BUG_ON(info.cdma_num >= MAX_DESCS);
-+
-+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-+
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ /* Wait for DMA to be enabled before issuing the next command */
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+ cdma_trans(info.flash_bank);
-+
-+ ret = wait_for_completion_timeout(&info.complete, 50 * HZ);
-+ if (!ret)
-+ printk(KERN_ERR "Wait for completion timeout "
-+ "in %s, Line %d\n", __FILE__, __LINE__);
-+ status = info.ret;
-+
-+ info.pcmds_num = 0; /* Clear the pending cmds number to 0 */
-+
-+ return status;
-+}
-+
-+int is_cdma_interrupt(void)
-+{
-+ u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
-+ u32 int_en_mask;
-+ u32 cdma_int_en_mask;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ /* Set the global Enable masks for only those interrupts
-+ * that are supported */
-+ cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
-+ DMA_INTR__DESC_COMP_CHANNEL1 |
-+ DMA_INTR__DESC_COMP_CHANNEL2 |
-+ DMA_INTR__DESC_COMP_CHANNEL3 |
-+ DMA_INTR__MEMCOPY_DESC_COMP);
-+
-+ int_en_mask = (INTR_STATUS0__ECC_ERR |
-+ INTR_STATUS0__PROGRAM_FAIL |
-+ INTR_STATUS0__ERASE_FAIL);
-+
-+ ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
-+ ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
-+ ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
-+ ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
-+ ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
-+ "0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
-+ ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);
-+
-+ if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
-+ return 1;
-+ } else {
-+ iowrite32(ints_b0, FlashReg + INTR_STATUS0);
-+ iowrite32(ints_b1, FlashReg + INTR_STATUS1);
-+ iowrite32(ints_b2, FlashReg + INTR_STATUS2);
-+ iowrite32(ints_b3, FlashReg + INTR_STATUS3);
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Not a NAND controller interrupt! Ignore it.\n");
-+ return 0;
-+ }
-+}
-+
-+static void update_event_status(void)
-+{
-+ int i;
-+ struct cdma_descriptor *ptr;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-+
-+ for (i = 0; i < info.cdma_num; i++) {
-+ if (ptr[i].pcmd != 0xff)
-+ info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
-+ if ((ptr[i].CommandType == 0x41) ||
-+ (ptr[i].CommandType == 0x42) ||
-+ (ptr[i].CommandType == 0x43))
-+ continue;
-+
-+ switch (info.pcmds[ptr[i].pcmd].CMD) {
-+ case READ_MAIN_SPARE_CMD:
-+ Conv_Main_Spare_Data_Phy2Log_Format(
-+ info.pcmds[ptr[i].pcmd].DataAddr,
-+ info.pcmds[ptr[i].pcmd].PageCount);
-+ break;
-+ case READ_SPARE_CMD:
-+ Conv_Spare_Data_Phy2Log_Format(
-+ info.pcmds[ptr[i].pcmd].DataAddr);
-+ break;
-+ }
-+ }
-+}
-+
-+static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
-+{
-+ u16 event = EVENT_NONE;
-+ u16 err_byte;
-+ u16 err_page = 0;
-+ u8 err_sector;
-+ u8 err_device;
-+ u16 ecc_correction_info;
-+ u16 err_address;
-+ u32 eccSectorSize;
-+ u8 *err_pos;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-+
-+ do {
-+ if (0 == ch)
-+ err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
-+ else if (1 == ch)
-+ err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
-+ else if (2 == ch)
-+ err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
-+ else if (3 == ch)
-+ err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);
-+
-+ err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
-+ err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
-+ err_sector = ((err_address &
-+ ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
-+
-+ ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
-+ err_device = ((ecc_correction_info &
-+ ERR_CORRECTION_INFO__DEVICE_NR) >> 8);
-+
-+ if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
-+ event = EVENT_UNCORRECTABLE_DATA_ERROR;
-+ } else {
-+ event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
-+ if (err_byte < ECC_SECTOR_SIZE) {
-+ err_pos = buf +
-+ (err_page - page) *
-+ DeviceInfo.wPageDataSize +
-+ err_sector * eccSectorSize +
-+ err_byte *
-+ DeviceInfo.wDevicesConnected +
-+ err_device;
-+ *err_pos ^= ecc_correction_info &
-+ ERR_CORRECTION_INFO__BYTEMASK;
-+ }
-+ }
-+ } while (!(ecc_correction_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
-+
-+ return event;
-+}
-+
-+static u16 process_ecc_int(u32 c, u16 *p_desc_num)
-+{
-+ struct cdma_descriptor *ptr;
-+ u16 j;
-+ int event = EVENT_PASS;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (c != info.flash_bank)
-+ printk(KERN_ERR "Error!info.flash_bank is %d, while c is %d\n",
-+ info.flash_bank, c);
-+
-+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-+
-+ for (j = 0; j < info.cdma_num; j++)
-+ if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
-+ break;
-+
-+	*p_desc_num = j; /* Pass the descriptor number found here */
-+
-+ if (j >= info.cdma_num) {
-+		printk(KERN_ERR "Cannot find the correct descriptor number "
-+			"when the ECC interrupt was triggered! "
-+			"info.cdma_num: %d, j: %d\n", info.cdma_num, j);
-+ return EVENT_UNCORRECTABLE_DATA_ERROR;
-+ }
-+
-+ event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
-+ info.pcmds[ptr[j].pcmd].Page);
-+
-+ if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
-+		printk(KERN_ERR "Uncorrectable ECC error! "
-+			"info.cdma_num: %d, j: %d, "
-+ "pending cmd CMD: 0x%x, "
-+ "Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
-+ info.cdma_num, j,
-+ info.pcmds[ptr[j].pcmd].CMD,
-+ info.pcmds[ptr[j].pcmd].Block,
-+ info.pcmds[ptr[j].pcmd].Page,
-+ info.pcmds[ptr[j].pcmd].PageCount);
-+
-+ if (ptr[j].pcmd != 0xff)
-+ info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
-+ CDMA_UpdateEventStatus();
-+ }
-+
-+ return event;
-+}
-+
-+static void process_prog_erase_fail_int(u16 desc_num)
-+{
-+ struct cdma_descriptor *ptr;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-+
-+ if (ptr[desc_num].pcmd != 0xFF)
-+ info.pcmds[ptr[desc_num].pcmd].Status = CMD_FAIL;
-+
-+ CDMA_UpdateEventStatus();
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: CDMA_Event_Status (for use with CMD_DMA)
-+* Inputs: none
-+* Outputs: Event_Status code
-+* Description: This function is called after an interrupt has happened
-+* It reads the HW status register and ...tbd
-+* It returns the appropriate event status
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 CDMA_Event_Status(void)
-+{
-+ u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
-+ INTR_STATUS2, INTR_STATUS3};
-+ u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
-+ DMA_INTR__DESC_COMP_CHANNEL1,
-+ DMA_INTR__DESC_COMP_CHANNEL2,
-+ DMA_INTR__DESC_COMP_CHANNEL3};
-+ u32 cdma_int_status, int_status;
-+ u32 ecc_enable = 0;
-+ u16 event = EVENT_PASS;
-+ u16 cur_desc = 0;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ ecc_enable = ioread32(FlashReg + ECC_ENABLE);
-+
-+ while (1) {
-+ int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
-+ if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
-+ event = process_ecc_int(info.flash_bank, &cur_desc);
-+ iowrite32(INTR_STATUS0__ECC_ERR,
-+ FlashReg + ints_addr[info.flash_bank]);
-+ if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "ints_bank0 to ints_bank3: "
-+ "0x%x, 0x%x, 0x%x, 0x%x, "
-+ "ints_cdma: 0x%x\n",
-+ ioread32(FlashReg + INTR_STATUS0),
-+ ioread32(FlashReg + INTR_STATUS1),
-+ ioread32(FlashReg + INTR_STATUS2),
-+ ioread32(FlashReg + INTR_STATUS3),
-+ ioread32(FlashReg + DMA_INTR));
-+ break;
-+ }
-+ } else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
-+ printk(KERN_ERR "NAND program fail interrupt!\n");
-+ process_prog_erase_fail_int(cur_desc);
-+ event = EVENT_PROGRAM_FAILURE;
-+ break;
-+ } else if (int_status & INTR_STATUS0__ERASE_FAIL) {
-+ printk(KERN_ERR "NAND erase fail interrupt!\n");
-+ process_prog_erase_fail_int(cur_desc);
-+ event = EVENT_ERASE_FAILURE;
-+ break;
-+ } else {
-+ cdma_int_status = ioread32(FlashReg + DMA_INTR);
-+ if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
-+ iowrite32(dma_intr_bit[info.flash_bank],
-+ FlashReg + DMA_INTR);
-+ update_event_status();
-+ event = EVENT_PASS;
-+ break;
-+ }
-+ }
-+ }
-+
-+ int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
-+ iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
-+ cdma_int_status = ioread32(FlashReg + DMA_INTR);
-+ iowrite32(cdma_int_status, FlashReg + DMA_INTR);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ return event;
-+}
-+
-+
-+
-diff --git a/drivers/block/spectra/lld_cdma.h b/drivers/block/spectra/lld_cdma.h
-new file mode 100644
-index 0000000..854ea06
---- /dev/null
-+++ b/drivers/block/spectra/lld_cdma.h
-@@ -0,0 +1,123 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+/* header for LLD_CDMA.c module */
-+
-+#ifndef _LLD_CDMA_
-+#define _LLD_CDMA_
-+
-+#include "flash.h"
-+
-+#define DEBUG_SYNC 1
-+
-+/*/////////// CDMA specific MACRO definition */
-+#define MAX_DESCS (255)
-+#define MAX_CHANS (4)
-+#define MAX_SYNC_POINTS (16)
-+#define MAX_DESC_PER_CHAN (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)
-+
-+#define CHANNEL_SYNC_MASK (0x000F)
-+#define CHANNEL_DMA_MASK (0x00F0)
-+#define CHANNEL_ID_MASK (0x0300)
-+#define CHANNEL_CONT_MASK (0x4000)
-+#define CHANNEL_INTR_MASK (0x8000)
-+
-+#define CHANNEL_SYNC_OFFSET (0)
-+#define CHANNEL_DMA_OFFSET (4)
-+#define CHANNEL_ID_OFFSET (8)
-+#define CHANNEL_CONT_OFFSET (14)
-+#define CHANNEL_INTR_OFFSET (15)
-+
-+u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
-+u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
-+u16 CDMA_Execute_CMDs(void);
-+void print_pending_cmds(void);
-+void print_cdma_descriptors(void);
-+
-+extern u8 g_SBDCmdIndex;
-+extern struct mrst_nand_info info;
-+
-+
-+/*/////////// prototypes: APIs for LLD_CDMA */
-+int is_cdma_interrupt(void);
-+u16 CDMA_Event_Status(void);
-+
-+/* CMD-DMA Descriptor Struct. These are defined by the CMD_DMA HW */
-+struct cdma_descriptor {
-+ u32 NxtPointerHi;
-+ u32 NxtPointerLo;
-+ u32 FlashPointerHi;
-+ u32 FlashPointerLo;
-+ u32 CommandType;
-+ u32 MemAddrHi;
-+ u32 MemAddrLo;
-+ u32 CommandFlags;
-+ u32 Channel;
-+ u32 Status;
-+ u32 MemCopyPointerHi;
-+ u32 MemCopyPointerLo;
-+ u32 Reserved12;
-+ u32 Reserved13;
-+ u32 Reserved14;
-+ u32 pcmd; /* pending cmd num related to this descriptor */
-+};
-+
-+/* This struct holds one MemCopy descriptor as defined by the HW */
-+struct memcpy_descriptor {
-+ u32 NxtPointerHi;
-+ u32 NxtPointerLo;
-+ u32 SrcAddrHi;
-+ u32 SrcAddrLo;
-+ u32 DestAddrHi;
-+ u32 DestAddrLo;
-+ u32 XferSize;
-+ u32 MemCopyFlags;
-+ u32 MemCopyStatus;
-+ u32 reserved9;
-+ u32 reserved10;
-+ u32 reserved11;
-+ u32 reserved12;
-+ u32 reserved13;
-+ u32 reserved14;
-+ u32 reserved15;
-+};
-+
-+/* Pending CMD table entries (includes MemCopy parameters) */
-+struct pending_cmd {
-+ u8 CMD;
-+ u8 *DataAddr;
-+ u32 Block;
-+ u16 Page;
-+ u16 PageCount;
-+ u8 *DataDestAddr;
-+ u8 *DataSrcAddr;
-+ u32 MemCopyByteCnt;
-+ u16 Flags;
-+ u16 Status;
-+};
-+
-+#if DEBUG_SYNC
-+extern u32 debug_sync_cnt;
-+#endif
-+
-+/* Definitions for CMD DMA descriptor chain fields */
-+#define CMD_DMA_DESC_COMP 0x8000
-+#define CMD_DMA_DESC_FAIL 0x4000
-+
-+#endif /*_LLD_CDMA_*/
-diff --git a/drivers/block/spectra/lld_emu.c b/drivers/block/spectra/lld_emu.c
-new file mode 100644
-index 0000000..60eb0f6
---- /dev/null
-+++ b/drivers/block/spectra/lld_emu.c
-@@ -0,0 +1,780 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/slab.h>
-+#include "flash.h"
-+#include "ffsdefs.h"
-+#include "lld_emu.h"
-+#include "lld.h"
-+#if CMD_DMA
-+#include "lld_cdma.h"
-+#endif
-+
-+#define GLOB_LLD_PAGES 64
-+#define GLOB_LLD_PAGE_SIZE (512+16)
-+#define GLOB_LLD_PAGE_DATA_SIZE 512
-+#define GLOB_LLD_BLOCKS 2048
-+
-+#if (CMD_DMA && FLASH_EMU)
-+#include "lld_cdma.h"
-+u32 totalUsedBanks;
-+u32 valid_banks[MAX_CHANS];
-+#endif
-+
-+#if FLASH_EMU /* This is for entire module */
-+
-+static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
-+
-+/* Read the nand emu file and then fill its contents into flash_memory */
-+int emu_load_file_to_mem(void)
-+{
-+ mm_segment_t fs;
-+ struct file *nef_filp = NULL;
-+ struct inode *inode = NULL;
-+ loff_t nef_size = 0;
-+ loff_t tmp_file_offset, file_offset;
-+ ssize_t nread;
-+ int i, rc = -EINVAL;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ fs = get_fs();
-+ set_fs(get_ds());
-+
-+ nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
-+ if (IS_ERR(nef_filp)) {
-+ printk(KERN_ERR "filp_open error: "
-+ "Unable to open nand emu file!\n");
-+ return PTR_ERR(nef_filp);
-+ }
-+
-+ if (nef_filp->f_path.dentry) {
-+ inode = nef_filp->f_path.dentry->d_inode;
-+ } else {
-+ printk(KERN_ERR "Can not get valid inode!\n");
-+ goto out;
-+ }
-+
-+ nef_size = i_size_read(inode->i_mapping->host);
-+ if (nef_size <= 0) {
-+ printk(KERN_ERR "Invalid nand emu file size: "
-+ "0x%llx\n", nef_size);
-+ goto out;
-+ } else {
-+ nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: %lld\n",
-+ nef_size);
-+ }
-+
-+ file_offset = 0;
-+ for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
-+ tmp_file_offset = file_offset;
-+ nread = vfs_read(nef_filp,
-+ (char __user *)flash_memory[i],
-+ GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
-+ if (nread < GLOB_LLD_PAGE_SIZE) {
-+ printk(KERN_ERR "%s, Line %d - "
-+ "nand emu file partial read: "
-+ "%d bytes\n", __FILE__, __LINE__, (int)nread);
-+ goto out;
-+ }
-+ file_offset += GLOB_LLD_PAGE_SIZE;
-+ }
-+ rc = 0;
-+
-+out:
-+ filp_close(nef_filp, current->files);
-+ set_fs(fs);
-+ return rc;
-+}
-+
-+/* Write contents of flash_memory to nand emu file */
-+int emu_write_mem_to_file(void)
-+{
-+ mm_segment_t fs;
-+ struct file *nef_filp = NULL;
-+ struct inode *inode = NULL;
-+ loff_t nef_size = 0;
-+ loff_t tmp_file_offset, file_offset;
-+ ssize_t nwritten;
-+ int i, rc = -EINVAL;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ fs = get_fs();
-+ set_fs(get_ds());
-+
-+ nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
-+ if (IS_ERR(nef_filp)) {
-+ printk(KERN_ERR "filp_open error: "
-+ "Unable to open nand emu file!\n");
-+ return PTR_ERR(nef_filp);
-+ }
-+
-+ if (nef_filp->f_path.dentry) {
-+ inode = nef_filp->f_path.dentry->d_inode;
-+ } else {
-+ printk(KERN_ERR "Invalid " "nef_filp->f_path.dentry value!\n");
-+ goto out;
-+ }
-+
-+ nef_size = i_size_read(inode->i_mapping->host);
-+ if (nef_size <= 0) {
-+ printk(KERN_ERR "Invalid "
-+ "nand emu file size: 0x%llx\n", nef_size);
-+ goto out;
-+ } else {
-+ nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: "
-+ "%lld\n", nef_size);
-+ }
-+
-+ file_offset = 0;
-+ for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
-+ tmp_file_offset = file_offset;
-+ nwritten = vfs_write(nef_filp,
-+ (char __user *)flash_memory[i],
-+ GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
-+ if (nwritten < GLOB_LLD_PAGE_SIZE) {
-+ printk(KERN_ERR "%s, Line %d - "
-+ "nand emu file partial write: "
-+ "%d bytes\n", __FILE__, __LINE__, (int)nwritten);
-+ goto out;
-+ }
-+ file_offset += GLOB_LLD_PAGE_SIZE;
-+ }
-+ rc = 0;
-+
-+out:
-+ filp_close(nef_filp, current->files);
-+ set_fs(fs);
-+ return rc;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Flash_Init
-+* Inputs: none
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Creates & initializes the flash RAM array.
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Flash_Init(void)
-+{
-+ int i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ flash_memory[0] = (u8 *)vmalloc(GLOB_LLD_PAGE_SIZE *
-+ GLOB_LLD_BLOCKS *
-+ GLOB_LLD_PAGES *
-+ sizeof(u8));
-+ if (!flash_memory[0]) {
-+ printk(KERN_ERR "Fail to allocate memory "
-+ "for nand emulator!\n");
-+ return ERR;
-+ }
-+
-+ memset((char *)(flash_memory[0]), 0xFF,
-+ GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
-+ sizeof(u8));
-+
-+ for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
-+ flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;
-+
-+ emu_load_file_to_mem(); /* Load nand emu file to mem */
-+
-+ return PASS;
-+}
-+
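/*
 * Illustrative sketch (not part of the original driver): emu_Flash_Init()
 * above backs the whole emulated flash with one contiguous vmalloc() buffer
 * and then fills flash_memory[] so that entry i points at page i.  The
 * user-space sketch below reproduces that layout; the EMU_* geometry values
 * are assumptions, not the driver's real GLOB_LLD_* configuration.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define EMU_BLOCKS     16	/* assumed stand-in for GLOB_LLD_BLOCKS */
#define EMU_PAGES      64	/* assumed stand-in for GLOB_LLD_PAGES */
#define EMU_PAGE_SIZE  2112	/* assumed stand-in for GLOB_LLD_PAGE_SIZE */

static uint8_t *emu_pages[EMU_BLOCKS * EMU_PAGES];

static int emu_layout_init(void)
{
	size_t total = (size_t)EMU_BLOCKS * EMU_PAGES * EMU_PAGE_SIZE;
	uint8_t *base = malloc(total);

	if (!base)
		return -1;
	memset(base, 0xFF, total);	/* erased NAND reads back as 0xFF */

	/* Page i sits at a fixed offset, so (block, page) later resolves to
	 * emu_pages[block * EMU_PAGES + page], exactly as the driver does. */
	for (size_t i = 0; i < (size_t)EMU_BLOCKS * EMU_PAGES; i++)
		emu_pages[i] = base + i * EMU_PAGE_SIZE;
	return 0;
}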
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Flash_Release
-+* Inputs: none
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Releases the flash.
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+int emu_Flash_Release(void)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ emu_write_mem_to_file(); /* Write back mem to nand emu file */
-+
-+ vfree(flash_memory[0]);
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Read_Device_ID
-+* Inputs: none
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Sets up the DeviceInfo structure with the
-+* emulated device parameters
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+
-+u16 emu_Read_Device_ID(void)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ DeviceInfo.wDeviceMaker = 0;
-+ DeviceInfo.wDeviceType = 8;
-+ DeviceInfo.wSpectraStartBlock = 36;
-+ DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
-+ DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
-+ DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
-+ DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
-+ DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
-+ DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
-+ GLOB_LLD_PAGE_DATA_SIZE;
-+ DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
-+ DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
-+ DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
-+ DeviceInfo.wSpectraStartBlock
-+ + 1);
-+ DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
-+ DeviceInfo.nBitsInPageNumber =
-+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
-+ DeviceInfo.nBitsInPageDataSize =
-+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
-+ DeviceInfo.nBitsInBlockDataSize =
-+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-+
-+#if CMD_DMA
-+ totalUsedBanks = 4;
-+ valid_banks[0] = 1;
-+ valid_banks[1] = 1;
-+ valid_banks[2] = 1;
-+ valid_banks[3] = 1;
-+#endif
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Flash_Reset
-+* Inputs: none
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Reset the flash
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Flash_Reset(void)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Erase_Block
-+* Inputs: Address
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Erase a block
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Erase_Block(u32 block_add)
-+{
-+ int i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (block_add >= DeviceInfo.wTotalBlocks) {
-+ printk(KERN_ERR "emu_Erase_Block error! "
-+ "Too big block address: %d\n", block_add);
-+ return FAIL;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
-+ (int)block_add);
-+
-+ for (i = block_add * GLOB_LLD_PAGES;
-+ i < ((block_add + 1) * GLOB_LLD_PAGES); i++) {
-+ if (flash_memory[i]) {
-+ memset((u8 *)(flash_memory[i]), 0xFF,
-+ DeviceInfo.wPageSize * sizeof(u8));
-+ }
-+ }
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Write_Page_Main
-+* Inputs: Write buffer address pointer
-+* Block number
-+* Page number
-+* Number of pages to process
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Write the data in the buffer to main area of flash
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
-+ u16 Page, u16 PageCount)
-+{
-+ int i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (Block >= DeviceInfo.wTotalBlocks)
-+ return FAIL;
-+
-+ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
-+ return FAIL;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
-+ "lba %u Page %u PageCount %u\n",
-+ (unsigned int)Block,
-+ (unsigned int)Page, (unsigned int)PageCount);
-+
-+ for (i = 0; i < PageCount; i++) {
-+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
-+ printk(KERN_ERR "Run out of memory\n");
-+ return FAIL;
-+ }
-+ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
-+ write_data, DeviceInfo.wPageDataSize);
-+ write_data += DeviceInfo.wPageDataSize;
-+ Page++;
-+ }
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Read_Page_Main
-+* Inputs: Read buffer address pointer
-+* Block number
-+* Page number
-+* Number of pages to process
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Read the data from the flash main area to the buffer
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
-+ u16 Page, u16 PageCount)
-+{
-+ int i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (Block >= DeviceInfo.wTotalBlocks)
-+ return FAIL;
-+
-+ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
-+ return FAIL;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
-+ "lba %u Page %u PageCount %u\n",
-+ (unsigned int)Block,
-+ (unsigned int)Page, (unsigned int)PageCount);
-+
-+ for (i = 0; i < PageCount; i++) {
-+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
-+ memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
-+ } else {
-+ memcpy(read_data,
-+ (u8 *) (flash_memory[Block * GLOB_LLD_PAGES
-+ + Page]),
-+ DeviceInfo.wPageDataSize);
-+ }
-+ read_data += DeviceInfo.wPageDataSize;
-+ Page++;
-+ }
-+
-+ return PASS;
-+}
-+
-+#ifndef ELDORA
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Read_Page_Main_Spare
-+* Inputs: Write Buffer
-+* Address
-+* Buffer size
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Read from flash main+spare area
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
-+ u16 Page, u16 PageCount)
-+{
-+ int i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (Block >= DeviceInfo.wTotalBlocks) {
-+ printk(KERN_ERR "Read Page Main+Spare "
-+ "Error: Block Address too big\n");
-+ return FAIL;
-+ }
-+
-+ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
-+ printk(KERN_ERR "Read Page Main+Spare "
-+ "Error: Page number too big\n");
-+ return FAIL;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
-+ "No. of pages %u block %u start page %u\n",
-+ (unsigned int)PageCount,
-+ (unsigned int)Block, (unsigned int)Page);
-+
-+ for (i = 0; i < PageCount; i++) {
-+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
-+ memset(read_data, 0xFF, DeviceInfo.wPageSize);
-+ } else {
-+ memcpy(read_data, (u8 *) (flash_memory[Block *
-+ GLOB_LLD_PAGES
-+ + Page]),
-+ DeviceInfo.wPageSize);
-+ }
-+
-+ read_data += DeviceInfo.wPageSize;
-+ Page++;
-+ }
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Write_Page_Main_Spare
-+* Inputs: Write buffer
-+* address
-+* buffer length
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Write the buffer to main+spare area of flash
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
-+ u16 Page, u16 page_count)
-+{
-+ u16 i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (Block >= DeviceInfo.wTotalBlocks) {
-+ printk(KERN_ERR "Write Page Main + Spare "
-+ "Error: Block Address too big\n");
-+ return FAIL;
-+ }
-+
-+ if (Page + page_count > DeviceInfo.wPagesPerBlock) {
-+ printk(KERN_ERR "Write Page Main + Spare "
-+ "Error: Page number too big\n");
-+ return FAIL;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
-+ "No. of pages %u block %u start page %u\n",
-+ (unsigned int)page_count,
-+ (unsigned int)Block, (unsigned int)Page);
-+
-+ for (i = 0; i < page_count; i++) {
-+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
-+ printk(KERN_ERR "Run out of memory!\n");
-+ return FAIL;
-+ }
-+ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
-+ write_data, DeviceInfo.wPageSize);
-+ write_data += DeviceInfo.wPageSize;
-+ Page++;
-+ }
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Write_Page_Spare
-+* Inputs: Write buffer
-+* Address
-+* buffer size
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Write the buffer in the spare area
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
-+ u16 Page, u16 PageCount)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (Block >= DeviceInfo.wTotalBlocks) {
-+		printk(KERN_ERR "Write Page Spare Error: "
-+			"Block Address too big\n");
-+ return FAIL;
-+ }
-+
-+ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
-+		printk(KERN_ERR "Write Page Spare Error: "
-+			"Page number too big\n");
-+ return FAIL;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
-+ "block %u page %u\n",
-+ (unsigned int)Block, (unsigned int)Page);
-+
-+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
-+ printk(KERN_ERR "Run out of memory!\n");
-+ return FAIL;
-+ }
-+
-+ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page] +
-+ DeviceInfo.wPageDataSize), write_data,
-+ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Read_Page_Spare
-+* Inputs: Write Buffer
-+* Address
-+* Buffer size
-+* Outputs: PASS=0 (notice 0=ok here)
-+* Description: Read data from the spare area
-+*
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
-+ u16 Page, u16 PageCount)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (Block >= DeviceInfo.wTotalBlocks) {
-+ printk(KERN_ERR "Read Page Spare "
-+ "Error: Block Address too big\n");
-+ return FAIL;
-+ }
-+
-+ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
-+ printk(KERN_ERR "Read Page Spare "
-+ "Error: Page number too big\n");
-+ return FAIL;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
-+ "block %u page %u\n",
-+ (unsigned int)Block, (unsigned int)Page);
-+
-+ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
-+ memset(write_data, 0xFF,
-+ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
-+ } else {
-+ memcpy(write_data,
-+ (u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]
-+ + DeviceInfo.wPageDataSize),
-+ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
-+ }
-+
-+ return PASS;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_Enable_Disable_Interrupts
-+* Inputs: enable or disable
-+* Outputs: none
-+* Description: NOP
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+}
-+
-+u16 emu_Get_Bad_Block(u32 block)
-+{
-+ return 0;
-+}
-+
-+#if CMD_DMA
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Support for CDMA functions
-+************************************
-+* emu_CDMA_Flash_Init
-+* CDMA_process_data command (use LLD_CDMA)
-+* CDMA_MemCopy_CMD (use LLD_CDMA)
-+* emu_CDMA_Execute_CMDs (execute all commands)
-+* emu_CDMA_Event_Status
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_CDMA_Flash_Init(void)
-+{
-+ u16 i;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
-+ PendingCMD[i].CMD = 0;
-+ PendingCMD[i].Tag = 0;
-+ PendingCMD[i].DataAddr = 0;
-+ PendingCMD[i].Block = 0;
-+ PendingCMD[i].Page = 0;
-+ PendingCMD[i].PageCount = 0;
-+ PendingCMD[i].DataDestAddr = 0;
-+ PendingCMD[i].DataSrcAddr = 0;
-+ PendingCMD[i].MemCopyByteCnt = 0;
-+ PendingCMD[i].ChanSync[0] = 0;
-+ PendingCMD[i].ChanSync[1] = 0;
-+ PendingCMD[i].ChanSync[2] = 0;
-+ PendingCMD[i].ChanSync[3] = 0;
-+ PendingCMD[i].ChanSync[4] = 0;
-+ PendingCMD[i].Status = 3;
-+ }
-+
-+ return PASS;
-+}
-+
-+static void emu_isr(int irq, void *dev_id)
-+{
-+ /* TODO: ... */
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_CDMA_Execute_CMDs
-+* Inputs: tag_count: the number of pending cmds to do
-+* Outputs: PASS/FAIL
-+* Description: execute each command in the pending CMD array
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_CDMA_Execute_CMDs(u16 tag_count)
-+{
-+ u16 i, j;
-+ u8 CMD; /* cmd parameter */
-+ u8 *data;
-+ u32 block;
-+ u16 page;
-+ u16 count;
-+ u16 status = PASS;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
-+ "Tag Count %u\n", tag_count);
-+
-+ for (i = 0; i < totalUsedBanks; i++) {
-+ PendingCMD[i].CMD = DUMMY_CMD;
-+ PendingCMD[i].Tag = 0xFF;
-+ PendingCMD[i].Block =
-+ (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
-+
-+ for (j = 0; j <= MAX_CHANS; j++)
-+ PendingCMD[i].ChanSync[j] = 0;
-+ }
-+
-+ CDMA_Execute_CMDs(tag_count);
-+
-+ print_pending_cmds(tag_count);
-+
-+#if DEBUG_SYNC
-+ }
-+ debug_sync_cnt++;
-+#endif
-+
-+ for (i = MAX_CHANS;
-+ i < tag_count + MAX_CHANS; i++) {
-+ CMD = PendingCMD[i].CMD;
-+ data = PendingCMD[i].DataAddr;
-+ block = PendingCMD[i].Block;
-+ page = PendingCMD[i].Page;
-+ count = PendingCMD[i].PageCount;
-+
-+ switch (CMD) {
-+ case ERASE_CMD:
-+ emu_Erase_Block(block);
-+ PendingCMD[i].Status = PASS;
-+ break;
-+ case WRITE_MAIN_CMD:
-+ emu_Write_Page_Main(data, block, page, count);
-+ PendingCMD[i].Status = PASS;
-+ break;
-+ case WRITE_MAIN_SPARE_CMD:
-+ emu_Write_Page_Main_Spare(data, block, page, count);
-+ PendingCMD[i].Status = PASS;
-+ break;
-+ case READ_MAIN_CMD:
-+ emu_Read_Page_Main(data, block, page, count);
-+ PendingCMD[i].Status = PASS;
-+ break;
-+ case MEMCOPY_CMD:
-+ memcpy(PendingCMD[i].DataDestAddr,
-+ PendingCMD[i].DataSrcAddr,
-+ PendingCMD[i].MemCopyByteCnt);
-+ case DUMMY_CMD:
-+ PendingCMD[i].Status = PASS;
-+ break;
-+ default:
-+ PendingCMD[i].Status = FAIL;
-+ break;
-+ }
-+ }
-+
-+ /*
-+	 * Temporary code to reset the PendingCMD array for basic testing.
-+	 * It should be done at the end of the event status function.
-+ */
-+ for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
-+ PendingCMD[i].CMD = 0;
-+ PendingCMD[i].Tag = 0;
-+ PendingCMD[i].DataAddr = 0;
-+ PendingCMD[i].Block = 0;
-+ PendingCMD[i].Page = 0;
-+ PendingCMD[i].PageCount = 0;
-+ PendingCMD[i].DataDestAddr = 0;
-+ PendingCMD[i].DataSrcAddr = 0;
-+ PendingCMD[i].MemCopyByteCnt = 0;
-+ PendingCMD[i].ChanSync[0] = 0;
-+ PendingCMD[i].ChanSync[1] = 0;
-+ PendingCMD[i].ChanSync[2] = 0;
-+ PendingCMD[i].ChanSync[3] = 0;
-+ PendingCMD[i].ChanSync[4] = 0;
-+ PendingCMD[i].Status = CMD_NOT_DONE;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
-+
-+ emu_isr(0, 0); /* This is a null isr now. Need fill it in future */
-+
-+ return status;
-+}
-+
-+/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-+* Function: emu_CDMA_Event_Status
-+* Inputs: none
-+* Outputs: Event_Status code
-+* Description: This function can also be used to force errors
-+*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-+u16 emu_CDMA_Event_Status(void)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ return EVENT_PASS;
-+}
-+
-+#endif /* CMD_DMA */
-+#endif /* !ELDORA */
-+#endif /* FLASH_EMU */
-diff --git a/drivers/block/spectra/lld_emu.h b/drivers/block/spectra/lld_emu.h
-new file mode 100644
-index 0000000..63f84c3
---- /dev/null
-+++ b/drivers/block/spectra/lld_emu.h
-@@ -0,0 +1,51 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#ifndef _LLD_EMU_
-+#define _LLD_EMU_
-+
-+#include "ffsport.h"
-+#include "ffsdefs.h"
-+
-+/* prototypes: emulator API functions */
-+extern u16 emu_Flash_Reset(void);
-+extern u16 emu_Flash_Init(void);
-+extern int emu_Flash_Release(void);
-+extern u16 emu_Read_Device_ID(void);
-+extern u16 emu_Erase_Block(u32 block_addr);
-+extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
-+ u16 Page, u16 PageCount);
-+extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
-+ u16 PageCount);
-+extern u16 emu_Event_Status(void);
-+extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
-+extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
-+ u16 Page, u16 PageCount);
-+extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
-+ u16 Page, u16 PageCount);
-+extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
-+ u16 Page, u16 PageCount);
-+extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
-+ u16 PageCount);
-+extern u16 emu_Get_Bad_Block(u32 block);
-+
-+u16 emu_CDMA_Flash_Init(void);
-+u16 emu_CDMA_Execute_CMDs(u16 tag_count);
-+u16 emu_CDMA_Event_Status(void);
-+#endif /*_LLD_EMU_*/
-diff --git a/drivers/block/spectra/lld_nand.c b/drivers/block/spectra/lld_nand.c
-new file mode 100644
-index 0000000..8c279b8
---- /dev/null
-+++ b/drivers/block/spectra/lld_nand.c
-@@ -0,0 +1,2601 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#include "lld.h"
-+#include "lld_nand.h"
-+#include "lld_cdma.h"
-+
-+#include "spectraswconfig.h"
-+#include "flash.h"
-+#include "ffsdefs.h"
-+
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/wait.h>
-+#include <linux/mutex.h>
-+
-+#include "nand_regs.h"
-+
-+#define SPECTRA_NAND_NAME "nd"
-+
-+#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
-+#define MAX_PAGES_PER_RW 128
-+
-+#define INT_IDLE_STATE 0
-+#define INT_READ_PAGE_MAIN 0x01
-+#define INT_WRITE_PAGE_MAIN 0x02
-+#define INT_PIPELINE_READ_AHEAD 0x04
-+#define INT_PIPELINE_WRITE_AHEAD 0x08
-+#define INT_MULTI_PLANE_READ 0x10
-+#define INT_MULTI_PLANE_WRITE 0x11
-+
-+static u32 enable_ecc;
-+
-+struct mrst_nand_info info;
-+
-+int totalUsedBanks;
-+u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
-+
-+void __iomem *FlashReg;
-+void __iomem *FlashMem;
-+
-+u16 conf_parameters[] = {
-+ 0x0000,
-+ 0x0000,
-+ 0x01F4,
-+ 0x01F4,
-+ 0x01F4,
-+ 0x01F4,
-+ 0x0000,
-+ 0x0000,
-+ 0x0001,
-+ 0x0000,
-+ 0x0000,
-+ 0x0000,
-+ 0x0000,
-+ 0x0040,
-+ 0x0001,
-+ 0x000A,
-+ 0x000A,
-+ 0x000A,
-+ 0x0000,
-+ 0x0000,
-+ 0x0005,
-+ 0x0012,
-+ 0x000C
-+};
-+
-+u16 NAND_Get_Bad_Block(u32 block)
-+{
-+ u32 status = PASS;
-+ u32 flag_bytes = 0;
-+ u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
-+ u32 page, i;
-+ u8 *pReadSpareBuf = buf_get_bad_block;
-+
-+ if (enable_ecc)
-+ flag_bytes = DeviceInfo.wNumPageSpareFlag;
-+
-+ for (page = 0; page < 2; page++) {
-+ status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
-+ if (status != PASS)
-+ return READ_ERROR;
-+ for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
-+ if (pReadSpareBuf[i] != 0xff)
-+ return DEFECTIVE_BLOCK;
-+ }
-+
-+ for (page = 1; page < 3; page++) {
-+ status = NAND_Read_Page_Spare(pReadSpareBuf, block,
-+ DeviceInfo.wPagesPerBlock - page , 1);
-+ if (status != PASS)
-+ return READ_ERROR;
-+ for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
-+ if (pReadSpareBuf[i] != 0xff)
-+ return DEFECTIVE_BLOCK;
-+ }
-+
-+ return GOOD_BLOCK;
-+}
-+
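/*
 * Illustrative sketch (not part of the original driver): NAND_Get_Bad_Block()
 * above treats a block as defective if any manufacturer marker byte in the
 * spare area of its first two or last two pages is not 0xFF.  The helper
 * below shows just that marker test on an already-read spare buffer;
 * flag_bytes and skip_bytes mirror wNumPageSpareFlag and wSpareSkipBytes and
 * are passed in as assumptions.
 */
#include <stdint.h>

/* Return 1 if the marker window still holds the erased value 0xFF. */
static int spare_marker_is_clean(const uint8_t *spare,
				 unsigned int flag_bytes,
				 unsigned int skip_bytes)
{
	for (unsigned int i = flag_bytes; i < flag_bytes + skip_bytes; i++)
		if (spare[i] != 0xFF)
			return 0;	/* factory bad-block marker present */
	return 1;
}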
-+
-+u16 NAND_Flash_Reset(void)
-+{
-+ u32 i;
-+ u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
-+ INTR_STATUS1__RST_COMP,
-+ INTR_STATUS2__RST_COMP,
-+ INTR_STATUS3__RST_COMP};
-+ u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
-+ INTR_STATUS1__TIME_OUT,
-+ INTR_STATUS2__TIME_OUT,
-+ INTR_STATUS3__TIME_OUT};
-+ u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
-+ INTR_STATUS2, INTR_STATUS3};
-+ u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
-+ DEVICE_RESET__BANK1,
-+ DEVICE_RESET__BANK2,
-+ DEVICE_RESET__BANK3};
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
-+ iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
-+ FlashReg + intr_status[i]);
-+
-+ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
-+ iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
-+ while (!(ioread32(FlashReg + intr_status[i]) &
-+ (intr_status_rst_comp[i] | intr_status_time_out[i])))
-+ ;
-+ if (ioread32(FlashReg + intr_status[i]) &
-+ intr_status_time_out[i])
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "NAND Reset operation timed out on bank %d\n", i);
-+ }
-+
-+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
-+ iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
-+ FlashReg + intr_status[i]);
-+
-+ return PASS;
-+}
-+
-+static void NAND_ONFi_Timing_Mode(u16 mode)
-+{
-+ u16 Trea[6] = {40, 30, 25, 20, 20, 16};
-+ u16 Trp[6] = {50, 25, 17, 15, 12, 10};
-+ u16 Treh[6] = {30, 15, 15, 10, 10, 7};
-+ u16 Trc[6] = {100, 50, 35, 30, 25, 20};
-+ u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
-+ u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
-+ u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
-+ u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
-+ u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
-+ u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
-+ u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
-+ u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
-+
-+ u16 TclsRising = 1;
-+ u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
-+ u16 dv_window = 0;
-+ u16 en_lo, en_hi;
-+ u16 acc_clks;
-+ u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ en_lo = CEIL_DIV(Trp[mode], CLK_X);
-+ en_hi = CEIL_DIV(Treh[mode], CLK_X);
-+
-+#if ONFI_BLOOM_TIME
-+ if ((en_hi * CLK_X) < (Treh[mode] + 2))
-+ en_hi++;
-+#endif
-+
-+ if ((en_lo + en_hi) * CLK_X < Trc[mode])
-+ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
-+
-+ if ((en_lo + en_hi) < CLK_MULTI)
-+ en_lo += CLK_MULTI - en_lo - en_hi;
-+
-+ while (dv_window < 8) {
-+ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
-+
-+ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
-+
-+ data_invalid =
-+ data_invalid_rhoh <
-+ data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
-+
-+ dv_window = data_invalid - Trea[mode];
-+
-+ if (dv_window < 8)
-+ en_lo++;
-+ }
-+
-+ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
-+
-+ while (((acc_clks * CLK_X) - Trea[mode]) < 3)
-+ acc_clks++;
-+
-+ if ((data_invalid - acc_clks * CLK_X) < 2)
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
-+ __FILE__, __LINE__);
-+
-+ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
-+ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
-+ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
-+ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
-+ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
-+ if (!TclsRising)
-+ cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
-+ if (cs_cnt == 0)
-+ cs_cnt = 1;
-+
-+ if (Tcea[mode]) {
-+ while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
-+ cs_cnt++;
-+ }
-+
-+#if MODE5_WORKAROUND
-+ if (mode == 5)
-+ acc_clks = 5;
-+#endif
-+
-+ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
-+ if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
-+ (ioread32(FlashReg + DEVICE_ID) == 0x88))
-+ acc_clks = 6;
-+
-+ iowrite32(acc_clks, FlashReg + ACC_CLKS);
-+ iowrite32(re_2_we, FlashReg + RE_2_WE);
-+ iowrite32(re_2_re, FlashReg + RE_2_RE);
-+ iowrite32(we_2_re, FlashReg + WE_2_RE);
-+ iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
-+ iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
-+ iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
-+ iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
-+}
-+
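/*
 * Illustrative sketch (not part of the original driver): the timing setup
 * above converts ONFI nanosecond parameters into controller clock counts
 * with CEIL_DIV and then widens them until the Trc constraint holds.  The
 * numbers below are only a worked example: assuming CLK_X = 5 ns per cycle
 * and ONFI mode 0 (Trp = 50, Treh = 30, Trc = 100),
 *   en_lo = CEIL_DIV(50, 5) = 10 and en_hi = CEIL_DIV(30, 5) = 6,
 *   (10 + 6) * 5 = 80 < 100, so en_lo grows by CEIL_DIV(20, 5) = 4.
 */
#include <stdio.h>

#define CEIL_DIV(X, Y) (((X) % (Y)) ? ((X) / (Y) + 1) : ((X) / (Y)))

int main(void)
{
	const unsigned clk_x = 5;			/* assumed clock period, ns */
	const unsigned trp = 50, treh = 30, trc = 100;	/* ONFI mode 0 values */
	unsigned en_lo = CEIL_DIV(trp, clk_x);
	unsigned en_hi = CEIL_DIV(treh, clk_x);

	if ((en_lo + en_hi) * clk_x < trc)
		en_lo += CEIL_DIV(trc - (en_lo + en_hi) * clk_x, clk_x);
	printf("en_lo=%u en_hi=%u\n", en_lo, en_hi);	/* prints en_lo=14 en_hi=6 */
	return 0;
}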
-+static void index_addr(u32 address, u32 data)
-+{
-+ iowrite32(address, FlashMem);
-+ iowrite32(data, FlashMem + 0x10);
-+}
-+
-+static void index_addr_read_data(u32 address, u32 *pdata)
-+{
-+ iowrite32(address, FlashMem);
-+ *pdata = ioread32(FlashMem + 0x10);
-+}
-+
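/*
 * Illustrative sketch (not part of the original driver): index_addr() and
 * index_addr_read_data() above implement the controller's indirect access
 * scheme - the command/address word goes to the base of the data window
 * (FlashMem) and the payload is then written to or read from base + 0x10.
 * The stub below shows the same pattern against two volatile "registers";
 * real driver code uses iowrite32()/ioread32() on an ioremap()ed region.
 */
#include <stdint.h>

struct indexed_window {
	volatile uint32_t addr;		/* offset 0x00: index / command word */
	volatile uint32_t pad[3];
	volatile uint32_t data;		/* offset 0x10: data port */
};

static void indexed_write(struct indexed_window *w, uint32_t address, uint32_t value)
{
	w->addr = address;	/* select target via MODE_xx | bank | address */
	w->data = value;	/* then move the payload through the data port */
}

static uint32_t indexed_read(struct indexed_window *w, uint32_t address)
{
	w->addr = address;
	return w->data;
}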
-+static void set_ecc_config(void)
-+{
-+#if SUPPORT_8BITECC
-+ if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
-+ (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
-+ iowrite32(8, FlashReg + ECC_CORRECTION);
-+#endif
-+
-+ if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
-+ == 1) {
-+ DeviceInfo.wECCBytesPerSector = 4;
-+ DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
-+ DeviceInfo.wNumPageSpareFlag =
-+ DeviceInfo.wPageSpareSize -
-+ DeviceInfo.wPageDataSize /
-+ (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
-+ DeviceInfo.wECCBytesPerSector
-+ - DeviceInfo.wSpareSkipBytes;
-+ } else {
-+ DeviceInfo.wECCBytesPerSector =
-+ (ioread32(FlashReg + ECC_CORRECTION) &
-+ ECC_CORRECTION__VALUE) * 13 / 8;
-+ if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
-+ DeviceInfo.wECCBytesPerSector += 2;
-+ else
-+ DeviceInfo.wECCBytesPerSector += 1;
-+
-+ DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
-+ DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
-+ DeviceInfo.wPageDataSize /
-+ (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
-+ DeviceInfo.wECCBytesPerSector
-+ - DeviceInfo.wSpareSkipBytes;
-+ }
-+}
-+
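/*
 * Illustrative sketch (not part of the original driver): set_ecc_config()
 * above derives how many spare bytes remain free for Spectra's flags after
 * ECC parity and the skipped marker bytes are subtracted.  Worked example
 * with assumed values (ECC_SECTOR_SIZE = 512, one device connected, 8-bit
 * correction, 4KB page with 224 spare bytes, 2 skip bytes):
 *   ecc_bytes_per_sector = 8 * 13 / 8 = 13 -> rounded up to 14
 *   spare_flag_bytes     = 224 - (4096 / 512) * 14 - 2 = 110
 */
#include <stdio.h>

int main(void)
{
	const unsigned ecc_sector = 512, devices = 1;	/* assumed geometry */
	const unsigned correction = 8, page_data = 4096;
	const unsigned page_spare = 224, skip = 2;
	unsigned ecc_bytes = correction * 13 / 8;

	ecc_bytes += (ecc_bytes % 2) ? 1 : 2;	/* round up as the driver does */
	ecc_bytes *= devices;
	printf("spare flag bytes: %u\n",
	       page_spare - page_data / (ecc_sector * devices) * ecc_bytes - skip);
	return 0;
}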
-+static u16 get_onfi_nand_para(void)
-+{
-+ int i;
-+ u16 blks_lun_l, blks_lun_h, n_of_luns;
-+ u32 blockperlun, id;
-+
-+ iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
-+
-+ while (!((ioread32(FlashReg + INTR_STATUS0) &
-+ INTR_STATUS0__RST_COMP) |
-+ (ioread32(FlashReg + INTR_STATUS0) &
-+ INTR_STATUS0__TIME_OUT)))
-+ ;
-+
-+ if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
-+ iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
-+ while (!((ioread32(FlashReg + INTR_STATUS1) &
-+ INTR_STATUS1__RST_COMP) |
-+ (ioread32(FlashReg + INTR_STATUS1) &
-+ INTR_STATUS1__TIME_OUT)))
-+ ;
-+
-+ if (ioread32(FlashReg + INTR_STATUS1) &
-+ INTR_STATUS1__RST_COMP) {
-+ iowrite32(DEVICE_RESET__BANK2,
-+ FlashReg + DEVICE_RESET);
-+ while (!((ioread32(FlashReg + INTR_STATUS2) &
-+ INTR_STATUS2__RST_COMP) |
-+ (ioread32(FlashReg + INTR_STATUS2) &
-+ INTR_STATUS2__TIME_OUT)))
-+ ;
-+
-+ if (ioread32(FlashReg + INTR_STATUS2) &
-+ INTR_STATUS2__RST_COMP) {
-+ iowrite32(DEVICE_RESET__BANK3,
-+ FlashReg + DEVICE_RESET);
-+ while (!((ioread32(FlashReg + INTR_STATUS3) &
-+ INTR_STATUS3__RST_COMP) |
-+ (ioread32(FlashReg + INTR_STATUS3) &
-+ INTR_STATUS3__TIME_OUT)))
-+ ;
-+ } else {
-+ printk(KERN_ERR "Getting a time out for bank 2!\n");
-+ }
-+ } else {
-+ printk(KERN_ERR "Getting a time out for bank 1!\n");
-+ }
-+ }
-+
-+ iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
-+ iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
-+ iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
-+ iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
-+
-+ DeviceInfo.wONFIDevFeatures =
-+ ioread32(FlashReg + ONFI_DEVICE_FEATURES);
-+ DeviceInfo.wONFIOptCommands =
-+ ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
-+ DeviceInfo.wONFITimingMode =
-+ ioread32(FlashReg + ONFI_TIMING_MODE);
-+ DeviceInfo.wONFIPgmCacheTimingMode =
-+ ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
-+
-+ n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
-+ ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
-+ blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
-+ blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
-+
-+ blockperlun = (blks_lun_h << 16) | blks_lun_l;
-+
-+ DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
-+
-+ if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
-+ ONFI_TIMING_MODE__VALUE))
-+ return FAIL;
-+
-+ for (i = 5; i > 0; i--) {
-+ if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
-+ break;
-+ }
-+
-+ NAND_ONFi_Timing_Mode(i);
-+
-+ index_addr(MODE_11 | 0, 0x90);
-+ index_addr(MODE_11 | 1, 0);
-+
-+ for (i = 0; i < 3; i++)
-+ index_addr_read_data(MODE_11 | 2, &id);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
-+
-+ DeviceInfo.MLCDevice = id & 0x0C;
-+
-+	/* All the ONFI devices we know of support the page cache rw */
-+	/* feature; the pipeline_rw_ahead enables below are left disabled: */
-+ /* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
-+ /* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */
-+
-+ return PASS;
-+}
-+
-+static void get_samsung_nand_para(void)
-+{
-+ u8 no_of_planes;
-+ u32 blk_size;
-+ u64 plane_size, capacity;
-+ u32 id_bytes[5];
-+ int i;
-+
-+ index_addr((u32)(MODE_11 | 0), 0x90);
-+ index_addr((u32)(MODE_11 | 1), 0);
-+ for (i = 0; i < 5; i++)
-+ index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
-+ id_bytes[0], id_bytes[1], id_bytes[2],
-+ id_bytes[3], id_bytes[4]);
-+
-+ if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
-+ /* Set timing register values according to datasheet */
-+ iowrite32(5, FlashReg + ACC_CLKS);
-+ iowrite32(20, FlashReg + RE_2_WE);
-+ iowrite32(12, FlashReg + WE_2_RE);
-+ iowrite32(14, FlashReg + ADDR_2_DATA);
-+ iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
-+ iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
-+ iowrite32(2, FlashReg + CS_SETUP_CNT);
-+ }
-+
-+ no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
-+ plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
-+ blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
-+ capacity = (u64)128 * plane_size * no_of_planes;
-+
-+ DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
-+}
-+
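/*
 * Illustrative sketch (not part of the original driver):
 * get_samsung_nand_para() above decodes geometry from the 5th ID byte -
 * bits [3:2] select the plane count and bits [6:4] scale the plane size -
 * before dividing total capacity by the block size.  The field decode on
 * its own, with 0x55 as an assumed example ID byte (not from a datasheet):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t id4 = 0x55;				/* assumed 5th ID byte */
	unsigned planes = 1u << ((id4 & 0x0c) >> 2);	/* bits [3:2] */
	uint64_t plane_scale = 64ull << ((id4 & 0x70) >> 4);	/* bits [6:4] */

	printf("planes=%u plane_scale=%llu\n",
	       planes, (unsigned long long)plane_scale);
	return 0;
}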
-+static void get_toshiba_nand_para(void)
-+{
-+ void __iomem *scratch_reg;
-+ u32 tmp;
-+
-+	/* Workaround for a controller bug which reports a wrong */
-+	/* spare area size for some kinds of Toshiba NAND devices */
-+ if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
-+ (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
-+ iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
-+ tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
-+ ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
-+ iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-+#if SUPPORT_15BITECC
-+ iowrite32(15, FlashReg + ECC_CORRECTION);
-+#elif SUPPORT_8BITECC
-+ iowrite32(8, FlashReg + ECC_CORRECTION);
-+#endif
-+ }
-+
-+	/* As Toshiba NAND cannot report its block count, the user */
-+	/* needs to provide the correct block number in a scratch */
-+	/* register before the Linux NAND driver is loaded. If no */
-+	/* valid value is found in the scratch register, the default */
-+	/* block number value is used instead */
-+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
-+ if (!scratch_reg) {
-+ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
-+ __FILE__, __LINE__);
-+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
-+ } else {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
-+ DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
-+ if (DeviceInfo.wTotalBlocks < 512)
-+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
-+ iounmap(scratch_reg);
-+ }
-+}
-+
-+static void get_hynix_nand_para(void)
-+{
-+ void __iomem *scratch_reg;
-+ u32 main_size, spare_size;
-+
-+ switch (DeviceInfo.wDeviceID) {
-+ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
-+ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
-+ iowrite32(128, FlashReg + PAGES_PER_BLOCK);
-+ iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
-+ iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
-+ main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
-+ spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
-+ iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
-+ iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-+ iowrite32(0, FlashReg + DEVICE_WIDTH);
-+#if SUPPORT_15BITECC
-+ iowrite32(15, FlashReg + ECC_CORRECTION);
-+#elif SUPPORT_8BITECC
-+ iowrite32(8, FlashReg + ECC_CORRECTION);
-+#endif
-+ DeviceInfo.MLCDevice = 1;
-+ break;
-+ default:
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
-+ "Will use default parameter values instead.\n",
-+ DeviceInfo.wDeviceID);
-+ }
-+
-+ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
-+ if (!scratch_reg) {
-+ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
-+ __FILE__, __LINE__);
-+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
-+ } else {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
-+ DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
-+ if (DeviceInfo.wTotalBlocks < 512)
-+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
-+ iounmap(scratch_reg);
-+ }
-+}
-+
-+static void find_valid_banks(void)
-+{
-+ u32 id[LLD_MAX_FLASH_BANKS];
-+ int i;
-+
-+ totalUsedBanks = 0;
-+ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
-+ index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
-+ index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
-+ index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
-+
-+ if (i == 0) {
-+ if (id[i] & 0x0ff)
-+ GLOB_valid_banks[i] = 1;
-+ } else {
-+ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
-+ GLOB_valid_banks[i] = 1;
-+ }
-+
-+ totalUsedBanks += GLOB_valid_banks[i];
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "totalUsedBanks: %d\n", totalUsedBanks);
-+}
-+
-+static void detect_partition_feature(void)
-+{
-+ if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
-+ if ((ioread32(FlashReg + PERM_SRC_ID_1) &
-+ PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
-+ DeviceInfo.wSpectraStartBlock =
-+ ((ioread32(FlashReg + MIN_MAX_BANK_1) &
-+ MIN_MAX_BANK_1__MIN_VALUE) *
-+ DeviceInfo.wTotalBlocks)
-+ +
-+ (ioread32(FlashReg + MIN_BLK_ADDR_1) &
-+ MIN_BLK_ADDR_1__VALUE);
-+
-+ DeviceInfo.wSpectraEndBlock =
-+ (((ioread32(FlashReg + MIN_MAX_BANK_1) &
-+ MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
-+ DeviceInfo.wTotalBlocks)
-+ +
-+ (ioread32(FlashReg + MAX_BLK_ADDR_1) &
-+ MAX_BLK_ADDR_1__VALUE);
-+
-+ DeviceInfo.wTotalBlocks *= totalUsedBanks;
-+
-+ if (DeviceInfo.wSpectraEndBlock >=
-+ DeviceInfo.wTotalBlocks) {
-+ DeviceInfo.wSpectraEndBlock =
-+ DeviceInfo.wTotalBlocks - 1;
-+ }
-+
-+ DeviceInfo.wDataBlockNum =
-+ DeviceInfo.wSpectraEndBlock -
-+ DeviceInfo.wSpectraStartBlock + 1;
-+ } else {
-+ DeviceInfo.wTotalBlocks *= totalUsedBanks;
-+ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
-+ DeviceInfo.wSpectraEndBlock =
-+ DeviceInfo.wTotalBlocks - 1;
-+ DeviceInfo.wDataBlockNum =
-+ DeviceInfo.wSpectraEndBlock -
-+ DeviceInfo.wSpectraStartBlock + 1;
-+ }
-+ } else {
-+ DeviceInfo.wTotalBlocks *= totalUsedBanks;
-+ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
-+ DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
-+ DeviceInfo.wDataBlockNum =
-+ DeviceInfo.wSpectraEndBlock -
-+ DeviceInfo.wSpectraStartBlock + 1;
-+ }
-+}
-+
-+static void dump_device_info(void)
-+{
-+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
-+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
-+ DeviceInfo.wDeviceMaker);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
-+ DeviceInfo.wDeviceID);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
-+ DeviceInfo.wDeviceType);
-+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
-+ DeviceInfo.wSpectraStartBlock);
-+ nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
-+ DeviceInfo.wSpectraEndBlock);
-+ nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
-+ DeviceInfo.wTotalBlocks);
-+ nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
-+ DeviceInfo.wPagesPerBlock);
-+ nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
-+ DeviceInfo.wPageSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
-+ DeviceInfo.wPageDataSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
-+ DeviceInfo.wPageSpareSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
-+ DeviceInfo.wNumPageSpareFlag);
-+ nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
-+ DeviceInfo.wECCBytesPerSector);
-+ nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
-+ DeviceInfo.wBlockSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
-+ DeviceInfo.wBlockDataSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
-+ DeviceInfo.wDataBlockNum);
-+ nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
-+ DeviceInfo.bPlaneNum);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
-+ DeviceInfo.wDeviceMainAreaSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
-+ DeviceInfo.wDeviceSpareAreaSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
-+ DeviceInfo.wDevicesConnected);
-+ nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
-+ DeviceInfo.wDeviceWidth);
-+ nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
-+ DeviceInfo.wHWRevision);
-+ nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
-+ DeviceInfo.wHWFeatures);
-+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
-+ DeviceInfo.wONFIDevFeatures);
-+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
-+ DeviceInfo.wONFIOptCommands);
-+ nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
-+ DeviceInfo.wONFITimingMode);
-+ nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
-+ DeviceInfo.wONFIPgmCacheTimingMode);
-+ nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
-+ DeviceInfo.MLCDevice ? "Yes" : "No");
-+ nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
-+ DeviceInfo.wSpareSkipBytes);
-+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
-+ DeviceInfo.nBitsInPageNumber);
-+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
-+ DeviceInfo.nBitsInPageDataSize);
-+ nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
-+ DeviceInfo.nBitsInBlockDataSize);
-+}
-+
-+u16 NAND_Read_Device_ID(void)
-+{
-+ u16 status = PASS;
-+ u8 no_of_planes;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
-+ iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
-+ DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
-+ DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
-+ DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
-+
-+ if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
-+ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
-+ if (FAIL == get_onfi_nand_para())
-+ return FAIL;
-+ } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
-+ get_samsung_nand_para();
-+ } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
-+ get_toshiba_nand_para();
-+ } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
-+ get_hynix_nand_para();
-+ } else {
-+ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
-+ }
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
-+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
-+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
-+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
-+ ioread32(FlashReg + ACC_CLKS),
-+ ioread32(FlashReg + RE_2_WE),
-+ ioread32(FlashReg + WE_2_RE),
-+ ioread32(FlashReg + ADDR_2_DATA),
-+ ioread32(FlashReg + RDWR_EN_LO_CNT),
-+ ioread32(FlashReg + RDWR_EN_HI_CNT),
-+ ioread32(FlashReg + CS_SETUP_CNT));
-+
-+ DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
-+ DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
-+
-+ DeviceInfo.wDeviceMainAreaSize =
-+ ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
-+ DeviceInfo.wDeviceSpareAreaSize =
-+ ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
-+
-+ DeviceInfo.wPageDataSize =
-+ ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
-+
-+	/* Note: When using the Micron 4K NAND device, the controller reports
-+	 * the Page Spare Size as 216 bytes, but Micron's spec says it is 218
-+	 * bytes. Forcing it to 218 bytes keeps the controller from working
-+	 * correctly, so just let it be. Keep in mind that this bug may cause
-+	 * other problems in the future. - Yunpeng 2008-10-10
-+ */
-+ DeviceInfo.wPageSpareSize =
-+ ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-+
-+ DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
-+
-+ DeviceInfo.wPageSize =
-+ DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
-+ DeviceInfo.wBlockSize =
-+ DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
-+ DeviceInfo.wBlockDataSize =
-+ DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
-+
-+ DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
-+ DeviceInfo.wDeviceType =
-+ ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
-+
-+ DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
-+
-+ DeviceInfo.wSpareSkipBytes =
-+ ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
-+ DeviceInfo.wDevicesConnected;
-+
-+ DeviceInfo.nBitsInPageNumber =
-+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
-+ DeviceInfo.nBitsInPageDataSize =
-+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
-+ DeviceInfo.nBitsInBlockDataSize =
-+ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-+
-+ set_ecc_config();
-+
-+ no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
-+ NUMBER_OF_PLANES__VALUE;
-+
-+ switch (no_of_planes) {
-+ case 0:
-+ case 1:
-+ case 3:
-+ case 7:
-+ DeviceInfo.bPlaneNum = no_of_planes + 1;
-+ break;
-+ default:
-+ status = FAIL;
-+ break;
-+ }
-+
-+ find_valid_banks();
-+
-+ detect_partition_feature();
-+
-+ dump_device_info();
-+
-+ return status;
-+}
-+
-+u16 NAND_UnlockArrayAll(void)
-+{
-+ u64 start_addr, end_addr;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ start_addr = 0;
-+ end_addr = ((u64)DeviceInfo.wBlockSize *
-+ (DeviceInfo.wTotalBlocks - 1)) >>
-+ DeviceInfo.nBitsInPageDataSize;
-+
-+ index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
-+ index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
-+
-+ return PASS;
-+}
-+
-+void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
-+{
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (INT_ENABLE)
-+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
-+ else
-+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-+}
-+
-+u16 NAND_Erase_Block(u32 block)
-+{
-+ u16 status = PASS;
-+ u64 flash_add;
-+ u16 flash_bank;
-+ u32 intr_status = 0;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize;
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ if (block >= DeviceInfo.wTotalBlocks)
-+ status = FAIL;
-+
-+ if (status == PASS) {
-+ intr_status = intr_status_addresses[flash_bank];
-+
-+ iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
-+ FlashReg + intr_status);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
-+
-+ while (!(ioread32(FlashReg + intr_status) &
-+ (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
-+ ;
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ERASE_FAIL)
-+ status = FAIL;
-+
-+ iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
-+ FlashReg + intr_status);
-+ }
-+
-+ return status;
-+}
-+
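/*
 * Illustrative sketch (not part of the original driver): NAND_Erase_Block()
 * above and the read/write paths below all map a global block number onto a
 * bank plus a byte offset the same way - each bank owns a contiguous run of
 * wTotalBlocks / totalUsedBanks blocks.  A minimal version of that mapping,
 * with the geometry fields passed in explicitly as assumptions:
 */
#include <stdint.h>

struct nand_addr {
	uint32_t bank;		/* which chip select / bank */
	uint64_t byte_offset;	/* data-area byte offset within that bank */
};

static struct nand_addr map_block_page(uint32_t block, uint16_t page,
				       uint32_t total_blocks, uint32_t banks,
				       uint32_t block_data_size,
				       uint32_t page_data_size)
{
	uint32_t blocks_per_bank = total_blocks / banks;
	struct nand_addr a;

	a.bank = block / blocks_per_bank;
	a.byte_offset = (uint64_t)(block % blocks_per_bank) * block_data_size +
			(uint64_t)page * page_data_size;
	return a;
}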
-+static u32 Boundary_Check_Block_Page(u32 block, u16 page,
-+ u16 page_count)
-+{
-+ u32 status = PASS;
-+
-+ if (block >= DeviceInfo.wTotalBlocks)
-+ status = FAIL;
-+
-+ if (page + page_count > DeviceInfo.wPagesPerBlock)
-+ status = FAIL;
-+
-+ return status;
-+}
-+
-+u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
-+ u16 page_count)
-+{
-+ u32 status = PASS;
-+ u32 i;
-+ u64 flash_add;
-+ u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-+ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
-+ u32 flash_bank;
-+ u32 intr_status = 0;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ u8 *page_spare = buf_read_page_spare;
-+
-+ if (block >= DeviceInfo.wTotalBlocks) {
-+ printk(KERN_ERR "block too big: %d\n", (int)block);
-+ status = FAIL;
-+ }
-+
-+ if (page >= DeviceInfo.wPagesPerBlock) {
-+ printk(KERN_ERR "page too big: %d\n", page);
-+ status = FAIL;
-+ }
-+
-+ if (page_count > 1) {
-+ printk(KERN_ERR "page count too big: %d\n", page_count);
-+ status = FAIL;
-+ }
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ if (status == PASS) {
-+ intr_status = intr_status_addresses[flash_bank];
-+ iowrite32(ioread32(FlashReg + intr_status),
-+ FlashReg + intr_status);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
-+ 0x41);
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
-+ 0x2000 | page_count);
-+ while (!(ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__LOAD_COMP))
-+ ;
-+
-+ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
-+ FlashMem);
-+
-+ for (i = 0; i < (PageSpareSize / 4); i++)
-+ *((u32 *)page_spare + i) =
-+ ioread32(FlashMem + 0x10);
-+
-+ if (enable_ecc) {
-+ for (i = 0; i < spareFlagBytes; i++)
-+ read_data[i] =
-+ page_spare[PageSpareSize -
-+ spareFlagBytes + i];
-+ for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
-+ read_data[spareFlagBytes + i] =
-+ page_spare[i];
-+ } else {
-+ for (i = 0; i < PageSpareSize; i++)
-+ read_data[i] = page_spare[i];
-+ }
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-+ }
-+
-+ return status;
-+}
-+
-+/* Unused function. Should be removed later */
-+u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
-+ u16 page_count)
-+{
-+ printk(KERN_ERR
-+ "Error! This function (NAND_Write_Page_Spare) should never"
-+ " be called!\n");
-+ return ERR;
-+}
-+
-+/* op value: 0 - DDMA read; 1 - DDMA write */
-+static void ddma_trans(u8 *data, u64 flash_add,
-+ u32 flash_bank, int op, u32 numPages)
-+{
-+ u32 data_addr;
-+
-+ /* Map virtual address to bus address for DDMA */
-+ data_addr = virt_to_bus(data);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
-+ (u16)(2 << 12) | (op << 8) | numPages);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
-+ (u16)(2 << 12) | (2 << 8) | 0);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ ((u16)(0x0FFFF & data_addr) << 8)),
-+ (u16)(2 << 12) | (3 << 8) | 0);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (1 << 16) | (0x40 << 8)),
-+ (u16)(2 << 12) | (4 << 8) | 0);
-+}
-+
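/*
 * Illustrative sketch (not part of the original driver): ddma_trans() above
 * programs the DMA engine with four indexed writes and carries the 32-bit
 * bus address of the buffer in two 16-bit halves, each shifted into bits
 * [23:8] of the index word.  The address split on its own looks like this
 * (0x12345678 is just an assumed example bus address):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bus_addr = 0x12345678u;		/* assumed bus address */
	uint32_t hi = (bus_addr >> 16) & 0xFFFFu;	/* carried by command 2 */
	uint32_t lo = bus_addr & 0xFFFFu;		/* carried by command 3 */

	printf("hi word 0x%04x -> index field 0x%08x\n", hi, hi << 8);
	printf("lo word 0x%04x -> index field 0x%08x\n", lo, lo << 8);
	return 0;
}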
-+/* If all data bytes in buf are 0xff, return 1; otherwise return 0 */
-+static int check_all_1(u8 *buf)
-+{
-+ int i, j, cnt;
-+
-+ for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
-+ if (buf[i] != 0xff) {
-+ cnt = 0;
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "the first non-0xff data byte is: %d\n", i);
-+ for (j = i; j < DeviceInfo.wPageDataSize; j++) {
-+ nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
-+ cnt++;
-+ if (cnt > 8)
-+ break;
-+ }
-+ nand_dbg_print(NAND_DBG_WARN, "\n");
-+ return 0;
-+ }
-+ }
-+
-+ return 1;
-+}
-+
-+static int do_ecc_new(unsigned long bank, u8 *buf,
-+ u32 block, u16 page)
-+{
-+ int status = PASS;
-+ u16 err_page = 0;
-+ u16 err_byte;
-+ u8 err_sect;
-+ u8 err_dev;
-+ u16 err_fix_info;
-+ u16 err_addr;
-+ u32 ecc_sect_size;
-+ u8 *err_pos;
-+ u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
-+ ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};
-+
-+ ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-+
-+ do {
-+ err_page = ioread32(FlashReg + err_page_addr[bank]);
-+ err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
-+ err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
-+ err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
-+ err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
-+ err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
-+ >> 8);
-+ if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "%s, Line %d Uncorrectable ECC error "
-+				"when reading block %d page %d. "
-+ "PTN_INTR register: 0x%x "
-+ "err_page: %d, err_sect: %d, err_byte: %d, "
-+ "err_dev: %d, ecc_sect_size: %d, "
-+ "err_fix_info: 0x%x\n",
-+ __FILE__, __LINE__, block, page,
-+ ioread32(FlashReg + PTN_INTR),
-+ err_page, err_sect, err_byte, err_dev,
-+ ecc_sect_size, (u32)err_fix_info);
-+
-+ if (check_all_1(buf))
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
-+ "All 0xff!\n",
-+ __FILE__, __LINE__);
-+ else
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
-+ "Not all 0xff!\n",
-+ __FILE__, __LINE__);
-+ status = FAIL;
-+ } else {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "%s, Line %d Found ECC error "
-+				"when reading block %d page %d. "
-+ "err_page: %d, err_sect: %d, err_byte: %d, "
-+ "err_dev: %d, ecc_sect_size: %d, "
-+ "err_fix_info: 0x%x\n",
-+ __FILE__, __LINE__, block, page,
-+ err_page, err_sect, err_byte, err_dev,
-+ ecc_sect_size, (u32)err_fix_info);
-+ if (err_byte < ECC_SECTOR_SIZE) {
-+ err_pos = buf +
-+ (err_page - page) *
-+ DeviceInfo.wPageDataSize +
-+ err_sect * ecc_sect_size +
-+ err_byte *
-+ DeviceInfo.wDevicesConnected +
-+ err_dev;
-+
-+ *err_pos ^= err_fix_info &
-+ ERR_CORRECTION_INFO__BYTEMASK;
-+ }
-+ }
-+ } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
-+
-+ return status;
-+}
-+
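/*
 * Illustrative sketch (not part of the original driver): for a correctable
 * error, do_ecc_new() above locates the bad byte inside the caller's buffer
 * from the page, ECC sector, byte offset and device number reported by the
 * controller, then XORs in the correction mask.  The offset arithmetic in
 * isolation, with the geometry passed in as assumed parameters:
 */
#include <stdint.h>
#include <stddef.h>

static void apply_ecc_fix(uint8_t *buf, uint16_t err_page, uint16_t first_page,
			  uint16_t err_sect, uint16_t err_byte, uint8_t err_dev,
			  uint8_t fix_mask, uint32_t page_data_size,
			  uint32_t ecc_sect_size, uint32_t devices_connected)
{
	size_t pos = (size_t)(err_page - first_page) * page_data_size +
		     (size_t)err_sect * ecc_sect_size +
		     (size_t)err_byte * devices_connected +
		     err_dev;

	buf[pos] ^= fix_mask;	/* flip the bits the controller flagged */
}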
-+u16 NAND_Read_Page_Main_Polling(u8 *read_data,
-+ u32 block, u16 page, u16 page_count)
-+{
-+ u32 status = PASS;
-+ u64 flash_add;
-+ u32 intr_status = 0;
-+ u32 flash_bank;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ u8 *read_data_l;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+ if (status != PASS)
-+ return status;
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ intr_status = intr_status_addresses[flash_bank];
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ if (page_count > 1) {
-+ read_data_l = read_data;
-+ while (page_count > MAX_PAGES_PER_RW) {
-+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
-+ status = NAND_Multiplane_Read(read_data_l,
-+ block, page, MAX_PAGES_PER_RW);
-+ else
-+ status = NAND_Pipeline_Read_Ahead_Polling(
-+ read_data_l, block, page,
-+ MAX_PAGES_PER_RW);
-+
-+ if (status == FAIL)
-+ return status;
-+
-+ read_data_l += DeviceInfo.wPageDataSize *
-+ MAX_PAGES_PER_RW;
-+ page_count -= MAX_PAGES_PER_RW;
-+ page += MAX_PAGES_PER_RW;
-+ }
-+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
-+ status = NAND_Multiplane_Read(read_data_l,
-+ block, page, page_count);
-+ else
-+ status = NAND_Pipeline_Read_Ahead_Polling(
-+ read_data_l, block, page, page_count);
-+
-+ return status;
-+ }
-+
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ ddma_trans(read_data, flash_add, flash_bank, 0, 1);
-+
-+ if (enable_ecc) {
-+ while (!(ioread32(FlashReg + intr_status) &
-+ (INTR_STATUS0__ECC_TRANSACTION_DONE |
-+ INTR_STATUS0__ECC_ERR)))
-+ ;
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_ERR) {
-+ iowrite32(INTR_STATUS0__ECC_ERR,
-+ FlashReg + intr_status);
-+ status = do_ecc_new(flash_bank, read_data,
-+ block, page);
-+ }
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_TRANSACTION_DONE &
-+ INTR_STATUS0__ECC_ERR)
-+ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
-+ INTR_STATUS0__ECC_ERR,
-+ FlashReg + intr_status);
-+ else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_TRANSACTION_DONE)
-+ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
-+ FlashReg + intr_status);
-+ else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_ERR)
-+ iowrite32(INTR_STATUS0__ECC_ERR,
-+ FlashReg + intr_status);
-+ } else {
-+ while (!(ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP))
-+ ;
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
-+ }
-+
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ return status;
-+}
-+
-+u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
-+ u32 block, u16 page, u16 page_count)
-+{
-+ u32 status = PASS;
-+ u32 NumPages = page_count;
-+ u64 flash_add;
-+ u32 flash_bank;
-+ u32 intr_status = 0;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ u32 ecc_done_OR_dma_comp;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+
-+ if (page_count < 2)
-+ status = FAIL;
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ *DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ if (status == PASS) {
-+ intr_status = intr_status_addresses[flash_bank];
-+ iowrite32(ioread32(FlashReg + intr_status),
-+ FlashReg + intr_status);
-+
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-+ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-+
-+ ecc_done_OR_dma_comp = 0;
-+ while (1) {
-+ if (enable_ecc) {
-+ while (!ioread32(FlashReg + intr_status))
-+ ;
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_ERR) {
-+ iowrite32(INTR_STATUS0__ECC_ERR,
-+ FlashReg + intr_status);
-+ status = do_ecc_new(flash_bank,
-+ read_data, block, page);
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP) {
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
-+ FlashReg + intr_status);
-+
-+ if (1 == ecc_done_OR_dma_comp)
-+ break;
-+
-+ ecc_done_OR_dma_comp = 1;
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_TRANSACTION_DONE) {
-+ iowrite32(
-+ INTR_STATUS0__ECC_TRANSACTION_DONE,
-+ FlashReg + intr_status);
-+
-+ if (1 == ecc_done_OR_dma_comp)
-+ break;
-+
-+ ecc_done_OR_dma_comp = 1;
-+ }
-+ } else {
-+ while (!(ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP))
-+ ;
-+
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
-+ FlashReg + intr_status);
-+ break;
-+ }
-+
-+ iowrite32((~INTR_STATUS0__ECC_ERR) &
-+ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
-+ (~INTR_STATUS0__DMA_CMD_COMP),
-+ FlashReg + intr_status);
-+
-+ }
-+
-+ iowrite32(ioread32(FlashReg + intr_status),
-+ FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+
-+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+ }
-+ return status;
-+}
-+
-+u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
-+ u16 page_count)
-+{
-+ u32 status = PASS;
-+ u64 flash_add;
-+ u32 intr_status = 0;
-+ u32 flash_bank;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ int ret;
-+ u8 *read_data_l;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+ if (status != PASS)
-+ return status;
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ intr_status = intr_status_addresses[flash_bank];
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
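-+ /* Requests longer than one page are split into MAX_PAGES_PER_RW chunks
-+ * and handed to the multiplane or pipelined read path; a single page
-+ * falls through to the interrupt-driven DMA read below. */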
-+ if (page_count > 1) {
-+ read_data_l = read_data;
-+ while (page_count > MAX_PAGES_PER_RW) {
-+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
-+ status = NAND_Multiplane_Read(read_data_l,
-+ block, page, MAX_PAGES_PER_RW);
-+ else
-+ status = NAND_Pipeline_Read_Ahead(
-+ read_data_l, block, page,
-+ MAX_PAGES_PER_RW);
-+
-+ if (status == FAIL)
-+ return status;
-+
-+ read_data_l += DeviceInfo.wPageDataSize *
-+ MAX_PAGES_PER_RW;
-+ page_count -= MAX_PAGES_PER_RW;
-+ page += MAX_PAGES_PER_RW;
-+ }
-+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
-+ status = NAND_Multiplane_Read(read_data_l,
-+ block, page, page_count);
-+ else
-+ status = NAND_Pipeline_Read_Ahead(
-+ read_data_l, block, page, page_count);
-+
-+ return status;
-+ }
-+
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ /* Fill the mrst_nand_info structure */
-+ info.state = INT_READ_PAGE_MAIN;
-+ info.read_data = read_data;
-+ info.flash_bank = flash_bank;
-+ info.block = block;
-+ info.page = page;
-+ info.ret = PASS;
-+
-+ ddma_trans(read_data, flash_add, flash_bank, 0, 1);
-+
-+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-+
-+ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
-+ if (!ret) {
-+ printk(KERN_ERR "Wait for completion timeout "
-+ "in %s, Line %d\n", __FILE__, __LINE__);
-+ status = ERR;
-+ } else {
-+ status = info.ret;
-+ }
-+
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ return status;
-+}
-+
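-+ /* Move the spare-area flag bytes between the logical layout (flags at
-+ * the start of the spare buffer) and the physical layout (flags in the
-+ * last wNumPageSpareFlag bytes of the spare area). */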
-+void Conv_Spare_Data_Log2Phy_Format(u8 *data)
-+{
-+ int i;
-+ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
-+ const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-+
-+ if (enable_ecc) {
-+ for (i = spareFlagBytes - 1; i >= 0; i--)
-+ data[PageSpareSize - spareFlagBytes + i] = data[i];
-+ }
-+}
-+
-+void Conv_Spare_Data_Phy2Log_Format(u8 *data)
-+{
-+ int i;
-+ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
-+ const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-+
-+ if (enable_ecc) {
-+ for (i = 0; i < spareFlagBytes; i++)
-+ data[i] = data[PageSpareSize - spareFlagBytes + i];
-+ }
-+}
-+
-+
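-+ /* Re-arrange a whole main+spare page image between the logical layout
-+ * (main data followed by the flag bytes) and the physical layout, which
-+ * interleaves per-sector ECC bytes and re-inserts the spare skip bytes. */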
-+void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
-+{
-+ const u32 PageSize = DeviceInfo.wPageSize;
-+ const u32 PageDataSize = DeviceInfo.wPageDataSize;
-+ const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
-+ const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
-+ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
-+ u32 eccSectorSize;
-+ u32 page_offset;
-+ int i, j;
-+
-+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-+ if (enable_ecc) {
-+ while (page_count > 0) {
-+ page_offset = (page_count - 1) * PageSize;
-+ j = (DeviceInfo.wPageDataSize / eccSectorSize);
-+ for (i = spareFlagBytes - 1; i >= 0; i--)
-+ data[page_offset +
-+ (eccSectorSize + eccBytes) * j + i] =
-+ data[page_offset + PageDataSize + i];
-+ for (j--; j >= 1; j--) {
-+ for (i = eccSectorSize - 1; i >= 0; i--)
-+ data[page_offset +
-+ (eccSectorSize + eccBytes) * j + i] =
-+ data[page_offset +
-+ eccSectorSize * j + i];
-+ }
-+ for (i = (PageSize - spareSkipBytes) - 1;
-+ i >= PageDataSize; i--)
-+ data[page_offset + i + spareSkipBytes] =
-+ data[page_offset + i];
-+ page_count--;
-+ }
-+ }
-+}
-+
-+void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
-+{
-+ const u32 PageSize = DeviceInfo.wPageSize;
-+ const u32 PageDataSize = DeviceInfo.wPageDataSize;
-+ const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
-+ const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
-+ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
-+ u32 eccSectorSize;
-+ u32 page_offset;
-+ int i, j;
-+
-+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-+ if (enable_ecc) {
-+ while (page_count > 0) {
-+ page_offset = (page_count - 1) * PageSize;
-+ for (i = PageDataSize;
-+ i < PageSize - spareSkipBytes;
-+ i++)
-+ data[page_offset + i] =
-+ data[page_offset + i +
-+ spareSkipBytes];
-+ for (j = 1;
-+ j < DeviceInfo.wPageDataSize / eccSectorSize;
-+ j++) {
-+ for (i = 0; i < eccSectorSize; i++)
-+ data[page_offset +
-+ eccSectorSize * j + i] =
-+ data[page_offset +
-+ (eccSectorSize + eccBytes) * j
-+ + i];
-+ }
-+ for (i = 0; i < spareFlagBytes; i++)
-+ data[page_offset + PageDataSize + i] =
-+ data[page_offset +
-+ (eccSectorSize + eccBytes) * j + i];
-+ page_count--;
-+ }
-+ }
-+}
-+
-+/* Un-tested function */
-+u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
-+ u16 page_count)
-+{
-+ u32 status = PASS;
-+ u32 NumPages = page_count;
-+ u64 flash_add;
-+ u32 flash_bank;
-+ u32 intr_status = 0;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ u32 ecc_done_OR_dma_comp;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ if (status == PASS) {
-+ intr_status = intr_status_addresses[flash_bank];
-+ iowrite32(ioread32(FlashReg + intr_status),
-+ FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+ iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
-+
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-+ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-+
-+ ecc_done_OR_dma_comp = 0;
-+ while (1) {
-+ if (enable_ecc) {
-+ while (!ioread32(FlashReg + intr_status))
-+ ;
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_ERR) {
-+ iowrite32(INTR_STATUS0__ECC_ERR,
-+ FlashReg + intr_status);
-+ status = do_ecc_new(flash_bank,
-+ read_data, block, page);
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP) {
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
-+ FlashReg + intr_status);
-+
-+ if (1 == ecc_done_OR_dma_comp)
-+ break;
-+
-+ ecc_done_OR_dma_comp = 1;
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_TRANSACTION_DONE) {
-+ iowrite32(
-+ INTR_STATUS0__ECC_TRANSACTION_DONE,
-+ FlashReg + intr_status);
-+
-+ if (1 == ecc_done_OR_dma_comp)
-+ break;
-+
-+ ecc_done_OR_dma_comp = 1;
-+ }
-+ } else {
-+ while (!(ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP))
-+ ;
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
-+ FlashReg + intr_status);
-+ break;
-+ }
-+
-+ iowrite32((~INTR_STATUS0__ECC_ERR) &
-+ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
-+ (~INTR_STATUS0__DMA_CMD_COMP),
-+ FlashReg + intr_status);
-+
-+ }
-+
-+ iowrite32(ioread32(FlashReg + intr_status),
-+ FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+
-+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
-+ }
-+
-+ return status;
-+}
-+
-+u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
-+ u16 page, u16 page_count)
-+{
-+ u32 status = PASS;
-+ u32 NumPages = page_count;
-+ u64 flash_add;
-+ u32 flash_bank;
-+ u32 intr_status = 0;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ int ret;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+
-+ if (page_count < 2)
-+ status = FAIL;
-+
-+ if (status != PASS)
-+ return status;
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ *DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ intr_status = intr_status_addresses[flash_bank];
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ /* Fill the mrst_nand_info structure */
-+ info.state = INT_PIPELINE_READ_AHEAD;
-+ info.read_data = read_data;
-+ info.flash_bank = flash_bank;
-+ info.block = block;
-+ info.page = page;
-+ info.ret = PASS;
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-+
-+ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-+
-+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-+
-+ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
-+ if (!ret) {
-+ printk(KERN_ERR "Wait for completion timeout "
-+ "in %s, Line %d\n", __FILE__, __LINE__);
-+ status = ERR;
-+ } else {
-+ status = info.ret;
-+ }
-+
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+
-+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ return status;
-+}
-+
-+
-+u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
-+ u16 page_count)
-+{
-+ u32 status = PASS;
-+ u64 flash_add;
-+ u32 intr_status = 0;
-+ u32 flash_bank;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ int ret;
-+ u8 *write_data_l;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+ if (status != PASS)
-+ return status;
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ intr_status = intr_status_addresses[flash_bank];
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ iowrite32(INTR_STATUS0__PROGRAM_COMP |
-+ INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);
-+
-+ if (page_count > 1) {
-+ write_data_l = write_data;
-+ while (page_count > MAX_PAGES_PER_RW) {
-+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
-+ status = NAND_Multiplane_Write(write_data_l,
-+ block, page, MAX_PAGES_PER_RW);
-+ else
-+ status = NAND_Pipeline_Write_Ahead(
-+ write_data_l, block, page,
-+ MAX_PAGES_PER_RW);
-+ if (status == FAIL)
-+ return status;
-+
-+ write_data_l += DeviceInfo.wPageDataSize *
-+ MAX_PAGES_PER_RW;
-+ page_count -= MAX_PAGES_PER_RW;
-+ page += MAX_PAGES_PER_RW;
-+ }
-+ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
-+ status = NAND_Multiplane_Write(write_data_l,
-+ block, page, page_count);
-+ else
-+ status = NAND_Pipeline_Write_Ahead(write_data_l,
-+ block, page, page_count);
-+
-+ return status;
-+ }
-+
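-+ /* Single-page write: start a data DMA transfer and wait below for the
-+ * interrupt handler to signal info.complete. */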
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ /* Fill the mrst_nand_info structure */
-+ info.state = INT_WRITE_PAGE_MAIN;
-+ info.write_data = write_data;
-+ info.flash_bank = flash_bank;
-+ info.block = block;
-+ info.page = page;
-+ info.ret = PASS;
-+
-+ ddma_trans(write_data, flash_add, flash_bank, 1, 1);
-+
-+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
-+
-+ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
-+ if (!ret) {
-+ printk(KERN_ERR "Wait for completion timeout "
-+ "in %s, Line %d\n", __FILE__, __LINE__);
-+ status = ERR;
-+ } else {
-+ status = info.ret;
-+ }
-+
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+ while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
-+ ;
-+
-+ return status;
-+}
-+
-+void NAND_ECC_Ctrl(int enable)
-+{
-+ if (enable) {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Will enable ECC in %s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+ iowrite32(1, FlashReg + ECC_ENABLE);
-+ enable_ecc = 1;
-+ } else {
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Will disable ECC in %s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+ iowrite32(0, FlashReg + ECC_ENABLE);
-+ enable_ecc = 0;
-+ }
-+}
-+
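-+ /* Main+spare accesses use programmed I/O through the MODE_01 data
-+ * window at FlashMem + 0x10 rather than DMA, converting between the
-+ * logical and physical spare layouts inline when ECC is enabled. */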
-+u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
-+ u16 page, u16 page_count)
-+{
-+ u32 status = PASS;
-+ u32 i, j;
-+ u32 PageSize = DeviceInfo.wPageSize;
-+ u32 PageDataSize = DeviceInfo.wPageDataSize;
-+ u32 eccBytes = DeviceInfo.wECCBytesPerSector;
-+ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
-+ u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
-+ u64 flash_add;
-+ u32 eccSectorSize;
-+ u32 flash_bank;
-+ u32 intr_status = 0;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ u8 *page_main_spare = buf_write_page_main_spare;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ if (status == PASS) {
-+ intr_status = intr_status_addresses[flash_bank];
-+
-+ iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
-+
-+ while ((status != FAIL) && (page_count > 0)) {
-+ flash_add = (u64)(block %
-+ (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
-+ DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ iowrite32(ioread32(FlashReg + intr_status),
-+ FlashReg + intr_status);
-+
-+ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
-+ (flash_add >>
-+ DeviceInfo.nBitsInPageDataSize)),
-+ FlashMem);
-+
-+ if (enable_ecc) {
-+ for (j = 0;
-+ j <
-+ DeviceInfo.wPageDataSize / eccSectorSize;
-+ j++) {
-+ for (i = 0; i < eccSectorSize; i++)
-+ page_main_spare[(eccSectorSize +
-+ eccBytes) * j +
-+ i] =
-+ write_data[eccSectorSize *
-+ j + i];
-+
-+ for (i = 0; i < eccBytes; i++)
-+ page_main_spare[(eccSectorSize +
-+ eccBytes) * j +
-+ eccSectorSize +
-+ i] =
-+ write_data[PageDataSize +
-+ spareFlagBytes +
-+ eccBytes * j +
-+ i];
-+ }
-+
-+ for (i = 0; i < spareFlagBytes; i++)
-+ page_main_spare[(eccSectorSize +
-+ eccBytes) * j + i] =
-+ write_data[PageDataSize + i];
-+
-+ for (i = PageSize - 1; i >= PageDataSize +
-+ spareSkipBytes; i--)
-+ page_main_spare[i] = page_main_spare[i -
-+ spareSkipBytes];
-+
-+ for (i = PageDataSize; i < PageDataSize +
-+ spareSkipBytes; i++)
-+ page_main_spare[i] = 0xff;
-+
-+ for (i = 0; i < PageSize / 4; i++)
-+ iowrite32(
-+ *((u32 *)page_main_spare + i),
-+ FlashMem + 0x10);
-+ } else {
-+
-+ for (i = 0; i < PageSize / 4; i++)
-+ iowrite32(*((u32 *)write_data + i),
-+ FlashMem + 0x10);
-+ }
-+
-+ while (!(ioread32(FlashReg + intr_status) &
-+ (INTR_STATUS0__PROGRAM_COMP |
-+ INTR_STATUS0__PROGRAM_FAIL)))
-+ ;
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__PROGRAM_FAIL)
-+ status = FAIL;
-+
-+ iowrite32(ioread32(FlashReg + intr_status),
-+ FlashReg + intr_status);
-+
-+ page++;
-+ page_count--;
-+ write_data += PageSize;
-+ }
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+ }
-+
-+ return status;
-+}
-+
-+u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
-+ u16 page_count)
-+{
-+ u32 status = PASS;
-+ u32 i, j;
-+ u64 flash_add = 0;
-+ u32 PageSize = DeviceInfo.wPageSize;
-+ u32 PageDataSize = DeviceInfo.wPageDataSize;
-+ u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-+ u32 eccBytes = DeviceInfo.wECCBytesPerSector;
-+ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
-+ u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
-+ u32 eccSectorSize;
-+ u32 flash_bank;
-+ u32 intr_status = 0;
-+ u8 *read_data_l = read_data;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ u8 *page_main_spare = buf_read_page_main_spare;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ if (status == PASS) {
-+ intr_status = intr_status_addresses[flash_bank];
-+
-+ iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
-+
-+ iowrite32(ioread32(FlashReg + intr_status),
-+ FlashReg + intr_status);
-+
-+ while ((status != FAIL) && (page_count > 0)) {
-+ flash_add = (u64)(block %
-+ (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
-+ 0x43);
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
-+ 0x2000 | page_count);
-+
-+ while (!(ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__LOAD_COMP))
-+ ;
-+
-+ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
-+ (flash_add >>
-+ DeviceInfo.nBitsInPageDataSize)),
-+ FlashMem);
-+
-+ for (i = 0; i < PageSize / 4; i++)
-+ *(((u32 *)page_main_spare) + i) =
-+ ioread32(FlashMem + 0x10);
-+
-+ if (enable_ecc) {
-+ for (i = PageDataSize; i < PageSize -
-+ spareSkipBytes; i++)
-+ page_main_spare[i] = page_main_spare[i +
-+ spareSkipBytes];
-+
-+ for (j = 0;
-+ j < DeviceInfo.wPageDataSize / eccSectorSize;
-+ j++) {
-+
-+ for (i = 0; i < eccSectorSize; i++)
-+ read_data_l[eccSectorSize * j +
-+ i] =
-+ page_main_spare[
-+ (eccSectorSize +
-+ eccBytes) * j + i];
-+
-+ for (i = 0; i < eccBytes; i++)
-+ read_data_l[PageDataSize +
-+ spareFlagBytes +
-+ eccBytes * j + i] =
-+ page_main_spare[
-+ (eccSectorSize +
-+ eccBytes) * j +
-+ eccSectorSize + i];
-+ }
-+
-+ for (i = 0; i < spareFlagBytes; i++)
-+ read_data_l[PageDataSize + i] =
-+ page_main_spare[(eccSectorSize +
-+ eccBytes) * j + i];
-+ } else {
-+ for (i = 0; i < (PageDataSize + PageSpareSize);
-+ i++)
-+ read_data_l[i] = page_main_spare[i];
-+
-+ }
-+
-+ if (enable_ecc) {
-+ while (!(ioread32(FlashReg + intr_status) &
-+ (INTR_STATUS0__ECC_TRANSACTION_DONE |
-+ INTR_STATUS0__ECC_ERR)))
-+ ;
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_ERR) {
-+ iowrite32(INTR_STATUS0__ECC_ERR,
-+ FlashReg + intr_status);
-+ status = do_ecc_new(flash_bank,
-+ read_data, block, page);
-+ }
-+
-+ if ((ioread32(FlashReg + intr_status) &
-+ (INTR_STATUS0__ECC_TRANSACTION_DONE |
-+ INTR_STATUS0__ECC_ERR)) ==
-+ (INTR_STATUS0__ECC_TRANSACTION_DONE |
-+ INTR_STATUS0__ECC_ERR)) {
-+ iowrite32(INTR_STATUS0__ECC_ERR |
-+ INTR_STATUS0__ECC_TRANSACTION_DONE,
-+ FlashReg + intr_status);
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_TRANSACTION_DONE) {
-+ iowrite32(
-+ INTR_STATUS0__ECC_TRANSACTION_DONE,
-+ FlashReg + intr_status);
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_ERR) {
-+ iowrite32(INTR_STATUS0__ECC_ERR,
-+ FlashReg + intr_status);
-+ }
-+ }
-+
-+ page++;
-+ page_count--;
-+ read_data_l += PageSize;
-+ }
-+ }
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-+
-+ return status;
-+}
-+
-+u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
-+ u16 page, u16 page_count)
-+{
-+ u16 status = PASS;
-+ u32 NumPages = page_count;
-+ u64 flash_add;
-+ u32 flash_bank;
-+ u32 intr_status = 0;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ int ret;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+
-+ if (page_count < 2)
-+ status = FAIL;
-+
-+ if (status != PASS)
-+ return status;
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ intr_status = intr_status_addresses[flash_bank];
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ /* Fill the mrst_nand_info structure */
-+ info.state = INT_PIPELINE_WRITE_AHEAD;
-+ info.write_data = write_data;
-+ info.flash_bank = flash_bank;
-+ info.block = block;
-+ info.page = page;
-+ info.ret = PASS;
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-+
-+ ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
-+
-+ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
-+
-+ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
-+ if (!ret) {
-+ printk(KERN_ERR "Wait for completion timeout "
-+ "in %s, Line %d\n", __FILE__, __LINE__);
-+ status = ERR;
-+ } else {
-+ status = info.ret;
-+ }
-+
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ return status;
-+}
-+
-+/* Un-tested function */
-+u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
-+ u16 page_count)
-+{
-+ u16 status = PASS;
-+ u32 NumPages = page_count;
-+ u64 flash_add;
-+ u32 flash_bank;
-+ u32 intr_status = 0;
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ u16 status2 = PASS;
-+ u32 t;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ status = Boundary_Check_Block_Page(block, page, page_count);
-+ if (status != PASS)
-+ return status;
-+
-+ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
-+ * DeviceInfo.wBlockDataSize +
-+ (u64)page * DeviceInfo.wPageDataSize;
-+
-+ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-+
-+ intr_status = intr_status_addresses[flash_bank];
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+ iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
-+
-+ iowrite32(1, FlashReg + DMA_ENABLE);
-+ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-+
-+ index_addr((u32)(MODE_10 | (flash_bank << 24) |
-+ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-+
-+ ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
-+
-+ while (1) {
-+ while (!ioread32(FlashReg + intr_status))
-+ ;
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP) {
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
-+ FlashReg + intr_status);
-+ status = PASS;
-+ if (status2 == FAIL)
-+ status = FAIL;
-+ break;
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__PROGRAM_FAIL) {
-+ status2 = FAIL;
-+ status = FAIL;
-+ t = ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__PROGRAM_FAIL;
-+ iowrite32(t, FlashReg + intr_status);
-+ } else {
-+ iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
-+ (~INTR_STATUS0__DMA_CMD_COMP),
-+ FlashReg + intr_status);
-+ }
-+ }
-+
-+ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-+
-+ iowrite32(0, FlashReg + DMA_ENABLE);
-+
-+ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
-+ ;
-+
-+ iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
-+
-+ return status;
-+}
-+
-+
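-+ /* Interrupt handling: cdma_isr() serves the command-DMA configuration,
-+ * while ddma_isr() and its two helpers below serve plain data-DMA
-+ * transfers. */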
-+#if CMD_DMA
-+static irqreturn_t cdma_isr(int irq, void *dev_id)
-+{
-+ struct mrst_nand_info *dev = dev_id;
-+ int first_failed_cmd;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ if (!is_cdma_interrupt())
-+ return IRQ_NONE;
-+
-+ /* Disable controller interrupts */
-+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-+ GLOB_FTL_Event_Status(&first_failed_cmd);
-+ complete(&dev->complete);
-+
-+ return IRQ_HANDLED;
-+}
-+#else
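-+ /* Called from ddma_isr() for read operations: handle ECC errors and
-+ * loop until both the ECC-done and DMA-complete interrupts have been
-+ * observed (or just DMA-complete when ECC is disabled). */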
-+static void handle_nand_int_read(struct mrst_nand_info *dev)
-+{
-+ u32 intr_status_addresses[4] = {INTR_STATUS0,
-+ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-+ u32 intr_status;
-+ u32 ecc_done_OR_dma_comp = 0;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ dev->ret = PASS;
-+ intr_status = intr_status_addresses[dev->flash_bank];
-+
-+ while (1) {
-+ if (enable_ecc) {
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_ERR) {
-+ iowrite32(INTR_STATUS0__ECC_ERR,
-+ FlashReg + intr_status);
-+ dev->ret = do_ecc_new(dev->flash_bank,
-+ dev->read_data,
-+ dev->block, dev->page);
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP) {
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
-+ FlashReg + intr_status);
-+ if (1 == ecc_done_OR_dma_comp)
-+ break;
-+ ecc_done_OR_dma_comp = 1;
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__ECC_TRANSACTION_DONE) {
-+ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
-+ FlashReg + intr_status);
-+ if (1 == ecc_done_OR_dma_comp)
-+ break;
-+ ecc_done_OR_dma_comp = 1;
-+ }
-+ } else {
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP) {
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
-+ FlashReg + intr_status);
-+ break;
-+ } else {
-+ printk(KERN_ERR "Illegal INTS "
-+ "(offset addr 0x%x) value: 0x%x\n",
-+ intr_status,
-+ ioread32(FlashReg + intr_status));
-+ }
-+ }
-+
-+ iowrite32((~INTR_STATUS0__ECC_ERR) &
-+ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
-+ (~INTR_STATUS0__DMA_CMD_COMP),
-+ FlashReg + intr_status);
-+ }
-+}
-+
-+static void handle_nand_int_write(struct mrst_nand_info *dev)
-+{
-+ u32 intr_status;
-+ u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
-+ INTR_STATUS2, INTR_STATUS3};
-+ int status = PASS;
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ dev->ret = PASS;
-+ intr_status = intr[dev->flash_bank];
-+
-+ while (1) {
-+ while (!ioread32(FlashReg + intr_status))
-+ ;
-+
-+ if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__DMA_CMD_COMP) {
-+ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
-+ FlashReg + intr_status);
-+ if (FAIL == status)
-+ dev->ret = FAIL;
-+ break;
-+ } else if (ioread32(FlashReg + intr_status) &
-+ INTR_STATUS0__PROGRAM_FAIL) {
-+ status = FAIL;
-+ iowrite32(INTR_STATUS0__PROGRAM_FAIL,
-+ FlashReg + intr_status);
-+ } else {
-+ iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
-+ (~INTR_STATUS0__DMA_CMD_COMP),
-+ FlashReg + intr_status);
-+ }
-+ }
-+}
-+
-+static irqreturn_t ddma_isr(int irq, void *dev_id)
-+{
-+ struct mrst_nand_info *dev = dev_id;
-+ u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
-+ u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
-+ INTR_STATUS2, INTR_STATUS3};
-+
-+ int_mask = INTR_STATUS0__DMA_CMD_COMP |
-+ INTR_STATUS0__ECC_TRANSACTION_DONE |
-+ INTR_STATUS0__ECC_ERR |
-+ INTR_STATUS0__PROGRAM_FAIL |
-+ INTR_STATUS0__ERASE_FAIL;
-+
-+ ints0 = ioread32(FlashReg + INTR_STATUS0);
-+ ints1 = ioread32(FlashReg + INTR_STATUS1);
-+ ints2 = ioread32(FlashReg + INTR_STATUS2);
-+ ints3 = ioread32(FlashReg + INTR_STATUS3);
-+
-+ ints_offset = intr[dev->flash_bank];
-+
-+ nand_dbg_print(NAND_DBG_DEBUG,
-+ "INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
-+ "DMA_INTR: 0x%x, "
-+ "dev->state: 0x%x, dev->flash_bank: %d\n",
-+ ints0, ints1, ints2, ints3,
-+ ioread32(FlashReg + DMA_INTR),
-+ dev->state, dev->flash_bank);
-+
-+ if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
-+ iowrite32(ints0, FlashReg + INTR_STATUS0);
-+ iowrite32(ints1, FlashReg + INTR_STATUS1);
-+ iowrite32(ints2, FlashReg + INTR_STATUS2);
-+ iowrite32(ints3, FlashReg + INTR_STATUS3);
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "ddma_isr: Invalid interrupt for NAND controller. "
-+ "Ignore it\n");
-+ return IRQ_NONE;
-+ }
-+
-+ switch (dev->state) {
-+ case INT_READ_PAGE_MAIN:
-+ case INT_PIPELINE_READ_AHEAD:
-+ /* Disable controller interrupts */
-+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-+ handle_nand_int_read(dev);
-+ break;
-+ case INT_WRITE_PAGE_MAIN:
-+ case INT_PIPELINE_WRITE_AHEAD:
-+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-+ handle_nand_int_write(dev);
-+ break;
-+ default:
-+ printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
-+ dev->state);
-+ return IRQ_NONE;
-+ }
-+
-+ dev->state = INT_IDLE_STATE;
-+ complete(&dev->complete);
-+ return IRQ_HANDLED;
-+}
-+#endif
-+
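-+ /* PCI glue: the controller is probed as PCI device 8086:0809 and its
-+ * register window is taken from BAR 0. */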
-+static const struct pci_device_id nand_pci_ids[] = {
-+ {
-+ .vendor = 0x8086,
-+ .device = 0x0809,
-+ .subvendor = PCI_ANY_ID,
-+ .subdevice = PCI_ANY_ID,
-+ },
-+ { /* end: all zeroes */ }
-+};
-+
-+static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-+{
-+ int ret = -ENODEV;
-+ unsigned long csr_base;
-+ unsigned long csr_len;
-+ struct mrst_nand_info *pndev = &info;
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ ret = pci_enable_device(dev);
-+ if (ret) {
-+ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
-+ return ret;
-+ }
-+
-+ pci_set_master(dev);
-+ pndev->dev = dev;
-+
-+ csr_base = pci_resource_start(dev, 0);
-+ if (!csr_base) {
-+ printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
-+ return -ENODEV;
-+ }
-+
-+ csr_len = pci_resource_len(dev, 0);
-+ if (!csr_len) {
-+ printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
-+ return -ENODEV;
-+ }
-+
-+ ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
-+ if (ret) {
-+ printk(KERN_ERR "Spectra: Unable to request "
-+ "memory region\n");
-+ goto failed_req_csr;
-+ }
-+
-+ pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
-+ if (!pndev->ioaddr) {
-+ printk(KERN_ERR "Spectra: Unable to remap memory region\n");
-+ ret = -ENOMEM;
-+ goto failed_remap_csr;
-+ }
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
-+ csr_base, pndev->ioaddr, csr_len);
-+
-+ init_completion(&pndev->complete);
-+ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
-+
-+#if CMD_DMA
-+ if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
-+ SPECTRA_NAND_NAME, &info)) {
-+ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
-+ ret = -ENODEV;
-+ iounmap(pndev->ioaddr);
-+ goto failed_remap_csr;
-+ }
-+#else
-+ if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
-+ SPECTRA_NAND_NAME, &info)) {
-+ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
-+ ret = -ENODEV;
-+ iounmap(pndev->ioaddr);
-+ goto failed_remap_csr;
-+ }
-+#endif
-+
-+ pci_set_drvdata(dev, pndev);
-+
-+ return 0;
-+
-+failed_remap_csr:
-+ pci_release_regions(dev);
-+failed_req_csr:
-+
-+ return ret;
-+}
-+
-+static void nand_pci_remove(struct pci_dev *dev)
-+{
-+ struct mrst_nand_info *pndev = pci_get_drvdata(dev);
-+
-+ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ /* The IRQ was requested in both the CMD_DMA and data-DMA probe paths */
-+ free_irq(dev->irq, pndev);
-+ iounmap(pndev->ioaddr);
-+ pci_release_regions(dev);
-+ pci_disable_device(dev);
-+}
-+
-+MODULE_DEVICE_TABLE(pci, nand_pci_ids);
-+
-+static struct pci_driver nand_pci_driver = {
-+ .name = SPECTRA_NAND_NAME,
-+ .id_table = nand_pci_ids,
-+ .probe = nand_pci_probe,
-+ .remove = nand_pci_remove,
-+};
-+
-+int NAND_Flash_Init(void)
-+{
-+ int retval;
-+ u32 int_mask;
-+
-+ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-+ __FILE__, __LINE__, __func__);
-+
-+ FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
-+ GLOB_HWCTL_REG_SIZE);
-+ if (!FlashReg) {
-+ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
-+ return -ENOMEM;
-+ }
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Spectra: Remapped reg base address: "
-+ "0x%p, len: %d\n",
-+ FlashReg, GLOB_HWCTL_REG_SIZE);
-+
-+ FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
-+ GLOB_HWCTL_MEM_SIZE);
-+ if (!FlashMem) {
-+ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
-+ iounmap(FlashReg);
-+ return -ENOMEM;
-+ }
-+ nand_dbg_print(NAND_DBG_WARN,
-+ "Spectra: Remapped flash base address: "
-+ "0x%p, len: %d\n",
-+ (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
-+
-+ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
-+ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
-+ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
-+ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
-+ ioread32(FlashReg + ACC_CLKS),
-+ ioread32(FlashReg + RE_2_WE),
-+ ioread32(FlashReg + WE_2_RE),
-+ ioread32(FlashReg + ADDR_2_DATA),
-+ ioread32(FlashReg + RDWR_EN_LO_CNT),
-+ ioread32(FlashReg + RDWR_EN_HI_CNT),
-+ ioread32(FlashReg + CS_SETUP_CNT));
-+
-+ NAND_Flash_Reset();
-+
-+ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-+
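-+ /* Select the interrupt sources to enable: the CDMA build keys off the
-+ * DMA descriptor-completion bits, the data-DMA build off the per-bank
-+ * ECC/DMA/program/erase status bits. */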
-+#if CMD_DMA
-+ info.pcmds_num = 0;
-+ info.flash_bank = 0;
-+ info.cdma_num = 0;
-+ int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
-+ DMA_INTR__DESC_COMP_CHANNEL1 |
-+ DMA_INTR__DESC_COMP_CHANNEL2 |
-+ DMA_INTR__DESC_COMP_CHANNEL3 |
-+ DMA_INTR__MEMCOPY_DESC_COMP);
-+ iowrite32(int_mask, FlashReg + DMA_INTR_EN);
-+ iowrite32(0xFFFF, FlashReg + DMA_INTR);
-+
-+ int_mask = (INTR_STATUS0__ECC_ERR |
-+ INTR_STATUS0__PROGRAM_FAIL |
-+ INTR_STATUS0__ERASE_FAIL);
-+#else
-+ int_mask = INTR_STATUS0__DMA_CMD_COMP |
-+ INTR_STATUS0__ECC_TRANSACTION_DONE |
-+ INTR_STATUS0__ECC_ERR |
-+ INTR_STATUS0__PROGRAM_FAIL |
-+ INTR_STATUS0__ERASE_FAIL;
-+#endif
-+ iowrite32(int_mask, FlashReg + INTR_EN0);
-+ iowrite32(int_mask, FlashReg + INTR_EN1);
-+ iowrite32(int_mask, FlashReg + INTR_EN2);
-+ iowrite32(int_mask, FlashReg + INTR_EN3);
-+
-+ /* Clear all status bits */
-+ iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
-+ iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
-+ iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
-+ iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
-+
-+ iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
-+ iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
-+
-+ /* Set default values for these registers at init time */
-+ iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
-+ iowrite32(1, FlashReg + ECC_ENABLE);
-+ enable_ecc = 1;
-+
-+ retval = pci_register_driver(&nand_pci_driver);
-+ if (retval) {
-+ iounmap(FlashMem);
-+ iounmap(FlashReg);
-+ return retval;
-+ }
-+
-+ return PASS;
-+}
-+
-+/* Free memory */
-+int nand_release(void)
-+{
-+ pci_unregister_driver(&nand_pci_driver);
-+ iounmap(FlashMem);
-+ iounmap(FlashReg);
-+
-+ return 0;
-+}
-+
-+
-+
-diff --git a/drivers/block/spectra/lld_nand.h b/drivers/block/spectra/lld_nand.h
-new file mode 100644
-index 0000000..c7d62c5
---- /dev/null
-+++ b/drivers/block/spectra/lld_nand.h
-@@ -0,0 +1,131 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#ifndef _LLD_NAND_
-+#define _LLD_NAND_
-+
-+#ifdef ELDORA
-+#include "defs.h"
-+#else
-+#include "flash.h"
-+#include "ffsport.h"
-+#endif
-+
-+#define MODE_00 0x00000000
-+#define MODE_01 0x04000000
-+#define MODE_10 0x08000000
-+#define MODE_11 0x0C000000
-+
-+
-+#define DATA_TRANSFER_MODE 0
-+#define PROTECTION_PER_BLOCK 1
-+#define LOAD_WAIT_COUNT 2
-+#define PROGRAM_WAIT_COUNT 3
-+#define ERASE_WAIT_COUNT 4
-+#define INT_MONITOR_CYCLE_COUNT 5
-+#define READ_BUSY_PIN_ENABLED 6
-+#define MULTIPLANE_OPERATION_SUPPORT 7
-+#define PRE_FETCH_MODE 8
-+#define CE_DONT_CARE_SUPPORT 9
-+#define COPYBACK_SUPPORT 10
-+#define CACHE_WRITE_SUPPORT 11
-+#define CACHE_READ_SUPPORT 12
-+#define NUM_PAGES_IN_BLOCK 13
-+#define ECC_ENABLE_SELECT 14
-+#define WRITE_ENABLE_2_READ_ENABLE 15
-+#define ADDRESS_2_DATA 16
-+#define READ_ENABLE_2_WRITE_ENABLE 17
-+#define TWO_ROW_ADDRESS_CYCLES 18
-+#define MULTIPLANE_ADDRESS_RESTRICT 19
-+#define ACC_CLOCKS 20
-+#define READ_WRITE_ENABLE_LOW_COUNT 21
-+#define READ_WRITE_ENABLE_HIGH_COUNT 22
-+
-+#define ECC_SECTOR_SIZE 512
-+#define LLD_MAX_FLASH_BANKS 4
-+
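-+ /* Per-controller driver state shared between the LLD entry points and
-+ * the interrupt handlers. */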
-+struct mrst_nand_info {
-+ struct pci_dev *dev;
-+ u32 state;
-+ u32 flash_bank;
-+ u8 *read_data;
-+ u8 *write_data;
-+ u32 block;
-+ u16 page;
-+ u32 use_dma;
-+ void __iomem *ioaddr; /* Mapped io reg base address */
-+ int ret;
-+ u32 pcmds_num;
-+ struct pending_cmd *pcmds;
-+ int cdma_num; /* CDMA descriptor number in this chan */
-+ u8 *cdma_desc_buf; /* CDMA descriptor table */
-+ u8 *memcp_desc_buf; /* Memory copy descriptor table */
-+ dma_addr_t cdma_desc; /* Mapped CDMA descriptor table */
-+ dma_addr_t memcp_desc; /* Mapped memory copy descriptor table */
-+ struct completion complete;
-+};
-+
-+int NAND_Flash_Init(void);
-+int nand_release(void);
-+u16 NAND_Flash_Reset(void);
-+u16 NAND_Read_Device_ID(void);
-+u16 NAND_Erase_Block(u32 flash_add);
-+u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
-+ u16 page_count);
-+u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
-+ u16 page_count);
-+u16 NAND_UnlockArrayAll(void);
-+u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
-+ u16 page, u16 page_count);
-+u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
-+ u16 page_count);
-+u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
-+ u16 page_count);
-+u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
-+ u16 page_count);
-+void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
-+u16 NAND_Get_Bad_Block(u32 block);
-+u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
-+ u16 page_count);
-+u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
-+ u16 page, u16 page_count);
-+u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
-+ u16 page_count);
-+u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
-+ u16 page_count);
-+void NAND_ECC_Ctrl(int enable);
-+u16 NAND_Read_Page_Main_Polling(u8 *read_data,
-+ u32 block, u16 page, u16 page_count);
-+u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
-+ u32 block, u16 page, u16 page_count);
-+void Conv_Spare_Data_Log2Phy_Format(u8 *data);
-+void Conv_Spare_Data_Phy2Log_Format(u8 *data);
-+void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
-+void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);
-+
-+extern void __iomem *FlashReg;
-+extern void __iomem *FlashMem;
-+
-+extern int totalUsedBanks;
-+extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
-+
-+#endif /*_LLD_NAND_*/
-+
-+
-+
-diff --git a/drivers/block/spectra/nand_regs.h b/drivers/block/spectra/nand_regs.h
-new file mode 100644
-index 0000000..e192e4a
---- /dev/null
-+++ b/drivers/block/spectra/nand_regs.h
-@@ -0,0 +1,619 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
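-+ /* Register offsets and bit-field masks of the NAND flash controller;
-+ * registers are spaced 0x10 apart, with INTR_STATUS0..3 / INTR_EN0..3
-+ * providing per-bank interrupt status and enables. */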
-+#define DEVICE_RESET 0x0
-+#define DEVICE_RESET__BANK0 0x0001
-+#define DEVICE_RESET__BANK1 0x0002
-+#define DEVICE_RESET__BANK2 0x0004
-+#define DEVICE_RESET__BANK3 0x0008
-+
-+#define TRANSFER_SPARE_REG 0x10
-+#define TRANSFER_SPARE_REG__FLAG 0x0001
-+
-+#define LOAD_WAIT_CNT 0x20
-+#define LOAD_WAIT_CNT__VALUE 0xffff
-+
-+#define PROGRAM_WAIT_CNT 0x30
-+#define PROGRAM_WAIT_CNT__VALUE 0xffff
-+
-+#define ERASE_WAIT_CNT 0x40
-+#define ERASE_WAIT_CNT__VALUE 0xffff
-+
-+#define INT_MON_CYCCNT 0x50
-+#define INT_MON_CYCCNT__VALUE 0xffff
-+
-+#define RB_PIN_ENABLED 0x60
-+#define RB_PIN_ENABLED__BANK0 0x0001
-+#define RB_PIN_ENABLED__BANK1 0x0002
-+#define RB_PIN_ENABLED__BANK2 0x0004
-+#define RB_PIN_ENABLED__BANK3 0x0008
-+
-+#define MULTIPLANE_OPERATION 0x70
-+#define MULTIPLANE_OPERATION__FLAG 0x0001
-+
-+#define MULTIPLANE_READ_ENABLE 0x80
-+#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
-+
-+#define COPYBACK_DISABLE 0x90
-+#define COPYBACK_DISABLE__FLAG 0x0001
-+
-+#define CACHE_WRITE_ENABLE 0xa0
-+#define CACHE_WRITE_ENABLE__FLAG 0x0001
-+
-+#define CACHE_READ_ENABLE 0xb0
-+#define CACHE_READ_ENABLE__FLAG 0x0001
-+
-+#define PREFETCH_MODE 0xc0
-+#define PREFETCH_MODE__PREFETCH_EN 0x0001
-+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
-+
-+#define CHIP_ENABLE_DONT_CARE 0xd0
-+#define CHIP_EN_DONT_CARE__FLAG 0x01
-+
-+#define ECC_ENABLE 0xe0
-+#define ECC_ENABLE__FLAG 0x0001
-+
-+#define GLOBAL_INT_ENABLE 0xf0
-+#define GLOBAL_INT_EN_FLAG 0x01
-+
-+#define WE_2_RE 0x100
-+#define WE_2_RE__VALUE 0x003f
-+
-+#define ADDR_2_DATA 0x110
-+#define ADDR_2_DATA__VALUE 0x003f
-+
-+#define RE_2_WE 0x120
-+#define RE_2_WE__VALUE 0x003f
-+
-+#define ACC_CLKS 0x130
-+#define ACC_CLKS__VALUE 0x000f
-+
-+#define NUMBER_OF_PLANES 0x140
-+#define NUMBER_OF_PLANES__VALUE 0x0007
-+
-+#define PAGES_PER_BLOCK 0x150
-+#define PAGES_PER_BLOCK__VALUE 0xffff
-+
-+#define DEVICE_WIDTH 0x160
-+#define DEVICE_WIDTH__VALUE 0x0003
-+
-+#define DEVICE_MAIN_AREA_SIZE 0x170
-+#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
-+
-+#define DEVICE_SPARE_AREA_SIZE 0x180
-+#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
-+
-+#define TWO_ROW_ADDR_CYCLES 0x190
-+#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
-+
-+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
-+#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
-+
-+#define ECC_CORRECTION 0x1b0
-+#define ECC_CORRECTION__VALUE 0x001f
-+
-+#define READ_MODE 0x1c0
-+#define READ_MODE__VALUE 0x000f
-+
-+#define WRITE_MODE 0x1d0
-+#define WRITE_MODE__VALUE 0x000f
-+
-+#define COPYBACK_MODE 0x1e0
-+#define COPYBACK_MODE__VALUE 0x000f
-+
-+#define RDWR_EN_LO_CNT 0x1f0
-+#define RDWR_EN_LO_CNT__VALUE 0x001f
-+
-+#define RDWR_EN_HI_CNT 0x200
-+#define RDWR_EN_HI_CNT__VALUE 0x001f
-+
-+#define MAX_RD_DELAY 0x210
-+#define MAX_RD_DELAY__VALUE 0x000f
-+
-+#define CS_SETUP_CNT 0x220
-+#define CS_SETUP_CNT__VALUE 0x001f
-+
-+#define SPARE_AREA_SKIP_BYTES 0x230
-+#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
-+
-+#define SPARE_AREA_MARKER 0x240
-+#define SPARE_AREA_MARKER__VALUE 0xffff
-+
-+#define DEVICES_CONNECTED 0x250
-+#define DEVICES_CONNECTED__VALUE 0x0007
-+
-+#define DIE_MASK 0x260
-+#define DIE_MASK__VALUE 0x00ff
-+
-+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
-+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
-+
-+#define WRITE_PROTECT 0x280
-+#define WRITE_PROTECT__FLAG 0x0001
-+
-+#define RE_2_RE 0x290
-+#define RE_2_RE__VALUE 0x003f
-+
-+#define MANUFACTURER_ID 0x300
-+#define MANUFACTURER_ID__VALUE 0x00ff
-+
-+#define DEVICE_ID 0x310
-+#define DEVICE_ID__VALUE 0x00ff
-+
-+#define DEVICE_PARAM_0 0x320
-+#define DEVICE_PARAM_0__VALUE 0x00ff
-+
-+#define DEVICE_PARAM_1 0x330
-+#define DEVICE_PARAM_1__VALUE 0x00ff
-+
-+#define DEVICE_PARAM_2 0x340
-+#define DEVICE_PARAM_2__VALUE 0x00ff
-+
-+#define LOGICAL_PAGE_DATA_SIZE 0x350
-+#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
-+
-+#define LOGICAL_PAGE_SPARE_SIZE 0x360
-+#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
-+
-+#define REVISION 0x370
-+#define REVISION__VALUE 0xffff
-+
-+#define ONFI_DEVICE_FEATURES 0x380
-+#define ONFI_DEVICE_FEATURES__VALUE 0x003f
-+
-+#define ONFI_OPTIONAL_COMMANDS 0x390
-+#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
-+
-+#define ONFI_TIMING_MODE 0x3a0
-+#define ONFI_TIMING_MODE__VALUE 0x003f
-+
-+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
-+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
-+
-+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
-+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
-+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
-+
-+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
-+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
-+
-+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
-+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
-+
-+#define FEATURES 0x3f0
-+#define FEATURES__N_BANKS 0x0003
-+#define FEATURES__ECC_MAX_ERR 0x003c
-+#define FEATURES__DMA 0x0040
-+#define FEATURES__CMD_DMA 0x0080
-+#define FEATURES__PARTITION 0x0100
-+#define FEATURES__XDMA_SIDEBAND 0x0200
-+#define FEATURES__GPREG 0x0400
-+#define FEATURES__INDEX_ADDR 0x0800
-+
-+#define TRANSFER_MODE 0x400
-+#define TRANSFER_MODE__VALUE 0x0003
-+
-+#define INTR_STATUS0 0x410
-+#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
-+#define INTR_STATUS0__ECC_ERR 0x0002
-+#define INTR_STATUS0__DMA_CMD_COMP 0x0004
-+#define INTR_STATUS0__TIME_OUT 0x0008
-+#define INTR_STATUS0__PROGRAM_FAIL 0x0010
-+#define INTR_STATUS0__ERASE_FAIL 0x0020
-+#define INTR_STATUS0__LOAD_COMP 0x0040
-+#define INTR_STATUS0__PROGRAM_COMP 0x0080
-+#define INTR_STATUS0__ERASE_COMP 0x0100
-+#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
-+#define INTR_STATUS0__LOCKED_BLK 0x0400
-+#define INTR_STATUS0__UNSUP_CMD 0x0800
-+#define INTR_STATUS0__INT_ACT 0x1000
-+#define INTR_STATUS0__RST_COMP 0x2000
-+#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
-+#define INTR_STATUS0__PAGE_XFER_INC 0x8000
-+
-+#define INTR_EN0 0x420
-+#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
-+#define INTR_EN0__ECC_ERR 0x0002
-+#define INTR_EN0__DMA_CMD_COMP 0x0004
-+#define INTR_EN0__TIME_OUT 0x0008
-+#define INTR_EN0__PROGRAM_FAIL 0x0010
-+#define INTR_EN0__ERASE_FAIL 0x0020
-+#define INTR_EN0__LOAD_COMP 0x0040
-+#define INTR_EN0__PROGRAM_COMP 0x0080
-+#define INTR_EN0__ERASE_COMP 0x0100
-+#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
-+#define INTR_EN0__LOCKED_BLK 0x0400
-+#define INTR_EN0__UNSUP_CMD 0x0800
-+#define INTR_EN0__INT_ACT 0x1000
-+#define INTR_EN0__RST_COMP 0x2000
-+#define INTR_EN0__PIPE_CMD_ERR 0x4000
-+#define INTR_EN0__PAGE_XFER_INC 0x8000
-+
-+#define PAGE_CNT0 0x430
-+#define PAGE_CNT0__VALUE 0x00ff
-+
-+#define ERR_PAGE_ADDR0 0x440
-+#define ERR_PAGE_ADDR0__VALUE 0xffff
-+
-+#define ERR_BLOCK_ADDR0 0x450
-+#define ERR_BLOCK_ADDR0__VALUE 0xffff
-+
-+#define INTR_STATUS1 0x460
-+#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
-+#define INTR_STATUS1__ECC_ERR 0x0002
-+#define INTR_STATUS1__DMA_CMD_COMP 0x0004
-+#define INTR_STATUS1__TIME_OUT 0x0008
-+#define INTR_STATUS1__PROGRAM_FAIL 0x0010
-+#define INTR_STATUS1__ERASE_FAIL 0x0020
-+#define INTR_STATUS1__LOAD_COMP 0x0040
-+#define INTR_STATUS1__PROGRAM_COMP 0x0080
-+#define INTR_STATUS1__ERASE_COMP 0x0100
-+#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
-+#define INTR_STATUS1__LOCKED_BLK 0x0400
-+#define INTR_STATUS1__UNSUP_CMD 0x0800
-+#define INTR_STATUS1__INT_ACT 0x1000
-+#define INTR_STATUS1__RST_COMP 0x2000
-+#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
-+#define INTR_STATUS1__PAGE_XFER_INC 0x8000
-+
-+#define INTR_EN1 0x470
-+#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
-+#define INTR_EN1__ECC_ERR 0x0002
-+#define INTR_EN1__DMA_CMD_COMP 0x0004
-+#define INTR_EN1__TIME_OUT 0x0008
-+#define INTR_EN1__PROGRAM_FAIL 0x0010
-+#define INTR_EN1__ERASE_FAIL 0x0020
-+#define INTR_EN1__LOAD_COMP 0x0040
-+#define INTR_EN1__PROGRAM_COMP 0x0080
-+#define INTR_EN1__ERASE_COMP 0x0100
-+#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
-+#define INTR_EN1__LOCKED_BLK 0x0400
-+#define INTR_EN1__UNSUP_CMD 0x0800
-+#define INTR_EN1__INT_ACT 0x1000
-+#define INTR_EN1__RST_COMP 0x2000
-+#define INTR_EN1__PIPE_CMD_ERR 0x4000
-+#define INTR_EN1__PAGE_XFER_INC 0x8000
-+
-+#define PAGE_CNT1 0x480
-+#define PAGE_CNT1__VALUE 0x00ff
-+
-+#define ERR_PAGE_ADDR1 0x490
-+#define ERR_PAGE_ADDR1__VALUE 0xffff
-+
-+#define ERR_BLOCK_ADDR1 0x4a0
-+#define ERR_BLOCK_ADDR1__VALUE 0xffff
-+
-+#define INTR_STATUS2 0x4b0
-+#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
-+#define INTR_STATUS2__ECC_ERR 0x0002
-+#define INTR_STATUS2__DMA_CMD_COMP 0x0004
-+#define INTR_STATUS2__TIME_OUT 0x0008
-+#define INTR_STATUS2__PROGRAM_FAIL 0x0010
-+#define INTR_STATUS2__ERASE_FAIL 0x0020
-+#define INTR_STATUS2__LOAD_COMP 0x0040
-+#define INTR_STATUS2__PROGRAM_COMP 0x0080
-+#define INTR_STATUS2__ERASE_COMP 0x0100
-+#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
-+#define INTR_STATUS2__LOCKED_BLK 0x0400
-+#define INTR_STATUS2__UNSUP_CMD 0x0800
-+#define INTR_STATUS2__INT_ACT 0x1000
-+#define INTR_STATUS2__RST_COMP 0x2000
-+#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
-+#define INTR_STATUS2__PAGE_XFER_INC 0x8000
-+
-+#define INTR_EN2 0x4c0
-+#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
-+#define INTR_EN2__ECC_ERR 0x0002
-+#define INTR_EN2__DMA_CMD_COMP 0x0004
-+#define INTR_EN2__TIME_OUT 0x0008
-+#define INTR_EN2__PROGRAM_FAIL 0x0010
-+#define INTR_EN2__ERASE_FAIL 0x0020
-+#define INTR_EN2__LOAD_COMP 0x0040
-+#define INTR_EN2__PROGRAM_COMP 0x0080
-+#define INTR_EN2__ERASE_COMP 0x0100
-+#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
-+#define INTR_EN2__LOCKED_BLK 0x0400
-+#define INTR_EN2__UNSUP_CMD 0x0800
-+#define INTR_EN2__INT_ACT 0x1000
-+#define INTR_EN2__RST_COMP 0x2000
-+#define INTR_EN2__PIPE_CMD_ERR 0x4000
-+#define INTR_EN2__PAGE_XFER_INC 0x8000
-+
-+#define PAGE_CNT2 0x4d0
-+#define PAGE_CNT2__VALUE 0x00ff
-+
-+#define ERR_PAGE_ADDR2 0x4e0
-+#define ERR_PAGE_ADDR2__VALUE 0xffff
-+
-+#define ERR_BLOCK_ADDR2 0x4f0
-+#define ERR_BLOCK_ADDR2__VALUE 0xffff
-+
-+#define INTR_STATUS3 0x500
-+#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
-+#define INTR_STATUS3__ECC_ERR 0x0002
-+#define INTR_STATUS3__DMA_CMD_COMP 0x0004
-+#define INTR_STATUS3__TIME_OUT 0x0008
-+#define INTR_STATUS3__PROGRAM_FAIL 0x0010
-+#define INTR_STATUS3__ERASE_FAIL 0x0020
-+#define INTR_STATUS3__LOAD_COMP 0x0040
-+#define INTR_STATUS3__PROGRAM_COMP 0x0080
-+#define INTR_STATUS3__ERASE_COMP 0x0100
-+#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
-+#define INTR_STATUS3__LOCKED_BLK 0x0400
-+#define INTR_STATUS3__UNSUP_CMD 0x0800
-+#define INTR_STATUS3__INT_ACT 0x1000
-+#define INTR_STATUS3__RST_COMP 0x2000
-+#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
-+#define INTR_STATUS3__PAGE_XFER_INC 0x8000
-+
-+#define INTR_EN3 0x510
-+#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
-+#define INTR_EN3__ECC_ERR 0x0002
-+#define INTR_EN3__DMA_CMD_COMP 0x0004
-+#define INTR_EN3__TIME_OUT 0x0008
-+#define INTR_EN3__PROGRAM_FAIL 0x0010
-+#define INTR_EN3__ERASE_FAIL 0x0020
-+#define INTR_EN3__LOAD_COMP 0x0040
-+#define INTR_EN3__PROGRAM_COMP 0x0080
-+#define INTR_EN3__ERASE_COMP 0x0100
-+#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
-+#define INTR_EN3__LOCKED_BLK 0x0400
-+#define INTR_EN3__UNSUP_CMD 0x0800
-+#define INTR_EN3__INT_ACT 0x1000
-+#define INTR_EN3__RST_COMP 0x2000
-+#define INTR_EN3__PIPE_CMD_ERR 0x4000
-+#define INTR_EN3__PAGE_XFER_INC 0x8000
-+
-+#define PAGE_CNT3 0x520
-+#define PAGE_CNT3__VALUE 0x00ff
-+
-+#define ERR_PAGE_ADDR3 0x530
-+#define ERR_PAGE_ADDR3__VALUE 0xffff
-+
-+#define ERR_BLOCK_ADDR3 0x540
-+#define ERR_BLOCK_ADDR3__VALUE 0xffff
-+
-+#define DATA_INTR 0x550
-+#define DATA_INTR__WRITE_SPACE_AV 0x0001
-+#define DATA_INTR__READ_DATA_AV 0x0002
-+
-+#define DATA_INTR_EN 0x560
-+#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
-+#define DATA_INTR_EN__READ_DATA_AV 0x0002
-+
-+#define GPREG_0 0x570
-+#define GPREG_0__VALUE 0xffff
-+
-+#define GPREG_1 0x580
-+#define GPREG_1__VALUE 0xffff
-+
-+#define GPREG_2 0x590
-+#define GPREG_2__VALUE 0xffff
-+
-+#define GPREG_3 0x5a0
-+#define GPREG_3__VALUE 0xffff
-+
-+#define ECC_THRESHOLD 0x600
-+#define ECC_THRESHOLD__VALUE 0x03ff
-+
-+#define ECC_ERROR_BLOCK_ADDRESS 0x610
-+#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
-+
-+#define ECC_ERROR_PAGE_ADDRESS 0x620
-+#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
-+#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
-+
-+#define ECC_ERROR_ADDRESS 0x630
-+#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
-+#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
-+
-+#define ERR_CORRECTION_INFO 0x640
-+#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
-+#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
-+#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
-+#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
-+
-+#define DMA_ENABLE 0x700
-+#define DMA_ENABLE__FLAG 0x0001
-+
-+#define IGNORE_ECC_DONE 0x710
-+#define IGNORE_ECC_DONE__FLAG 0x0001
-+
-+#define DMA_INTR 0x720
-+#define DMA_INTR__TARGET_ERROR 0x0001
-+#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
-+#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
-+#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
-+#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
-+#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
-+
-+#define DMA_INTR_EN 0x730
-+#define DMA_INTR_EN__TARGET_ERROR 0x0001
-+#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
-+#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
-+#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
-+#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
-+#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
-+
-+#define TARGET_ERR_ADDR_LO 0x740
-+#define TARGET_ERR_ADDR_LO__VALUE 0xffff
-+
-+#define TARGET_ERR_ADDR_HI 0x750
-+#define TARGET_ERR_ADDR_HI__VALUE 0xffff
-+
-+#define CHNL_ACTIVE 0x760
-+#define CHNL_ACTIVE__CHANNEL0 0x0001
-+#define CHNL_ACTIVE__CHANNEL1 0x0002
-+#define CHNL_ACTIVE__CHANNEL2 0x0004
-+#define CHNL_ACTIVE__CHANNEL3 0x0008
-+
-+#define ACTIVE_SRC_ID 0x800
-+#define ACTIVE_SRC_ID__VALUE 0x00ff
-+
-+#define PTN_INTR 0x810
-+#define PTN_INTR__CONFIG_ERROR 0x0001
-+#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
-+#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
-+#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
-+#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
-+#define PTN_INTR__REG_ACCESS_ERROR 0x0020
-+
-+#define PTN_INTR_EN 0x820
-+#define PTN_INTR_EN__CONFIG_ERROR 0x0001
-+#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
-+#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
-+#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
-+#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
-+#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
-+
-+#define PERM_SRC_ID_0 0x830
-+#define PERM_SRC_ID_0__SRCID 0x00ff
-+#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
-+#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
-+#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
-+#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
-+
-+#define MIN_BLK_ADDR_0 0x840
-+#define MIN_BLK_ADDR_0__VALUE 0xffff
-+
-+#define MAX_BLK_ADDR_0 0x850
-+#define MAX_BLK_ADDR_0__VALUE 0xffff
-+
-+#define MIN_MAX_BANK_0 0x860
-+#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
-+#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
-+
-+#define PERM_SRC_ID_1 0x870
-+#define PERM_SRC_ID_1__SRCID 0x00ff
-+#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
-+#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
-+#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
-+#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
-+
-+#define MIN_BLK_ADDR_1 0x880
-+#define MIN_BLK_ADDR_1__VALUE 0xffff
-+
-+#define MAX_BLK_ADDR_1 0x890
-+#define MAX_BLK_ADDR_1__VALUE 0xffff
-+
-+#define MIN_MAX_BANK_1 0x8a0
-+#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
-+#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
-+
-+#define PERM_SRC_ID_2 0x8b0
-+#define PERM_SRC_ID_2__SRCID 0x00ff
-+#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
-+#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
-+#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
-+#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
-+
-+#define MIN_BLK_ADDR_2 0x8c0
-+#define MIN_BLK_ADDR_2__VALUE 0xffff
-+
-+#define MAX_BLK_ADDR_2 0x8d0
-+#define MAX_BLK_ADDR_2__VALUE 0xffff
-+
-+#define MIN_MAX_BANK_2 0x8e0
-+#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
-+#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
-+
-+#define PERM_SRC_ID_3 0x8f0
-+#define PERM_SRC_ID_3__SRCID 0x00ff
-+#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
-+#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
-+#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
-+#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
-+
-+#define MIN_BLK_ADDR_3 0x900
-+#define MIN_BLK_ADDR_3__VALUE 0xffff
-+
-+#define MAX_BLK_ADDR_3 0x910
-+#define MAX_BLK_ADDR_3__VALUE 0xffff
-+
-+#define MIN_MAX_BANK_3 0x920
-+#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
-+#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
-+
-+#define PERM_SRC_ID_4 0x930
-+#define PERM_SRC_ID_4__SRCID 0x00ff
-+#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
-+#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
-+#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
-+#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
-+
-+#define MIN_BLK_ADDR_4 0x940
-+#define MIN_BLK_ADDR_4__VALUE 0xffff
-+
-+#define MAX_BLK_ADDR_4 0x950
-+#define MAX_BLK_ADDR_4__VALUE 0xffff
-+
-+#define MIN_MAX_BANK_4 0x960
-+#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
-+#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
-+
-+#define PERM_SRC_ID_5 0x970
-+#define PERM_SRC_ID_5__SRCID 0x00ff
-+#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
-+#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
-+#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
-+#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
-+
-+#define MIN_BLK_ADDR_5 0x980
-+#define MIN_BLK_ADDR_5__VALUE 0xffff
-+
-+#define MAX_BLK_ADDR_5 0x990
-+#define MAX_BLK_ADDR_5__VALUE 0xffff
-+
-+#define MIN_MAX_BANK_5 0x9a0
-+#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
-+#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
-+
-+#define PERM_SRC_ID_6 0x9b0
-+#define PERM_SRC_ID_6__SRCID 0x00ff
-+#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
-+#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
-+#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
-+#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
-+
-+#define MIN_BLK_ADDR_6 0x9c0
-+#define MIN_BLK_ADDR_6__VALUE 0xffff
-+
-+#define MAX_BLK_ADDR_6 0x9d0
-+#define MAX_BLK_ADDR_6__VALUE 0xffff
-+
-+#define MIN_MAX_BANK_6 0x9e0
-+#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
-+#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
-+
-+#define PERM_SRC_ID_7 0x9f0
-+#define PERM_SRC_ID_7__SRCID 0x00ff
-+#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
-+#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
-+#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
-+#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
-+
-+#define MIN_BLK_ADDR_7 0xa00
-+#define MIN_BLK_ADDR_7__VALUE 0xffff
-+
-+#define MAX_BLK_ADDR_7 0xa10
-+#define MAX_BLK_ADDR_7__VALUE 0xffff
-+
-+#define MIN_MAX_BANK_7 0xa20
-+#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
-+#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
-diff --git a/drivers/block/spectra/spectraswconfig.h b/drivers/block/spectra/spectraswconfig.h
-new file mode 100644
-index 0000000..b630f06
---- /dev/null
-+++ b/drivers/block/spectra/spectraswconfig.h
-@@ -0,0 +1,81 @@
-+/*
-+ * NAND Flash Controller Device Driver
-+ * Copyright (c) 2009, Intel Corporation and its suppliers.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ */
-+
-+#ifndef _SPECTRASWCONFIG_
-+#define _SPECTRASWCONFIG_
-+
-+/* NAND driver version */
-+#define GLOB_VERSION "driver version 20100311"
-+
-+
-+/***** Common Parameters *****/
-+#define RETRY_TIMES 3
-+
-+#define READ_BADBLOCK_INFO 1
-+#define READBACK_VERIFY 0
-+#define AUTO_FORMAT_FLASH 0
-+
-+/***** Cache Parameters *****/
-+#define CACHE_ITEM_NUM 128
-+#define BLK_NUM_FOR_L2_CACHE 16
-+
-+/***** Block Table Parameters *****/
-+#define BLOCK_TABLE_INDEX 0
-+
-+/***** Wear Leveling Parameters *****/
-+#define WEAR_LEVELING_GATE 0x10
-+#define WEAR_LEVELING_BLOCK_NUM 10
-+
-+#define DEBUG_BNDRY 0
-+
-+/***** Product Feature Support *****/
-+#define FLASH_EMU defined(CONFIG_MRST_NAND_EMU)
-+#define FLASH_NAND defined(CONFIG_MRST_NAND_HW)
-+#define CMD_DMA 0
-+
-+#define SPECTRA_PARTITION_ID 0
-+
-+/* Enable this macro if the number of flash blocks is larger than 16K. */
-+#define SUPPORT_LARGE_BLOCKNUM 1
-+
-+/**** Block Table and Reserved Block Parameters *****/
-+#define SPECTRA_START_BLOCK 3
-+//#define NUM_FREE_BLOCKS_GATE 30
-+#define NUM_FREE_BLOCKS_GATE 60
-+
-+/**** Hardware Parameters ****/
-+#define GLOB_HWCTL_REG_BASE 0xFFA40000
-+#define GLOB_HWCTL_REG_SIZE 4096
-+
-+#define GLOB_HWCTL_MEM_BASE 0xFFA48000
-+#define GLOB_HWCTL_MEM_SIZE 4096
-+
-+/* KBV - Updated to LNW scratch register address */
-+#define SCRATCH_REG_ADDR 0xFF108018
-+#define SCRATCH_REG_SIZE 64
-+
-+#define GLOB_HWCTL_DEFAULT_BLKS 2048
-+
-+#define SUPPORT_15BITECC 1
-+#define SUPPORT_8BITECC 1
-+
-+#define ONFI_BLOOM_TIME 0
-+#define MODE5_WORKAROUND 1
-+
-+#endif /*_SPECTRASWCONFIG_*/
---
-1.6.0.6
-
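
For readers skimming the register block above: the REGISTER__FIELD macros are plain bitmasks over 16-bit MMIO registers, and the driver combines them with ordinary reads of the mapped GLOB_HWCTL_REG_BASE window. The fragment below is a minimal, hypothetical sketch — not part of the deleted patch — showing how the ECC error-report fields would typically be decoded, assuming the fields mean what their names suggest; the function name sketch_dump_ecc_report and the ctrl_base parameter (an ioremap()ed pointer to the register window) are illustrative only, not identifiers from the driver.

/*
 * Hypothetical sketch: walk the controller's ECC error reports using the
 * ECC_ERROR_ADDRESS and ERR_CORRECTION_INFO mask macros defined above.
 * "ctrl_base" is assumed to be the ioremap()ed GLOB_HWCTL_REG_BASE window.
 */
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

static void sketch_dump_ecc_report(void __iomem *ctrl_base)
{
	u32 addr, info;

	do {
		addr = readl(ctrl_base + ECC_ERROR_ADDRESS);
		info = readl(ctrl_base + ERR_CORRECTION_INFO);

		/* shift amounts follow the mask positions (0x0f00 -> 8, 0xf000 -> 12) */
		pr_info("ECC: sector %u offset %u device %u mask 0x%02x (%s)\n",
			(addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12,
			addr & ECC_ERROR_ADDRESS__OFFSET,
			(info & ERR_CORRECTION_INFO__DEVICE_NR) >> 8,
			info & ERR_CORRECTION_INFO__BYTEMASK,
			(info & ERR_CORRECTION_INFO__ERROR_TYPE) ?
				"uncorrectable" : "corrected");

	/* the controller flags the final report of a burst with LAST_ERR_INFO */
	} while (!(info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
}

The same mask-and-shift pattern applies to the partition window registers (PERM_SRC_ID_n, MIN_BLK_ADDR_n, MAX_BLK_ADDR_n, MIN_MAX_BANK_n): each field is isolated with its __ macro and, where it is not bit 0, shifted down by the position of the mask's lowest set bit.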