Flashing a File System to 1G NAND in U-Boot

Hi,

I use the following commands to flash my file system from U-Boot:

mw.b 0x82000000 0xFF 0x3000000

tftp 0x82000000 ubi.img

nand erase 0x780000 0xF880000

nandecc hw 2

nand write 0x82000000 0x780000 0x3000000
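
(For reference, the same sequence can also be kept in a single U-Boot environment script so it is easy to rerun; this is only a sketch that reuses the load address, offsets, length and image name from the commands above. The single quotes assume the hush shell parser; with the older simple parser the semicolons would need to be escaped as \; instead.)

setenv flash_ubi 'mw.b 0x82000000 0xFF 0x3000000; tftp 0x82000000 ubi.img; nand erase 0x780000 0xF880000; nandecc hw 2; nand write 0x82000000 0x780000 0x3000000'

saveenv

run flash_ubi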

On the NAND boot right after flashing the file system, the kernel boots successfully. Once I reboot/reset the AM335x device, the kernel can't boot successfully and ends up with these errors:

[    4.091952] UBI error: ubi_io_read: error -74 (ECC error) while reading 126976 bytes from PEB 3:4096, read 126976 bytes
[    4.234131] UBI error: ubi_io_read: error -74 (ECC error) while reading 126976 bytes from PEB 4:4096, read 126976 bytes
[    4.249068] UBI error: ubi_io_read: error -74 (ECC error) while reading 188 bytes from PEB 383:8960, read 188 bytes
[    4.262689] UBI error: ubi_io_read: error -74 (ECC error) while reading 11 bytes from PEB 10:8192, read 11 bytes
[    4.273429] UBIFS error (pid 1): ubifs_leb_read: reading 11 bytes from LEB 8:4096 failed, error -74

How can I fix this problem?

Am I in the correct forum?

Thanks and Regards

Keldy

 

  • Hi Keldy,

    I am facing the same problem.

    Have you fixed it?

  • Keldy, John,

    I have fixed this issue on other platforms. Could you please share more details? If possible, could you please send the files arch/arm/mach-omap2/board-flash.c, gpmc.c, drivers/mtd/nand/omap2.c and nand_base.c to my email: renjith.thomas@pathpartnertech.com?

  • /*
     * board-flash.c
     * Modified from mach-omap2/board-3430sdp-flash.c
     *
     * Copyright (C) 2009 Nokia Corporation
     * Copyright (C) 2009 Texas Instruments
     *
     * Vimal Singh <vimalsingh@ti.com>
     *
     * This program is free software; you can redistribute it and/or modify
     * it under the terms of the GNU General Public License version 2 as
     * published by the Free Software Foundation.
     */
    
    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/mtd/physmap.h>
    #include <linux/io.h>
    #include <plat/irqs.h>
    
    #include <plat/gpmc.h>
    #include <plat/nand.h>
    #include <plat/onenand.h>
    #include <plat/tc.h>
    
    #include "board-flash.h"
    
    #define REG_FPGA_REV			0x10
    #define REG_FPGA_DIP_SWITCH_INPUT2	0x60
    #define MAX_SUPPORTED_GPMC_CONFIG	3
    
    #define DEBUG_BASE		0x08000000 /* debug board */
    
    /* various memory sizes */
    #define FLASH_SIZE_SDPV1	SZ_64M	/* NOR flash (64 Meg aligned) */
    #define FLASH_SIZE_SDPV2	SZ_128M	/* NOR flash (256 Meg aligned) */
    
    static struct physmap_flash_data board_nor_data = {
    	.width		= 2,
    };
    
    static struct resource board_nor_resource = {
    	.flags		= IORESOURCE_MEM,
    };
    
    static struct platform_device board_nor_device = {
    	.name		= "physmap-flash",
    	.id		= 0,
    	.dev		= {
    			.platform_data = &board_nor_data,
    	},
    	.num_resources	= 1,
    	.resource	= &board_nor_resource,
    };
    
    static void
    __init board_nor_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
    {
    	int err;
    
    	board_nor_data.parts	= nor_parts;
    	board_nor_data.nr_parts	= nr_parts;
    
    	/* Configure start address and size of NOR device */
    	if (omap_rev() >= OMAP3430_REV_ES1_0) {
    		err = gpmc_cs_request(cs, FLASH_SIZE_SDPV2 - 1,
    				(unsigned long *)&board_nor_resource.start);
    		board_nor_resource.end = board_nor_resource.start
    					+ FLASH_SIZE_SDPV2 - 1;
    	} else {
    		err = gpmc_cs_request(cs, FLASH_SIZE_SDPV1 - 1,
    				(unsigned long *)&board_nor_resource.start);
    		board_nor_resource.end = board_nor_resource.start
    					+ FLASH_SIZE_SDPV1 - 1;
    	}
    	if (err < 0) {
    		pr_err("NOR: Can't request GPMC CS\n");
    		return;
    	}
    	if (platform_device_register(&board_nor_device) < 0)
    		pr_err("Unable to register NOR device\n");
    }
    
    #if defined(CONFIG_MTD_ONENAND_OMAP2) || \
    		defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
    static struct omap_onenand_platform_data board_onenand_data = {
    	.dma_channel	= -1,   /* disable DMA in OMAP OneNAND driver */
    };
    
    static void
    __init board_onenand_init(struct mtd_partition *onenand_parts,
    				u8 nr_parts, u8 cs)
    {
    	board_onenand_data.cs		= cs;
    	board_onenand_data.parts	= onenand_parts;
    	board_onenand_data.nr_parts	= nr_parts;
    
    	gpmc_onenand_init(&board_onenand_data);
    }
    #else
    static void
    __init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
    {
    }
    #endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
    
    #if defined(CONFIG_MTD_NAND_OMAP2) || \
    		defined(CONFIG_MTD_NAND_OMAP2_MODULE)
    
    /* Note that all values in this struct are in nanoseconds */
    static struct gpmc_timings nand_timings = {
    
    	.sync_clk = 0,
    
    	.cs_on = 0,
    	.cs_rd_off = 36,
    	.cs_wr_off = 36,
    
    	.adv_on = 6,
    	.adv_rd_off = 24,
    	.adv_wr_off = 36,
    
    	.we_off = 30,
    	.oe_off = 48,
    
    	.access = 54,
    	.rd_cycle = 72,
    	.wr_cycle = 72,
    
    	.wr_access = 30,
    	.wr_data_mux_bus = 0,
    };
    
    static struct omap_nand_platform_data board_nand_data = {
    	.gpmc_t		= &nand_timings,
    };
    
    void
    __init board_nand_init(struct mtd_partition *nand_parts,
    			u8 nr_parts, u8 cs, int nand_type)
    {
    	board_nand_data.cs		= cs;
    	board_nand_data.parts		= nand_parts;
    	board_nand_data.nr_parts	= nr_parts;
    	board_nand_data.devsize		= nand_type;
    
    	board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT;
    	board_nand_data.gpmc_irq = OMAP_GPMC_IRQ_BASE + cs;
    
    	if (cpu_is_am335x()) {
    		board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_HW;
    		board_nand_data.xfer_type = NAND_OMAP_PREFETCH_POLLED;
    	}
    
    	gpmc_nand_init(&board_nand_data);
    }
    #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
    
    /**
     * get_gpmc0_type - Reads the FPGA DIP_SWITCH_INPUT_REGISTER2 to get
     * the various cs values.
     */
    static u8 get_gpmc0_type(void)
    {
    	u8 cs = 0;
    	void __iomem *fpga_map_addr;
    
    	fpga_map_addr = ioremap(DEBUG_BASE, 4096);
    	if (!fpga_map_addr)
    		return -ENOMEM;
    
    	if (!(__raw_readw(fpga_map_addr + REG_FPGA_REV)))
    		/* we don't have a DEBUG FPGA??? */
    		/* Depend on #defines!! default to strata boot return param */
    		goto unmap;
    
    	/* S8-DIP-OFF = 1, S8-DIP-ON = 0 */
    	cs = __raw_readw(fpga_map_addr + REG_FPGA_DIP_SWITCH_INPUT2) & 0xf;
    
    	/* ES2.0 SDP's onwards 4 dip switches are provided for CS */
    	if (omap_rev() >= OMAP3430_REV_ES1_0)
    		/* change (S8-1:4=DS-2:0) to (S8-4:1=DS-2:0) */
    		cs = ((cs & 8) >> 3) | ((cs & 4) >> 1) |
    			((cs & 2) << 1) | ((cs & 1) << 3);
    	else
    		/* change (S8-1:3=DS-2:0) to (S8-3:1=DS-2:0) */
    		cs = ((cs & 4) >> 2) | (cs & 2) | ((cs & 1) << 2);
    unmap:
    	iounmap(fpga_map_addr);
    	return cs;
    }
    
    /**
     * board_flash_init - Identify devices connected to GPMC and register.
     *
     * @return - void.
     */
    void board_flash_init(struct flash_partitions partition_info[],
    			char chip_sel_board[][GPMC_CS_NUM], int nand_type)
    {
    	u8		cs = 0;
    	u8		norcs = GPMC_CS_NUM + 1;
    	u8		nandcs = GPMC_CS_NUM + 1;
    	u8		onenandcs = GPMC_CS_NUM + 1;
    	u8		idx;
    	unsigned char	*config_sel = NULL;
    
    	/* REVISIT: Is this return correct idx for 2430 SDP?
    	 * for which cs configuration matches for 2430 SDP?
    	 */
    	idx = get_gpmc0_type();
    	if (idx >= MAX_SUPPORTED_GPMC_CONFIG) {
    		pr_err("%s: Invalid chip select: %d\n", __func__, cs);
    		return;
    	}
    	config_sel = (unsigned char *)(chip_sel_board[idx]);
    
    	while (cs < GPMC_CS_NUM) {
    		switch (config_sel[cs]) {
    		case PDC_NOR:
    			if (norcs > GPMC_CS_NUM)
    				norcs = cs;
    			break;
    		case PDC_NAND:
    			if (nandcs > GPMC_CS_NUM)
    				nandcs = cs;
    			break;
    		case PDC_ONENAND:
    			if (onenandcs > GPMC_CS_NUM)
    				onenandcs = cs;
    			break;
    		};
    		cs++;
    	}
    
    	if (norcs > GPMC_CS_NUM)
    		pr_err("NOR: Unable to find configuration in GPMC\n");
    	else
    		board_nor_init(partition_info[0].parts,
    				partition_info[0].nr_parts, norcs);
    
    	if (onenandcs > GPMC_CS_NUM)
    		pr_err("OneNAND: Unable to find configuration in GPMC\n");
    	else
    		board_onenand_init(partition_info[1].parts,
    					partition_info[1].nr_parts, onenandcs);
    
    	if (nandcs > GPMC_CS_NUM)
    		pr_err("NAND: Unable to find configuration in GPMC\n");
    	else
    		board_nand_init(partition_info[2].parts,
    			partition_info[2].nr_parts, nandcs, nand_type);
    }
    
    /*
     * GPMC support functions
     *
     * Copyright (C) 2005-2006 Nokia Corporation
     *
     * Author: Juha Yrjola
     *
     * Copyright (C) 2009 Texas Instruments
     * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
     *
     * This program is free software; you can redistribute it and/or modify
     * it under the terms of the GNU General Public License version 2 as
     * published by the Free Software Foundation.
     */
    #undef DEBUG
    
    #include <linux/irq.h>
    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/err.h>
    #include <linux/clk.h>
    #include <linux/ioport.h>
    #include <linux/spinlock.h>
    #include <linux/io.h>
    #include <linux/module.h>
    #include <linux/interrupt.h>
    
    #include <asm/mach-types.h>
    #include <plat/gpmc.h>
    
    #include <plat/sdrc.h>
    
    /* GPMC register offsets */
    #define GPMC_REVISION		0x00
    #define GPMC_SYSCONFIG		0x10
    #define GPMC_SYSSTATUS		0x14
    #define GPMC_IRQSTATUS		0x18
    #define GPMC_IRQENABLE		0x1c
    #define GPMC_TIMEOUT_CONTROL	0x40
    #define GPMC_ERR_ADDRESS	0x44
    #define GPMC_ERR_TYPE		0x48
    #define GPMC_CONFIG		0x50
    #define GPMC_STATUS		0x54
    #define GPMC_PREFETCH_CONFIG1	0x1e0
    #define GPMC_PREFETCH_CONFIG2	0x1e4
    #define GPMC_PREFETCH_CONTROL	0x1ec
    #define GPMC_PREFETCH_STATUS	0x1f0
    #define GPMC_ECC_CONFIG		0x1f4
    #define GPMC_ECC_CONTROL	0x1f8
    #define GPMC_ECC_SIZE_CONFIG	0x1fc
    #define GPMC_ECC1_RESULT        0x200
    
    #define GPMC_CS0_OFFSET		0x60
    #define GPMC_CS_SIZE		0x30
    
    #define GPMC_MEM_START		0x00000000
    #define GPMC_MEM_END		0x3FFFFFFF
    #define BOOT_ROM_SPACE		0x100000	/* 1MB */
    
    #define GPMC_CHUNK_SHIFT	24		/* 16 MB */
    #define GPMC_SECTION_SHIFT	28		/* 128 MB */
    
    #define CS_NUM_SHIFT		24
    #define ENABLE_PREFETCH		(0x1 << 7)
    #define DMA_MPU_MODE		2
    
    /* Structure to save gpmc cs context */
    struct gpmc_cs_config {
    	u32 config1;
    	u32 config2;
    	u32 config3;
    	u32 config4;
    	u32 config5;
    	u32 config6;
    	u32 config7;
    	int is_valid;
    };
    
    /*
     * Structure to save/restore gpmc context
     * to support core off on OMAP3
     */
    struct omap3_gpmc_regs {
    	u32 sysconfig;
    	u32 irqenable;
    	u32 timeout_ctrl;
    	u32 config;
    	u32 prefetch_config1;
    	u32 prefetch_config2;
    	u32 prefetch_control;
    	struct gpmc_cs_config cs_context[GPMC_CS_NUM];
    };
    
    static struct resource	gpmc_mem_root;
    static struct resource	gpmc_cs_mem[GPMC_CS_NUM];
    static DEFINE_SPINLOCK(gpmc_mem_lock);
    static unsigned int gpmc_cs_map;	/* flag for cs which are initialized */
    static int gpmc_ecc_used = -EINVAL;	/* cs using ecc engine */
    
    static void __iomem *gpmc_base;
    
    static struct clk *gpmc_l3_clk;
    
    static irqreturn_t gpmc_handle_irq(int irq, void *dev);
    
    static void gpmc_write_reg(int idx, u32 val)
    {
    	__raw_writel(val, gpmc_base + idx);
    }
    
    static u32 gpmc_read_reg(int idx)
    {
    	return __raw_readl(gpmc_base + idx);
    }
    
    static void gpmc_cs_write_byte(int cs, int idx, u8 val)
    {
    	void __iomem *reg_addr;
    
    	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
    	__raw_writeb(val, reg_addr);
    }
    
    static u8 gpmc_cs_read_byte(int cs, int idx)
    {
    	void __iomem *reg_addr;
    
    	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
    	return __raw_readb(reg_addr);
    }
    
    void gpmc_cs_write_reg(int cs, int idx, u32 val)
    {
    	void __iomem *reg_addr;
    
    	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
    	__raw_writel(val, reg_addr);
    }
    
    u32 gpmc_cs_read_reg(int cs, int idx)
    {
    	void __iomem *reg_addr;
    
    	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
    	return __raw_readl(reg_addr);
    }
    
    /* TODO: Add support for gpmc_fck to clock framework and use it */
    unsigned long gpmc_get_fclk_period(void)
    {
    	unsigned long rate = clk_get_rate(gpmc_l3_clk);
    
    	if (rate == 0) {
    		printk(KERN_WARNING "gpmc_l3_clk not enabled\n");
    		return 0;
    	}
    
    	rate /= 1000;
    	rate = 1000000000 / rate;	/* In picoseconds */
    
    	return rate;
    }
    
    unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
    {
    	unsigned long tick_ps;
    
    	/* Calculate in picosecs to yield more exact results */
    	tick_ps = gpmc_get_fclk_period();
    
    	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
    }
    
    unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
    {
    	unsigned long tick_ps;
    
    	/* Calculate in picosecs to yield more exact results */
    	tick_ps = gpmc_get_fclk_period();
    
    	return (time_ps + tick_ps - 1) / tick_ps;
    }
    
    unsigned int gpmc_ticks_to_ns(unsigned int ticks)
    {
    	return ticks * gpmc_get_fclk_period() / 1000;
    }
    
    unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
    {
    	unsigned long ticks = gpmc_ns_to_ticks(time_ns);
    
    	return ticks * gpmc_get_fclk_period() / 1000;
    }
    
    #ifdef DEBUG
    static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
    			       int time, const char *name)
    #else
    static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
    			       int time)
    #endif
    {
    	u32 l;
    	int ticks, mask, nr_bits;
    
    	if (time == 0)
    		ticks = 0;
    	else
    		ticks = gpmc_ns_to_ticks(time);
    	nr_bits = end_bit - st_bit + 1;
    	if (ticks >= 1 << nr_bits) {
    #ifdef DEBUG
    		printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
    				cs, name, time, ticks, 1 << nr_bits);
    #endif
    		return -1;
    	}
    
    	mask = (1 << nr_bits) - 1;
    	l = gpmc_cs_read_reg(cs, reg);
    #ifdef DEBUG
    	printk(KERN_INFO
    		"GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
    	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
    			(l >> st_bit) & mask, time);
    #endif
    	l &= ~(mask << st_bit);
    	l |= ticks << st_bit;
    	gpmc_cs_write_reg(cs, reg, l);
    
    	return 0;
    }
    
    #ifdef DEBUG
    #define GPMC_SET_ONE(reg, st, end, field) \
    	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
    			t->field, #field) < 0)			\
    		return -1
    #else
    #define GPMC_SET_ONE(reg, st, end, field) \
    	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
    		return -1
    #endif
    
    int gpmc_cs_calc_divider(int cs, unsigned int sync_clk)
    {
    	int div;
    	u32 l;
    
    	l = sync_clk + (gpmc_get_fclk_period() - 1);
    	div = l / gpmc_get_fclk_period();
    	if (div > 4)
    		return -1;
    	if (div <= 0)
    		div = 1;
    
    	return div;
    }
    
    int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
    {
    	int div;
    	u32 l;
    
    	div = gpmc_cs_calc_divider(cs, t->sync_clk);
    	if (div < 0)
    		return -1;
    
    	GPMC_SET_ONE(GPMC_CS_CONFIG2,  0,  3, cs_on);
    	GPMC_SET_ONE(GPMC_CS_CONFIG2,  8, 12, cs_rd_off);
    	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);
    
    	GPMC_SET_ONE(GPMC_CS_CONFIG3,  0,  3, adv_on);
    	GPMC_SET_ONE(GPMC_CS_CONFIG3,  8, 12, adv_rd_off);
    	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);
    
    	GPMC_SET_ONE(GPMC_CS_CONFIG4,  0,  3, oe_on);
    	GPMC_SET_ONE(GPMC_CS_CONFIG4,  8, 12, oe_off);
    	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
    	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);
    
    	GPMC_SET_ONE(GPMC_CS_CONFIG5,  0,  4, rd_cycle);
    	GPMC_SET_ONE(GPMC_CS_CONFIG5,  8, 12, wr_cycle);
    	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);
    
    	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);
    
    	if (cpu_is_omap34xx()) {
    		GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
    		GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
    	}
    
    	/* caller is expected to have initialized CONFIG1 to cover
    	 * at least sync vs async
    	 */
    	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
    	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
    #ifdef DEBUG
    		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
    				cs, (div * gpmc_get_fclk_period()) / 1000, div);
    #endif
    		l &= ~0x03;
    		l |= (div - 1);
    		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
    	}
    
    	return 0;
    }
    
    static void gpmc_cs_enable_mem(int cs, u32 base, u32 size)
    {
    	u32 l;
    	u32 mask;
    
    	mask = (1 << GPMC_SECTION_SHIFT) - size;
    	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
    	l &= ~0x3f;
    	l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
    	l &= ~(0x0f << 8);
    	l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
    	l |= GPMC_CONFIG7_CSVALID;
    	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
    }
    
    static void gpmc_cs_disable_mem(int cs)
    {
    	u32 l;
    
    	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
    	l &= ~GPMC_CONFIG7_CSVALID;
    	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
    }
    
    static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
    {
    	u32 l;
    	u32 mask;
    
    	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
    	*base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
    	mask = (l >> 8) & 0x0f;
    	*size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
    }
    
    static int gpmc_cs_mem_enabled(int cs)
    {
    	u32 l;
    
    	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
    	return l & GPMC_CONFIG7_CSVALID;
    }
    
    int gpmc_cs_set_reserved(int cs, int reserved)
    {
    	if (cs > GPMC_CS_NUM)
    		return -ENODEV;
    
    	gpmc_cs_map &= ~(1 << cs);
    	gpmc_cs_map |= (reserved ? 1 : 0) << cs;
    
    	return 0;
    }
    
    int gpmc_cs_reserved(int cs)
    {
    	if (cs > GPMC_CS_NUM)
    		return -ENODEV;
    
    	return gpmc_cs_map & (1 << cs);
    }
    
    static unsigned long gpmc_mem_align(unsigned long size)
    {
    	int order;
    
    	size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
    	order = GPMC_CHUNK_SHIFT - 1;
    	do {
    		size >>= 1;
    		order++;
    	} while (size);
    	size = 1 << order;
    	return size;
    }
    
    static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
    {
    	struct resource	*res = &gpmc_cs_mem[cs];
    	int r;
    
    	size = gpmc_mem_align(size);
    	spin_lock(&gpmc_mem_lock);
    	res->start = base;
    	res->end = base + size - 1;
    	r = request_resource(&gpmc_mem_root, res);
    	spin_unlock(&gpmc_mem_lock);
    
    	return r;
    }
    
    int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
    {
    	struct resource *res = &gpmc_cs_mem[cs];
    	int r = -1;
    
    	if (cs > GPMC_CS_NUM)
    		return -ENODEV;
    
    	size = gpmc_mem_align(size);
    	if (size > (1 << GPMC_SECTION_SHIFT))
    		return -ENOMEM;
    
    	spin_lock(&gpmc_mem_lock);
    	if (gpmc_cs_reserved(cs)) {
    		r = -EBUSY;
    		goto out;
    	}
    	if (gpmc_cs_mem_enabled(cs))
    		r = adjust_resource(res, res->start & ~(size - 1), size);
    	if (r < 0)
    		r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
    				      size, NULL, NULL);
    	if (r < 0)
    		goto out;
    
    	gpmc_cs_enable_mem(cs, res->start, resource_size(res));
    	*base = res->start;
    	gpmc_cs_set_reserved(cs, 1);
    out:
    	spin_unlock(&gpmc_mem_lock);
    	return r;
    }
    EXPORT_SYMBOL(gpmc_cs_request);
    
    void gpmc_cs_free(int cs)
    {
    	spin_lock(&gpmc_mem_lock);
    	if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
    		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
    		BUG();
    		spin_unlock(&gpmc_mem_lock);
    		return;
    	}
    	gpmc_cs_disable_mem(cs);
    	release_resource(&gpmc_cs_mem[cs]);
    	gpmc_cs_set_reserved(cs, 0);
    	spin_unlock(&gpmc_mem_lock);
    }
    EXPORT_SYMBOL(gpmc_cs_free);
    
    /**
     * gpmc_read_status - read access request to get the different gpmc status
     * @cmd: command type
     * @return status
     */
    int gpmc_read_status(int cmd)
    {
    	int	status = -EINVAL;
    	u32	regval = 0;
    
    	switch (cmd) {
    	case GPMC_GET_IRQ_STATUS:
    		status = gpmc_read_reg(GPMC_IRQSTATUS);
    		break;
    
    	case GPMC_PREFETCH_FIFO_CNT:
    		regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
    		status = GPMC_PREFETCH_STATUS_FIFO_CNT(regval);
    		break;
    
    	case GPMC_PREFETCH_COUNT:
    		regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
    		status = GPMC_PREFETCH_STATUS_COUNT(regval);
    		break;
    
    	case GPMC_STATUS_BUFFER:
    		regval = gpmc_read_reg(GPMC_STATUS);
    		/* 1 : buffer is available to write */
    		status = regval & GPMC_STATUS_BUFF_EMPTY;
    		break;
    
    	default:
    		printk(KERN_ERR "gpmc_read_status: Not supported\n");
    	}
    	return status;
    }
    EXPORT_SYMBOL(gpmc_read_status);
    
    /**
     * gpmc_cs_configure - write request to configure gpmc
     * @cs: chip select number
     * @cmd: command type
     * @wval: value to write
     * @return status of the operation
     */
    int gpmc_cs_configure(int cs, int cmd, int wval)
    {
    	int err = 0;
    	u32 regval = 0;
    
    	switch (cmd) {
    	case GPMC_ENABLE_IRQ:
    		gpmc_write_reg(GPMC_IRQENABLE, wval);
    		break;
    
    	case GPMC_SET_IRQ_STATUS:
    		gpmc_write_reg(GPMC_IRQSTATUS, wval);
    		break;
    
    	case GPMC_CONFIG_WP:
    		regval = gpmc_read_reg(GPMC_CONFIG);
    		if (wval)
    			regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
    		else
    			regval |= GPMC_CONFIG_WRITEPROTECT;  /* WP is OFF */
    		gpmc_write_reg(GPMC_CONFIG, regval);
    		break;
    
    	case GPMC_CONFIG_RDY_BSY:
    		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
    		if (wval)
    			regval |= WR_RD_PIN_MONITORING;
    		else
    			regval &= ~WR_RD_PIN_MONITORING;
    		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
    		break;
    
    	case GPMC_CONFIG_DEV_SIZE:
    		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
    		regval |= GPMC_CONFIG1_DEVICESIZE(wval);
    		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
    		break;
    
    	case GPMC_CONFIG_DEV_TYPE:
    		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
    		regval |= GPMC_CONFIG1_DEVICETYPE(wval);
    		if (wval == GPMC_DEVICETYPE_NOR)
    			regval |= GPMC_CONFIG1_MUXADDDATA;
    		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
    		break;
    
    	default:
    		printk(KERN_ERR "gpmc_configure_cs: Not supported\n");
    		err = -EINVAL;
    	}
    
    	return err;
    }
    EXPORT_SYMBOL(gpmc_cs_configure);
    
    /**
     * gpmc_nand_read - nand specific read access request
     * @cs: chip select number
     * @cmd: command type
     */
    int gpmc_nand_read(int cs, int cmd)
    {
    	int rval = -EINVAL;
    
    	switch (cmd) {
    	case GPMC_NAND_DATA:
    		rval = gpmc_cs_read_byte(cs, GPMC_CS_NAND_DATA);
    		break;
    
    	default:
    		printk(KERN_ERR "gpmc_read_nand_ctrl: Not supported\n");
    	}
    	return rval;
    }
    EXPORT_SYMBOL(gpmc_nand_read);
    
    /**
     * gpmc_nand_write - nand specific write request
     * @cs: chip select number
     * @cmd: command type
     * @wval: value to write
     */
    int gpmc_nand_write(int cs, int cmd, int wval)
    {
    	int err = 0;
    
    	switch (cmd) {
    	case GPMC_NAND_COMMAND:
    		gpmc_cs_write_byte(cs, GPMC_CS_NAND_COMMAND, wval);
    		break;
    
    	case GPMC_NAND_ADDRESS:
    		gpmc_cs_write_byte(cs, GPMC_CS_NAND_ADDRESS, wval);
    		break;
    
    	case GPMC_NAND_DATA:
    		gpmc_cs_write_byte(cs, GPMC_CS_NAND_DATA, wval);
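    		/* NB: no break here, so GPMC_NAND_DATA falls through to the default error path below */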
    
    	default:
    		printk(KERN_ERR "gpmc_write_nand_ctrl: Not supported\n");
    		err = -EINVAL;
    	}
    	return err;
    }
    EXPORT_SYMBOL(gpmc_nand_write);
    
    
    
    /**
     * gpmc_prefetch_enable - configures and starts prefetch transfer
     * @cs: cs (chip select) number
     * @fifo_th: fifo threshold to be used for read/ write
     * @dma_mode: dma mode enable (1) or disable (0)
     * @u32_count: number of bytes to be transferred
     * @is_write: prefetch read(0) or write post(1) mode
     */
    int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
    				unsigned int u32_count, int is_write)
    {
    
    	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX) {
    		pr_err("gpmc: fifo threshold is not supported\n");
    		return -1;
    	} else if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
    		/* Set the amount of bytes to be prefetched */
    		gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count);
    
    		/* Set dma/mpu mode, the prefetch read / post write and
    		 * enable the engine. Set which cs has requested it.
    		 */
    		gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) |
    					PREFETCH_FIFOTHRESHOLD(fifo_th) |
    					ENABLE_PREFETCH |
    					(dma_mode << DMA_MPU_MODE) |
    					(0x1 & is_write)));
    
    		/*  Start the prefetch engine */
    		gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x1);
    	} else {
    		return -EBUSY;
    	}
    
    	return 0;
    }
    EXPORT_SYMBOL(gpmc_prefetch_enable);
    
    /**
     * gpmc_prefetch_reset - disables and stops the prefetch engine
     */
    int gpmc_prefetch_reset(int cs)
    {
    	u32 config1;
    
    	/* check if the same module/cs is trying to reset */
    	config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
    	if (((config1 >> CS_NUM_SHIFT) & 0x7) != cs)
    		return -EINVAL;
    
    	/* Stop the PFPW engine */
    	gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x0);
    
    	/* Reset/disable the PFPW engine */
    	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, 0x0);
    
    	return 0;
    }
    EXPORT_SYMBOL(gpmc_prefetch_reset);
    
    static void __init gpmc_mem_init(void)
    {
    	int cs;
    	unsigned long boot_rom_space = 0;
    
    	/* never allocate the first page, to facilitate bug detection;
    	 * even if we didn't boot from ROM.
    	 */
    	boot_rom_space = BOOT_ROM_SPACE;
    	/* In apollon the CS0 is mapped as 0x0000 0000 */
    	if (machine_is_omap_apollon())
    		boot_rom_space = 0;
    	gpmc_mem_root.start = GPMC_MEM_START + boot_rom_space;
    	gpmc_mem_root.end = GPMC_MEM_END;
    
    	/* Reserve all regions that have been set up by the bootloader */
    	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
    		u32 base, size;
    
    		if (!gpmc_cs_mem_enabled(cs))
    			continue;
    		gpmc_cs_get_memconf(cs, &base, &size);
    		if (gpmc_cs_insert_mem(cs, base, size) < 0)
    			BUG();
    	}
    }
    
    static int __init gpmc_init(void)
    {
    	u32 l, irq;
    	int cs, ret = -EINVAL;
    	int gpmc_irq;
    	char *ck = NULL;
    
    	if (cpu_is_omap24xx()) {
    		ck = "core_l3_ck";
    		if (cpu_is_omap2420())
    			l = OMAP2420_GPMC_BASE;
    		else
    			l = OMAP34XX_GPMC_BASE;
    		gpmc_irq = INT_34XX_GPMC_IRQ;
    	} else if (cpu_is_omap34xx()) {
    		ck = "gpmc_fck";
    		if (cpu_is_am33xx())
    			l = OMAP44XX_GPMC_BASE;
    		else
    			l = OMAP34XX_GPMC_BASE;
    		gpmc_irq = INT_34XX_GPMC_IRQ;
    	} else if (cpu_is_omap44xx()) {
    		ck = "gpmc_ck";
    		l = OMAP44XX_GPMC_BASE;
    		gpmc_irq = OMAP44XX_IRQ_GPMC;
    	}
    
    	if (WARN_ON(!ck))
    		return ret;
    
    	gpmc_l3_clk = clk_get(NULL, ck);
    	if (IS_ERR(gpmc_l3_clk)) {
    		printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
    		BUG();
    	}
    
    	gpmc_base = ioremap(l, SZ_4K);
    	if (!gpmc_base) {
    		clk_put(gpmc_l3_clk);
    		printk(KERN_ERR "Could not get GPMC register memory\n");
    		BUG();
    	}
    
    	clk_enable(gpmc_l3_clk);
    
    	l = gpmc_read_reg(GPMC_REVISION);
    	printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
    	/* Set smart idle mode and automatic L3 clock gating */
    	l = gpmc_read_reg(GPMC_SYSCONFIG);
    	l &= 0x03 << 3;
    	l |= (0x02 << 3) | (1 << 0);
    	gpmc_write_reg(GPMC_SYSCONFIG, l);
    	gpmc_mem_init();
    
    	/* initialize the chained irqs */
    	irq = OMAP_GPMC_IRQ_BASE;
    	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
    		irq_set_chip_and_handler(irq, &dummy_irq_chip,
    						handle_simple_irq);
    		set_irq_flags(irq, IRQF_VALID);
    		irq++;
    	}
    
    	ret = request_irq(gpmc_irq,
    			gpmc_handle_irq, IRQF_SHARED, "gpmc", gpmc_base);
    	if (ret)
    		pr_err("gpmc: irq-%d could not claim: err %d\n",
    						gpmc_irq, ret);
    	return ret;
    }
    postcore_initcall(gpmc_init);
    
    static irqreturn_t gpmc_handle_irq(int irq, void *dev)
    {
    	u8 cs;
    
    	/* check cs to invoke the irq */
    	cs = ((gpmc_read_reg(GPMC_PREFETCH_CONFIG1)) >> CS_NUM_SHIFT) & 0x7;
    	if (OMAP_GPMC_IRQ_BASE+cs <= OMAP_GPMC_IRQ_END)
    		generic_handle_irq(OMAP_GPMC_IRQ_BASE+cs);
    
    	return IRQ_HANDLED;
    }
    
    #ifdef CONFIG_ARCH_OMAP3
    static struct omap3_gpmc_regs gpmc_context;
    
    void omap3_gpmc_save_context(void)
    {
    	int i;
    
    	gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
    	gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
    	gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
    	gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
    	gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
    	gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
    	gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
    	for (i = 0; i < GPMC_CS_NUM; i++) {
    		gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
    		if (gpmc_context.cs_context[i].is_valid) {
    			gpmc_context.cs_context[i].config1 =
    				gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
    			gpmc_context.cs_context[i].config2 =
    				gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
    			gpmc_context.cs_context[i].config3 =
    				gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
    			gpmc_context.cs_context[i].config4 =
    				gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
    			gpmc_context.cs_context[i].config5 =
    				gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
    			gpmc_context.cs_context[i].config6 =
    				gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
    			gpmc_context.cs_context[i].config7 =
    				gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
    		}
    	}
    }
    
    void omap3_gpmc_restore_context(void)
    {
    	int i;
    
    	gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
    	gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
    	gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
    	gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
    	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
    	gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
    	gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
    	for (i = 0; i < GPMC_CS_NUM; i++) {
    		if (gpmc_context.cs_context[i].is_valid) {
    			gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
    				gpmc_context.cs_context[i].config1);
    			gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
    				gpmc_context.cs_context[i].config2);
    			gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
    				gpmc_context.cs_context[i].config3);
    			gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
    				gpmc_context.cs_context[i].config4);
    			gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
    				gpmc_context.cs_context[i].config5);
    			gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
    				gpmc_context.cs_context[i].config6);
    			gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
    				gpmc_context.cs_context[i].config7);
    		}
    	}
    }
    #endif /* CONFIG_ARCH_OMAP3 */
    
    /**
     * gpmc_enable_hwecc - enable hardware ecc functionality
     * @cs: chip select number
     * @mode: read/write mode
     * @dev_width: device bus width(1 for x16, 0 for x8)
     * @ecc_size: bytes for which ECC will be generated
     */
    int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
    {
    	unsigned int val;
    
    	/* check if the ecc module is already in use */
    	if (gpmc_ecc_used != -EINVAL)
    		return -EINVAL;
    
    	gpmc_ecc_used = cs;
    
    	/* clear ecc and enable bits */
    	val = ((0x00000001<<8) | 0x00000001);
    	gpmc_write_reg(GPMC_ECC_CONTROL, val);
    
    	/* program ecc and result sizes */
    	val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
    	gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, val);
    
    	switch (mode) {
    	case GPMC_ECC_READ:
    		gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
    		break;
    	case GPMC_ECC_READSYN:
    		 gpmc_write_reg(GPMC_ECC_CONTROL, 0x100);
    		break;
    	case GPMC_ECC_WRITE:
    		gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
    		break;
    	default:
    		printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
    		break;
    	}
    
    	/* (ECC 16 or 8 bit col) | ( CS  )  | ECC Enable */
    	val = (dev_width << 7) | (cs << 1) | (0x1);
    	gpmc_write_reg(GPMC_ECC_CONFIG, val);
    	return 0;
    }
    
    /**
     * gpmc_calculate_ecc - generate non-inverted ecc bytes
     * @cs: chip select number
     * @dat: data pointer over which ecc is computed
     * @ecc_code: ecc code buffer
     *
     * Using non-inverted ECC is considered ugly since writing a blank
     * page (padding) will clear the ECC bytes. This is not a problem as long
     * as no one is trying to write data on the seemingly unused page. Reading
     * an erased page will produce an ECC mismatch between generated and read
     * ECC bytes that has to be dealt with separately.
     */
    int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code)
    {
    	unsigned int val = 0x0;
    
    	if (gpmc_ecc_used != cs)
    		return -EINVAL;
    
    	/* read ecc result */
    	val = gpmc_read_reg(GPMC_ECC1_RESULT);
    	*ecc_code++ = val;          /* P128e, ..., P1e */
    	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
    	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
    	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
    
    	gpmc_ecc_used = -EINVAL;
    	return 0;
    }
    
    nand_base.c:
    /*
     * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
     * Copyright © 2004 Micron Technology Inc.
     * Copyright © 2004 David Brownell
     *
     * This program is free software; you can redistribute it and/or modify
     * it under the terms of the GNU General Public License version 2 as
     * published by the Free Software Foundation.
     */
    
    #include <linux/platform_device.h>
    #include <linux/dma-mapping.h>
    #include <linux/delay.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>
    #include <linux/mtd/partitions.h>
    #include <linux/io.h>
    #include <linux/slab.h>
    
    #include <plat/dma.h>
    #include <plat/gpmc.h>
    #include <plat/nand.h>
    
    #define	DRIVER_NAME	"omap2-nand"
    #define	OMAP_NAND_TIMEOUT_MS	5000
    
    #define NAND_Ecc_P1e		(1 << 0)
    #define NAND_Ecc_P2e		(1 << 1)
    #define NAND_Ecc_P4e		(1 << 2)
    #define NAND_Ecc_P8e		(1 << 3)
    #define NAND_Ecc_P16e		(1 << 4)
    #define NAND_Ecc_P32e		(1 << 5)
    #define NAND_Ecc_P64e		(1 << 6)
    #define NAND_Ecc_P128e		(1 << 7)
    #define NAND_Ecc_P256e		(1 << 8)
    #define NAND_Ecc_P512e		(1 << 9)
    #define NAND_Ecc_P1024e		(1 << 10)
    #define NAND_Ecc_P2048e		(1 << 11)
    
    #define NAND_Ecc_P1o		(1 << 16)
    #define NAND_Ecc_P2o		(1 << 17)
    #define NAND_Ecc_P4o		(1 << 18)
    #define NAND_Ecc_P8o		(1 << 19)
    #define NAND_Ecc_P16o		(1 << 20)
    #define NAND_Ecc_P32o		(1 << 21)
    #define NAND_Ecc_P64o		(1 << 22)
    #define NAND_Ecc_P128o		(1 << 23)
    #define NAND_Ecc_P256o		(1 << 24)
    #define NAND_Ecc_P512o		(1 << 25)
    #define NAND_Ecc_P1024o		(1 << 26)
    #define NAND_Ecc_P2048o		(1 << 27)
    
    #define TF(value)	(value ? 1 : 0)
    
    #define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
    #define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
    #define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
    #define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
    #define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
    #define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
    #define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
    #define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)
    
    #define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
    #define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
    #define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
    #define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
    #define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
    #define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
    #define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
    #define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)
    
    #define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
    #define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
    #define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
    #define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
    #define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
    #define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
    #define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
    #define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)
    
    #define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
    #define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
    #define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
    #define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
    #define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
    #define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
    #define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
    #define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)
    
    #define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
    #define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)
    
    #define MAX_HWECC_BYTES_OOB_64     24
    #define JFFS2_CLEAN_MARKER_OFFSET  0x2
    
    static const char *part_probes[] = { "cmdlinepart", NULL };
    
    /* oob info generated runtime depending on ecc algorithm and layout selected */
    static struct nand_ecclayout omap_oobinfo;
    /* Define some generic bad / good block scan pattern which are used
     * while scanning a device for factory marked good / bad blocks
     */
    static uint8_t scan_ff_pattern[] = { 0xff };
    static struct nand_bbt_descr bb_descrip_flashbased = {
    	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
    	.offs = 0,
    	.len = 1,
    	.pattern = scan_ff_pattern,
    };
    
    
    struct omap_nand_info {
    	struct nand_hw_control		controller;
    	struct omap_nand_platform_data	*pdata;
    	struct mtd_info			mtd;
    	struct mtd_partition		*parts;
    	struct nand_chip		nand;
    	struct platform_device		*pdev;
    
    	int				gpmc_cs;
    	unsigned long			phys_base;
    	struct completion		comp;
    	int				dma_ch;
    	int				gpmc_irq;
    	enum {
    		OMAP_NAND_IO_READ = 0,	/* read */
    		OMAP_NAND_IO_WRITE,	/* write */
    	} iomode;
    	u_char				*buf;
    	int					buf_len;
    };
    
    /**
     * omap_hwcontrol - hardware specific access to control-lines
     * @mtd: MTD device structure
     * @cmd: command to device
     * @ctrl:
     * NAND_NCE: bit 0 -> don't care
     * NAND_CLE: bit 1 -> Command Latch
     * NAND_ALE: bit 2 -> Address Latch
     *
     * NOTE: boards may use different bits for these!!
     */
    static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
    {
    	struct omap_nand_info *info = container_of(mtd,
    					struct omap_nand_info, mtd);
    
    	if (cmd != NAND_CMD_NONE) {
    		if (ctrl & NAND_CLE)
    			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
    
    		else if (ctrl & NAND_ALE)
    			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
    
    		else /* NAND_NCE */
    			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
    	}
    }
    
    /**
     * omap_read_buf8 - read data from NAND controller into buffer
     * @mtd: MTD device structure
     * @buf: buffer to store data
     * @len: number of bytes to read
     */
    static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
    {
    	struct nand_chip *nand = mtd->priv;
    
    	ioread8_rep(nand->IO_ADDR_R, buf, len);
    }
    
    /**
     * omap_write_buf8 - write buffer to NAND controller
     * @mtd: MTD device structure
     * @buf: data buffer
     * @len: number of bytes to write
     */
    static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
    {
    	struct omap_nand_info *info = container_of(mtd,
    						struct omap_nand_info, mtd);
    	u_char *p = (u_char *)buf;
    	u32	status = 0;
    
    	while (len--) {
    		iowrite8(*p++, info->nand.IO_ADDR_W);
    		/* wait until buffer is available for write */
    		do {
    			status = gpmc_read_status(GPMC_STATUS_BUFFER);
    		} while (!status);
    	}
    }
    
    /**
     * omap_read_buf16 - read data from NAND controller into buffer
     * @mtd: MTD device structure
     * @buf: buffer to store data
     * @len: number of bytes to read
     */
    static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
    {
    	struct nand_chip *nand = mtd->priv;
    
    	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
    }
    
    /**
     * omap_write_buf16 - write buffer to NAND controller
     * @mtd: MTD device structure
     * @buf: data buffer
     * @len: number of bytes to write
     */
    static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
    {
    	struct omap_nand_info *info = container_of(mtd,
    						struct omap_nand_info, mtd);
    	u16 *p = (u16 *) buf;
    	u32	status = 0;
    	/* FIXME try bursts of writesw() or DMA ... */
    	len >>= 1;
    
    	while (len--) {
    		iowrite16(*p++, info->nand.IO_ADDR_W);
    		/* wait until buffer is available for write */
    		do {
    			status = gpmc_read_status(GPMC_STATUS_BUFFER);
    		} while (!status);
    	}
    }
    
    /**
     * omap_read_buf_pref - read data from NAND controller into buffer
     * @mtd: MTD device structure
     * @buf: buffer to store data
     * @len: number of bytes to read
     */
    static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
    {
    	struct omap_nand_info *info = container_of(mtd,
    						struct omap_nand_info, mtd);
    	uint32_t r_count = 0;
    	int ret = 0;
    	u32 *p = (u32 *)buf;
    
    	/* take care of subpage reads */
    	if (len % 4) {
    		if (info->nand.options & NAND_BUSWIDTH_16)
    			omap_read_buf16(mtd, buf, len % 4);
    		else
    			omap_read_buf8(mtd, buf, len % 4);
    		p = (u32 *) (buf + len % 4);
    		len -= len % 4;
    	}
    
    	/* configure and start prefetch transfer */
    	ret = gpmc_prefetch_enable(info->gpmc_cs,
    			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
    	if (ret) {
    		/* PFPW engine is busy, use cpu copy method */
    		if (info->nand.options & NAND_BUSWIDTH_16)
    			omap_read_buf16(mtd, (u_char *)p, len);
    		else
    			omap_read_buf8(mtd, (u_char *)p, len);
    	} else {
    		do {
    			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
    			r_count = r_count >> 2;
    			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
    			p += r_count;
    			len -= r_count << 2;
    		} while (len);
    		/* disable and stop the PFPW engine */
    		gpmc_prefetch_reset(info->gpmc_cs);
    	}
    }
    
    /**
     * omap_write_buf_pref - write buffer to NAND controller
     * @mtd: MTD device structure
     * @buf: data buffer
     * @len: number of bytes to write
     */
    static void omap_write_buf_pref(struct mtd_info *mtd,
    					const u_char *buf, int len)
    {
    	struct omap_nand_info *info = container_of(mtd,
    						struct omap_nand_info, mtd);
    	uint32_t w_count = 0;
    	int i = 0, ret = 0;
    	u16 *p = (u16 *)buf;
    	unsigned long tim, limit;
    
    	/* take care of subpage writes */
    	if (len % 2 != 0) {
    		writeb(*buf, info->nand.IO_ADDR_W);
    		p = (u16 *)(buf + 1);
    		len--;
    	}
    
    	/*  configure and start prefetch transfer */
    	ret = gpmc_prefetch_enable(info->gpmc_cs,
    			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
    	if (ret) {
    		/* PFPW engine is busy, use cpu copy method */
    		if (info->nand.options & NAND_BUSWIDTH_16)
    			omap_write_buf16(mtd, (u_char *)p, len);
    		else
    			omap_write_buf8(mtd, (u_char *)p, len);
    	} else {
    		while (len) {
    			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
    			w_count = w_count >> 1;
    			for (i = 0; (i < w_count) && len; i++, len -= 2)
    				iowrite16(*p++, info->nand.IO_ADDR_W);
    		}
    		/* wait for the data to be flushed out before resetting the prefetch */
    		tim = 0;
    		limit = (loops_per_jiffy *
    					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
    		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
    			cpu_relax();
    
    		/* disable and stop the PFPW engine */
    		gpmc_prefetch_reset(info->gpmc_cs);
    	}
    }
    
    /*
     * omap_nand_dma_cb: callback on the completion of dma transfer
     * @lch: logical channel
     * @ch_status: channel status
     * @data: pointer to completion data structure
     */
    static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
    {
    	complete((struct completion *) data);
    }
    
    /*
     * omap_nand_dma_transfer: configure and start the dma transfer
     * @mtd: MTD device structure
     * @addr: virtual address in RAM of source/destination
     * @len: number of data bytes to be transferred
     * @is_write: flag for read/write operation
     */
    static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
    					unsigned int len, int is_write)
    {
    	struct omap_nand_info *info = container_of(mtd,
    					struct omap_nand_info, mtd);
    	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
    							DMA_FROM_DEVICE;
    	dma_addr_t dma_addr;
    	int ret;
    	unsigned long tim, limit;
    
    	/* The fifo depth is 64 bytes max.
    	 * But configure the FIFO threshold to 32 to get a sync at each frame
    	 * and frame length is 32 bytes.
    	 */
    	int buf_len = len >> 6;
    
    	if (addr >= high_memory) {
    		struct page *p1;
    
    		if (((size_t)addr & PAGE_MASK) !=
    			((size_t)(addr + len - 1) & PAGE_MASK))
    			goto out_copy;
    		p1 = vmalloc_to_page(addr);
    		if (!p1)
    			goto out_copy;
    		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
    	}
    
    	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
    	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
    		dev_err(&info->pdev->dev,
    			"Couldn't DMA map a %d byte buffer\n", len);
    		goto out_copy;
    	}
    
    	if (is_write) {
    	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
    						info->phys_base, 0, 0);
    	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
    							dma_addr, 0, 0);
    	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
    					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
    					OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
    	} else {
    	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
    						info->phys_base, 0, 0);
    	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
    							dma_addr, 0, 0);
    	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
    					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
    					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
    	}
    	/*  configure and start prefetch transfer */
    	ret = gpmc_prefetch_enable(info->gpmc_cs,
    			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
    	if (ret)
    		/* PFPW engine is busy, use cpu copy method */
    		goto out_copy;
    
    	init_completion(&info->comp);
    
    	omap_start_dma(info->dma_ch);
    
    	/* setup and start DMA using dma_addr */
    	wait_for_completion(&info->comp);
    	tim = 0;
    	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
    	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
    		cpu_relax();
    
    	/* disable and stop the PFPW engine */
    	gpmc_prefetch_reset(info->gpmc_cs);
    
    	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
    	return 0;
    
    out_copy:
    	if (info->nand.options & NAND_BUSWIDTH_16)
    		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
    			: omap_write_buf16(mtd, (u_char *) addr, len);
    	else
    		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
    			: omap_write_buf8(mtd, (u_char *) addr, len);
    	return 0;
    }
    
    /**
     * omap_read_buf_dma_pref - read data from NAND controller into buffer
     * @mtd: MTD device structure
     * @buf: buffer to store data
     * @len: number of bytes to read
     */
    static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
    {
    	if (len <= mtd->oobsize)
    		omap_read_buf_pref(mtd, buf, len);
    	else
    		/* start transfer in DMA mode */
    		omap_nand_dma_transfer(mtd, buf, len, 0x0);
    }
    
    /**
     * omap_write_buf_dma_pref - write buffer to NAND controller
     * @mtd: MTD device structure
     * @buf: data buffer
     * @len: number of bytes to write
     */
    static void omap_write_buf_dma_pref(struct mtd_info *mtd,
    					const u_char *buf, int len)
    {
    	if (len <= mtd->oobsize)
    		omap_write_buf_pref(mtd, buf, len);
    	else
    		/* start transfer in DMA mode */
    		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
    }
    
    /*
     * omap_nand_irq - GMPC irq handler
     * @this_irq: gpmc irq number
     * @dev: omap_nand_info structure pointer is passed here
     */
    static irqreturn_t omap_nand_irq(int this_irq, void *dev)
    {
    	struct omap_nand_info *info = (struct omap_nand_info *) dev;
    	u32 bytes;
    	u32 irq_stat;
    
    	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
    	bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
    	bytes = bytes  & 0xFFFC; /* io in multiple of 4 bytes */
    	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
    		if (irq_stat & 0x2)
    			goto done;
    
    		if (info->buf_len && (info->buf_len < bytes))
    			bytes = info->buf_len;
    		else if (!info->buf_len)
    			bytes = 0;
    		iowrite32_rep(info->nand.IO_ADDR_W,
    						(u32 *)info->buf, bytes >> 2);
    		info->buf = info->buf + bytes;
    		info->buf_len -= bytes;
    
    	} else {
    		ioread32_rep(info->nand.IO_ADDR_R,
    						(u32 *)info->buf, bytes >> 2);
    		info->buf = info->buf + bytes;
    
    		if (irq_stat & 0x2)
    			goto done;
    	}
    	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
    
    	return IRQ_HANDLED;
    
    done:
    	complete(&info->comp);
    	/* disable irq */
    	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
    
    	/* clear status */
    	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
    
    	return IRQ_HANDLED;
    }
    
    /*
     * omap_read_buf_irq_pref - read data from NAND controller into buffer
     * @mtd: MTD device structure
     * @buf: buffer to store data
     * @len: number of bytes to read
     */
    static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
    {
    	struct omap_nand_info *info = container_of(mtd,
    						struct omap_nand_info, mtd);
    	int ret = 0;
    
    	if (len <= mtd->oobsize) {
    		omap_read_buf_pref(mtd, buf, len);
    		return;
    	}
    
    	info->iomode = OMAP_NAND_IO_READ;
    	info->buf = buf;
    	init_completion(&info->comp);
    
    	/*  configure and start prefetch transfer */
    	ret = gpmc_prefetch_enable(info->gpmc_cs,
    			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
    	if (ret)
    		/* PFPW engine is busy, use cpu copy method */
    		goto out_copy;
    
    	info->buf_len = len;
    	/* enable irq */
    	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
    		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
    
    	/* waiting for read to complete */
    	wait_for_completion(&info->comp);
    
    	/* disable and stop the PFPW engine */
    	gpmc_prefetch_reset(info->gpmc_cs);
    	return;
    
    out_copy:
    	if (info->nand.options & NAND_BUSWIDTH_16)
    		omap_read_buf16(mtd, buf, len);
    	else
    		omap_read_buf8(mtd, buf, len);
    }
    
    /*
     * omap_write_buf_irq_pref - write buffer to NAND controller
     * @mtd: MTD device structure
     * @buf: data buffer
     * @len: number of bytes to write
     */
    static void omap_write_buf_irq_pref(struct mtd_info *mtd,
    					const u_char *buf, int len)
    {
    	struct omap_nand_info *info = container_of(mtd,
    						struct omap_nand_info, mtd);
    	int ret = 0;
    	unsigned long tim, limit;
    
    	if (len <= mtd->oobsize) {
    		omap_write_buf_pref(mtd, buf, len);
    		return;
    	}
    
    	info->iomode = OMAP_NAND_IO_WRITE;
    	info->buf = (u_char *) buf;
    	init_completion(&info->comp);
    
    	/* configure and start prefetch transfer : size=24 */
    	ret = gpmc_prefetch_enable(info->gpmc_cs,
    			(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
    	if (ret)
    		/* PFPW engine is busy, use cpu copy method */
    		goto out_copy;
    
    	info->buf_len = len;
    	/* enable irq */
    	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
    			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
    
    	/* waiting for write to complete */
    	wait_for_completion(&info->comp);
    	/* wait for the data to be flushed out before resetting the prefetch */
    	tim = 0;
    	limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
    	while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
    		cpu_relax();
    
    	/* disable and stop the PFPW engine */
    	gpmc_prefetch_reset(info->gpmc_cs);
    	return;
    
    out_copy:
    	if (info->nand.options & NAND_BUSWIDTH_16)
    		omap_write_buf16(mtd, buf, len);
    	else
    		omap_write_buf8(mtd, buf, len);
    }
    
    /**
     * omap_verify_buf - Verify chip data against buffer
     * @mtd: MTD device structure
     * @buf: buffer containing the data to compare
     * @len: number of bytes to compare
     */
    static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
    {
    	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
    							mtd);
    	u16 *p = (u16 *) buf;
    
    	len >>= 1;
    	while (len--) {
    		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
    			return -EFAULT;
    	}
    
    	return 0;
    }
    
    /**
     * gen_true_ecc - This function will generate true ECC value
     * @ecc_buf: buffer to store ecc code
     *
     * This generated true ECC value can be used when correcting
     * data read from NAND flash memory core
     */
    static void gen_true_ecc(u8 *ecc_buf)
    {
    	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
    		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
    
    	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
    			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
    	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
    			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
    	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
    			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
    }
    
    /**
     * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
     * @ecc_data1:  ecc code from nand spare area
     * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
     * @page_data:  page data
     *
     * This function compares two ECCs and indicates if there is an error.
     * If the error can be corrected, it is corrected in the buffer.
     * If there is no error, %0 is returned. If there is an error but it
     * was corrected, %1 is returned. Otherwise, %-1 is returned.
     */
    static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
    			    u8 *ecc_data2,	/* read from register */
    			    u8 *page_data)
    {
    	uint	i;
    	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
    	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
    	u8	ecc_bit[24];
    	u8	ecc_sum = 0;
    	u8	find_bit = 0;
    	uint	find_byte = 0;
    	int	isEccFF;
    
    	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
    
    	gen_true_ecc(ecc_data1);
    	gen_true_ecc(ecc_data2);
    
    	for (i = 0; i <= 2; i++) {
    		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
    		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
    	}
    
    	for (i = 0; i < 8; i++) {
    		tmp0_bit[i]     = *ecc_data1 % 2;
    		*ecc_data1	= *ecc_data1 / 2;
    	}
    
    	for (i = 0; i < 8; i++) {
    		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
    		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
    	}
    
    	for (i = 0; i < 8; i++) {
    		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
    		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
    	}
    
    	for (i = 0; i < 8; i++) {
    		comp0_bit[i]     = *ecc_data2 % 2;
    		*ecc_data2       = *ecc_data2 / 2;
    	}
    
    	for (i = 0; i < 8; i++) {
    		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
    		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
    	}
    
    	for (i = 0; i < 8; i++) {
    		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
    		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
    	}
    
    	for (i = 0; i < 6; i++)
    		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
    
    	for (i = 0; i < 8; i++)
    		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
    
    	for (i = 0; i < 8; i++)
    		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
    
    	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
    	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
    
    	for (i = 0; i < 24; i++)
    		ecc_sum += ecc_bit[i];
    
    	switch (ecc_sum) {
    	case 0:
    		/* Not reached because this function is not called if
    		 *  ECC values are equal
    		 */
    		return 0;
    
    	case 1:
    		/* Uncorrectable error */
    		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
    		return -1;
    
    	case 11:
    		/* UN-Correctable error */
    		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
    		return -1;
    
    	case 12:
    		/* Correctable error */
    		find_byte = (ecc_bit[23] << 8) +
    			    (ecc_bit[21] << 7) +
    			    (ecc_bit[19] << 6) +
    			    (ecc_bit[17] << 5) +
    			    (ecc_bit[15] << 4) +
    			    (ecc_bit[13] << 3) +
    			    (ecc_bit[11] << 2) +
    			    (ecc_bit[9]  << 1) +
    			    ecc_bit[7];
    
    		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
    
    		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
    				"offset: %d, bit: %d\n", find_byte, find_bit);
    
    		page_data[find_byte] ^= (1 << find_bit);
    
    		return 1;
    	default:
    		if (isEccFF) {
    			if (ecc_data2[0] == 0 &&
    			    ecc_data2[1] == 0 &&
    			    ecc_data2[2] == 0)
    				return 0;
    		}
    		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
    		return -1;
    	}
    }
    
    /**
     * omap_correct_data - Compares the ECC read with HW generated ECC
     * @mtd: MTD device structure
     * @dat: page data
     * @read_ecc: ecc read from nand flash
     * @calc_ecc: ecc read from HW ECC registers
     *
     * Compares the ECC read from the NAND spare area with the ECC register
     * values and, if they mismatch, calls 'omap_compare_ecc' for error
     * detection and correction. If there are no errors, %0 is returned. If
     * there were errors and all of them were corrected, the number of
     * corrected errors is returned. If uncorrectable errors exist, %-1 is
     * returned.
     */
    static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
    				u_char *read_ecc, u_char *calc_ecc)
    {
    	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
    							mtd);
    	int blockCnt = 0, i = 0, ret = 0;
    	int stat = 0;
    
    	/* Ex NAND_ECC_HW12_2048 */
    	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
    			(info->nand.ecc.size  == 2048))
    		blockCnt = 4;
    	else
    		blockCnt = 1;
    
    	for (i = 0; i < blockCnt; i++) {
    		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
    			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
    			if (ret < 0)
    				return ret;
    			/* keep track of the number of corrected errors */
    			stat += ret;
    		}
    		read_ecc += 3;
    		calc_ecc += 3;
    		dat      += 512;
    	}
    	return stat;
    }
    
    /**
     * omap_calculate_ecc - Generate non-inverted ECC bytes.
     * @mtd: MTD device structure
     * @dat: The pointer to data on which ecc is computed
     * @ecc_code: The ecc_code buffer
     *
     * Using non-inverted ECC can be considered ugly, since writing a blank
     * page (i.e. padding) will clear the ECC bytes. This is not a problem as
     * long as nobody tries to write data to the seemingly unused page.
     * Reading an erased page will produce an ECC mismatch between the
     * generated and read ECC bytes that has to be dealt with separately.
     */
    static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
    				u_char *ecc_code)
    {
    	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
    							mtd);
    	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
    }
    
    /**
     * omap_enable_hwecc - This function enables the hardware ecc functionality
     * @mtd: MTD device structure
     * @mode: Read/Write mode
     */
    static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
    {
    	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
    							mtd);
    	struct nand_chip *chip = mtd->priv;
    	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
    
    	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
    }
    
    /**
     * omap_wait - wait until the command is done
     * @mtd: MTD device structure
     * @chip: NAND Chip structure
     *
     * The wait function is called during program and erase operations. Given
     * the way it is called from the MTD layer, we should wait until the NAND
     * chip is ready after the program/erase operation has completed.
     *
     * Erase can take up to 400 ms and program up to 20 ms according to
     * general NAND and SmartMedia specs.
     */
    static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
    {
    	struct nand_chip *this = mtd->priv;
    	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
    							mtd);
    	unsigned long timeo = jiffies;
    	int status = NAND_STATUS_FAIL, state = this->state;
    
    	if (state == FL_ERASING)
    		timeo += (HZ * 400) / 1000;
    	else
    		timeo += (HZ * 20) / 1000;
    
    	gpmc_nand_write(info->gpmc_cs,
    			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
    	while (time_before(jiffies, timeo)) {
    		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
    		if (status & NAND_STATUS_READY)
    			break;
    		cond_resched();
    	}
    	return status;
    }
    
    /**
     * omap_dev_ready - calls the platform specific dev_ready function
     * @mtd: MTD device structure
     */
    static int omap_dev_ready(struct mtd_info *mtd)
    {
    	unsigned int val = 0;
    	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
    							mtd);
    
    	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
    	if ((val & 0x100) == 0x100) {
    		/* Clear IRQ Interrupt */
    		val |= 0x100;
    		val &= ~(0x0);
    		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
    	} else {
    		unsigned int cnt = 0;
    		while (cnt++ < 0x1FF) {
    			if  ((val & 0x100) == 0x100)
    				return 0;
    			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
    		}
    	}
    
    	return 1;
    }
    
    static int __devinit omap_nand_probe(struct platform_device *pdev)
    {
    	struct omap_nand_info		*info;
    	struct omap_nand_platform_data	*pdata;
    	int				err;
    	int				i, offset;
    
    	pdata = pdev->dev.platform_data;
    	if (pdata == NULL) {
    		dev_err(&pdev->dev, "platform data missing\n");
    		return -ENODEV;
    	}
    
    	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
    	if (!info)
    		return -ENOMEM;
    
    	platform_set_drvdata(pdev, info);
    
    	spin_lock_init(&info->controller.lock);
    	init_waitqueue_head(&info->controller.wq);
    
    	info->pdev = pdev;
    
    	info->gpmc_cs		= pdata->cs;
    	info->phys_base		= pdata->phys_base;
    
    	info->mtd.priv		= &info->nand;
    	info->mtd.name		= dev_name(&pdev->dev);
    	info->mtd.owner		= THIS_MODULE;
    
    	info->nand.options	= pdata->devsize;
    	info->nand.options	|= NAND_SKIP_BBTSCAN;
    
    	/* NAND write protect off */
    	gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
    
    	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
    				pdev->dev.driver->name)) {
    		err = -EBUSY;
    		goto out_free_info;
    	}
    
    	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
    	if (!info->nand.IO_ADDR_R) {
    		err = -ENOMEM;
    		goto out_release_mem_region;
    	}
    
    	info->nand.controller = &info->controller;
    
    	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
    	info->nand.cmd_ctrl  = omap_hwcontrol;
    
    	/*
    	 * If the RDY/BSY line is connected to the OMAP, use the omap_dev_ready
    	 * function and the generic nand_wait function, which reads the status
    	 * register after monitoring the RDY/BSY line. Otherwise use a standard
    	 * chip delay, which is slightly longer than tR (AC timing) of the NAND
    	 * device, and read the status register until you get a failure or
    	 * success.
    	 */
    	if (pdata->dev_ready) {
    		info->nand.dev_ready = omap_dev_ready;
    		info->nand.chip_delay = 0;
    	} else {
    		info->nand.waitfunc = omap_wait;
    		info->nand.chip_delay = 50;
    	}
    
    	switch (pdata->xfer_type) {
    	case NAND_OMAP_PREFETCH_POLLED:
    		info->nand.read_buf   = omap_read_buf_pref;
    		info->nand.write_buf  = omap_write_buf_pref;
    		break;
    
    	case NAND_OMAP_POLLED:
    		if (info->nand.options & NAND_BUSWIDTH_16) {
    			info->nand.read_buf   = omap_read_buf16;
    			info->nand.write_buf  = omap_write_buf16;
    		} else {
    			info->nand.read_buf   = omap_read_buf8;
    			info->nand.write_buf  = omap_write_buf8;
    		}
    		break;
    
    	case NAND_OMAP_PREFETCH_DMA:
    		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
    				omap_nand_dma_cb, &info->comp, &info->dma_ch);
    		if (err < 0) {
    			info->dma_ch = -1;
    			dev_err(&pdev->dev, "DMA request failed!\n");
    			goto out_release_mem_region;
    		} else {
    			omap_set_dma_dest_burst_mode(info->dma_ch,
    					OMAP_DMA_DATA_BURST_16);
    			omap_set_dma_src_burst_mode(info->dma_ch,
    					OMAP_DMA_DATA_BURST_16);
    
    			info->nand.read_buf   = omap_read_buf_dma_pref;
    			info->nand.write_buf  = omap_write_buf_dma_pref;
    		}
    		break;
    
    	case NAND_OMAP_PREFETCH_IRQ:
    		err = request_irq(pdata->gpmc_irq,
    				omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
    		if (err) {
    			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
    							pdata->gpmc_irq, err);
    			goto out_release_mem_region;
    		} else {
    			info->gpmc_irq	     = pdata->gpmc_irq;
    			info->nand.read_buf  = omap_read_buf_irq_pref;
    			info->nand.write_buf = omap_write_buf_irq_pref;
    		}
    		break;
    
    	default:
    		dev_err(&pdev->dev,
    			"xfer_type(%d) not supported!\n", pdata->xfer_type);
    		err = -EINVAL;
    		goto out_release_mem_region;
    	}
    
    	info->nand.verify_buf = omap_verify_buf;
    
    	/* select the ECC type */
    	if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
    		info->nand.ecc.mode = NAND_ECC_SOFT;
    	else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
    		(pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
    		info->nand.ecc.bytes            = 3;
    		info->nand.ecc.size             = 512;
    		info->nand.ecc.calculate        = omap_calculate_ecc;
    		info->nand.ecc.hwctl            = omap_enable_hwecc;
    		info->nand.ecc.correct          = omap_correct_data;
    		info->nand.ecc.mode             = NAND_ECC_HW;
    	}
    
    	/* DIP switches on some boards change between 8 and 16 bit
    	 * bus widths for flash.  Try the other width if the first try fails.
    	 */
    	if (nand_scan_ident(&info->mtd, 1, NULL)) {
    		info->nand.options ^= NAND_BUSWIDTH_16;
    		if (nand_scan_ident(&info->mtd, 1, NULL)) {
    			err = -ENXIO;
    			goto out_release_mem_region;
    		}
    	}
    
    	/* select the ECC layout */
    	if (info->nand.ecc.mode != NAND_ECC_SOFT) {
    
    		if (info->nand.options & NAND_BUSWIDTH_16)
    			offset = JFFS2_CLEAN_MARKER_OFFSET;
    		else {
    			offset = JFFS2_CLEAN_MARKER_OFFSET;
    			info->nand.badblock_pattern = &bb_descrip_flashbased;
    		}
    
    		if (info->mtd.oobsize == 64)
    			omap_oobinfo.eccbytes = info->nand.ecc.bytes *
    						2048/info->nand.ecc.size;
    		else
    			omap_oobinfo.eccbytes = info->nand.ecc.bytes;
    
    		if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
    			omap_oobinfo.oobfree->offset =
    						offset + omap_oobinfo.eccbytes;
    			omap_oobinfo.oobfree->length = info->mtd.oobsize -
    				(offset + omap_oobinfo.eccbytes);
    		} else {
    			omap_oobinfo.oobfree->offset = offset;
    			omap_oobinfo.oobfree->length = info->mtd.oobsize -
    						offset - omap_oobinfo.eccbytes;
    			/*
    			 * The offset is calculated considering the following:
    			 * 1) 12 ECC bytes for 512-byte access and 24 ECC bytes for
    			 *    256-byte access can be supported in a 64-byte OOB.
    			 * 2) The ECC bytes lie at the end of the OOB area.
    			 * 3) The ECC layout must match U-Boot's ECC layout.
    			 */
    			offset = info->mtd.oobsize - MAX_HWECC_BYTES_OOB_64;
    		}
    
    		for (i = 0; i < omap_oobinfo.eccbytes; i++)
    			omap_oobinfo.eccpos[i] = i+offset;
    
    		info->nand.ecc.layout = &omap_oobinfo;
    	}
    
    	/* second phase scan */
    	if (nand_scan_tail(&info->mtd)) {
    		err = -ENXIO;
    		goto out_release_mem_region;
    	}
    
    	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
    	if (err > 0)
    		mtd_device_register(&info->mtd, info->parts, err);
    	else if (pdata->parts)
    		mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
    	else
    		mtd_device_register(&info->mtd, NULL, 0);
    
    	platform_set_drvdata(pdev, &info->mtd);
    
    	return 0;
    
    out_release_mem_region:
    	release_mem_region(info->phys_base, NAND_IO_SIZE);
    out_free_info:
    	kfree(info);
    
    	return err;
    }
    
    static int omap_nand_remove(struct platform_device *pdev)
    {
    	struct mtd_info *mtd = platform_get_drvdata(pdev);
    	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
    							mtd);
    
    	platform_set_drvdata(pdev, NULL);
    	if (info->dma_ch != -1)
    		omap_free_dma(info->dma_ch);
    
    	if (info->gpmc_irq)
    		free_irq(info->gpmc_irq, info);
    
    	/* Release NAND device, its internal structures and partitions */
    	nand_release(&info->mtd);
    	iounmap(info->nand.IO_ADDR_R);
    	release_mem_region(info->phys_base, NAND_IO_SIZE);
    	kfree(&info->mtd);
    	return 0;
    }
    
    static struct platform_driver omap_nand_driver = {
    	.probe		= omap_nand_probe,
    	.remove		= omap_nand_remove,
    	.driver		= {
    		.name	= DRIVER_NAME,
    		.owner	= THIS_MODULE,
    	},
    };
    
    static int __init omap_nand_init(void)
    {
    	pr_info("%s driver initializing\n", DRIVER_NAME);
    
    	return platform_driver_register(&omap_nand_driver);
    }
    
    static void __exit omap_nand_exit(void)
    {
    	platform_driver_unregister(&omap_nand_driver);
    }
    
    module_init(omap_nand_init);
    module_exit(omap_nand_exit);
    
    MODULE_ALIAS("platform:" DRIVER_NAME);
    MODULE_LICENSE("GPL");
    MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
    

     

    Hi,

    Here are the files.

    However, these files are Linux kernel code, not U-Boot code. Can these files help with flashing the image in U-Boot?

    Thanks & Regards

    Keldy 

  • Which kernel version are you using? From the files you've sent, it doesn't support hardware BCH8. You need to figure out which ECC algorithm the kernel uses; in your case the errors occur because of an ECC algorithm mismatch. See the sketch below for where the kernel's ECC scheme is selected.
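
    This is only an illustration, not your actual board file: the field names are taken from the omap_nand_platform_data usage visible in the attached omap2.c, and <plat/nand.h> is assumed to declare that structure. The ECC scheme the kernel uses is selected through the .ecc_opt field of the NAND platform data:

    #include <plat/nand.h>	/* assumed header for omap_nand_platform_data */

    /* Hypothetical excerpt from a board file such as board-am335xevm.c */
    static struct omap_nand_platform_data board_nand_data = {
    	.cs		= 0,
    	.xfer_type	= NAND_OMAP_PREFETCH_POLLED,
    	/*
    	 * This must match the ECC scheme used when the image was written
    	 * from U-Boot:
    	 *   OMAP_ECC_HAMMING_CODE_DEFAULT    - software ECC
    	 *   OMAP_ECC_HAMMING_CODE_HW         - GPMC hardware Hamming ECC
    	 *   OMAP_ECC_HAMMING_CODE_HW_ROMCODE - hardware Hamming, ROM-code OOB layout
    	 */
    	.ecc_opt	= OMAP_ECC_HAMMING_CODE_HW_ROMCODE,
    };

    If U-Boot writes the image with one scheme and the kernel reads it back with another, the stored ECC bytes will not match what the kernel computes, and UBI will report ECC errors as soon as it re-reads those pages.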

  • Hi,

    I'm using the Linux kernel (arch/arm/mach-omap2). If the file doesn't support BCH8, does this mean I need to select a different ECC mode when flashing the image in U-Boot?

    Thanks & Regards

    Keldy 

  • Hi Renjith,

    Thanks for your reply.

    The NAND flash I am using is a K9F1G08, and I changed the ECC to Hamming ECC in the file arch/arm/mach-omap2/board-am335xevm.c. I have no idea how to change the file board-flash.c; a rough sketch of the part that needs to stay consistent follows.
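
    For reference, here is a minimal sketch of how the NAND part of board-flash.c is commonly wired. This is an assumption for illustration only: the function name board_nand_init, its arguments, and the gpmc_nand_init() helper may differ in your tree; the platform-data fields are the ones consumed by the attached omap2.c.

    #include <linux/mtd/partitions.h>	/* struct mtd_partition */
    #include <plat/nand.h>		/* assumed header for omap_nand_platform_data */

    /* Hypothetical NAND portion of board-flash.c, for illustration only */
    static struct omap_nand_platform_data board_nand_data;

    void __init board_nand_init(struct mtd_partition *nand_parts,
    				u8 nr_parts, u8 cs, int nand_type)
    {
    	board_nand_data.cs		= cs;
    	board_nand_data.parts		= nand_parts;
    	board_nand_data.nr_parts	= nr_parts;
    	board_nand_data.devsize		= nand_type;
    	board_nand_data.xfer_type	= NAND_OMAP_PREFETCH_POLLED;
    	/* Keep this consistent with board-am335xevm.c and with the ECC
    	 * mode used when the image was written from U-Boot. */
    	board_nand_data.ecc_opt		= OMAP_ECC_HAMMING_CODE_HW;

    	gpmc_nand_init(&board_nand_data);	/* assumed registration helper */
    }

    The key point is that whichever board file sets up the NAND platform data must pass an ecc_opt matching the ECC scheme used to write the image, since the attached omap2.c takes its ECC configuration from that field.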

  • Keldy,

    I need the Linux kernel version you are using, e.g. "2.6.37". You can get this from the boot logs.

  • Hi John,

    Can you send me these files by email to renjith.thomas@pathpartnertech.com?

    arch/arm/mach-omap2/board-am335xevm.c
    arch/arm/mach-omap2/board-flash.c
    arch/arm/mach-omap2/gpmc.c
    drivers/mtd/nand/nand_base.c
    drivers/mtd/nand/omap2.c