Driver source code:

/*
* davinci_nand.c - NAND Flash Driver for DaVinci family chips
*
* Copyright © 2006 Texas Instruments.
*
* Port to 2.6.23 Copyright © 2008 by:
* Sander Huijsen <Shuijsen@optelecom-nkf.com>
* Troy Kisky <troy.kisky@boundarydevices.com>
* Dirk Behme <Dirk.Behme@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <mach/nand.h>
#include <asm/mach-types.h>
#include <mach/emif.h>
#include "uni_c66_trace.h"
#undef dev_dbg
#define dev_dbg(dev, format, ...) do { \
EST_Log(LOG_INFO, NULL, format, ##__VA_ARGS__); \
} while (0)
/*
* This is a device driver for the NAND flash controller found on the
* various DaVinci family chips. It handles up to four SoC chipselects,
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
* The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
* This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
* outputs in a "wire-AND" configuration, with no per-chip signals.
*/
struct uni_nand_info {
struct mtd_info mtd;
struct nand_chip chip;
struct nand_ecclayout ecclayout;
struct device *dev;
struct clk *clk;
bool partitioned;
bool is_readmode;
void __iomem *base;
void __iomem *vaddr;
uint32_t ioaddr;
uint32_t current_cs;
uint32_t mask_chipsel;
uint32_t mask_ale;
uint32_t mask_cle;
uint32_t core_chipsel;
};
static DEFINE_SPINLOCK(uni_nand_lock);
static bool ecc4_busy;
#define to_davinci_nand(m) container_of(m, struct uni_nand_info, mtd)
#define EST_PLATFORM 1
#define SUCCESS (0)
#define FAIL (1)
#define NAND_DATA_ADDR ((volatile uint8_t*)0x74000000) /*emif16_cs2_base */
#define NAND_ALE_ADDR ((volatile uint8_t*)0x74002000)
#define NAND_CMD_ADDR ((volatile uint8_t*)0x74004000)
// Macros for delays in microseconds
#define STD_DELAY (25)
#define EMIF16_NAND_PROG_TIMEOUT (100000)
#define EMIF16_NAND_RESET_TIMEOUT (100000)
#define EMIF16_NAND_BLOCK_ERASE_TIMEOUT (2000000)
#define EMIF16_WAIT_PIN_POLL_ST_DLY (10)
#define CSL_EMIF16_NANDFSR_WAITSTAT_MASK (0x0000000Fu)
#define CSL_EMIF16_NANDFSR_WAITSTAT_SHIFT (0x00000000u)
#define CSL_EMIF16_NANDFSR_WAITSTAT_RESETVAL (0x00000000u)
#define CSL_EMIF16_NANDFCTL_CE1NAND_MASK (0x00000002u)
#define CSL_EMIF16_NANDFCTL_CE1NAND_SHIFT (0x00000001u)
#define CSL_EMIF16_NANDFCTL_CE1NAND_RESETVAL (0x00000000u)
#define CSL_EMIF16_NANDFCTL_CE1NAND_DISABLE (0x00000000u)
#define CSL_EMIF16_NANDFCTL_CE1NAND_ENABLE (0x00000001u)
#define CSL_EMIF16_NANDFCTL_4BIT_ECC_SEL_MASK (0x00000030u)
#define CSL_EMIF16_NANDFCTL_4BIT_ECC_SEL_SHIFT (0x00000004u)
#define CSL_EMIF16_NANDFCTL_4BIT_ECC_SEL_RESETVAL (0x00000000u)
#define CSL_EMIF16_NANDFCTL_4BIT_ECC_ST_MASK (0x00001000u)
#define CSL_EMIF16_NANDFCTL_4BIT_ECC_ST_SHIFT (0x0000000Cu)
#define CSL_EMIF16_NANDFCTL_4BIT_ECC_ST_RESETVAL (0x00000000u)
/*----4bit_ecc_st Tokens----*/
#define CSL_EMIF16_NANDFCTL_4BIT_ECC_ST_NOACTION (0x00000000u)
#define CSL_EMIF16_NANDFCTL_4BIT_ECC_ST_STARTCALC (0x00000001u)
#define CSL_PSC_PDCTL_NEXT_MASK (0x00000001u)
#define CSL_PSC_PDCTL_NEXT_SHIFT (0x00000000u)
#define CSL_PSC_PDCTL_NEXT_RESETVAL (0x00000001u)
#define CSL_PSC_PDCTL_NEXT_OFF (0x00000000u)
#define CSL_PSC_PDCTL_NEXT_ON (0x00000001u)
#define CSL_PSC_MDCTL_NEXT_MASK (0x0000001Fu)
#define CSL_PSC_MDSTAT_STATE_MASK (0x0000003Fu)
#define CSL_PSC_MDSTAT_STATE_SHIFT (0x00000000u)
#define CSL_PSC_MDCTL_NEXT_SHIFT (0x00000000u)
#define CSL_PSC_REGS (0x02350000)
#define CSL_EMIF16_REGS (0x20c00000)
/* NAND FLASH COMMANDS */
#define NAND_LO_PAGE (0) /* Read the lo page CMD */
#define NAND_HI_PAGE (1) /* Read the hi page CMD */
#define NAND_SPARE_AREA_READ (0x50) /* Read the Spare area CMD */
#define NAND_ADD_00H (0x00)
#define NAND_ADD_08H (0x08)
#define NAND_CMD_05H (0x05) /* Random Data Read Command */
#define NAND_CMD_10H (0x10) /* Program Confirm Command */
#define NAND_CMD_30H (0x30)
#define NAND_CMD_E0H (0xE0)
#define NAND_BLOCK_ERASE (0x60) /* Block Erase Command */
#define NAND_ERASE_CONFIRM (0xD0) /* Erase Confirm Command */
#define NAND_GET_FEATURES (0xEE)
#define NAND_OTP_DATA_PROG (0xA0)
#define NAND_OTP_DATA_PROT (0xA5)
#define NAND_OTP_DATA_READ (0xAF)
#define NAND_PAGE_READ (0x00) /* Page Read Command */
#define NAND_PAGE_READ_LAST (0x3F) /* Page Read Cache Mode Start Last*/
#define NAND_PAGE_READ_RANDOM (0x00)
#define NAND_PAGE_READ_SEQUENTIAL (0x31) /* page Read Cache mode start */
#define NAND_INT_DATA_MOVE_PROG (0x85) /* Program for Internal Data Move */
#define NAND_PROG_PAGE (0x80) /* Program Page Command */
#define NAND_PROG_PAGE_CACHE (0x80) /* Program Page command */
#define NAND_RANDOM_DATA_IN (0x85) /* Program for internal Data Move */
#define NAND_RANDOM_DATA_READ (0x00)
#define NAND_INT_DATA_MOVE_READ (0xA5)
#define NAND_RDID (0x90) /* Read NAND ID Command */
#define NAND_READ_PARAM_PAGE (0xEC)
#define NAND_STATUS (0x70) /* Read Status command */
#define NAND_READ_UNIQUE_ID (0xED)
#define NAND_RST (0xFF) /* Reset Command */
#define NAND_RDY (0x40)
#define NAND_RDIDADD (0x20)
typedef unsigned int Uint32;
typedef unsigned char Uint8;
typedef struct {
volatile Uint32 PID;
volatile Uint8 RSVD0[16];//20
volatile Uint32 VCNTLID;//24
volatile Uint8 RSVD1[264];//288
volatile Uint32 PTCMD;//292
volatile Uint8 RSVD2[4];//296
volatile Uint32 PTSTAT;//300
volatile Uint8 RSVD3[212];//512
volatile Uint32 PDSTAT[32];//640
volatile Uint8 RSVD4[128];//768
volatile Uint32 PDCTL[32];//896
volatile Uint8 RSVD5[1152];//2048
volatile Uint32 MDSTAT[32];//2176
volatile Uint8 RSVD6[384];//2560
volatile Uint32 MDCTL[32];
} CSL_PscRegs;
static CSL_PscRegs *hPscRegs = (CSL_PscRegs *) (CSL_PSC_REGS);
typedef struct {
volatile Uint32 RCSR;
volatile Uint32 AWCCR;
volatile Uint8 RSVD0[8];
volatile Uint32 A0CR;//20
volatile Uint32 A1CR;//24
volatile Uint32 A2CR;//28
volatile Uint32 A3CR;//32
volatile Uint8 RSVD1[32];//64
volatile Uint32 IRR;//68
volatile Uint32 IMR;//72
volatile Uint32 IMSR;//76
volatile Uint32 IMCR;//80
volatile Uint32 IOCR;//84
volatile Uint32 IOSR;//88
volatile Uint8 RSVD2[8];//96
volatile Uint32 NANDFCTL;
volatile Uint32 NANDFSR;
volatile Uint32 PMCR;
volatile Uint8 RSVD3[4];
volatile Uint32 NFECCCE0;
volatile Uint32 NFECCCE1;
volatile Uint32 NFECCCE2;
volatile Uint32 NFECCCE3;
volatile Uint8 RSVD4[4];
volatile Uint32 IODFTEXECNT;
volatile Uint32 IODFTGBLCTRL;
volatile Uint8 RSVD5[4];
volatile Uint32 IODFTTLAMISR;
volatile Uint32 IODFTTLDMISR;
volatile Uint32 IODFTTLDCMISR;
volatile Uint8 RSVD6[20];
volatile Uint32 MODRELNUM;
volatile Uint8 RSVD7[8];
volatile Uint32 NANDF4BECCLR;
volatile Uint32 NANDF4BECC1R;
volatile Uint32 NANDF4BECC2R;
volatile Uint32 NANDF4BECC3R;
volatile Uint32 NANDF4BECC4R;
volatile Uint32 NANDFEA1R;
volatile Uint32 NANDFEA2R;
volatile Uint32 NANDFEV1R;
volatile Uint32 NANDFEV2R;
} CSL_Emif16Regs;
static CSL_Emif16Regs *hEmif16Cfg = (CSL_Emif16Regs*)CSL_EMIF16_REGS;
/* the "expression" macros */
/* the Field MaKe macro */
#define CSL_FMK(PER_REG_FIELD, val) \
(((val) << CSL_##PER_REG_FIELD##_SHIFT) & CSL_##PER_REG_FIELD##_MASK)
/* the Field EXTract macro */
#define CSL_FEXT(reg, PER_REG_FIELD) \
(((reg) & CSL_##PER_REG_FIELD##_MASK) >> CSL_##PER_REG_FIELD##_SHIFT)
/* the Field INSert macro */
#define CSL_FINS(reg, PER_REG_FIELD, val) \
((reg) = ((reg) & ~CSL_##PER_REG_FIELD##_MASK) \
| CSL_FMK(PER_REG_FIELD, val))
/* the "token" macros */
/* the Field MaKe (Token) macro */
#define CSL_FMKT(PER_REG_FIELD, TOKEN) \
CSL_FMK(PER_REG_FIELD, CSL_##PER_REG_FIELD##_##TOKEN)
/* the Field INSert (Token) macro */
#define CSL_FINST(reg, PER_REG_FIELD, TOKEN) \
CSL_FINS((reg), PER_REG_FIELD, CSL_##PER_REG_FIELD##_##TOKEN)
/* the "raw" macros */
/* the Field MaKe (Raw) macro */
#define CSL_FMKR(msb, lsb, val) \
(((val) & ((1 << ((msb) - (lsb) + 1)) - 1)) << (lsb))
/* the Field EXTract (Raw) macro */
#define CSL_FEXTR(reg, msb, lsb) \
(((reg) >> (lsb)) & ((1 << ((msb) - (lsb) + 1)) - 1))
/* the Field INSert (Raw) macro */
#define CSL_FINSR(reg, msb, lsb, val) \
((reg) = ((reg) &~ (((1 << ((msb) - (lsb) + 1)) - 1) << (lsb))) \
| CSL_FMKR(msb, lsb, val))
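/*
 * Example: with CSL_EMIF16_NANDFCTL_CE1NAND_MASK (0x2) and _SHIFT (1) defined
 * above, a call such as
 *
 *     CSL_FINS(hEmif16Cfg->NANDFCTL, EMIF16_NANDFCTL_CE1NAND, 1);
 *
 * expands to a read-modify-write of NANDFCTL: the field is first cleared with
 * ~_MASK and then (1 << _SHIFT) & _MASK is ORed back in, leaving all other
 * register bits untouched.
 */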
/** @brief
*
* Possible PSC Module states
*/
typedef enum {
/** Module is in Reset state. Clock is off. */
PSC_MODSTATE_SWRSTDISABLE = 0,
/** Module is in Sync Reset state. */
PSC_MODSTATE_SYNCRST = 1,
/** Module is in disable state. */
PSC_MODSTATE_DISABLE = 2,
/** Module is in enable state. */
PSC_MODSTATE_ENABLE = 3,
/** Module is in Auto sleep state */
PSC_MODSTATE_AUTOSLP = 4,
/** Module is in Auto wake state */
PSC_MODSTATE_AUTOWK = 5
} CSL_PSC_MODSTATE;
#define CSL_IDEF_INLINE static inline
int platform_delay(uint32_t usecs)
{
uint32_t i;
/* Crude busy-wait: one nop per count, so the real delay depends on the CPU clock rather than actual microseconds. */
for (i = 0; i < usecs; i++)
asm(" nop ");
return 0;
}
static inline unsigned int davinci_nand_readl(struct uni_nand_info *info,
int offset)
{
/* Start 4-bit ECC HW calculation for read */
CSL_FINS(hEmif16Cfg->NANDFCTL, EMIF16_NANDFCTL_4BIT_ECC_ST , 1);
platform_delay (10);
return __raw_readl(info->base + offset);
}
static inline void davinci_nand_writel(struct uni_nand_info *info,
int offset, unsigned long value)
{
/* Start 4-bit ECC HW calculation for write */
CSL_FINS(hEmif16Cfg->NANDFCTL, EMIF16_NANDFCTL_4BIT_ECC_ST , 1);
platform_delay (10);
__raw_writel(value, info->base + offset);
}
/*----------------------------------------------------------------------*/
/*
* Access to hardware control lines: ALE, CLE, secondary chipselect.
*/
static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
unsigned int ctrl)
{
struct uni_nand_info *info = to_davinci_nand(mtd);
uint32_t addr = info->current_cs;
struct nand_chip *nand = mtd->priv;
/*EST_Log(LOG_INFO, NULL, "nand_davinci_hwcontrol: cmd=%X ctrl=%X\n", cmd, ctrl);*/
/* Did the control lines change? */
if (ctrl & NAND_CTRL_CHANGE) {
if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
addr |= info->mask_cle;
else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
addr |= info->mask_ale;
nand->IO_ADDR_W = (void __iomem __force *)addr;
}
if (cmd != NAND_CMD_NONE)
iowrite16(cmd, nand->IO_ADDR_W);
}
static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
struct uni_nand_info *info = to_davinci_nand(mtd);
uint32_t addr = info->ioaddr;
/* maybe kick in a second chipselect */
if (chip > 0)
addr |= info->mask_chipsel;
info->current_cs = addr;
info->chip.IO_ADDR_W = (void __iomem __force *)addr;
info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}
/*----------------------------------------------------------------------*/
/*
* 1-bit hardware ECC ... context maintained for each core chipselect
*/
static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
struct uni_nand_info *info = to_davinci_nand(mtd);
return davinci_nand_readl(info, NANDF1ECC_OFFSET
+ 4 * info->core_chipsel);
}
static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
struct uni_nand_info *info;
uint32_t nandcfr;
unsigned long flags;
info = to_davinci_nand(mtd);
/* Reset ECC hardware */
nand_davinci_readecc_1bit(mtd);
spin_lock_irqsave(&uni_nand_lock, flags);
/* Restart ECC hardware */
nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
nandcfr |= BIT(8 + info->core_chipsel);
davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
spin_unlock_irqrestore(&uni_nand_lock, flags);
}
/*
* Read hardware ECC value and pack into three bytes
*/
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_code)
{
unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
/* invert so that erased block ecc is correct */
ecc24 = ~ecc24;
ecc_code[0] = (u_char)(ecc24);
ecc_code[1] = (u_char)(ecc24 >> 8);
ecc_code[2] = (u_char)(ecc24 >> 16);
return 0;
}
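/*
 * In the correctable case below, the upper and lower 12 bits of 'diff' are
 * bitwise complements of each other; the upper half encodes the failing
 * position: bits [14:12] give the bit number and bits [23:15] the byte
 * offset within the 512-byte ECC chunk.
 */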
static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc)
{
struct nand_chip *chip = mtd->priv;
uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
(read_ecc[2] << 16);
uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
(calc_ecc[2] << 16);
uint32_t diff = eccCalc ^ eccNand;
if (diff) {
if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
/* Correctable error */
if ((diff >> (12 + 3)) < chip->ecc.size) {
dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
return 1;
} else {
return -1;
}
} else if (!(diff & (diff - 1))) {
/* Single bit ECC error in the ECC itself,
* nothing to fix */
return 1;
} else {
/* Uncorrectable error */
return -1;
}
}
return 0;
}
/*----------------------------------------------------------------------*/
/*
* 4-bit hardware ECC ... context maintained over entire AEMIF
*
* This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
* since that forces use of a problematic "infix OOB" layout.
* Among other things, it trashes manufacturer bad block markers.
* Also, and specific to this hardware, it ECC-protects the "prepad"
* in the OOB ... while having ECC protection for parts of OOB would
* seem useful, the current MTD stack sometimes wants to update the
* OOB without recomputing ECC.
*/
static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
struct uni_nand_info *info = to_davinci_nand(mtd);
unsigned long flags;
u32 val;
spin_lock_irqsave(&uni_nand_lock, flags);
/* Start 4-bit ECC calculation for read/write */
val = davinci_nand_readl(info, NANDFCR_OFFSET);
val &= ~(0x03 << 4);
val |= (info->core_chipsel << 4) | BIT(12);
davinci_nand_writel(info, NANDFCR_OFFSET, val);
info->is_readmode = (mode == NAND_ECC_READ);
spin_unlock_irqrestore(&uni_nand_lock, flags);
}
/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct uni_nand_info *info, u32 code[4])
{
const u32 mask = 0x03ff03ff;
code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}
/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_code)
{
struct uni_nand_info *info = to_davinci_nand(mtd);
u32 raw_ecc[4], *p;
unsigned i;
/* After a read, terminate ECC calculation by a dummy read
* of some 4-bit ECC register. ECC covers everything that
* was read; correct() just uses the hardware state, so
* ecc_code is not needed.
*/
if (info->is_readmode) {
davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
return 0;
}
/* Pack eight raw 10-bit ecc values into ten bytes, making
* two passes which each convert four values (in upper and
* lower halves of two 32-bit words) into five bytes. The
* ROM boot loader uses this same packing scheme.
*/
nand_davinci_readecc_4bit(info, raw_ecc);
for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
*ecc_code++ = p[0] & 0xff;
*ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
*ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
*ecc_code++ = (p[1] >> 18) & 0xff;
}
return 0;
}
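/*
 * Resulting byte layout, where a..d are the four 10-bit ECC values packed in
 * the low halves of p[0] and p[1]:
 *
 *     byte0 = a[7:0]
 *     byte1 = a[9:8] | b[5:0] << 2
 *     byte2 = b[9:6] | c[3:0] << 4
 *     byte3 = c[9:4] | d[1:0] << 6
 *     byte4 = d[9:2]
 *
 * nand_davinci_correct_4bit() below performs the inverse unpacking into
 * ecc10[] before loading the expected codes back into the hardware.
 */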
/* Correct up to 4 bits in data we just read, using state left in the
* hardware plus the ecc_code computed when it was first written.
*/
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
u_char *data, u_char *ecc_code, u_char *null)
{
int i;
struct uni_nand_info *info = to_davinci_nand(mtd);
unsigned short ecc10[8];
unsigned short *ecc16;
u32 syndrome[4];
unsigned num_errors, corrected;
/* All bytes 0xff? It's an erased page; ignore its ECC. */
for (i = 0; i < 10; i++) {
if (ecc_code[i] != 0xff)
goto compare;
}
return 0;
compare:
/* Unpack ten bytes into eight 10 bit values. We know we're
* little-endian, and use type punning for less shifting/masking.
*/
if (WARN_ON(0x01 & (unsigned) ecc_code))
return -EINVAL;
ecc16 = (unsigned short *)ecc_code;
ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
/* Tell ECC controller about the expected ECC codes. */
for (i = 7; i >= 0; i--)
davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
/* Allow time for syndrome calculation ... then read it.
* A syndrome of all zeroes means no detected errors.
*/
davinci_nand_readl(info, NANDFSR_OFFSET);
nand_davinci_readecc_4bit(info, syndrome);
if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
return 0;
/*
* Clear any previous address calculation by doing a dummy read of an
* error address register.
*/
davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
/* Start address calculation, and wait for it to complete.
* We _could_ start reading more data while this is working,
* to speed up the overall page read.
*/
davinci_nand_writel(info, NANDFCR_OFFSET,
davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
for (;;) {
u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
switch ((fsr >> 8) & 0x0f) {
case 0: /* no error, should not happen */
davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
return 0;
case 1: /* five or more errors detected */
davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
return -EIO;
case 2: /* error addresses computed */
case 3:
num_errors = 1 + ((fsr >> 16) & 0x03);
goto correct;
default: /* still working on it */
cpu_relax();
continue;
}
}
correct:
/* correct each error */
for (i = 0, corrected = 0; i < num_errors; i++) {
int error_address, error_value;
if (i > 1) {
error_address = davinci_nand_readl(info,
NAND_ERR_ADD2_OFFSET);
error_value = davinci_nand_readl(info,
NAND_ERR_ERRVAL2_OFFSET);
} else {
error_address = davinci_nand_readl(info,
NAND_ERR_ADD1_OFFSET);
error_value = davinci_nand_readl(info,
NAND_ERR_ERRVAL1_OFFSET);
}
if (i & 1) {
error_address >>= 16;
error_value >>= 16;
}
error_address &= 0x3ff;
error_address = (512 + 7) - error_address;
if (error_address < 512) {
data[error_address] ^= error_value;
corrected++;
}
}
return corrected;
}
/*----------------------------------------------------------------------*/
/*
* NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
* how these chips are normally wired. This translates to both 8 and 16
* bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
*
* For now we assume that configuration, or any other one which ignores
* the two LSBs for NAND access ... so we can issue 32-bit reads/writes
* and have that transparently morphed into multiple NAND operations.
*/
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct nand_chip *chip = mtd->priv;
/*EST_Log(LOG_INFO, NULL, "nand_davinci_read_buf: buf=%X len=%X\n", buf, len);*/
if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
else
ioread8_rep(chip->IO_ADDR_R, buf, len);
}
static void nand_davinci_write_buf(struct mtd_info *mtd,
const uint8_t *buf, int len)
{
struct nand_chip *chip = mtd->priv;
/*EST_Log(LOG_INFO, NULL, "nand_davinci_write_buf: buf=%X len=%X\n", buf, len);*/
if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
else
iowrite8_rep(chip->IO_ADDR_R, buf, len);
}
/*
* Check hardware register for wait status. Returns 1 if device is ready,
* 0 if it is still busy.
*/
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
struct uni_nand_info *info = to_davinci_nand(mtd);
int ret;
ret = davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
/*EST_Log(LOG_INFO, NULL, "nand_davinci_dev_ready: ret=%X\n", ret);*/
return ret;
}
static void __init nand_dm6446evm_flash_init(struct uni_nand_info *info)
{
uint32_t regval, a1cr;
/*
* NAND FLASH timings @ PLL1 == 459 MHz
* - AEMIF.CLK freq = PLL1/6 = 459/6 = 76.5 MHz
* - AEMIF.CLK period = 1/76.5 MHz = 13.1 ns
*/
regval = 0
| (0 << 31) /* selectStrobe */
| (0 << 30) /* extWait (never with NAND) */
| (1 << 26) /* writeSetup 10 ns */
| (3 << 20) /* writeStrobe 40 ns */
| (1 << 17) /* writeHold 10 ns */
| (0 << 13) /* readSetup 10 ns */
| (3 << 7) /* readStrobe 60 ns */
| (0 << 4) /* readHold 10 ns */
| (3 << 2) /* turnAround ?? ns */
| (0 << 0) /* asyncSize 8-bit bus */
;
a1cr = davinci_nand_readl(info, A1CR_OFFSET);
if (a1cr != regval) {
dev_dbg(info->dev, "Warning: NAND config: Set A1CR " \
"reg to 0x%08x, was 0x%08x, should be done by " \
"bootloader.\n", regval, a1cr);
davinci_nand_writel(info, A1CR_OFFSET, regval);
}
}
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
* ten ECC bytes plus the manufacturer's bad block marker byte, and
* not overlapping the default BBT markers.
*/
static struct nand_ecclayout hwecc4_small __initconst = {
.eccbytes = 10,
.eccpos = { 0, 1, 2, 3, 4,
/* offset 5 holds the badblock marker */
6, 7,
13, 14, 15, },
.oobfree = {
{.offset = 8, .length = 5, },
{.offset = 16, },
},
};
/* An ECC layout for using 4-bit ECC with large-page (2048-byte) flash,
* storing ten ECC bytes plus the manufacturer's bad block marker byte,
* and not overlapping the default BBT markers.
*/
static struct nand_ecclayout hwecc4_2048 __initconst = {
.eccbytes = 40,
.eccpos = {
/* at the end of spare sector */
24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
},
.oobfree = {
/* 2 bytes at offset 0 hold manufacturer badblock markers */
{.offset = 2, .length = 22, },
/* 5 bytes at offset 8 hold BBT markers */
/* 8 bytes at offset 16 hold JFFS2 clean markers */
},
};
static u16 nand_uni_read_word(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
u16 val;
val = readw(chip->IO_ADDR_R);
/*EST_Log(LOG_INFO, NULL, "nand_uni_read_word: IO_ADDR_R=%X val=%X\n", chip->IO_ADDR_R, val);*/
return val;
}
static int __init nand_uni_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
struct uni_nand_info *info;
struct resource *res1;
struct resource *res2;
void __iomem *vaddr;
void __iomem *base;
int ret;
uint32_t val;
nand_ecc_modes_t ecc_mode;
EST_Log(LOG_INFO, NULL, "+++platform_driver_probe \n");
/* insist on board-specific configuration */
if (!pdata) {
EST_Log(LOG_ERR, NULL, "nand_uni_probe:err pdata=%X\n", pdata);
return -ENODEV;
}
/* which external chipselect will we be managing? */
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "unable to allocate memory\n");
ret = -ENOMEM;
goto err_nomem;
}
platform_set_drvdata(pdev, info);
res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res1 || !res2) {
dev_err(&pdev->dev, "resource missing\n");
ret = -EINVAL;
goto err_nomem;
}
vaddr = ioremap(res1->start, res1->end - res1->start);
base = ioremap(res2->start, res2->end - res2->start);
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -EINVAL;
goto err_ioremap;
}
info->dev = &pdev->dev;
info->base = base;
info->vaddr = vaddr;
info->mtd.priv = &info->chip;
info->mtd.name = dev_name(&pdev->dev);
info->mtd.owner = THIS_MODULE;
info->mtd.dev.parent = &pdev->dev;
info->chip.IO_ADDR_R = vaddr;
info->chip.IO_ADDR_W = vaddr;
info->chip.chip_delay = 0;
info->chip.select_chip = nand_davinci_select_chip;
info->chip.read_word = nand_uni_read_word;
info->chip.read_byte = nand_uni_read_word;
EST_Log(LOG_INFO, NULL, "IO_ADDR_R=%X IO_ADDR_W=%X mask_chipsel=%d\n",
info->chip.IO_ADDR_R, info->chip.IO_ADDR_W, pdata->mask_chipsel);
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
info->ioaddr = (uint32_t __force) vaddr;
info->current_cs = info->ioaddr;
info->core_chipsel = pdev->id;
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
info->mask_ale = pdata->mask_ale ? : MASK_ALE;
info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
info->chip.cmd_ctrl = nand_davinci_hwcontrol;
info->chip.dev_ready = nand_davinci_dev_ready;
/* Speed up buffer I/O */
info->chip.read_buf = nand_davinci_read_buf;
info->chip.write_buf = nand_davinci_write_buf;
/* Use board-specific ECC config */
ecc_mode = pdata->ecc_mode;
ret = -EINVAL;
switch (ecc_mode) {
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
pdata->ecc_bits = 0;
break;
case NAND_ECC_HW:
printk("++++++++++++++++NAND_ECC_HW=%d\n",NAND_ECC_HW);
printk("++++++++++++++++pdata->ecc_bits=%d\n",pdata->ecc_bits);
if (pdata->ecc_bits == 4) {
/* No sanity checks: CPUs must support this,
* and the chips may not use NAND_BUSWIDTH_16.
*/
/* No sharing 4-bit hardware between chipselects yet */
spin_lock_irq(&uni_nand_lock);
if (ecc4_busy)
ret = -EBUSY;
else
ecc4_busy = true;
spin_unlock_irq(&uni_nand_lock);
if (ret == -EBUSY)
goto err_ecc;
info->chip.ecc.calculate = nand_davinci_calculate_4bit;
info->chip.ecc.correct = nand_davinci_correct_4bit;
info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
info->chip.ecc.bytes = 10;
} else {
info->chip.ecc.calculate = nand_davinci_calculate_1bit;
info->chip.ecc.correct = nand_davinci_correct_1bit;
info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
info->chip.ecc.bytes = 3;
}
info->chip.ecc.size = 512;
break;
default:
ret = -EINVAL;
goto err_ecc;
}
info->chip.ecc.mode = ecc_mode;
info->clk = clk_get(&pdev->dev, "aemif");
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
goto err_clk;
}
ret = clk_enable(info->clk);
if (ret < 0) {
dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
ret);
goto err_clk_enable;
}
/* EMIF timings should normally be set by the boot loader,
* especially after boot-from-NAND. The *only* reason to
* have this special casing for the DM6446 EVM is to work
* with boot-from-NOR ... with CS0 manually re-jumpered
* (after startup) so it addresses the NAND flash, not NOR.
* Even for dev boards, that's unusually rude...
*/
if (machine_is_davinci_evm())
nand_dm6446evm_flash_init(info);
spin_lock_irq(&uni_nand_lock);
/* put CSxNAND into NAND mode */
val = davinci_nand_readl(info, NANDFCR_OFFSET);
val |= BIT(info->core_chipsel);
if ((ecc_mode == NAND_ECC_HW) && (pdata->ecc_bits == 4)) {
val &= ~(0x03 << 4);
val |= (info->core_chipsel << 4) | BIT(12);
}
davinci_nand_writel(info, NANDFCR_OFFSET, val);
spin_unlock_irq(&uni_nand_lock);
EST_Log(LOG_INFO, NULL, "NANDFCR_OFFSET=%X val=%X\n", NANDFCR_OFFSET, val);
/* Scan to find existence of the device(s) */
ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
}
/* Update ECC layout if needed ... for 1-bit HW ECC, the default
* is OK, but it allocates 6 bytes when only 3 are needed (for
* each 512 bytes). For the 4-bit HW ECC, that default is not
* usable: 10 bytes are needed, not 6.
*/
if (pdata->ecc_bits == 4) {
int chunks = info->mtd.writesize / 512;
if (!chunks || info->mtd.oobsize < 16) {
dev_dbg(&pdev->dev, "too small\n");
ret = -EINVAL;
goto err_scan;
}
/* For small page chips, preserve the manufacturer's
* badblock marking data ... and make sure a flash BBT
* table marker fits in the free bytes.
*/
if (chunks == 1) {
info->ecclayout = hwecc4_small;
info->ecclayout.oobfree[1].length =
info->mtd.oobsize - 16;
goto syndrome_done;
}
if (chunks == 4) {
info->ecclayout = hwecc4_2048;
info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
goto syndrome_done;
}
/* 4KiB page chips are not yet supported. The eccpos from
* nand_ecclayout cannot hold 80 bytes and change to eccpos[]
* breaks userspace ioctl interface with mtd-utils. Once we
* resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
* for the 4KiB page chips.
*/
dev_warn(&pdev->dev, "no 4-bit ECC support yet "
"for 4KiB-page NAND\n");
ret = -EIO;
goto err_scan;
syndrome_done:
info->chip.ecc.layout = &info->ecclayout;
}
ret = nand_scan_tail(&info->mtd);
if (ret < 0)
goto err_scan;
if (mtd_has_partitions()) {
struct mtd_partition *mtd_parts = NULL;
int mtd_parts_nb = 0;
if (mtd_has_cmdlinepart()) {
static const char *probes[] __initconst =
{ "cmdlinepart", NULL };
mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
&mtd_parts, 0);
}
if (mtd_parts_nb <= 0) {
mtd_parts = pdata->parts;
mtd_parts_nb = pdata->nr_parts;
}
/* Register any partitions */
if (mtd_parts_nb > 0) {
ret = add_mtd_partitions(&info->mtd,
mtd_parts, mtd_parts_nb);
if (ret == 0)
info->partitioned = true;
}
} else if (pdata->nr_parts) {
dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, info->mtd.name);
}
/* If there's no partition info, just package the whole chip
* as a single MTD device.
*/
if (!info->partitioned)
ret = add_mtd_device(&info->mtd) ? -ENODEV : 0;
if (ret < 0)
goto err_scan;
val = davinci_nand_readl(info, NRCSR_OFFSET);
dev_info(&pdev->dev, "controller rev. %d.%d\n",
(val >> 8) & 0xff, val & 0xff);
return 0;
err_scan:
clk_disable(info->clk);
err_clk_enable:
clk_put(info->clk);
spin_lock_irq(&uni_nand_lock);
if (ecc_mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
spin_unlock_irq(&uni_nand_lock);
err_ecc:
err_clk:
err_ioremap:
if (base)
iounmap(base);
if (vaddr)
iounmap(vaddr);
err_nomem:
kfree(info);
EST_Log(LOG_INFO, NULL, "---platform_driver_probe ret=%d\n", ret);
return ret;
}
static int __exit nand_uni_remove(struct platform_device *pdev)
{
struct uni_nand_info *info = platform_get_drvdata(pdev);
int status;
if (mtd_has_partitions() && info->partitioned)
status = del_mtd_partitions(&info->mtd);
else
status = del_mtd_device(&info->mtd);
spin_lock_irq(&uni_nand_lock);
if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
spin_unlock_irq(&uni_nand_lock);
iounmap(info->base);
iounmap(info->vaddr);
nand_release(&info->mtd);
clk_disable(info->clk);
clk_put(info->clk);
kfree(info);
return 0;
}
static struct mtd_partition evm6678_uni_parts[] = {
{
.name = "nand_disk",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0,
}
};
static struct davinci_nand_pdata evmc6678_uni_data = {
.mask_cle = 0x4000,
.mask_ale = 0x2000,
.parts = evm6678_uni_parts,
.nr_parts = ARRAY_SIZE(evm6678_uni_parts),
.ecc_mode = NAND_ECC_HW,
.ecc_bits = 4,
.options = NAND_BUSWIDTH_16,
};
static struct resource evmc6678_uni_resources[] = {
{
.start = 0x74000000,
.end = 0x74000000 + 0x3FFFFFF,
.flags = IORESOURCE_MEM,
},
{
.start = 0x20C00000,
.end = 0x20C00000 + 0xFF,
.flags = IORESOURCE_MEM,
},
};
static void uni_device_release(struct device *dev)
{
}
static struct platform_device evmc6678_uni_device = {
.name = "uni_nand",
.id = 1,
.num_resources = ARRAY_SIZE(evmc6678_uni_resources),
.resource = evmc6678_uni_resources,
.dev = {
.platform_data = &evmc6678_uni_data,
.release = uni_device_release,
},
};
CSL_IDEF_INLINE void CSL_PSC_enablePowerDomain (
Uint32 pwrDmnNum
)
{
CSL_FINST (hPscRegs->PDCTL[pwrDmnNum], PSC_PDCTL_NEXT, ON);
return;
}
CSL_IDEF_INLINE void CSL_PSC_setModuleNextState (
Uint32 moduleNum,
CSL_PSC_MODSTATE state
)
{
CSL_FINS (hPscRegs->MDCTL[moduleNum], PSC_MDCTL_NEXT, state);
return;
}
CSL_IDEF_INLINE void CSL_PSC_startStateTransition (
Uint32 pwrDmnNum
)
{
hPscRegs->PTCMD = (1 << pwrDmnNum);
return;
}
CSL_IDEF_INLINE Uint32 CSL_PSC_isStateTransitionDone (
Uint32 pwrDmnNum
)
{
Uint32 pdTransStatus;
pdTransStatus = CSL_FEXTR (hPscRegs->PTSTAT, pwrDmnNum, pwrDmnNum);
if (pdTransStatus)
{
/* Power domain transition is in progress. Return 0 to indicate not yet done. */
return 0;
}
else
{
/* Power domain transition is done. */
return 1;
}
}
CSL_IDEF_INLINE CSL_PSC_MODSTATE CSL_PSC_getModuleState (
Uint32 moduleNum
)
{
return (CSL_PSC_MODSTATE) CSL_FEXT(hPscRegs->MDSTAT[moduleNum], PSC_MDSTAT_STATE);
}
/******************************************************************************
*
* Function: NandConfig
*
* Description: This function is used to configure the NAND device
*
* Parameters: None
*
* Return Value: Err Status
*
*****************************************************************************/
static uint32_t NandConfig (void)
{
uint32_t loop_cnt = 0;
uint32_t power_domain_num = 0;
uint32_t mdctl_emif16_module_num = 3;
uint32_t mdstat_emif16_module_num = 3;
CSL_PSC_MODSTATE mdstat;
/* Wake up EMIF16 module:
mdstat = CSL_PSC_getModuleState(mdstat_emif16_module_num); */
{
/* program pdctl and mdctl to enable the module. */
CSL_PSC_enablePowerDomain(power_domain_num);
CSL_PSC_setModuleNextState (mdctl_emif16_module_num, PSC_MODSTATE_ENABLE);
/* Start the transition and wait for it to complete, timing out after 1000 loops. */
CSL_PSC_startStateTransition(power_domain_num);
while ((CSL_PSC_isStateTransitionDone(power_domain_num) == 0) && (loop_cnt < 1000)) {
loop_cnt++;
}
mdstat = CSL_PSC_getModuleState(mdstat_emif16_module_num);
/* report result. */
if (mdstat != PSC_MODSTATE_ENABLE) {
//platform_errno = PLATFORM_ERRNO_PSCMOD_ENABLE;
EST_Log(LOG_ERR, NULL, "ERR: mdstat=%d", mdstat);
return 1; /* Could not enable the PSC Module */
}
}
#ifdef EST_PLATFORM
//EMIF16 is clocked at CPU/6 frequency
/* Config nand FCR reg. 16 bit NAND, no HW ECC */
#if 0
hEmif16Cfg->A1CR = (0 \
| (0 << 31) /* selectStrobe */ \
| (0 << 30) /* extWait (never with NAND) */ \
| (1 << 26) /* writeSetup 10 ns */ \
| (4 << 20) /* writeStrobe 40 ns */ \
| (1 << 17) /* writeHold 10 ns */ \
| (1 << 13) /* readSetup 10 ns */ \
| (4 << 7) /* readStrobe 60 ns */ \
| (1 << 4) /* readHold 10 ns */ \
| (2 << 2) /* turnAround 40 ns */ \
| (1 << 0)); /* asyncSize 16-bit bus */
#else
hEmif16Cfg->A1CR = (0 \
| (0 << 31) /* selectStrobe */ \
| (0 << 30) /* extWait (never with NAND) */ \
| (2 << 26) /* writeSetup 10 ns */ \
| (8 << 20) /* writeStrobe 40 ns */ \
| (2 << 17) /* writeHold 10 ns */ \
| (2 << 13) /* readSetup 10 ns */ \
| (8 << 7) /* readStrobe 60 ns */ \
| (2 << 4) /* readHold 10 ns */ \
| (4 << 2) /* turnAround 40 ns (3 << 2) */ \
| (1 << 0)); /* asyncSize 16-bit bus */
#endif
hEmif16Cfg->A3CR = (0 \
| (1 << 31) /* selectStrobe */ \
| (0 << 30) /* extWait (never with NAND) */ \
| (0xf << 26) /* writeSetup 10 ns */ \
| (0x3f << 20) /* writeStrobe 40 ns */ \
| (7 << 17) /* writeHold 10 ns */ \
| (0xf << 13) /* readSetup 10 ns */ \
| (0x3f << 7) /* readStrobe 60 ns */ \
| (7 << 4) /* readHold 10 ns */ \
| (3 << 2) /* turnAround 40 ns */ \
| (1 << 0)); /* asyncSize 16-bit bus */
CSL_FINS(hEmif16Cfg->NANDFCTL, EMIF16_NANDFCTL_CE1NAND, CSL_EMIF16_NANDFCTL_CE1NAND_ENABLE); /* enable NAND mode on CE1 */
CSL_FINS(hEmif16Cfg->NANDFCTL, EMIF16_NANDFCTL_4BIT_ECC_SEL , CSL_EMIF16_NANDFCTL_4BIT_ECC_SEL_RESETVAL);
/* Set the wait polarity */
hEmif16Cfg->AWCCR = (0x80 /* max extended wait cycle */ \
| (0 << 18) /* CS2 uses WAIT0 (wait pin not used here) */ \
| (0 << 28)); /* WAIT0 polarity low */
printk("+++Debug....................................................0\n");
#else
/* Config nand FCR reg. 8 bit NAND, 4-bit HW ECC */
printk("+++Debug....................................................1\n");
hEmif16Cfg->A0CR = (0 \
| (0 << 31) /* selectStrobe */ \
| (0 << 30) /* extWait (never with NAND) */ \
| (0xf << 26) /* writeSetup 10 ns */ \
| (0x3f << 20) /* writeStrobe 40 ns */ \
| (7 << 17) /* writeHold 10 ns */ \
| (0xf << 13) /* readSetup 10 ns */ \
| (0x3f << 7) /* readStrobe 60 ns */ \
| (7 << 4) /* readHold 10 ns */ \
| (3 << 2) /* turnAround 40 ns */ \
| (0 << 0)); /* asyncSize 8-bit bus */
CSL_FINS(hEmif16Cfg->NANDFCTL, EMIF16_NANDFCTL_CE0NAND , CSL_EMIF16_NANDFCTL_CE0NAND_ENABLE);
CSL_FINS(hEmif16Cfg->NANDFCTL, EMIF16_NANDFCTL_4BIT_ECC_SEL , CSL_EMIF16_NANDFCTL_4BIT_ECC_SEL_RESETVAL);
/* Set the wait polarity */
hEmif16Cfg->AWCCR = (0x80 /* max extended wait cycle */ \
| (0 << 18) /* CS2 uses WAIT0 (0 << 16)*/ \
| (0 << 28)); /* WAIT0 polarity low */
#endif
/*
Wait Rise.
Set to 1 by hardware to indicate rising edge on the
corresponding WAIT pin has been detected.
The WP0-3 bits in the Async Wait Cycle Config register have
no effect on these bits.
*/
/*
Asynchronous Timeout.
Set to 1 by hardware to indicate that during an extended
asynchronous memory access cycle, the WAIT signal did not
go inactive within the number of cycles defined by the
MAX_EXT_WAIT field in Async Wait Cycle Config register.
*/
hEmif16Cfg->IRR = (1 /* clear async timeout */ \
| (1 << 2)); /* clear wait rise */
EST_Log(LOG_INFO, NULL, "loop_cnt=%d, PDCTL=0x%X MDCTL=0x%X PTCMD=0x%X, PTSTAT=0x%X\n", loop_cnt, \
hPscRegs->PDCTL[power_domain_num], hPscRegs->MDCTL[mdctl_emif16_module_num], \
hPscRegs->PTCMD, hPscRegs->PTSTAT);
EST_Log(LOG_INFO, NULL, "MDSTAT=0x%X NANDFCTL=0x%X AWCCR=0x%X, IRR=0x%X\n", \
hPscRegs->MDSTAT[mdstat_emif16_module_num], hEmif16Cfg->NANDFCTL, \
hEmif16Cfg->AWCCR, hEmif16Cfg->IRR);
EST_Log(LOG_INFO, NULL, "20251009 a0cr=0x%X a1cr=0x%X a2cr=0x%X, a3cr=0x%X\n", \
hEmif16Cfg->A0CR, hEmif16Cfg->A1CR, hEmif16Cfg->A2CR, hEmif16Cfg->A3CR);
return 0;
}
static void NandCmdSet(uint32_t cmd)
{
volatile uint16_t *cle_addr = (volatile uint16_t *) NAND_CMD_ADDR;
*cle_addr = cmd;
}
static uint32_t NandWaitRdy(uint32_t in_timeout)
{
uint32_t count = 0;
do {
platform_delay(1);
if ((CSL_FEXT(hEmif16Cfg->NANDFSR, EMIF16_NANDFSR_WAITSTAT) & 1) == 1) {
break;
}
count ++;
} while (count < in_timeout);
if (count >= in_timeout)
return FAIL;
else
return SUCCESS;
}
static inline void NandReadDataWord(uint16_t* puchValue)
{
/* 16-bit NAND: read one data word from the data latch */
volatile uint16_t *data_addr = (volatile uint16_t *) NAND_DATA_ADDR;
*puchValue = *data_addr;
}
typedef enum {
PLATFORM_DEVICE_NAND,
/**<NAND Flash*/
PLATFORM_DEVICE_NOR,
/**<NOR Flash*/
PLATFORM_DEVICE_EEPROM,
/**<NOR Flash*/
PLATFORM_DEVICE_MAX
/**<End of devices*/
} PLATFORM_DEVICE_TYPE;
typedef uint32_t PLATFORM_DEVHANDLE;
typedef struct {
int32_t manufacturer_id; /**<manufacturer ID*/
int32_t device_id; /**<Manufacturers device ID*/
PLATFORM_DEVICE_TYPE type; /**<Type of device */
int32_t width; /**<Width in bits*/
int32_t block_count; /**<Total blocks. First block starts at 0. */
int32_t page_count; /**<Page count per block*/
int32_t page_size; /**<Number of bytes in a page including spare area*/
int32_t spare_size; /**<Spare area size in bytes*/
PLATFORM_DEVHANDLE handle; /**<Handle to the block device as returned by Open. Handle is Opaque, do not interpret or modify */
int32_t bboffset; /**<Offset into spare area to check for a bad block */
uint32_t column; /**<Column for a NAND device */
uint32_t flags; /**<Flags is a copy of the flags that were used to open the device */
void *internal; /**<Do not use. Used internally by the platform library */
uint8_t *bblist; /** <Bad Block list or NULL if device does not support one */
} PLATFORM_DEVICE_info;
#define PACK_ADDR(col, page, block) \
((col & 0x000003ff) | ((page & 0x0000003f)<<10) | ((block & 0x000003ff) << 16 ))
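/*
 * PACK_ADDR field layout: bits [9:0] column, [15:10] page, [25:16] block.
 * For example, PACK_ADDR(0, 3, 5) = (3 << 10) | (5 << 16) = 0x00050C00;
 * nandFlashBlockErase() below shifts this packed value to generate the
 * individual address cycles.
 */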
static inline void NandAleSet(uint32_t addr)
{
/* ALE latch write: 16-bit bus when EST_PLATFORM is defined, 8-bit otherwise */
#ifdef EST_PLATFORM
volatile uint16_t *ale_addr = (volatile uint16_t *) NAND_ALE_ADDR;
#else
uint8_t *ale_addr = (uint8_t *) NAND_ALE_ADDR;
#endif
*ale_addr = addr;
return;
}
#define WORDS_PER_PAGE (1024)
#define SPARE_WORDS_PER_PAGE (32)
#define PAGES_PER_BLOCK (64)
#define TOTAL_WORDS_PER_PAGE (WORDS_PER_PAGE + SPARE_WORDS_PER_PAGE)
#define BLOCKS_PER_DEVICE (1024)
#define PLATFORM_DEVID_NANDMT1G16 0x2CB1 /**< NAND Flash */
PLATFORM_DEVICE_info gDeviceNand = {0x2c, 0xb1,PLATFORM_DEVICE_NAND, 16,
BLOCKS_PER_DEVICE,
PAGES_PER_BLOCK,
WORDS_PER_PAGE*2,
SPARE_WORDS_PER_PAGE*2,
PLATFORM_DEVID_NANDMT1G16,
5, 0x200, 0, NULL, NULL};
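/*
 * Resulting geometry for this x16 part (ID 0x2C/0xB1): 2048 data bytes plus
 * 64 spare bytes per page, 64 pages per block and 1024 blocks, i.e. a main
 * data area of 2048 * 64 * 1024 bytes = 128 MiB (1 Gbit).
 */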
uint32_t nandFlashBlockErase(PLATFORM_DEVICE_info *p_device, uint32_t uiBlockNumber)
{
uint32_t addr = 0, ret_val = SUCCESS;
uint16_t status;
NandCmdSet(NAND_BLOCK_ERASE); // Block erase command
platform_delay(25);
/*
* Send the address of the block + page to be erased
* Address cycles = 2, Block shift = 22, Page shift = 16
*/
addr = PACK_ADDR(0x0, 0x0, uiBlockNumber);
/* Properly adjust the shifts to match to the data sheet */
NandAleSet((addr >> 10u) & 0xFF); // A9-A16 2nd Cycle; page addr & blk
platform_delay(25);
NandAleSet((addr >> 18u) & 0xFF); // A25-A26 4th Cycle; Plane addr
platform_delay(1000);
NandCmdSet(NAND_ERASE_CONFIRM); // Erase confirm
platform_delay(EMIF16_WAIT_PIN_POLL_ST_DLY);
/* Wait for erase operation to finish: 2msec */
ret_val = NandWaitRdy(EMIF16_NAND_BLOCK_ERASE_TIMEOUT);
if (ret_val != SUCCESS) {
//platform_errno = PLATFORM_ERRNO_DEV_TIMEOUT;
EST_Log(LOG_ERR, NULL, "%s: 1 err uiBlockNumber=%d", __FUNCTION__, uiBlockNumber);
return FAIL;
}
NandCmdSet(NAND_STATUS);
platform_delay(10);
NandReadDataWord(&status);
if ((status & 0x01) == 1) {
/* if SR0 bit is set to 1, there is Error - operation failed */
//platform_errno = PLATFORM_ERRNO_DEV_FAIL;
EST_Log(LOG_ERR, NULL, "%s: 2 err uiBlockNumber=%d", __FUNCTION__, uiBlockNumber);
return FAIL;
}
return SUCCESS;
}
static int evm_nand_init(void)
{
#if 1
uint16_t status;
int ret;
int i = 0;
ret = NandConfig();
EST_Log(LOG_INFO, NULL, "NandConfig 20250206 ret=%d", ret);
ret = platform_device_register(&evmc6678_uni_device);
EST_Log(LOG_INFO, NULL, "evm_nand_init 20250206: ret=%d", ret);
NandCmdSet(NAND_RST);
platform_delay(10);
if (NandWaitRdy(EMIF16_NAND_RESET_TIMEOUT) != SUCCESS) {
//platform_errno = PLATFORM_ERRNO_DEV_TIMEOUT;
EST_Log(LOG_ERR, NULL, "NandOpenDevice ... Nand wait ready failed. \n");
return FAIL;
}
NandCmdSet(NAND_STATUS);
platform_delay(10);
NandReadDataWord(&status);
if ((status & 0x01) == 1) {
/* if SR0 bit is set to 1, there is Error - operation failed */
//platform_errno = PLATFORM_ERRNO_DEV_FAIL;
EST_Log(LOG_ERR, NULL, "NandOpenDevice ... Nand status error bit was set. \n");
return FAIL;
}
//for(i = 0; i < 1024; i++){
// ret += nandFlashBlockErase(&gDeviceNand, i);
//}
//EST_Log(LOG_INFO, NULL, "nandFlashBlockErase: err block count = %d", ret);
#else
uint32_t regval, a1cr, a1cr2;
int ret;
regval = (0 \
| (0 << 31) /* selectStrobe */ \
| (0 << 30) /* extWait (never with NAND) */ \
| (1 << 26) /* writeSetup 10 ns */ \
| (4 << 20) /* writeStrobe 40 ns */ \
| (1 << 17) /* writeHold 10 ns */ \
| (1 << 13) /* readSetup 10 ns */ \
| (4 << 7) /* readStrobe 60 ns */ \
| (1 << 4) /* readHold 10 ns */ \
| (2 << 2) /* turnAround 40 ns */ \
| (1 << 0)); /* asyncSize 16-bit bus */
a1cr = __raw_readl((void __iomem *)(0x20C00000 + 0x14));
if (a1cr != regval) {
/* __raw_writel() takes the value first, then the address */
__raw_writel(regval, (void __iomem *)(0x20C00000 + 0x14));
}
a1cr2 = __raw_readl((void __iomem *)(0x20C00000 + 0x14));
ret = platform_device_register(&evmc6678_uni_device);
EST_Log(LOG_INFO, NULL, "evm_nand_init: ret=%d A2CR=0x%X, a1cr=0x%X a1cr2=0x%X\n", ret, regval, a1cr, a1cr2);
#endif
/* Start 4-bit ECC HW calculation for write or read */
//CSL_FINS(hEmif16Cfg->NANDFCTL, EMIF16_NANDFCTL_4BIT_ECC_ST , 1);
//platform_delay (10);
return ret;
}
static void evm_nand_exit(void)
{
platform_device_unregister(&evmc6678_uni_device);
}
static struct platform_driver nand_uni_driver = {
.remove = __exit_p(nand_uni_remove),
.driver = {
.name = "uni_nand",
},
};
MODULE_ALIAS("platform:uni_nand");
static int __init nand_uni_init(void)
{
int ret;
printk("++++++++++++++++NAND DRIVER VERALL:uni-nand-ko-20251020-1543\n");
evm_nand_init();
ret = platform_driver_probe(&nand_uni_driver, nand_uni_probe);
if(ret){
EST_Log(LOG_ERR, NULL, "platform_driver_probe fail ret=%d\n", ret);
evm_nand_exit();
}
return ret;
}
module_init(nand_uni_init);
static void __exit nand_uni_exit(void)
{
platform_driver_unregister(&nand_uni_driver);
evm_nand_exit();
}
module_exit(nand_uni_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Zhichun Dai");
MODULE_DESCRIPTION("Uninav Board NAND flash driver");

If I now want to enable software 4-bit ECC, is it enough to change the kernel's board-level configuration to NAND_ECC_SOFT? And in the driver code, under

case NAND_ECC_SOFT:
pdata->ecc_bits = 0;
break;

does pdata->ecc_bits = 0; here need to be changed to pdata->ecc_bits = 4;?