diff --git a/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml new file mode 100644 index 000000000000..db8f115a13ec --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/arasan,nand-controller.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arasan NAND Flash Controller with ONFI 3.1 support device tree bindings + +allOf: + - $ref: "nand-controller.yaml" + +maintainers: + - Naga Sureshkumar Relli + +properties: + compatible: + oneOf: + - items: + - enum: + - xlnx,zynqmp-nand-controller + - enum: + - arasan,nfc-v3p10 + + reg: + maxItems: 1 + + clocks: + items: + - description: Controller clock + - description: NAND bus clock + + clock-names: + items: + - const: controller + - const: bus + + interrupts: + maxItems: 1 + + "#address-cells": true + "#size-cells": true + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + +additionalProperties: true + +examples: + - | + nfc: nand-controller@ff100000 { + compatible = "xlnx,zynqmp-nand-controller", "arasan,nfc-v3p10"; + reg = <0x0 0xff100000 0x0 0x1000>; + clock-names = "controller", "bus"; + clocks = <&clk200>, <&clk100>; + interrupt-parent = <&gic>; + interrupts = <0 14 4>; + #address-cells = <1>; + #size-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt index 05651a654c66..44335a4f8bfb 100644 --- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt +++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt @@ -20,6 +20,8 @@ Required properties: "brcm,brcmnand" and an appropriate version compatibility string, like "brcm,brcmnand-v7.0" Possible values: + brcm,brcmnand-v2.1 + brcm,brcmnand-v2.2 brcm,brcmnand-v4.0 brcm,brcmnand-v5.0 brcm,brcmnand-v6.0 diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt index afbbd870496d..4a39698221a2 100644 --- a/Documentation/devicetree/bindings/mtd/partition.txt +++ b/Documentation/devicetree/bindings/mtd/partition.txt @@ -61,6 +61,9 @@ Optional properties: clobbered. - lock : Do not unlock the partition at initialization time (not supported on all devices) +- slc-mode: This parameter, if present, allows one to emulate SLC mode on a + partition attached to an MLC NAND thus making this partition immune to + paired-pages corruptions Examples: diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst index 55447659b81f..0bf8d6ec3f54 100644 --- a/Documentation/driver-api/mtdnand.rst +++ b/Documentation/driver-api/mtdnand.rst @@ -276,8 +276,10 @@ unregisters the partitions in the MTD layer. 
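For context, the cleanup sequence shown next undoes a registration path like the following minimal sketch. It assumes a hypothetical board driver (board_init(), board_chip and the local board_mtd are illustrative names, not part of this patch) and only keeps the calls that matter here: nand_scan() is what nand_cleanup() reverses, and mtd_device_register() is what mtd_device_unregister() reverses.

static int __init board_init(void)
{
	struct mtd_info *board_mtd = nand_to_mtd(&board_chip);
	int err;

	/* Identify the NAND chip and set up MTD/ECC parameters */
	err = nand_scan(&board_chip, 1);
	if (err)
		return err;

	/* Expose the device (and optionally partitions) to MTD users */
	err = mtd_device_register(board_mtd, NULL, 0);
	if (err)
		nand_cleanup(&board_chip);	/* undo nand_scan() */

	return err;
}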
#ifdef MODULE static void __exit board_cleanup (void) { - /* Release resources, unregister device */ - nand_release (mtd_to_nand(board_mtd)); + /* Unregister device */ + WARN_ON(mtd_device_unregister(board_mtd)); + /* Release resources */ + nand_cleanup(mtd_to_nand(board_mtd)); /* unmap physical address */ iounmap(baseaddr); diff --git a/MAINTAINERS b/MAINTAINERS index 50659d76976b..4e92be394802 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1284,6 +1284,13 @@ S: Supported W: http://www.aquantia.com F: drivers/net/ethernet/aquantia/atlantic/aq_ptp* +ARASAN NAND CONTROLLER DRIVER +M: Naga Sureshkumar Relli +L: linux-mtd@lists.infradead.org +S: Maintained +F: Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml +F: drivers/mtd/nand/raw/arasan-nand-controller.c + ARC FRAMEBUFFER DRIVER M: Jaya Kumar S: Maintained @@ -3741,9 +3748,8 @@ F: Documentation/devicetree/bindings/media/cdns,*.txt F: drivers/media/platform/cadence/cdns-csi2* CADENCE NAND DRIVER -M: Piotr Sroka L: linux-mtd@lists.infradead.org -S: Maintained +S: Orphan F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt F: drivers/mtd/nand/raw/cadence-nand-controller.c @@ -10727,9 +10733,8 @@ F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt F: drivers/i2c/busses/i2c-mt7621.c MEDIATEK NAND CONTROLLER DRIVER -M: Xiaolei Li L: linux-mtd@lists.infradead.org -S: Maintained +S: Orphan F: Documentation/devicetree/bindings/mtd/mtk-nand.txt F: drivers/mtd/nand/raw/mtk_* diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index eb0f4600efd1..a030792115bc 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -647,7 +647,7 @@ static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc) for (i = 0; i < DOC_ECC_BCH_SIZE; i++) ecc[i] = bitrev8(hwecc[i]); - numerrs = decode_bch(docg3->cascade->bch, NULL, + numerrs = bch_decode(docg3->cascade->bch, NULL, DOC_ECC_BCH_COVERED_BYTES, NULL, ecc, NULL, errorpos); BUG_ON(numerrs == -EINVAL); @@ -1984,8 +1984,8 @@ static int __init docg3_probe(struct platform_device *pdev) return ret; cascade->base = base; mutex_init(&cascade->lock); - cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, - DOC_ECC_BCH_PRIMPOLY); + cascade->bch = bch_init(DOC_ECC_BCH_M, DOC_ECC_BCH_T, + DOC_ECC_BCH_PRIMPOLY, false); if (!cascade->bch) return ret; @@ -2021,7 +2021,7 @@ static int __init docg3_probe(struct platform_device *pdev) ret = -ENODEV; dev_info(dev, "No supported DiskOnChip found\n"); err_probe: - free_bch(cascade->bch); + bch_free(cascade->bch); for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) if (cascade->floors[floor]) doc_release_device(cascade->floors[floor]); @@ -2045,7 +2045,7 @@ static int docg3_release(struct platform_device *pdev) if (cascade->floors[floor]) doc_release_device(cascade->floors[floor]); - free_bch(docg3->cascade->bch); + bch_free(docg3->cascade->bch); return 0; } diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 29d41003d6e0..b4db3ffb8500 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -617,6 +617,19 @@ int add_mtd_device(struct mtd_info *mtd) !(mtd->flags & MTD_NO_ERASE))) return -EINVAL; + /* + * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the + * master is an MLC NAND and has a proper pairing scheme defined. + * We also reject masters that implement ->_writev() for now, because + * NAND controller drivers don't implement this hook, and adding the + * SLC -> MLC address/length conversion to this path is useless if we + * don't have a user. 
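+ *
+ * When the emulation is accepted, the partition's eraseblock size is
+ * divided by the number of pairing groups (typically two on MLC) and
+ * the partition size is shrunk accordingly, as done further down in
+ * this function.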
+ */ + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION && + (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH || + !master->pairing || master->_writev)) + return -EINVAL; + mutex_lock(&mtd_table_mutex); i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); @@ -632,6 +645,14 @@ int add_mtd_device(struct mtd_info *mtd) if (mtd->bitflip_threshold == 0) mtd->bitflip_threshold = mtd->ecc_strength; + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + int ngroups = mtd_pairing_groups(master); + + mtd->erasesize /= ngroups; + mtd->size = (u64)mtd_div_by_eb(mtd->size, master) * + mtd->erasesize; + } + if (is_power_of_2(mtd->erasesize)) mtd->erasesize_shift = ffs(mtd->erasesize) - 1; else @@ -1074,9 +1095,11 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) { struct mtd_info *master = mtd_get_master(mtd); u64 mst_ofs = mtd_get_master_ofs(mtd, 0); + struct erase_info adjinstr; int ret; instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + adjinstr = *instr; if (!mtd->erasesize || !master->_erase) return -ENOTSUPP; @@ -1091,12 +1114,27 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) ledtrig_mtd_activity(); - instr->addr += mst_ofs; - ret = master->_erase(master, instr); - if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) - instr->fail_addr -= mst_ofs; + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) * + master->erasesize; + adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) * + master->erasesize) - + adjinstr.addr; + } + + adjinstr.addr += mst_ofs; + + ret = master->_erase(master, &adjinstr); + + if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) { + instr->fail_addr = adjinstr.fail_addr - mst_ofs; + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + instr->fail_addr = mtd_div_by_eb(instr->fail_addr, + master); + instr->fail_addr *= mtd->erasesize; + } + } - instr->addr -= mst_ofs; return ret; } EXPORT_SYMBOL_GPL(mtd_erase); @@ -1276,6 +1314,101 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, return 0; } +static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops) +{ + struct mtd_info *master = mtd_get_master(mtd); + int ret; + + from = mtd_get_master_ofs(mtd, from); + if (master->_read_oob) + ret = master->_read_oob(master, from, ops); + else + ret = master->_read(master, from, ops->len, &ops->retlen, + ops->datbuf); + + return ret; +} + +static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops) +{ + struct mtd_info *master = mtd_get_master(mtd); + int ret; + + to = mtd_get_master_ofs(mtd, to); + if (master->_write_oob) + ret = master->_write_oob(master, to, ops); + else + ret = master->_write(master, to, ops->len, &ops->retlen, + ops->datbuf); + + return ret; +} + +static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read, + struct mtd_oob_ops *ops) +{ + struct mtd_info *master = mtd_get_master(mtd); + int ngroups = mtd_pairing_groups(master); + int npairs = mtd_wunit_per_eb(master) / ngroups; + struct mtd_oob_ops adjops = *ops; + unsigned int wunit, oobavail; + struct mtd_pairing_info info; + int max_bitflips = 0; + u32 ebofs, pageofs; + loff_t base, pos; + + ebofs = mtd_mod_by_eb(start, mtd); + base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize; + info.group = 0; + info.pair = mtd_div_by_ws(ebofs, mtd); + pageofs = mtd_mod_by_ws(ebofs, mtd); + oobavail = mtd_oobavail(mtd, ops); + + while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) { + int ret; + + if (info.pair >= npairs) { + info.pair = 0; + base += 
master->erasesize; + } + + wunit = mtd_pairing_info_to_wunit(master, &info); + pos = mtd_wunit_to_offset(mtd, base, wunit); + + adjops.len = ops->len - ops->retlen; + if (adjops.len > mtd->writesize - pageofs) + adjops.len = mtd->writesize - pageofs; + + adjops.ooblen = ops->ooblen - ops->oobretlen; + if (adjops.ooblen > oobavail - adjops.ooboffs) + adjops.ooblen = oobavail - adjops.ooboffs; + + if (read) { + ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops); + if (ret > 0) + max_bitflips = max(max_bitflips, ret); + } else { + ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops); + } + + if (ret < 0) + return ret; + + max_bitflips = max(max_bitflips, ret); + ops->retlen += adjops.retlen; + ops->oobretlen += adjops.oobretlen; + adjops.datbuf += adjops.retlen; + adjops.oobbuf += adjops.oobretlen; + adjops.ooboffs = 0; + pageofs = 0; + info.pair++; + } + + return max_bitflips; +} + int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { struct mtd_info *master = mtd_get_master(mtd); @@ -1294,12 +1427,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) if (!master->_read_oob && (!master->_read || ops->oobbuf)) return -EOPNOTSUPP; - from = mtd_get_master_ofs(mtd, from); - if (master->_read_oob) - ret_code = master->_read_oob(master, from, ops); + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + ret_code = mtd_io_emulated_slc(mtd, from, true, ops); else - ret_code = master->_read(master, from, ops->len, &ops->retlen, - ops->datbuf); + ret_code = mtd_read_oob_std(mtd, from, ops); mtd_update_ecc_stats(mtd, master, &old_stats); @@ -1338,13 +1469,10 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to, if (!master->_write_oob && (!master->_write || ops->oobbuf)) return -EOPNOTSUPP; - to = mtd_get_master_ofs(mtd, to); + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + return mtd_io_emulated_slc(mtd, to, false, ops); - if (master->_write_oob) - return master->_write_oob(master, to, ops); - else - return master->_write(master, to, ops->len, &ops->retlen, - ops->datbuf); + return mtd_write_oob_std(mtd, to, ops); } EXPORT_SYMBOL_GPL(mtd_write_oob); @@ -1672,7 +1800,7 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes); * @start: first ECC byte to set * @nbytes: number of ECC bytes to set * - * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. + * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes. * * Returns zero on success, a negative error code otherwise. 
*/ @@ -1817,6 +1945,12 @@ int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return -EINVAL; if (!len) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; + } + return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_lock); @@ -1831,6 +1965,12 @@ int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) return -EINVAL; if (!len) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; + } + return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_unlock); @@ -1845,6 +1985,12 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) return -EINVAL; if (!len) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize; + } + return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len); } EXPORT_SYMBOL_GPL(mtd_is_locked); @@ -1857,6 +2003,10 @@ int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) return -EINVAL; if (!master->_block_isreserved) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs)); } EXPORT_SYMBOL_GPL(mtd_block_isreserved); @@ -1869,6 +2019,10 @@ int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) return -EINVAL; if (!master->_block_isbad) return 0; + + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs)); } EXPORT_SYMBOL_GPL(mtd_block_isbad); @@ -1885,6 +2039,9 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; + if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) + ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize; + ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); if (ret) return ret; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 3f6025684f58..c3575b686f79 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -35,9 +35,12 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, const struct mtd_partition *part, int partno, uint64_t cur_offset) { - int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize : - parent->erasesize; - struct mtd_info *child, *master = mtd_get_master(parent); + struct mtd_info *master = mtd_get_master(parent); + int wr_alignment = (parent->flags & MTD_NO_ERASE) ? + master->writesize : master->erasesize; + u64 parent_size = mtd_is_partition(parent) ? 
+ parent->part.size : parent->size; + struct mtd_info *child; u32 remainder; char *name; u64 tmp; @@ -56,8 +59,9 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, /* set up the MTD object for this partition */ child->type = parent->type; child->part.flags = parent->flags & ~part->mask_flags; + child->part.flags |= part->add_flags; child->flags = child->part.flags; - child->size = part->size; + child->part.size = part->size; child->writesize = parent->writesize; child->writebufsize = parent->writebufsize; child->oobsize = parent->oobsize; @@ -98,29 +102,29 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, } if (child->part.offset == MTDPART_OFS_RETAIN) { child->part.offset = cur_offset; - if (parent->size - child->part.offset >= child->size) { - child->size = parent->size - child->part.offset - - child->size; + if (parent_size - child->part.offset >= child->part.size) { + child->part.size = parent_size - child->part.offset - + child->part.size; } else { printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n", - part->name, parent->size - child->part.offset, - child->size); + part->name, parent_size - child->part.offset, + child->part.size); /* register to preserve ordering */ goto out_register; } } - if (child->size == MTDPART_SIZ_FULL) - child->size = parent->size - child->part.offset; + if (child->part.size == MTDPART_SIZ_FULL) + child->part.size = parent_size - child->part.offset; printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", - child->part.offset, child->part.offset + child->size, + child->part.offset, child->part.offset + child->part.size, child->name); /* let's do some sanity checks */ - if (child->part.offset >= parent->size) { + if (child->part.offset >= parent_size) { /* let's register it anyway to preserve ordering */ child->part.offset = 0; - child->size = 0; + child->part.size = 0; /* Initialize ->erasesize to make add_mtd_device() happy. 
*/ child->erasesize = parent->erasesize; @@ -128,15 +132,16 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, part->name); goto out_register; } - if (child->part.offset + child->size > parent->size) { - child->size = parent->size - child->part.offset; + if (child->part.offset + child->part.size > parent->size) { + child->part.size = parent_size - child->part.offset; printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", - part->name, parent->name, child->size); + part->name, parent->name, child->part.size); } + if (parent->numeraseregions > 1) { /* Deal with variable erase size stuff */ int i, max = parent->numeraseregions; - u64 end = child->part.offset + child->size; + u64 end = child->part.offset + child->part.size; struct mtd_erase_region_info *regions = parent->eraseregions; /* Find the first erase regions which is part of this @@ -156,7 +161,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, BUG_ON(child->erasesize == 0); } else { /* Single erase size */ - child->erasesize = parent->erasesize; + child->erasesize = master->erasesize; } /* @@ -178,7 +183,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, part->name); } - tmp = mtd_get_master_ofs(child, 0) + child->size; + tmp = mtd_get_master_ofs(child, 0) + child->part.size; remainder = do_div(tmp, wr_alignment); if ((child->flags & MTD_WRITEABLE) && remainder) { child->flags &= ~MTD_WRITEABLE; @@ -186,6 +191,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, part->name); } + child->size = child->part.size; child->ecc_step_size = parent->ecc_step_size; child->ecc_strength = parent->ecc_strength; child->bitflip_threshold = parent->bitflip_threshold; @@ -193,7 +199,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, if (master->_block_isbad) { uint64_t offs = 0; - while (offs < child->size) { + while (offs < child->part.size) { if (mtd_block_isreserved(child, offs)) child->ecc_stats.bbtblocks++; else if (mtd_block_isbad(child, offs)) @@ -234,6 +240,8 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, long long offset, long long length) { struct mtd_info *master = mtd_get_master(parent); + u64 parent_size = mtd_is_partition(parent) ? + parent->part.size : parent->size; struct mtd_partition part; struct mtd_info *child; int ret = 0; @@ -244,7 +252,7 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, return -EINVAL; if (length == MTDPART_SIZ_FULL) - length = parent->size - offset; + length = parent_size - offset; if (length <= 0) return -EINVAL; @@ -419,7 +427,7 @@ int add_mtd_partitions(struct mtd_info *parent, /* Look for subpartitions */ parse_mtd_partitions(child, parts[i].types, NULL); - cur_offset = child->part.offset + child->size; + cur_offset = child->part.offset + child->part.size; } return 0; diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index a80a46bb5b8b..113f61052269 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -213,10 +213,6 @@ config MTD_NAND_MLC_LPC32XX Please check the actual NAND chip connected and its support by the MLC NAND controller. -config MTD_NAND_CM_X270 - tristate "CM-X270 modules NAND controller" - depends on MACH_ARMCORE - config MTD_NAND_PASEMI tristate "PA Semi PWRficient NAND controller" depends on PPC_PASEMI @@ -457,6 +453,14 @@ config MTD_NAND_CADENCE Enable the driver for NAND flash on platforms using a Cadence NAND controller. 
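The Arasan entry added just below selects the software BCH library ("select BCH") because, as the new driver's read path later in this patch shows, correction is done in software with the renamed bch_init()/bch_decode()/bch_free() helpers also used by the docg3 update above. A minimal, self-contained sketch of that library usage follows; the 512-byte step, strength 4 and primitive polynomial 0x201b are example parameters only, and example_bch_correct() is an illustrative name, not code from this patch.

#include <linux/bch.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Decode one 512-byte ECC step protected by a 4-bit-strong BCH code */
static int example_bch_correct(u8 *data, u8 *read_ecc)
{
	struct bch_control *bch;
	unsigned int errloc[4];
	int i, nerr;

	/* GF(2^13), t = 4, primitive polynomial 0x201b, no bit swapping */
	bch = bch_init(13, 4, 0x201b, false);
	if (!bch)
		return -EINVAL;

	/* Compare the ECC read from flash with the data, locate bitflips */
	nerr = bch_decode(bch, data, 512, read_ecc, NULL, NULL, errloc);

	/* Flip back the corrupted bits that fall into the data area */
	for (i = 0; i < nerr; i++)
		if (errloc[i] < 512 * 8)
			data[errloc[i] >> 3] ^= 1 << (errloc[i] & 7);

	bch_free(bch);

	return nerr;	/* number of bitflips, or -EBADMSG if uncorrectable */
}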
+config MTD_NAND_ARASAN + tristate "Support for Arasan NAND flash controller" + depends on HAS_IOMEM && HAS_DMA + select BCH + help + Enables the driver for the Arasan NAND flash controller on + Zynq Ultrascale+ MPSoC. + comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 2d136b158fb7..2930f5b9015d 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -25,7 +25,6 @@ obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o omap2_nand-objs := omap2.o obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o -obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o @@ -58,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o +obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index d66dab25df20..3711e7a0436c 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -387,12 +387,15 @@ static int gpio_nand_remove(struct platform_device *pdev) { struct gpio_nand *priv = platform_get_drvdata(pdev); struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip); + int ret; /* Apply write protection */ gpiod_set_value(priv->gpiod_nwp, 1); /* Unregister device */ - nand_release(mtd_to_nand(mtd)); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(mtd_to_nand(mtd)); return 0; } diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c new file mode 100644 index 000000000000..7141dcccba3c --- /dev/null +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -0,0 +1,1297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Arasan NAND Flash Controller Driver + * + * Copyright (C) 2014 - 2020 Xilinx, Inc. 
+ * Author:
+ *   Miquel Raynal
+ * Original work (fully rewritten):
+ *   Punnaiah Choudary Kalluri
+ *   Naga Sureshkumar Relli
+ */
+
+#include <linux/bch.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PKT_REG 0x00
+#define PKT_SIZE(x) FIELD_PREP(GENMASK(10, 0), (x))
+#define PKT_STEPS(x) FIELD_PREP(GENMASK(23, 12), (x))
+
+#define MEM_ADDR1_REG 0x04
+
+#define MEM_ADDR2_REG 0x08
+#define ADDR2_STRENGTH(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define ADDR2_CS(x) FIELD_PREP(GENMASK(31, 30), (x))
+
+#define CMD_REG 0x0C
+#define CMD_1(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define CMD_2(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define CMD_PAGE_SIZE(x) FIELD_PREP(GENMASK(25, 23), (x))
+#define CMD_DMA_ENABLE BIT(27)
+#define CMD_NADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
+#define CMD_ECC_ENABLE BIT(31)
+
+#define PROG_REG 0x10
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+
+#define INTR_STS_EN_REG 0x14
+#define INTR_SIG_EN_REG 0x18
+#define INTR_STS_REG 0x1C
+#define WRITE_READY BIT(0)
+#define READ_READY BIT(1)
+#define XFER_COMPLETE BIT(2)
+#define DMA_BOUNDARY BIT(6)
+#define EVENT_MASK GENMASK(7, 0)
+
+#define READY_STS_REG 0x20
+
+#define DMA_ADDR0_REG 0x50
+#define DMA_ADDR1_REG 0x24
+
+#define FLASH_STS_REG 0x28
+
+#define DATA_PORT_REG 0x30
+
+#define ECC_CONF_REG 0x34
+#define ECC_CONF_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define ECC_CONF_LEN(x) FIELD_PREP(GENMASK(26, 16), (x))
+#define ECC_CONF_BCH_EN BIT(27)
+
+#define ECC_ERR_CNT_REG 0x38
+#define GET_PKT_ERR_CNT(x) FIELD_GET(GENMASK(7, 0), (x))
+#define GET_PAGE_ERR_CNT(x) FIELD_GET(GENMASK(16, 8), (x))
+
+#define ECC_SP_REG 0x3C
+#define ECC_SP_CMD1(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define ECC_SP_CMD2(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define ECC_SP_ADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
+
+#define ECC_1ERR_CNT_REG 0x40
+#define ECC_2ERR_CNT_REG 0x44
+
+#define DATA_INTERFACE_REG 0x6C
+#define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x))
+#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x))
+#define DIFACE_SDR 0
+#define DIFACE_NVDDR BIT(9)
+
+#define ANFC_MAX_CS 2
+#define ANFC_DFLT_TIMEOUT_US 1000000
+#define ANFC_MAX_CHUNK_SIZE SZ_1M
+#define ANFC_MAX_PARAM_SIZE SZ_4K
+#define ANFC_MAX_STEPS SZ_2K
+#define ANFC_MAX_PKT_SIZE (SZ_2K - 1)
+#define ANFC_MAX_ADDR_CYC 5U
+#define ANFC_RSVD_ECC_BYTES 21
+
+#define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000
+#define ANFC_XLNX_SDR_HS_CORE_CLK 80000000
+
+/**
+ * struct anfc_op - Defines how to execute an operation
+ * @pkt_reg: Packet register
+ * @addr1_reg: Memory address 1 register
+ * @addr2_reg: Memory address 2 register
+ * @cmd_reg: Command register
+ * @prog_reg: Program register
+ * @steps: Number of "packets" to read/write
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @len: Data transfer length
+ * @read: Data transfer direction from the controller point of view
+ * @buf: Data buffer
+ */
+struct anfc_op {
+	u32 pkt_reg;
+	u32 addr1_reg;
+	u32 addr2_reg;
+	u32 cmd_reg;
+	u32 prog_reg;
+	int steps;
+	unsigned int rdy_timeout_ms;
+	unsigned int len;
+	bool read;
+	u8 *buf;
+};
+
+/**
+ * struct anand - Defines the NAND chip related information
+ * @node: Used to store NAND chips into a list
+ * @chip: NAND chip information structure
+ * @cs: Chip select line
+ * @rb: Ready-busy line
+ *
@page_sz: Register value of the page_sz field to use + * @clk: Expected clock frequency to use + * @timings: Data interface timing mode to use + * @ecc_conf: Hardware ECC configuration value + * @strength: Register value of the ECC strength + * @raddr_cycles: Row address cycle information + * @caddr_cycles: Column address cycle information + * @ecc_bits: Exact number of ECC bits per syndrome + * @ecc_total: Total number of ECC bytes + * @errloc: Array of errors located with soft BCH + * @hw_ecc: Buffer to store syndromes computed by hardware + * @bch: BCH structure + */ +struct anand { + struct list_head node; + struct nand_chip chip; + unsigned int cs; + unsigned int rb; + unsigned int page_sz; + unsigned long clk; + u32 timings; + u32 ecc_conf; + u32 strength; + u16 raddr_cycles; + u16 caddr_cycles; + unsigned int ecc_bits; + unsigned int ecc_total; + unsigned int *errloc; + u8 *hw_ecc; + struct bch_control *bch; +}; + +/** + * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance + * @dev: Pointer to the device structure + * @base: Remapped register area + * @controller_clk: Pointer to the system clock + * @bus_clk: Pointer to the flash clock + * @controller: Base controller structure + * @chips: List of all NAND chips attached to the controller + * @assigned_cs: Bitmask describing already assigned CS lines + * @cur_clk: Current clock rate + */ +struct arasan_nfc { + struct device *dev; + void __iomem *base; + struct clk *controller_clk; + struct clk *bus_clk; + struct nand_controller controller; + struct list_head chips; + unsigned long assigned_cs; + unsigned int cur_clk; +}; + +static struct anand *to_anand(struct nand_chip *nand) +{ + return container_of(nand, struct anand, chip); +} + +static struct arasan_nfc *to_anfc(struct nand_controller *ctrl) +{ + return container_of(ctrl, struct arasan_nfc, controller); +} + +static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event) +{ + u32 val; + int ret; + + ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val, + val & event, 0, + ANFC_DFLT_TIMEOUT_US); + if (ret) { + dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event); + return -ETIMEDOUT; + } + + writel_relaxed(event, nfc->base + INTR_STS_REG); + + return 0; +} + +static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip, + unsigned int timeout_ms) +{ + struct anand *anand = to_anand(chip); + u32 val; + int ret; + + /* There is no R/B interrupt, we must poll a register */ + ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val, + val & BIT(anand->rb), + 1, timeout_ms * 1000); + if (ret) { + dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n", + readl_relaxed(nfc->base + READY_STS_REG)); + return -ETIMEDOUT; + } + + return 0; +} + +static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op) +{ + writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG); + writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG); + writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG); + writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG); + writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG); +} + +static int anfc_pkt_len_config(unsigned int len, unsigned int *steps, + unsigned int *pktsize) +{ + unsigned int nb, sz; + + for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) { + sz = len / nb; + if (sz <= ANFC_MAX_PKT_SIZE) + break; + } + + if (sz * nb != len) + return -ENOTSUPP; + + if (steps) + *steps = nb; + + if (pktsize) + *pktsize = sz; + + return 0; +} + +/* + * When using the embedded hardware ECC 
engine, the controller is in charge of
+ * feeding the engine with, first, the ECC residue present in the data array.
+ * A typical read operation is:
+ * 1/ Assert the read operation by sending the relevant command/address cycles
+ *    but targeting the column of the first ECC bytes in the OOB area instead of
+ *    the main data directly.
+ * 2/ After having read the relevant number of ECC bytes, the controller uses
+ *    the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
+ *    Register" to move the pointer back at the beginning of the main data.
+ * 3/ It will read the content of the main area for a given size (pktsize) and
+ *    will feed the ECC engine with this buffer again.
+ * 4/ The ECC engine derives the ECC bytes for the given data and compares them
+ *    with the ones already received. It eventually triggers status flags and
+ *    then sets the "Buffer Read Ready" flag.
+ * 5/ The corrected data is then available for reading from the data port
+ *    register.
+ *
+ * The hardware BCH ECC engine is known to be inconsistent in BCH mode and never
+ * reports uncorrectable errors. Because of this bug, we have to use the
+ * software BCH implementation in the read path.
+ */
+static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+				 int oob_required, int page)
+{
+	struct arasan_nfc *nfc = to_anfc(chip->controller);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct anand *anand = to_anand(chip);
+	unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+	unsigned int max_bitflips = 0;
+	dma_addr_t dma_addr;
+	int step, ret;
+	struct anfc_op nfc_op = {
+		.pkt_reg =
+			PKT_SIZE(chip->ecc.size) |
+			PKT_STEPS(chip->ecc.steps),
+		.addr1_reg =
+			(page & 0xFF) << (8 * (anand->caddr_cycles)) |
+			(((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+		.addr2_reg =
+			((page >> 16) & 0xFF) |
+			ADDR2_STRENGTH(anand->strength) |
+			ADDR2_CS(anand->cs),
+		.cmd_reg =
+			CMD_1(NAND_CMD_READ0) |
+			CMD_2(NAND_CMD_READSTART) |
+			CMD_PAGE_SIZE(anand->page_sz) |
+			CMD_DMA_ENABLE |
+			CMD_NADDRS(anand->caddr_cycles +
+				   anand->raddr_cycles),
+		.prog_reg = PROG_PGRD,
+	};
+
+	dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(nfc->dev, dma_addr)) {
+		dev_err(nfc->dev, "Buffer mapping error");
+		return -EIO;
+	}
+
+	writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+	writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+	anfc_trigger_op(nfc, &nfc_op);
+
+	ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+	dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
+	if (ret) {
+		dev_err(nfc->dev, "Error reading page %d\n", page);
+		return ret;
+	}
+
+	/* Store the raw OOB bytes as well */
+	ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
+					 mtd->oobsize, 0);
+	if (ret)
+		return ret;
+
+	/*
+	 * For each step, compute by software the BCH syndrome over the raw
+	 * data and compare the number of errors found with the hardware
+	 * engine feedback.
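+	 *
+	 * The per-step syndrome is not byte-aligned in the OOB buffer, hence
+	 * the nand_extract_bits() call below. Chunks reported as
+	 * uncorrectable are re-checked as potentially erased (0xff) pages
+	 * before the ECC failure counter is incremented.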
+ */ + for (step = 0; step < chip->ecc.steps; step++) { + u8 *raw_buf = &buf[step * chip->ecc.size]; + unsigned int bit, byte; + int bf, i; + + /* Extract the syndrome, it is not necessarily aligned */ + memset(anand->hw_ecc, 0, chip->ecc.bytes); + nand_extract_bits(anand->hw_ecc, 0, + &chip->oob_poi[mtd->oobsize - anand->ecc_total], + anand->ecc_bits * step, anand->ecc_bits); + + bf = bch_decode(anand->bch, raw_buf, chip->ecc.size, + anand->hw_ecc, NULL, NULL, anand->errloc); + if (!bf) { + continue; + } else if (bf > 0) { + for (i = 0; i < bf; i++) { + /* Only correct the data, not the syndrome */ + if (anand->errloc[i] < (chip->ecc.size * 8)) { + bit = BIT(anand->errloc[i] & 7); + byte = anand->errloc[i] >> 3; + raw_buf[byte] ^= bit; + } + } + + mtd->ecc_stats.corrected += bf; + max_bitflips = max_t(unsigned int, max_bitflips, bf); + + continue; + } + + bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size, + NULL, 0, NULL, 0, + chip->ecc.strength); + if (bf > 0) { + mtd->ecc_stats.corrected += bf; + max_bitflips = max_t(unsigned int, max_bitflips, bf); + memset(raw_buf, 0xFF, chip->ecc.size); + } else if (bf < 0) { + mtd->ecc_stats.failed++; + } + } + + return 0; +} + +static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0); + dma_addr_t dma_addr; + int ret; + struct anfc_op nfc_op = { + .pkt_reg = + PKT_SIZE(chip->ecc.size) | + PKT_STEPS(chip->ecc.steps), + .addr1_reg = + (page & 0xFF) << (8 * (anand->caddr_cycles)) | + (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))), + .addr2_reg = + ((page >> 16) & 0xFF) | + ADDR2_STRENGTH(anand->strength) | + ADDR2_CS(anand->cs), + .cmd_reg = + CMD_1(NAND_CMD_SEQIN) | + CMD_2(NAND_CMD_PAGEPROG) | + CMD_PAGE_SIZE(anand->page_sz) | + CMD_DMA_ENABLE | + CMD_NADDRS(anand->caddr_cycles + + anand->raddr_cycles) | + CMD_ECC_ENABLE, + .prog_reg = PROG_PGPROG, + }; + + writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG); + writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) | + ECC_SP_ADDRS(anand->caddr_cycles), + nfc->base + ECC_SP_REG); + + dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE); + if (dma_mapping_error(nfc->dev, dma_addr)) { + dev_err(nfc->dev, "Buffer mapping error"); + return -EIO; + } + + writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG); + writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG); + + anfc_trigger_op(nfc, &nfc_op); + ret = anfc_wait_for_event(nfc, XFER_COMPLETE); + dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE); + if (ret) { + dev_err(nfc->dev, "Error writing page %d\n", page); + return ret; + } + + /* Spare data is not protected */ + if (oob_required) + ret = nand_write_oob_std(chip, page); + + return ret; +} + +/* NAND framework ->exec_op() hooks and related helpers */ +static int anfc_parse_instructions(struct nand_chip *chip, + const struct nand_subop *subop, + struct anfc_op *nfc_op) +{ + struct anand *anand = to_anand(chip); + const struct nand_op_instr *instr = NULL; + bool first_cmd = true; + unsigned int op_id; + int ret, i; + + memset(nfc_op, 0, sizeof(*nfc_op)); + nfc_op->addr2_reg = ADDR2_CS(anand->cs); + nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz); + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + unsigned int offset, naddrs, pktsize; + const u8 *addrs; + u8 *buf; + + instr = 
&subop->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (first_cmd) + nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode); + else + nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode); + + first_cmd = false; + break; + + case NAND_OP_ADDR_INSTR: + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + nfc_op->cmd_reg |= CMD_NADDRS(naddrs); + + for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) { + if (i < 4) + nfc_op->addr1_reg |= (u32)addrs[i] << i * 8; + else + nfc_op->addr2_reg |= addrs[i]; + } + + break; + case NAND_OP_DATA_IN_INSTR: + nfc_op->read = true; + fallthrough; + case NAND_OP_DATA_OUT_INSTR: + offset = nand_subop_get_data_start_off(subop, op_id); + buf = instr->ctx.data.buf.in; + nfc_op->buf = &buf[offset]; + nfc_op->len = nand_subop_get_data_len(subop, op_id); + ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps, + &pktsize); + if (ret) + return ret; + + /* + * Number of DATA cycles must be aligned on 4, this + * means the controller might read/write more than + * requested. This is harmless most of the time as extra + * DATA are discarded in the write path and read pointer + * adjusted in the read path. + * + * FIXME: The core should mark operations where + * reading/writing more is allowed so the exec_op() + * implementation can take the right decision when the + * alignment constraint is not met: adjust the number of + * DATA cycles when it's allowed, reject the operation + * otherwise. + */ + nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) | + PKT_STEPS(nfc_op->steps); + break; + case NAND_OP_WAITRDY_INSTR: + nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + break; + } + } + + return 0; +} + +static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op) +{ + unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps; + unsigned int last_len = nfc_op->len % 4; + unsigned int offset, dir; + u8 *buf = nfc_op->buf; + int ret, i; + + for (i = 0; i < nfc_op->steps; i++) { + dir = nfc_op->read ? READ_READY : WRITE_READY; + ret = anfc_wait_for_event(nfc, dir); + if (ret) { + dev_err(nfc->dev, "PIO %s ready signal not received\n", + nfc_op->read ? 
"Read" : "Write"); + return ret; + } + + offset = i * (dwords * 4); + if (nfc_op->read) + ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset], + dwords); + else + iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset], + dwords); + } + + if (last_len) { + u32 remainder; + + offset = nfc_op->len - last_len; + + if (nfc_op->read) { + remainder = readl_relaxed(nfc->base + DATA_PORT_REG); + memcpy(&buf[offset], &remainder, last_len); + } else { + memcpy(&remainder, &buf[offset], last_len); + writel_relaxed(remainder, nfc->base + DATA_PORT_REG); + } + } + + return anfc_wait_for_event(nfc, XFER_COMPLETE); +} + +static int anfc_misc_data_type_exec(struct nand_chip *chip, + const struct nand_subop *subop, + u32 prog_reg) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct anfc_op nfc_op = {}; + int ret; + + ret = anfc_parse_instructions(chip, subop, &nfc_op); + if (ret) + return ret; + + nfc_op.prog_reg = prog_reg; + anfc_trigger_op(nfc, &nfc_op); + + if (nfc_op.rdy_timeout_ms) { + ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms); + if (ret) + return ret; + } + + return anfc_rw_pio_op(nfc, &nfc_op); +} + +static int anfc_param_read_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM); +} + +static int anfc_data_read_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_data_type_exec(chip, subop, PROG_PGRD); +} + +static int anfc_param_write_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE); +} + +static int anfc_data_write_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG); +} + +static int anfc_misc_zerolen_type_exec(struct nand_chip *chip, + const struct nand_subop *subop, + u32 prog_reg) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct anfc_op nfc_op = {}; + int ret; + + ret = anfc_parse_instructions(chip, subop, &nfc_op); + if (ret) + return ret; + + nfc_op.prog_reg = prog_reg; + anfc_trigger_op(nfc, &nfc_op); + + ret = anfc_wait_for_event(nfc, XFER_COMPLETE); + if (ret) + return ret; + + if (nfc_op.rdy_timeout_ms) + ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms); + + return ret; +} + +static int anfc_status_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + u32 tmp; + int ret; + + /* See anfc_check_op() for details about this constraint */ + if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS) + return -ENOTSUPP; + + ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS); + if (ret) + return ret; + + tmp = readl_relaxed(nfc->base + FLASH_STS_REG); + memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1); + + return 0; +} + +static int anfc_reset_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST); +} + +static int anfc_erase_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE); +} + +static int anfc_wait_type_exec(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct anfc_op nfc_op = {}; + int ret; + + ret = anfc_parse_instructions(chip, subop, &nfc_op); + if (ret) + return ret; + + return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms); +} + +static const struct 
nand_op_parser anfc_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + anfc_param_read_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + anfc_param_write_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)), + NAND_OP_PARSER_PATTERN( + anfc_data_read_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + anfc_data_write_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE), + NAND_OP_PARSER_PAT_CMD_ELEM(false)), + NAND_OP_PARSER_PATTERN( + anfc_reset_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + anfc_erase_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + anfc_status_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)), + NAND_OP_PARSER_PATTERN( + anfc_wait_type_exec, + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + ); + +static int anfc_select_target(struct nand_chip *chip, int target) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + int ret; + + /* Update the controller timings and the potential ECC configuration */ + writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG); + + /* Update clock frequency */ + if (nfc->cur_clk != anand->clk) { + clk_disable_unprepare(nfc->controller_clk); + ret = clk_set_rate(nfc->controller_clk, anand->clk); + if (ret) { + dev_err(nfc->dev, "Failed to change clock rate\n"); + return ret; + } + + ret = clk_prepare_enable(nfc->controller_clk); + if (ret) { + dev_err(nfc->dev, + "Failed to re-enable the controller clock\n"); + return ret; + } + + nfc->cur_clk = anand->clk; + } + + return 0; +} + +static int anfc_check_op(struct nand_chip *chip, + const struct nand_operation *op) +{ + const struct nand_op_instr *instr; + int op_id; + + /* + * The controller abstracts all the NAND operations and do not support + * data only operations. + * + * TODO: The nand_op_parser framework should be extended to + * support custom checks on DATA instructions. + */ + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_ADDR_INSTR: + if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC) + return -ENOTSUPP; + + break; + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE) + return -ENOTSUPP; + + if (anfc_pkt_len_config(instr->ctx.data.len, 0, 0)) + return -ENOTSUPP; + + break; + default: + break; + } + } + + /* + * The controller does not allow to proceed with a CMD+DATA_IN cycle + * manually on the bus by reading data from the data register. Instead, + * the controller abstract a status read operation with its own status + * register after ordering a read status operation. 
Hence, we cannot + * support any CMD+DATA_IN operation other than a READ STATUS. + * + * TODO: The nand_op_parser() framework should be extended to describe + * fixed patterns instead of open-coding this check here. + */ + if (op->ninstrs == 2 && + op->instrs[0].type == NAND_OP_CMD_INSTR && + op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS && + op->instrs[1].type == NAND_OP_DATA_IN_INSTR) + return -ENOTSUPP; + + return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true); +} + +static int anfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + int ret; + + if (check_only) + return anfc_check_op(chip, op); + + ret = anfc_select_target(chip, op->cs); + if (ret) + return ret; + + return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only); +} + +static int anfc_setup_data_interface(struct nand_chip *chip, int target, + const struct nand_data_interface *conf) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct device_node *np = nfc->dev->of_node; + + if (target < 0) + return 0; + + anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode); + anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK; + + /* + * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work + * with f > 90MHz (default clock is 100MHz) but signals are unstable + * with higher modes. Hence we decrease a little bit the clock rate to + * 80MHz when using modes 2-5 with this SoC. + */ + if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") && + conf->timings.mode >= 2) + anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK; + + return 0; +} + +static int anfc_calc_hw_ecc_bytes(int step_size, int strength) +{ + unsigned int bch_gf_mag, ecc_bits; + + switch (step_size) { + case SZ_512: + bch_gf_mag = 13; + break; + case SZ_1K: + bch_gf_mag = 14; + break; + default: + return -EINVAL; + } + + ecc_bits = bch_gf_mag * strength; + + return DIV_ROUND_UP(ecc_bits, 8); +} + +static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12}; + +static const int anfc_hw_ecc_1024_strengths[] = {24}; + +static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = { + { + .stepsize = SZ_512, + .strengths = anfc_hw_ecc_512_strengths, + .nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths), + }, + { + .stepsize = SZ_1K, + .strengths = anfc_hw_ecc_1024_strengths, + .nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths), + }, +}; + +static const struct nand_ecc_caps anfc_hw_ecc_caps = { + .stepinfos = anfc_hw_ecc_step_infos, + .nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos), + .calc_ecc_bytes = anfc_calc_hw_ecc_bytes, +}; + +static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc, + struct nand_chip *chip) +{ + struct anand *anand = to_anand(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset; + int ret; + + switch (mtd->writesize) { + case SZ_512: + case SZ_2K: + case SZ_4K: + case SZ_8K: + case SZ_16K: + break; + default: + dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize); + return -EINVAL; + } + + ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize); + if (ret) + return ret; + + switch (ecc->strength) { + case 12: + anand->strength = 0x1; + break; + case 8: + anand->strength = 0x2; + break; + case 4: + anand->strength = 0x3; + break; + case 24: + anand->strength = 0x4; + break; + default: + dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength); + return -EINVAL; + } + + switch (ecc->size) { + case 
SZ_512: + bch_gf_mag = 13; + bch_prim_poly = 0x201b; + break; + case SZ_1K: + bch_gf_mag = 14; + bch_prim_poly = 0x4443; + break; + default: + dev_err(nfc->dev, "Unsupported step size %d\n", ecc->strength); + return -EINVAL; + } + + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); + + ecc->steps = mtd->writesize / ecc->size; + ecc->algo = NAND_ECC_BCH; + anand->ecc_bits = bch_gf_mag * ecc->strength; + ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8); + anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8); + ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total; + anand->ecc_conf = ECC_CONF_COL(ecc_offset) | + ECC_CONF_LEN(anand->ecc_total) | + ECC_CONF_BCH_EN; + + anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength, + sizeof(*anand->errloc), GFP_KERNEL); + if (!anand->errloc) + return -ENOMEM; + + anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL); + if (!anand->hw_ecc) + return -ENOMEM; + + /* Enforce bit swapping to fit the hardware */ + anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true); + if (!anand->bch) + return -EINVAL; + + ecc->read_page = anfc_read_page_hw_ecc; + ecc->write_page = anfc_write_page_hw_ecc; + + return 0; +} + +static int anfc_attach_chip(struct nand_chip *chip) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + int ret = 0; + + if (mtd->writesize <= SZ_512) + anand->caddr_cycles = 1; + else + anand->caddr_cycles = 2; + + if (chip->options & NAND_ROW_ADDR_3) + anand->raddr_cycles = 3; + else + anand->raddr_cycles = 2; + + switch (mtd->writesize) { + case 512: + anand->page_sz = 0; + break; + case 1024: + anand->page_sz = 5; + break; + case 2048: + anand->page_sz = 1; + break; + case 4096: + anand->page_sz = 2; + break; + case 8192: + anand->page_sz = 3; + break; + case 16384: + anand->page_sz = 4; + break; + default: + return -EINVAL; + } + + /* These hooks are valid for all ECC providers */ + chip->ecc.read_page_raw = nand_monolithic_read_page_raw; + chip->ecc.write_page_raw = nand_monolithic_write_page_raw; + + switch (chip->ecc.mode) { + case NAND_ECC_NONE: + case NAND_ECC_SOFT: + case NAND_ECC_ON_DIE: + break; + case NAND_ECC_HW: + ret = anfc_init_hw_ecc_controller(nfc, chip); + break; + default: + dev_err(nfc->dev, "Unsupported ECC mode: %d\n", + chip->ecc.mode); + return -EINVAL; + } + + return ret; +} + +static void anfc_detach_chip(struct nand_chip *chip) +{ + struct anand *anand = to_anand(chip); + + if (anand->bch) + bch_free(anand->bch); +} + +static const struct nand_controller_ops anfc_ops = { + .exec_op = anfc_exec_op, + .setup_data_interface = anfc_setup_data_interface, + .attach_chip = anfc_attach_chip, + .detach_chip = anfc_detach_chip, +}; + +static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np) +{ + struct anand *anand; + struct nand_chip *chip; + struct mtd_info *mtd; + int cs, rb, ret; + + anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL); + if (!anand) + return -ENOMEM; + + /* We do not support multiple CS per chip yet */ + if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) { + dev_err(nfc->dev, "Invalid reg property\n"); + return -EINVAL; + } + + ret = of_property_read_u32(np, "reg", &cs); + if (ret) + return ret; + + ret = of_property_read_u32(np, "nand-rb", &rb); + if (ret) + return ret; + + if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) { + dev_err(nfc->dev, "Wrong CS %d or RB %d\n", cs, rb); + return -EINVAL; + } + + if (test_and_set_bit(cs, 
&nfc->assigned_cs)) { + dev_err(nfc->dev, "Already assigned CS %d\n", cs); + return -EINVAL; + } + + anand->cs = cs; + anand->rb = rb; + + chip = &anand->chip; + mtd = nand_to_mtd(chip); + mtd->dev.parent = nfc->dev; + chip->controller = &nfc->controller; + chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE | + NAND_USES_DMA; + + nand_set_flash_node(chip, np); + if (!mtd->name) { + dev_err(nfc->dev, "NAND label property is mandatory\n"); + return -EINVAL; + } + + ret = nand_scan(chip, 1); + if (ret) { + dev_err(nfc->dev, "Scan operation failed\n"); + return ret; + } + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + nand_cleanup(chip); + return ret; + } + + list_add_tail(&anand->node, &nfc->chips); + + return 0; +} + +static void anfc_chips_cleanup(struct arasan_nfc *nfc) +{ + struct anand *anand, *tmp; + struct nand_chip *chip; + int ret; + + list_for_each_entry_safe(anand, tmp, &nfc->chips, node) { + chip = &anand->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&anand->node); + } +} + +static int anfc_chips_init(struct arasan_nfc *nfc) +{ + struct device_node *np = nfc->dev->of_node, *nand_np; + int nchips = of_get_child_count(np); + int ret; + + if (!nchips || nchips > ANFC_MAX_CS) { + dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n", + nchips); + return -EINVAL; + } + + for_each_child_of_node(np, nand_np) { + ret = anfc_chip_init(nfc, nand_np); + if (ret) { + of_node_put(nand_np); + anfc_chips_cleanup(nfc); + break; + } + } + + return ret; +} + +static void anfc_reset(struct arasan_nfc *nfc) +{ + /* Disable interrupt signals */ + writel_relaxed(0, nfc->base + INTR_SIG_EN_REG); + + /* Enable interrupt status */ + writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG); +} + +static int anfc_probe(struct platform_device *pdev) +{ + struct arasan_nfc *nfc; + int ret; + + nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->dev = &pdev->dev; + nand_controller_init(&nfc->controller); + nfc->controller.ops = &anfc_ops; + INIT_LIST_HEAD(&nfc->chips); + + nfc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(nfc->base)) + return PTR_ERR(nfc->base); + + anfc_reset(nfc); + + nfc->controller_clk = devm_clk_get(&pdev->dev, "controller"); + if (IS_ERR(nfc->controller_clk)) + return PTR_ERR(nfc->controller_clk); + + nfc->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (IS_ERR(nfc->bus_clk)) + return PTR_ERR(nfc->bus_clk); + + ret = clk_prepare_enable(nfc->controller_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(nfc->bus_clk); + if (ret) + goto disable_controller_clk; + + ret = anfc_chips_init(nfc); + if (ret) + goto disable_bus_clk; + + platform_set_drvdata(pdev, nfc); + + return 0; + +disable_bus_clk: + clk_disable_unprepare(nfc->bus_clk); + +disable_controller_clk: + clk_disable_unprepare(nfc->controller_clk); + + return ret; +} + +static int anfc_remove(struct platform_device *pdev) +{ + struct arasan_nfc *nfc = platform_get_drvdata(pdev); + + anfc_chips_cleanup(nfc); + + clk_disable_unprepare(nfc->bus_clk); + clk_disable_unprepare(nfc->controller_clk); + + return 0; +} + +static const struct of_device_id anfc_ids[] = { + { + .compatible = "xlnx,zynqmp-nand-controller", + }, + { + .compatible = "arasan,nfc-v3p10", + }, + {} +}; +MODULE_DEVICE_TABLE(of, anfc_ids); + +static struct platform_driver anfc_driver = { + .driver = { + .name = "arasan-nand-controller", + .of_match_table = anfc_ids, + }, + .probe = anfc_probe, + .remove = anfc_remove, +}; 
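+
+/*
+ * module_platform_driver() below expands to the usual module_init()/
+ * module_exit() pair, registering and unregistering anfc_driver through
+ * platform_driver_register() and platform_driver_unregister().
+ */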
+module_platform_driver(anfc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Punnaiah Choudary Kalluri "); +MODULE_AUTHOR("Naga Sureshkumar Relli "); +MODULE_AUTHOR("Miquel Raynal "); +MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver"); diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 3ba17a98df4d..46a3724a788e 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1494,7 +1494,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc, * suitable for DMA. */ if (nc->dmac) - chip->options |= NAND_USE_BOUNCE_BUFFER; + chip->options |= NAND_USES_DMA; /* Default to HW ECC if pmecc is available. */ if (nc->pmecc) diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 75eb3e97fae3..d865200ccd08 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -16,63 +16,16 @@ struct au1550nd_ctx { + struct nand_controller controller; struct nand_chip chip; int cs; void __iomem *base; - void (*write_byte)(struct nand_chip *, u_char); }; -/** - * au_read_byte - read one byte from the chip - * @this: NAND chip object - * - * read function for 8bit buswidth - */ -static u_char au_read_byte(struct nand_chip *this) +static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this) { - u_char ret = readb(this->legacy.IO_ADDR_R); - wmb(); /* drain writebuffer */ - return ret; -} - -/** - * au_write_byte - write one byte to the chip - * @this: NAND chip object - * @byte: pointer to data byte to write - * - * write function for 8it buswidth - */ -static void au_write_byte(struct nand_chip *this, u_char byte) -{ - writeb(byte, this->legacy.IO_ADDR_W); - wmb(); /* drain writebuffer */ -} - -/** - * au_read_byte16 - read one byte endianness aware from the chip - * @this: NAND chip object - * - * read function for 16bit buswidth with endianness conversion - */ -static u_char au_read_byte16(struct nand_chip *this) -{ - u_char ret = (u_char) cpu_to_le16(readw(this->legacy.IO_ADDR_R)); - wmb(); /* drain writebuffer */ - return ret; -} - -/** - * au_write_byte16 - write one byte endianness aware to the chip - * @this: NAND chip object - * @byte: pointer to data byte to write - * - * write function for 16bit buswidth with endianness conversion - */ -static void au_write_byte16(struct nand_chip *this, u_char byte) -{ - writew(le16_to_cpu((u16) byte), this->legacy.IO_ADDR_W); - wmb(); /* drain writebuffer */ + return container_of(this, struct au1550nd_ctx, chip); } /** @@ -83,12 +36,15 @@ static void au_write_byte16(struct nand_chip *this, u_char byte) * * write function for 8bit buswidth */ -static void au_write_buf(struct nand_chip *this, const u_char *buf, int len) +static void au_write_buf(struct nand_chip *this, const void *buf, + unsigned int len) { + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + const u8 *p = buf; int i; for (i = 0; i < len; i++) { - writeb(buf[i], this->legacy.IO_ADDR_W); + writeb(p[i], ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } } @@ -101,12 +57,15 @@ static void au_write_buf(struct nand_chip *this, const u_char *buf, int len) * * read function for 8bit buswidth */ -static void au_read_buf(struct nand_chip *this, u_char *buf, int len) +static void au_read_buf(struct nand_chip *this, void *buf, + unsigned int len) { + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + u8 *p = buf; int i; for (i = 0; i < len; i++) { - buf[i] = readb(this->legacy.IO_ADDR_R); + p[i] = 
readb(ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } } @@ -119,17 +78,18 @@ static void au_read_buf(struct nand_chip *this, u_char *buf, int len) * * write function for 16bit buswidth */ -static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len) +static void au_write_buf16(struct nand_chip *this, const void *buf, + unsigned int len) { - int i; - u16 *p = (u16 *) buf; - len >>= 1; + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + const u16 *p = buf; + unsigned int i; + len >>= 1; for (i = 0; i < len; i++) { - writew(p[i], this->legacy.IO_ADDR_W); + writew(p[i], ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } - } /** @@ -140,218 +100,19 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len) * * read function for 16bit buswidth */ -static void au_read_buf16(struct nand_chip *this, u_char *buf, int len) +static void au_read_buf16(struct nand_chip *this, void *buf, unsigned int len) { - int i; - u16 *p = (u16 *) buf; - len >>= 1; + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + unsigned int i; + u16 *p = buf; + len >>= 1; for (i = 0; i < len; i++) { - p[i] = readw(this->legacy.IO_ADDR_R); + p[i] = readw(ctx->base + MEM_STNAND_DATA); wmb(); /* drain writebuffer */ } } -/* Select the chip by setting nCE to low */ -#define NAND_CTL_SETNCE 1 -/* Deselect the chip by setting nCE to high */ -#define NAND_CTL_CLRNCE 2 -/* Select the command latch by setting CLE to high */ -#define NAND_CTL_SETCLE 3 -/* Deselect the command latch by setting CLE to low */ -#define NAND_CTL_CLRCLE 4 -/* Select the address latch by setting ALE to high */ -#define NAND_CTL_SETALE 5 -/* Deselect the address latch by setting ALE to low */ -#define NAND_CTL_CLRALE 6 - -static void au1550_hwcontrol(struct mtd_info *mtd, int cmd) -{ - struct nand_chip *this = mtd_to_nand(mtd); - struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx, - chip); - - switch (cmd) { - - case NAND_CTL_SETCLE: - this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_CMD; - break; - - case NAND_CTL_CLRCLE: - this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA; - break; - - case NAND_CTL_SETALE: - this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_ADDR; - break; - - case NAND_CTL_CLRALE: - this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA; - /* FIXME: Nobody knows why this is necessary, - * but it works only that way */ - udelay(1); - break; - - case NAND_CTL_SETNCE: - /* assert (force assert) chip enable */ - alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL); - break; - - case NAND_CTL_CLRNCE: - /* deassert chip enable */ - alchemy_wrsmem(0, AU1000_MEM_STNDCTL); - break; - } - - this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W; - - wmb(); /* Drain the writebuffer */ -} - -int au1550_device_ready(struct nand_chip *this) -{ - return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0; -} - -/** - * au1550_select_chip - control -CE line - * Forbid driving -CE manually permitting the NAND controller to do this. - * Keeping -CE asserted during the whole sector reads interferes with the - * NOR flash and PCMCIA drivers as it causes contention on the static bus. - * We only have to hold -CE low for the NAND read commands since the flash - * chip needs it to be asserted during chip not ready time but the NAND - * controller keeps it released. 
- * - * @this: NAND chip object - * @chip: chipnumber to select, -1 for deselect - */ -static void au1550_select_chip(struct nand_chip *this, int chip) -{ -} - -/** - * au1550_command - Send command to NAND device - * @this: NAND chip object - * @command: the command to be sent - * @column: the column address for this command, -1 if none - * @page_addr: the page address for this command, -1 if none - */ -static void au1550_command(struct nand_chip *this, unsigned command, - int column, int page_addr) -{ - struct mtd_info *mtd = nand_to_mtd(this); - struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx, - chip); - int ce_override = 0, i; - unsigned long flags = 0; - - /* Begin command latch cycle */ - au1550_hwcontrol(mtd, NAND_CTL_SETCLE); - /* - * Write out the command to the device. - */ - if (command == NAND_CMD_SEQIN) { - int readcmd; - - if (column >= mtd->writesize) { - /* OOB area */ - column -= mtd->writesize; - readcmd = NAND_CMD_READOOB; - } else if (column < 256) { - /* First 256 bytes --> READ0 */ - readcmd = NAND_CMD_READ0; - } else { - column -= 256; - readcmd = NAND_CMD_READ1; - } - ctx->write_byte(this, readcmd); - } - ctx->write_byte(this, command); - - /* Set ALE and clear CLE to start address cycle */ - au1550_hwcontrol(mtd, NAND_CTL_CLRCLE); - - if (column != -1 || page_addr != -1) { - au1550_hwcontrol(mtd, NAND_CTL_SETALE); - - /* Serially input address */ - if (column != -1) { - /* Adjust columns for 16 bit buswidth */ - if (this->options & NAND_BUSWIDTH_16 && - !nand_opcode_8bits(command)) - column >>= 1; - ctx->write_byte(this, column); - } - if (page_addr != -1) { - ctx->write_byte(this, (u8)(page_addr & 0xff)); - - if (command == NAND_CMD_READ0 || - command == NAND_CMD_READ1 || - command == NAND_CMD_READOOB) { - /* - * NAND controller will release -CE after - * the last address byte is written, so we'll - * have to forcibly assert it. No interrupts - * are allowed while we do this as we don't - * want the NOR flash or PCMCIA drivers to - * steal our precious bytes of data... - */ - ce_override = 1; - local_irq_save(flags); - au1550_hwcontrol(mtd, NAND_CTL_SETNCE); - } - - ctx->write_byte(this, (u8)(page_addr >> 8)); - - if (this->options & NAND_ROW_ADDR_3) - ctx->write_byte(this, - ((page_addr >> 16) & 0x0f)); - } - /* Latch in address */ - au1550_hwcontrol(mtd, NAND_CTL_CLRALE); - } - - /* - * Program and erase have their own busy handlers. - * Status and sequential in need no delay. - */ - switch (command) { - - case NAND_CMD_PAGEPROG: - case NAND_CMD_ERASE1: - case NAND_CMD_ERASE2: - case NAND_CMD_SEQIN: - case NAND_CMD_STATUS: - return; - - case NAND_CMD_RESET: - break; - - case NAND_CMD_READ0: - case NAND_CMD_READ1: - case NAND_CMD_READOOB: - /* Check if we're really driving -CE low (just in case) */ - if (unlikely(!ce_override)) - break; - - /* Apply a short delay always to ensure that we do wait tWB. */ - ndelay(100); - /* Wait for a chip to become ready... */ - for (i = this->legacy.chip_delay; - !this->legacy.dev_ready(this) && i > 0; --i) - udelay(1); - - /* Release -CE and re-enable interrupts. */ - au1550_hwcontrol(mtd, NAND_CTL_CLRNCE); - local_irq_restore(flags); - return; - } - /* Apply this short delay always to ensure that we do wait tWB. 
*/ - ndelay(100); - - while(!this->legacy.dev_ready(this)); -} - static int find_nand_cs(unsigned long nand_base) { void __iomem *base = @@ -373,6 +134,112 @@ static int find_nand_cs(unsigned long nand_base) return -ENODEV; } +static int au1550nd_waitrdy(struct nand_chip *this, unsigned int timeout_ms) +{ + unsigned long timeout_jiffies = jiffies; + + timeout_jiffies += msecs_to_jiffies(timeout_ms) + 1; + do { + if (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) + return 0; + + usleep_range(10, 100); + } while (time_before(jiffies, timeout_jiffies)); + + return -ETIMEDOUT; +} + +static int au1550nd_exec_instr(struct nand_chip *this, + const struct nand_op_instr *instr) +{ + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + unsigned int i; + int ret = 0; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb(instr->ctx.cmd.opcode, + ctx->base + MEM_STNAND_CMD); + /* Drain the writebuffer */ + wmb(); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + writeb(instr->ctx.addr.addrs[i], + ctx->base + MEM_STNAND_ADDR); + /* Drain the writebuffer */ + wmb(); + } + break; + + case NAND_OP_DATA_IN_INSTR: + if ((this->options & NAND_BUSWIDTH_16) && + !instr->ctx.data.force_8bit) + au_read_buf16(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + else + au_read_buf(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + if ((this->options & NAND_BUSWIDTH_16) && + !instr->ctx.data.force_8bit) + au_write_buf16(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + else + au_write_buf(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + ret = au1550nd_waitrdy(this, instr->ctx.waitrdy.timeout_ms); + break; + default: + return -EINVAL; + } + + if (instr->delay_ns) + ndelay(instr->delay_ns); + + return ret; +} + +static int au1550nd_exec_op(struct nand_chip *this, + const struct nand_operation *op, + bool check_only) +{ + struct au1550nd_ctx *ctx = chip_to_au_ctx(this); + unsigned int i; + int ret; + + if (check_only) + return 0; + + /* assert (force assert) chip enable */ + alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL); + /* Drain the writebuffer */ + wmb(); + + for (i = 0; i < op->ninstrs; i++) { + ret = au1550nd_exec_instr(this, &op->instrs[i]); + if (ret) + break; + } + + /* deassert chip enable */ + alchemy_wrsmem(0, AU1000_MEM_STNDCTL); + /* Drain the writebuffer */ + wmb(); + + return ret; +} + +static const struct nand_controller_ops au1550nd_ops = { + .exec_op = au1550nd_exec_op, +}; + static int au1550nd_probe(struct platform_device *pdev) { struct au1550nd_platdata *pd; @@ -424,23 +291,15 @@ static int au1550nd_probe(struct platform_device *pdev) } ctx->cs = cs; - this->legacy.dev_ready = au1550_device_ready; - this->legacy.select_chip = au1550_select_chip; - this->legacy.cmdfunc = au1550_command; - - /* 30 us command delay time */ - this->legacy.chip_delay = 30; + nand_controller_init(&ctx->controller); + ctx->controller.ops = &au1550nd_ops; + this->controller = &ctx->controller; this->ecc.mode = NAND_ECC_SOFT; this->ecc.algo = NAND_ECC_HAMMING; if (pd->devwidth) this->options |= NAND_BUSWIDTH_16; - this->legacy.read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte; - ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte; - this->legacy.write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf; - this->legacy.read_buf = (pd->devwidth) ? 
au_read_buf16 : au_read_buf; - ret = nand_scan(this, 1); if (ret) { dev_err(&pdev->dev, "NAND scan failed with %d\n", ret); @@ -466,8 +325,12 @@ static int au1550nd_remove(struct platform_device *pdev) { struct au1550nd_ctx *ctx = platform_get_drvdata(pdev); struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct nand_chip *chip = &ctx->chip; + int ret; - nand_release(&ctx->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); iounmap(ctx->base); release_mem_region(r->start, 0x1000); kfree(ctx); diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c index 8dae97c1dbe7..dcc70d9dc6e5 100644 --- a/drivers/mtd/nand/raw/bcm47xxnflash/main.c +++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c @@ -60,8 +60,12 @@ static int bcm47xxnflash_probe(struct platform_device *pdev) static int bcm47xxnflash_remove(struct platform_device *pdev) { struct bcm47xxnflash *nflash = platform_get_drvdata(pdev); + struct nand_chip *chip = &nflash->nand_chip; + int ret; - nand_release(&nflash->nand_chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); return 0; } diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 8f9ffb46a09f..44068e9eea03 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -4,7 +4,6 @@ */ #include -#include #include #include #include @@ -264,6 +263,7 @@ struct brcmnand_controller { const unsigned int *block_sizes; unsigned int max_page_size; const unsigned int *page_sizes; + unsigned int page_size_shift; unsigned int max_oob; u32 features; @@ -338,8 +338,38 @@ enum brcmnand_reg { BRCMNAND_FC_BASE, }; -/* BRCMNAND v4.0 */ -static const u16 brcmnand_regs_v40[] = { +/* BRCMNAND v2.1-v2.2 */ +static const u16 brcmnand_regs_v21[] = { + [BRCMNAND_CMD_START] = 0x04, + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, + [BRCMNAND_CMD_ADDRESS] = 0x0c, + [BRCMNAND_INTFC_STATUS] = 0x5c, + [BRCMNAND_CS_SELECT] = 0x14, + [BRCMNAND_CS_XOR] = 0x18, + [BRCMNAND_LL_OP] = 0, + [BRCMNAND_CS0_BASE] = 0x40, + [BRCMNAND_CS1_BASE] = 0, + [BRCMNAND_CORR_THRESHOLD] = 0, + [BRCMNAND_CORR_THRESHOLD_EXT] = 0, + [BRCMNAND_UNCORR_COUNT] = 0, + [BRCMNAND_CORR_COUNT] = 0, + [BRCMNAND_CORR_EXT_ADDR] = 0x60, + [BRCMNAND_CORR_ADDR] = 0x64, + [BRCMNAND_UNCORR_EXT_ADDR] = 0x68, + [BRCMNAND_UNCORR_ADDR] = 0x6c, + [BRCMNAND_SEMAPHORE] = 0x50, + [BRCMNAND_ID] = 0x54, + [BRCMNAND_ID_EXT] = 0, + [BRCMNAND_LL_RDATA] = 0, + [BRCMNAND_OOB_READ_BASE] = 0x20, + [BRCMNAND_OOB_READ_10_BASE] = 0, + [BRCMNAND_OOB_WRITE_BASE] = 0x30, + [BRCMNAND_OOB_WRITE_10_BASE] = 0, + [BRCMNAND_FC_BASE] = 0x200, +}; + +/* BRCMNAND v3.3-v4.0 */ +static const u16 brcmnand_regs_v33[] = { [BRCMNAND_CMD_START] = 0x04, [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, [BRCMNAND_CMD_ADDRESS] = 0x0c, @@ -536,6 +566,9 @@ enum { CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT), CFG_DEVICE_SIZE_SHIFT = 24, + /* Only for v2.1 */ + CFG_PAGE_SIZE_SHIFT_v2_1 = 30, + /* Only for pre-v7.1 (with no CFG_EXT register) */ CFG_PAGE_SIZE_SHIFT = 20, CFG_BLK_SIZE_SHIFT = 28, @@ -571,12 +604,16 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) { static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 }; static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 }; - static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 }; + static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 }; 
+ static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 }; + static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 }; + static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 }; + static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 }; ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff; - /* Only support v4.0+? */ - if (ctrl->nand_version < 0x0400) { + /* Only support v2.1+ */ + if (ctrl->nand_version < 0x0201) { dev_err(ctrl->dev, "version %#x not supported\n", ctrl->nand_version); return -ENODEV; @@ -591,8 +628,10 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) ctrl->reg_offsets = brcmnand_regs_v60; else if (ctrl->nand_version >= 0x0500) ctrl->reg_offsets = brcmnand_regs_v50; - else if (ctrl->nand_version >= 0x0400) - ctrl->reg_offsets = brcmnand_regs_v40; + else if (ctrl->nand_version >= 0x0303) + ctrl->reg_offsets = brcmnand_regs_v33; + else if (ctrl->nand_version >= 0x0201) + ctrl->reg_offsets = brcmnand_regs_v21; /* Chip-select stride */ if (ctrl->nand_version >= 0x0701) @@ -606,8 +645,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) } else { ctrl->cs_offsets = brcmnand_cs_offsets; - /* v5.0 and earlier has a different CS0 offset layout */ - if (ctrl->nand_version <= 0x0500) + /* v3.3-5.0 have a different CS0 offset layout */ + if (ctrl->nand_version >= 0x0303 && + ctrl->nand_version <= 0x0500) ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; } @@ -617,14 +657,32 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) ctrl->max_page_size = 16 * 1024; ctrl->max_block_size = 2 * 1024 * 1024; } else { - ctrl->page_sizes = page_sizes; + if (ctrl->nand_version >= 0x0304) + ctrl->page_sizes = page_sizes_v3_4; + else if (ctrl->nand_version >= 0x0202) + ctrl->page_sizes = page_sizes_v2_2; + else + ctrl->page_sizes = page_sizes_v2_1; + + if (ctrl->nand_version >= 0x0202) + ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT; + else + ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1; + if (ctrl->nand_version >= 0x0600) ctrl->block_sizes = block_sizes_v6; - else + else if (ctrl->nand_version >= 0x0400) ctrl->block_sizes = block_sizes_v4; + else if (ctrl->nand_version >= 0x0202) + ctrl->block_sizes = block_sizes_v2_2; + else + ctrl->block_sizes = block_sizes_v2_1; if (ctrl->nand_version < 0x0400) { - ctrl->max_page_size = 4096; + if (ctrl->nand_version < 0x0202) + ctrl->max_page_size = 2048; + else + ctrl->max_page_size = 4096; ctrl->max_block_size = 512 * 1024; } } @@ -810,6 +868,9 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; int cs = host->cs; + if (!ctrl->reg_offsets[reg]) + return; + if (ctrl->nand_version == 0x0702) bits = 7; else if (ctrl->nand_version >= 0x0600) @@ -868,8 +929,10 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) return GENMASK(7, 0); else if (ctrl->nand_version >= 0x0600) return GENMASK(6, 0); - else + else if (ctrl->nand_version >= 0x0303) return GENMASK(5, 0); + else + return GENMASK(4, 0); } #define NAND_ACC_CONTROL_ECC_SHIFT 16 @@ -1100,30 +1163,30 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section, struct brcmnand_cfg *cfg = &host->hwcfg; int sas = cfg->spare_area_size << cfg->sector_size_1k; int sectors = cfg->page_size / (512 << cfg->sector_size_1k); + u32 next; - if (section >= sectors * 2) + if (section > sectors) return -ERANGE; - oobregion->offset = (section / 2) * sas; + next = (section * sas); + if (section < sectors) + 
next += 6; - if (section & 1) { - oobregion->offset += 9; - oobregion->length = 7; + if (section) { + oobregion->offset = ((section - 1) * sas) + 9; } else { - oobregion->length = 6; - - /* First sector of each page may have BBI */ - if (!section) { - /* - * Small-page NAND use byte 6 for BBI while large-page - * NAND use byte 0. - */ - if (cfg->page_size > 512) - oobregion->offset++; - oobregion->length--; + if (cfg->page_size > 512) { + /* Large page NAND uses first 2 bytes for BBI */ + oobregion->offset = 2; + } else { + /* Small page NAND uses last byte before ECC for BBI */ + oobregion->offset = 0; + next--; } } + oobregion->length = next - oobregion->offset; + return 0; } @@ -2018,28 +2081,31 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, struct nand_chip *chip, void *buf, u64 addr) { - int i, sas; - void *oob = chip->oob_poi; + struct mtd_oob_region ecc; + int i; int bitflips = 0; int page = addr >> chip->page_shift; int ret; + void *ecc_bytes; void *ecc_chunk; if (!buf) buf = nand_get_data_buf(chip); - sas = mtd->oobsize / chip->ecc.steps; - /* read without ecc for verification */ ret = chip->ecc.read_page_raw(chip, buf, true, page); if (ret) return ret; - for (i = 0; i < chip->ecc.steps; i++, oob += sas) { + for (i = 0; i < chip->ecc.steps; i++) { ecc_chunk = buf + chip->ecc.size * i; - ret = nand_check_erased_ecc_chunk(ecc_chunk, - chip->ecc.size, - oob, sas, NULL, 0, + + mtd_ooblayout_ecc(mtd, i, &ecc); + ecc_bytes = chip->oob_poi + ecc.offset; + + ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size, + ecc_bytes, ecc.length, + NULL, 0, chip->ecc.strength); if (ret < 0) return ret; @@ -2377,7 +2443,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) | (device_size << CFG_DEVICE_SIZE_SHIFT); if (cfg_offs == cfg_ext_offs) { - tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) | + tmp |= (page_size << ctrl->page_size_shift) | (block_size << CFG_BLK_SIZE_SHIFT); nand_writereg(ctrl, cfg_offs, tmp); } else { @@ -2389,9 +2455,11 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, tmp = nand_readreg(ctrl, acc_control_offs); tmp &= ~brcmnand_ecc_level_mask(ctrl); - tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; tmp &= ~brcmnand_spare_area_mask(ctrl); - tmp |= cfg->spare_area_size; + if (ctrl->nand_version >= 0x0302) { + tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; + tmp |= cfg->spare_area_size; + } nand_writereg(ctrl, acc_control_offs, tmp); brcmnand_set_sector_size_1k(host, cfg->sector_size_1k); @@ -2577,7 +2645,7 @@ static int brcmnand_attach_chip(struct nand_chip *chip) * to/from, and have nand_base pass us a bounce buffer instead, as * needed. 
*/ - chip->options |= NAND_USE_BOUNCE_BUFFER; + chip->options |= NAND_USES_DMA; if (chip->bbt_options & NAND_BBT_USE_FLASH) chip->bbt_options |= NAND_BBT_NO_OOB; @@ -2764,6 +2832,8 @@ const struct dev_pm_ops brcmnand_pm_ops = { EXPORT_SYMBOL_GPL(brcmnand_pm_ops); static const struct of_device_id brcmnand_of_match[] = { + { .compatible = "brcm,brcmnand-v2.1" }, + { .compatible = "brcm,brcmnand-v2.2" }, { .compatible = "brcm,brcmnand-v4.0" }, { .compatible = "brcm,brcmnand-v5.0" }, { .compatible = "brcm,brcmnand-v6.0" }, @@ -3045,9 +3115,15 @@ int brcmnand_remove(struct platform_device *pdev) { struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); struct brcmnand_host *host; + struct nand_chip *chip; + int ret; - list_for_each_entry(host, &ctrl->host_list, node) - nand_release(&host->chip); + list_for_each_entry(host, &ctrl->host_list, node) { + chip = &host->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + } clk_disable_unprepare(ctrl->clk); diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index efddc5c68afb..c405722adfe1 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2223,10 +2223,12 @@ static int cadence_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { - int status = cadence_nand_select_target(chip); + if (!check_only) { + int status = cadence_nand_select_target(chip); - if (status) - return status; + if (status) + return status; + } return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op, check_only); @@ -2592,7 +2594,7 @@ cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, return 0; } -int cadence_nand_attach_chip(struct nand_chip *chip) +static int cadence_nand_attach_chip(struct nand_chip *chip) { struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); @@ -2778,9 +2780,14 @@ static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl) { struct cdns_nand_chip *entry, *temp; + struct nand_chip *chip; + int ret; list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) { - nand_release(&entry->chip); + chip = &entry->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); list_del(&entry->node); } } diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c index 2d1c22dc88c1..92173790f20b 100644 --- a/drivers/mtd/nand/raw/cafe_nand.c +++ b/drivers/mtd/nand/raw/cafe_nand.c @@ -546,11 +546,6 @@ static int cafe_nand_write_page_lowlevel(struct nand_chip *chip, return nand_prog_page_end_op(chip); } -static int cafe_nand_block_bad(struct nand_chip *chip, loff_t ofs) -{ - return 0; -} - /* F_2[X]/(X**6+X+1) */ static unsigned short gf64_mul(u8 a, u8 b) { @@ -718,10 +713,8 @@ static int cafe_nand_probe(struct pci_dev *pdev, /* Enable the following for a flash based bad block table */ cafe->nand.bbt_options = NAND_BBT_USE_FLASH; - if (skipbbt) { - cafe->nand.options |= NAND_SKIP_BBTSCAN; - cafe->nand.legacy.block_bad = cafe_nand_block_bad; - } + if (skipbbt) + cafe->nand.options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK; if (numtimings && numtimings != 3) { dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings); @@ -814,11 +807,14 @@ static void cafe_nand_remove(struct pci_dev 
*pdev) struct mtd_info *mtd = pci_get_drvdata(pdev); struct nand_chip *chip = mtd_to_nand(mtd); struct cafe_priv *cafe = nand_get_controller_data(chip); + int ret; /* Disable NAND IRQ in global IRQ mask register */ cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); free_irq(pdev->irq, mtd); - nand_release(chip); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(chip); free_rs(cafe->rs); pci_iounmap(pdev, cafe->mmio); dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr); diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c deleted file mode 100644 index 045b6175ae79..000000000000 --- a/drivers/mtd/nand/raw/cmx270_nand.c +++ /dev/null @@ -1,236 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2006 Compulab, Ltd. - * Mike Rapoport - * - * Derived from drivers/mtd/nand/h1910.c (removed in v3.10) - * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) - * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) - * - * Overview: - * This is a device driver for the NAND flash device found on the - * CM-X270 board. - */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#define GPIO_NAND_CS (11) -#define GPIO_NAND_RB (89) - -/* MTD structure for CM-X270 board */ -static struct mtd_info *cmx270_nand_mtd; - -/* remaped IO address of the device */ -static void __iomem *cmx270_nand_io; - -/* - * Define static partitions for flash device - */ -static const struct mtd_partition partition_info[] = { - [0] = { - .name = "cmx270-0", - .offset = 0, - .size = MTDPART_SIZ_FULL - } -}; -#define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) - -static u_char cmx270_read_byte(struct nand_chip *this) -{ - return (readl(this->legacy.IO_ADDR_R) >> 16); -} - -static void cmx270_write_buf(struct nand_chip *this, const u_char *buf, - int len) -{ - int i; - - for (i=0; ilegacy.IO_ADDR_W); -} - -static void cmx270_read_buf(struct nand_chip *this, u_char *buf, int len) -{ - int i; - - for (i=0; ilegacy.IO_ADDR_R) >> 16; -} - -static inline void nand_cs_on(void) -{ - gpio_set_value(GPIO_NAND_CS, 0); -} - -static void nand_cs_off(void) -{ - dsb(); - - gpio_set_value(GPIO_NAND_CS, 1); -} - -/* - * hardware specific access to control-lines - */ -static void cmx270_hwcontrol(struct nand_chip *this, int dat, - unsigned int ctrl) -{ - unsigned int nandaddr = (unsigned int)this->legacy.IO_ADDR_W; - - dsb(); - - if (ctrl & NAND_CTRL_CHANGE) { - if ( ctrl & NAND_ALE ) - nandaddr |= (1 << 3); - else - nandaddr &= ~(1 << 3); - if ( ctrl & NAND_CLE ) - nandaddr |= (1 << 2); - else - nandaddr &= ~(1 << 2); - if ( ctrl & NAND_NCE ) - nand_cs_on(); - else - nand_cs_off(); - } - - dsb(); - this->legacy.IO_ADDR_W = (void __iomem*)nandaddr; - if (dat != NAND_CMD_NONE) - writel((dat << 16), this->legacy.IO_ADDR_W); - - dsb(); -} - -/* - * read device ready pin - */ -static int cmx270_device_ready(struct nand_chip *this) -{ - dsb(); - - return (gpio_get_value(GPIO_NAND_RB)); -} - -/* - * Main initialization routine - */ -static int __init cmx270_init(void) -{ - struct nand_chip *this; - int ret; - - if (!(machine_is_armcore() && cpu_is_pxa27x())) - return -ENODEV; - - ret = gpio_request(GPIO_NAND_CS, "NAND CS"); - if (ret) { - pr_warn("CM-X270: failed to request NAND CS gpio\n"); - return ret; - } - - gpio_direction_output(GPIO_NAND_CS, 1); - - ret = gpio_request(GPIO_NAND_RB, "NAND R/B"); - if (ret) { - pr_warn("CM-X270: failed to request NAND R/B gpio\n"); - goto err_gpio_request; - } 
- - gpio_direction_input(GPIO_NAND_RB); - - /* Allocate memory for MTD device structure and private data */ - this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); - if (!this) { - ret = -ENOMEM; - goto err_kzalloc; - } - - cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); - if (!cmx270_nand_io) { - pr_debug("Unable to ioremap NAND device\n"); - ret = -EINVAL; - goto err_ioremap; - } - - cmx270_nand_mtd = nand_to_mtd(this); - - /* Link the private data with the MTD structure */ - cmx270_nand_mtd->owner = THIS_MODULE; - - /* insert callbacks */ - this->legacy.IO_ADDR_R = cmx270_nand_io; - this->legacy.IO_ADDR_W = cmx270_nand_io; - this->legacy.cmd_ctrl = cmx270_hwcontrol; - this->legacy.dev_ready = cmx270_device_ready; - - /* 15 us command delay time */ - this->legacy.chip_delay = 20; - this->ecc.mode = NAND_ECC_SOFT; - this->ecc.algo = NAND_ECC_HAMMING; - - /* read/write functions */ - this->legacy.read_byte = cmx270_read_byte; - this->legacy.read_buf = cmx270_read_buf; - this->legacy.write_buf = cmx270_write_buf; - - /* Scan to find existence of the device */ - ret = nand_scan(this, 1); - if (ret) { - pr_notice("No NAND device\n"); - goto err_scan; - } - - /* Register the partitions */ - ret = mtd_device_register(cmx270_nand_mtd, partition_info, - NUM_PARTITIONS); - if (ret) - goto err_scan; - - /* Return happy */ - return 0; - -err_scan: - iounmap(cmx270_nand_io); -err_ioremap: - kfree(this); -err_kzalloc: - gpio_free(GPIO_NAND_RB); -err_gpio_request: - gpio_free(GPIO_NAND_CS); - - return ret; - -} -module_init(cmx270_init); - -/* - * Clean up routine - */ -static void __exit cmx270_cleanup(void) -{ - /* Release resources, unregister device */ - nand_release(mtd_to_nand(cmx270_nand_mtd)); - - gpio_free(GPIO_NAND_RB); - gpio_free(GPIO_NAND_CS); - - iounmap(cmx270_nand_io); - - kfree(mtd_to_nand(cmx270_nand_mtd)); -} -module_exit(cmx270_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mike Rapoport "); -MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module"); diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index e2322cee3229..9472bf798ed5 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -21,9 +21,9 @@ #include #include #include +#include #include -#include #define NR_CS553X_CONTROLLERS 4 @@ -89,76 +89,151 @@ #define CS_NAND_ECC_CLRECC (1<<1) #define CS_NAND_ECC_ENECC (1<<0) -static void cs553x_read_buf(struct nand_chip *this, u_char *buf, int len) +struct cs553x_nand_controller { + struct nand_controller base; + struct nand_chip chip; + void __iomem *mmio; +}; + +static struct cs553x_nand_controller * +to_cs553x(struct nand_controller *controller) { + return container_of(controller, struct cs553x_nand_controller, base); +} + +static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x, + u32 ctl, u8 data) +{ + u8 status; + int ret; + + writeb(ctl, cs553x->mmio + MM_NAND_CTL); + writeb(data, cs553x->mmio + MM_NAND_IO); + ret = readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status, + !(status & CS_NAND_CTLR_BUSY), 1, + 100000); + if (ret) + return ret; + + return 0; +} + +static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf, + unsigned int len) +{ + writeb(0, cs553x->mmio + MM_NAND_CTL); while (unlikely(len > 0x800)) { - memcpy_fromio(buf, this->legacy.IO_ADDR_R, 0x800); + memcpy_fromio(buf, cs553x->mmio, 0x800); buf += 0x800; len -= 0x800; } - memcpy_fromio(buf, this->legacy.IO_ADDR_R, len); + memcpy_fromio(buf, cs553x->mmio, len); } -static void 
cs553x_write_buf(struct nand_chip *this, const u_char *buf, int len) +static void cs553x_data_out(struct cs553x_nand_controller *cs553x, + const void *buf, unsigned int len) { + writeb(0, cs553x->mmio + MM_NAND_CTL); while (unlikely(len > 0x800)) { - memcpy_toio(this->legacy.IO_ADDR_R, buf, 0x800); + memcpy_toio(cs553x->mmio, buf, 0x800); buf += 0x800; len -= 0x800; } - memcpy_toio(this->legacy.IO_ADDR_R, buf, len); + memcpy_toio(cs553x->mmio, buf, len); } -static unsigned char cs553x_read_byte(struct nand_chip *this) +static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x, + unsigned int timeout_ms) { - return readb(this->legacy.IO_ADDR_R); + u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY; + u8 status; + + return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status, + (status & mask) == CS_NAND_STS_FLASH_RDY, 100, + timeout_ms * 1000); } -static void cs553x_write_byte(struct nand_chip *this, u_char byte) +static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x, + const struct nand_op_instr *instr) { - int i = 100000; + unsigned int i; + int ret = 0; - while (i && readb(this->legacy.IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) { - udelay(1); - i--; + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE, + instr->ctx.cmd.opcode); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE, + instr->ctx.addr.addrs[i]); + if (ret) + break; + } + break; + + case NAND_OP_DATA_IN_INSTR: + cs553x_data_in(cs553x, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + cs553x_data_out(cs553x, instr->ctx.data.buf.out, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms); + break; } - writeb(byte, this->legacy.IO_ADDR_W + 0x801); + + if (instr->delay_ns) + ndelay(instr->delay_ns); + + return ret; } -static void cs553x_hwcontrol(struct nand_chip *this, int cmd, - unsigned int ctrl) +static int cs553x_exec_op(struct nand_chip *this, + const struct nand_operation *op, + bool check_only) { - void __iomem *mmio_base = this->legacy.IO_ADDR_R; - if (ctrl & NAND_CTRL_CHANGE) { - unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01; - writeb(ctl, mmio_base + MM_NAND_CTL); + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); + unsigned int i; + int ret; + + if (check_only) + return 0; + + /* De-assert the CE pin */ + writeb(0, cs553x->mmio + MM_NAND_CTL); + for (i = 0; i < op->ninstrs; i++) { + ret = cs553x_exec_instr(cs553x, &op->instrs[i]); + if (ret) + break; } - if (cmd != NAND_CMD_NONE) - cs553x_write_byte(this, cmd); -} -static int cs553x_device_ready(struct nand_chip *this) -{ - void __iomem *mmio_base = this->legacy.IO_ADDR_R; - unsigned char foo = readb(mmio_base + MM_NAND_STS); + /* Re-assert the CE pin.
*/ + writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL); - return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY); + return ret; } static void cs_enable_hwecc(struct nand_chip *this, int mode) { - void __iomem *mmio_base = this->legacy.IO_ADDR_R; + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); - writeb(0x07, mmio_base + MM_NAND_ECC_CTL); + writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL); } static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat, u_char *ecc_code) { + struct cs553x_nand_controller *cs553x = to_cs553x(this->controller); uint32_t ecc; - void __iomem *mmio_base = this->legacy.IO_ADDR_R; - ecc = readl(mmio_base + MM_NAND_STS); + ecc = readl(cs553x->mmio + MM_NAND_STS); ecc_code[1] = ecc >> 8; ecc_code[0] = ecc >> 16; @@ -166,10 +241,15 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat, return 0; } -static struct mtd_info *cs553x_mtd[4]; +static struct cs553x_nand_controller *controllers[4]; + +static const struct nand_controller_ops cs553x_nand_controller_ops = { + .exec_op = cs553x_exec_op, +}; static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) { + struct cs553x_nand_controller *controller; int err = 0; struct nand_chip *this; struct mtd_info *new_mtd; @@ -183,33 +263,29 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) } /* Allocate memory for MTD device structure and private data */ - this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); - if (!this) { + controller = kzalloc(sizeof(*controller), GFP_KERNEL); + if (!controller) { err = -ENOMEM; goto out; } + this = &controller->chip; + nand_controller_init(&controller->base); + controller->base.ops = &cs553x_nand_controller_ops; + this->controller = &controller->base; new_mtd = nand_to_mtd(this); /* Link the private data with the MTD structure */ new_mtd->owner = THIS_MODULE; /* map physical address */ - this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = ioremap(adr, 4096); - if (!this->legacy.IO_ADDR_R) { + controller->mmio = ioremap(adr, 4096); + if (!controller->mmio) { pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr); err = -EIO; goto out_mtd; } - this->legacy.cmd_ctrl = cs553x_hwcontrol; - this->legacy.dev_ready = cs553x_device_ready; - this->legacy.read_byte = cs553x_read_byte; - this->legacy.read_buf = cs553x_read_buf; - this->legacy.write_buf = cs553x_write_buf; - - this->legacy.chip_delay = 0; - this->ecc.mode = NAND_ECC_HW; this->ecc.size = 256; this->ecc.bytes = 3; @@ -232,15 +308,15 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) if (err) goto out_free; - cs553x_mtd[cs] = new_mtd; + controllers[cs] = controller; goto out; out_free: kfree(new_mtd->name); out_ior: - iounmap(this->legacy.IO_ADDR_R); + iounmap(controller->mmio); out_mtd: - kfree(this); + kfree(controller); out: return err; } @@ -295,9 +371,10 @@ static int __init cs553x_init(void) /* Register all devices together here. This means we can easily hack it to do mtdconcat etc. if we want to. */ for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { - if (cs553x_mtd[i]) { + if (controllers[i]) { /* If any devices registered, return success. Else the last error. 
*/ - mtd_device_register(cs553x_mtd[i], NULL, 0); + mtd_device_register(nand_to_mtd(&controllers[i]->chip), + NULL, 0); err = 0; } } @@ -312,26 +389,26 @@ static void __exit cs553x_cleanup(void) int i; for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { - struct mtd_info *mtd = cs553x_mtd[i]; - struct nand_chip *this; - void __iomem *mmio_base; + struct cs553x_nand_controller *controller = controllers[i]; + struct nand_chip *this = &controller->chip; + struct mtd_info *mtd = nand_to_mtd(this); + int ret; if (!mtd) continue; - this = mtd_to_nand(mtd); - mmio_base = this->legacy.IO_ADDR_R; - /* Release resources, unregister device */ - nand_release(this); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(this); kfree(mtd->name); - cs553x_mtd[i] = NULL; + controllers[i] = NULL; /* unmap physical address */ - iounmap(mmio_base); + iounmap(controller->mmio); /* Free the MTD device structure */ - kfree(this); + kfree(controller); } } diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index 25c185bea50c..d975a62caaa5 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -38,6 +38,7 @@ * outputs in a "wire-AND" configuration, with no per-chip signals. */ struct davinci_nand_info { + struct nand_controller controller; struct nand_chip chip; struct platform_device *pdev; @@ -80,46 +81,6 @@ static inline void davinci_nand_writel(struct davinci_nand_info *info, /*----------------------------------------------------------------------*/ -/* - * Access to hardware control lines: ALE, CLE, secondary chipselect. - */ - -static void nand_davinci_hwcontrol(struct nand_chip *nand, int cmd, - unsigned int ctrl) -{ - struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand)); - void __iomem *addr = info->current_cs; - - /* Did the control lines change? */ - if (ctrl & NAND_CTRL_CHANGE) { - if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE) - addr += info->mask_cle; - else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE) - addr += info->mask_ale; - - nand->legacy.IO_ADDR_W = addr; - } - - if (cmd != NAND_CMD_NONE) - iowrite8(cmd, nand->legacy.IO_ADDR_W); -} - -static void nand_davinci_select_chip(struct nand_chip *nand, int chip) -{ - struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand)); - - info->current_cs = info->vaddr; - - /* maybe kick in a second chipselect */ - if (chip > 0) - info->current_cs += info->mask_chipsel; - - info->chip.legacy.IO_ADDR_W = info->current_cs; - info->chip.legacy.IO_ADDR_R = info->chip.legacy.IO_ADDR_W; -} - -/*----------------------------------------------------------------------*/ - /* * 1-bit hardware ECC ... context maintained for each core chipselect */ @@ -410,48 +371,75 @@ static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data, return corrected; } -/*----------------------------------------------------------------------*/ - -/* - * NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's - * how these chips are normally wired. This translates to both 8 and 16 - * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4). 
+/** + * nand_davinci_read_page_hwecc_oob_first - hw ecc, read oob first + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read * - * For now we assume that configuration, or any other one which ignores - * the two LSBs for NAND access ... so we can issue 32-bit reads/writes - * and have that transparently morphed into multiple NAND operations. + * Hardware ECC for large page chips, requires OOB to be read first. For this + * ECC mode, the write_page method is re-used from ECC_HW. These methods + * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with + * multiple ECC steps, which follows the "infix ECC" scheme and reads/writes ECC from + * the data area, by overwriting the NAND manufacturer bad block markings. */ -static void nand_davinci_read_buf(struct nand_chip *chip, uint8_t *buf, - int len) +static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, + uint8_t *buf, + int oob_required, int page) { - if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0) - ioread32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2); - else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0) - ioread16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1); - else - ioread8_rep(chip->legacy.IO_ADDR_R, buf, len); -} + struct mtd_info *mtd = nand_to_mtd(chip); + int i, eccsize = chip->ecc.size, ret; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + uint8_t *p = buf; + uint8_t *ecc_code = chip->ecc.code_buf; + uint8_t *ecc_calc = chip->ecc.calc_buf; + unsigned int max_bitflips = 0; -static void nand_davinci_write_buf(struct nand_chip *chip, const uint8_t *buf, - int len) -{ - if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0) - iowrite32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2); - else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0) - iowrite16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1); - else - iowrite8_rep(chip->legacy.IO_ADDR_R, buf, len); -} + /* Read the OOB area first */ + ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); + if (ret) + return ret; -/* - * Check hardware register for wait status. Returns 1 if device is ready, - * 0 if it is still busy.
- */ -static int nand_davinci_dev_ready(struct nand_chip *chip) -{ - struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip)); + ret = nand_read_page_op(chip, page, 0, NULL, 0); + if (ret) + return ret; - return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0); + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { + int stat; + + chip->ecc.hwctl(chip, NAND_ECC_READ); + + ret = nand_read_data_op(chip, p, eccsize, false, false); + if (ret) + return ret; + + chip->ecc.calculate(chip, p, &ecc_calc[i]); + + stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); + if (stat == -EBADMSG && + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { + /* check for empty pages with bitflips */ + stat = nand_check_erased_ecc_chunk(p, eccsize, + &ecc_code[i], + eccbytes, NULL, 0, + chip->ecc.strength); + } + + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + return max_bitflips; } /*----------------------------------------------------------------------*/ @@ -613,6 +601,13 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) break; case NAND_ECC_HW: if (pdata->ecc_bits == 4) { + int chunks = mtd->writesize / 512; + + if (!chunks || mtd->oobsize < 16) { + dev_dbg(&info->pdev->dev, "too small\n"); + return -EINVAL; + } + /* * No sanity checks: CPUs must support this, * and the chips may not use NAND_BUSWIDTH_16. @@ -635,6 +630,26 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) info->chip.ecc.bytes = 10; info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; info->chip.ecc.algo = NAND_ECC_BCH; + + /* + * Update ECC layout if needed ... for 1-bit HW ECC, the + * default is OK, but it allocates 6 bytes when only 3 + * are needed (for each 512 bytes). For 4-bit HW ECC, + * the default is not usable: 10 bytes needed, not 6. + * + * For small page chips, preserve the manufacturer's + * badblock marking data ... and make sure a flash BBT + * table marker fits in the free bytes. + */ + if (chunks == 1) { + mtd_set_ooblayout(mtd, + &hwecc4_small_ooblayout_ops); + } else if (chunks == 4 || chunks == 8) { + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); + info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first; + } else { + return -EIO; + } } else { /* 1bit ecc hamming */ info->chip.ecc.calculate = nand_davinci_calculate_1bit; @@ -650,39 +665,111 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) return -EINVAL; } - /* - * Update ECC layout if needed ... for 1-bit HW ECC, the default - * is OK, but it allocates 6 bytes when only 3 are needed (for - * each 512 bytes). For the 4-bit HW ECC, that default is not - * usable: 10 bytes are needed, not 6. - */ - if (pdata->ecc_bits == 4) { - int chunks = mtd->writesize / 512; + return ret; +} - if (!chunks || mtd->oobsize < 16) { - dev_dbg(&info->pdev->dev, "too small\n"); - return -EINVAL; - } +static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf, + unsigned int len, bool force_8bit) +{ + u32 alignment = ((uintptr_t)buf | len) & 3; - /* For small page chips, preserve the manufacturer's - * badblock marking data ... and make sure a flash BBT - * table marker fits in the free bytes. 
- */ - if (chunks == 1) { - mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops); - } else if (chunks == 4 || chunks == 8) { - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); - info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST; - } else { - return -EIO; + if (force_8bit || (alignment & 1)) + ioread8_rep(info->current_cs, buf, len); + else if (alignment & 3) + ioread16_rep(info->current_cs, buf, len >> 1); + else + ioread32_rep(info->current_cs, buf, len >> 2); +} + +static void nand_davinci_data_out(struct davinci_nand_info *info, + const void *buf, unsigned int len, + bool force_8bit) +{ + u32 alignment = ((uintptr_t)buf | len) & 3; + + if (force_8bit || (alignment & 1)) + iowrite8_rep(info->current_cs, buf, len); + else if (alignment & 3) + iowrite16_rep(info->current_cs, buf, len >> 1); + else + iowrite32_rep(info->current_cs, buf, len >> 2); +} + +static int davinci_nand_exec_instr(struct davinci_nand_info *info, + const struct nand_op_instr *instr) +{ + unsigned int i, timeout_us; + u32 status; + int ret; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + iowrite8(instr->ctx.cmd.opcode, + info->current_cs + info->mask_cle); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + iowrite8(instr->ctx.addr.addrs[i], + info->current_cs + info->mask_ale); } + break; + + case NAND_OP_DATA_IN_INSTR: + nand_davinci_data_in(info, instr->ctx.data.buf.in, + instr->ctx.data.len, + instr->ctx.data.force_8bit); + break; + + case NAND_OP_DATA_OUT_INSTR: + nand_davinci_data_out(info, instr->ctx.data.buf.out, + instr->ctx.data.len, + instr->ctx.data.force_8bit); + break; + + case NAND_OP_WAITRDY_INSTR: + timeout_us = instr->ctx.waitrdy.timeout_ms * 1000; + ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET, + status, status & BIT(0), 100, + timeout_us); + if (ret) + return ret; + + break; } - return ret; + if (instr->delay_ns) + ndelay(instr->delay_ns); + + return 0; +} + +static int davinci_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip)); + unsigned int i; + + if (check_only) + return 0; + + info->current_cs = info->vaddr + (op->cs * info->mask_chipsel); + + for (i = 0; i < op->ninstrs; i++) { + int ret; + + ret = davinci_nand_exec_instr(info, &op->instrs[i]); + if (ret) + return ret; + } + + return 0; } static const struct nand_controller_ops davinci_nand_controller_ops = { .attach_chip = davinci_nand_attach_chip, + .exec_op = davinci_nand_exec_op, }; static int nand_davinci_probe(struct platform_device *pdev) @@ -746,11 +833,6 @@ static int nand_davinci_probe(struct platform_device *pdev) mtd->dev.parent = &pdev->dev; nand_set_flash_node(&info->chip, pdev->dev.of_node); - info->chip.legacy.IO_ADDR_R = vaddr; - info->chip.legacy.IO_ADDR_W = vaddr; - info->chip.legacy.chip_delay = 0; - info->chip.legacy.select_chip = nand_davinci_select_chip; - /* options such as NAND_BBT_USE_FLASH */ info->chip.bbt_options = pdata->bbt_options; /* options such as 16-bit widths */ @@ -767,14 +849,6 @@ static int nand_davinci_probe(struct platform_device *pdev) info->mask_ale = pdata->mask_ale ? : MASK_ALE; info->mask_cle = pdata->mask_cle ? 
: MASK_CLE; - /* Set address of hardware control function */ - info->chip.legacy.cmd_ctrl = nand_davinci_hwcontrol; - info->chip.legacy.dev_ready = nand_davinci_dev_ready; - - /* Speed up buffer I/O */ - info->chip.legacy.read_buf = nand_davinci_read_buf; - info->chip.legacy.write_buf = nand_davinci_write_buf; - /* Use board-specific ECC config */ info->chip.ecc.mode = pdata->ecc_mode; @@ -788,7 +862,9 @@ static int nand_davinci_probe(struct platform_device *pdev) spin_unlock_irq(&davinci_nand_lock); /* Scan to find existence of the device(s) */ - info->chip.legacy.dummy_controller.ops = &davinci_nand_controller_ops; + nand_controller_init(&info->controller); + info->controller.ops = &davinci_nand_controller_ops; + info->chip.controller = &info->controller; ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1); if (ret < 0) { dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); @@ -817,13 +893,17 @@ static int nand_davinci_probe(struct platform_device *pdev) static int nand_davinci_remove(struct platform_device *pdev) { struct davinci_nand_info *info = platform_get_drvdata(pdev); + struct nand_chip *chip = &info->chip; + int ret; spin_lock_irq(&davinci_nand_lock); if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) ecc4_busy = false; spin_unlock_irq(&davinci_nand_lock); - nand_release(&info->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); return 0; } diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 6a6c919b2569..4e6e1578aa2d 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -764,6 +764,7 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf, static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, const struct nand_data_interface *conf) { + static const unsigned int data_setup_on_host = 10000; struct denali_controller *denali = to_denali_controller(chip); struct denali_chip_sel *sel; const struct nand_sdr_timings *timings; @@ -796,15 +797,6 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, sel = &to_denali_chip(chip)->sels[chipnr]; - /* tREA -> ACC_CLKS */ - acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x); - acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); - - tmp = ioread32(denali->reg + ACC_CLKS); - tmp &= ~ACC_CLKS__VALUE; - tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); - sel->acc_clks = tmp; - /* tRWH -> RE_2_WE */ re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x); re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); @@ -862,14 +854,45 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); sel->rdwr_en_hi_cnt = tmp; - /* tRP, tWP -> RDWR_EN_LO_CNT */ + /* + * tREA -> ACC_CLKS + * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT + */ + + /* + * Determine the minimum of acc_clks to meet the setup timing when + * capturing the incoming data. + * + * The delay on the chip side is well-defined as tREA, but we need to + * take additional delay into account. This includes a certain degree + * of unknowledge, such as signal propagation delays on the PCB and + * in the SoC, load capacity of the I/O pins, etc. 
+ */ + acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x); + + /* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */ rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x); + + /* Extend rdwr_en_lo to meet the data hold timing */ + rdwr_en_lo = max_t(int, rdwr_en_lo, + acc_clks - timings->tRHOH_min / t_x); + + /* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */ rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), t_x); - rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x); rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); + /* Center the data latch timing for extra safety */ + acc_clks = (acc_clks + rdwr_en_lo + + DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2; + acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); + + tmp = ioread32(denali->reg + ACC_CLKS); + tmp &= ~ACC_CLKS__VALUE; + tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); + sel->acc_clks = tmp; + tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); tmp &= ~RDWR_EN_LO_CNT__VALUE; tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); @@ -1203,7 +1226,7 @@ int denali_chip_init(struct denali_controller *denali, mtd->name = "denali-nand"; if (denali->dma_avail) { - chip->options |= NAND_USE_BOUNCE_BUFFER; + chip->options |= NAND_USES_DMA; chip->buf_align = 16; } @@ -1336,10 +1359,17 @@ EXPORT_SYMBOL(denali_init); void denali_remove(struct denali_controller *denali) { - struct denali_chip *dchip; + struct denali_chip *dchip, *tmp; + struct nand_chip *chip; + int ret; - list_for_each_entry(dchip, &denali->chips, node) - nand_release(&dchip->chip); + list_for_each_entry_safe(dchip, tmp, &denali->chips, node) { + chip = &dchip->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&dchip->node); + } denali_disable_irq(denali); } diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index c2a391ad2c35..43721863a0d8 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -58,6 +58,7 @@ static unsigned long doc_locations[] __initdata = { static struct mtd_info *doclist = NULL; struct doc_priv { + struct nand_controller base; void __iomem *virtadr; unsigned long physadr; u_char ChipID; @@ -69,6 +70,7 @@ struct doc_priv { int mh1_page; struct rs_control *rs_decoder; struct mtd_info *nextdoc; + bool supports_32b_reads; /* Handle the last stage of initialization (BBT scan, partitioning) */ int (*late_init)(struct mtd_info *mtd); @@ -84,10 +86,6 @@ static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 }; #define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil) #define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k) -static void doc200x_hwcontrol(struct nand_chip *this, int cmd, - unsigned int bitmask); -static void doc200x_select_chip(struct nand_chip *this, int chip); - static int debug = 0; module_param(debug, int, 0); @@ -302,20 +300,6 @@ static void doc2000_write_byte(struct nand_chip *this, u_char datum) WriteDOC(datum, docptr, 2k_CDSN_IO); } -static u_char doc2000_read_byte(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - u_char ret; - - ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(doc, 2); - ret = ReadDOC(docptr, 2k_CDSN_IO); - if (debug) - printk("read_byte returns %02x\n", ret); - return ret; -} - static void doc2000_writebuf(struct nand_chip *this, const u_char *buf, int len) { @@ 
-337,33 +321,42 @@ static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len) { struct doc_priv *doc = nand_get_controller_data(this); void __iomem *docptr = doc->virtadr; + u32 *buf32 = (u32 *)buf; int i; if (debug) printk("readbuf of %d bytes: ", len); - for (i = 0; i < len; i++) - buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i); + if (!doc->supports_32b_reads || + ((((unsigned long)buf) | len) & 3)) { + for (i = 0; i < len; i++) + buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i); + } else { + for (i = 0; i < len / 4; i++) + buf32[i] = readl(docptr + DoC_2k_CDSN_IO + i); + } } -static void doc2000_readbuf_dword(struct nand_chip *this, u_char *buf, int len) +/* + * We need our own readid() here because it's called before the NAND chip + * has been initialized, and calling nand_op_readid() would lead to a NULL + * pointer exception when dereferencing the NAND timings. + */ +static void doc200x_readid(struct nand_chip *this, unsigned int cs, u8 *id) { - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - int i; + u8 addr = 0; + struct nand_op_instr instrs[] = { + NAND_OP_CMD(NAND_CMD_READID, 0), + NAND_OP_ADDR(1, &addr, 50), + NAND_OP_8BIT_DATA_IN(2, id, 0), + }; - if (debug) - printk("readbuf_dword of %d bytes: ", len); + struct nand_operation op = NAND_OPERATION(cs, instrs); - if (unlikely((((unsigned long)buf) | len) & 3)) { - for (i = 0; i < len; i++) { - *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i); - } - } else { - for (i = 0; i < len; i += 4) { - *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i); - } - } + if (!id) + op.ninstrs--; + + this->controller->ops->exec_op(this, &op, false); } static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) @@ -371,20 +364,11 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) struct nand_chip *this = mtd_to_nand(mtd); struct doc_priv *doc = nand_get_controller_data(this); uint16_t ret; + u8 id[2]; - doc200x_select_chip(this, nr); - doc200x_hwcontrol(this, NAND_CMD_READID, - NAND_CTRL_CLE | NAND_CTRL_CHANGE); - doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); - doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + doc200x_readid(this, nr, id); - /* We can't use dev_ready here, but at least we wait for the - * command to complete - */ - udelay(50); - - ret = this->legacy.read_byte(this) << 8; - ret |= this->legacy.read_byte(this); + ret = ((u16)id[0] << 8) | id[1]; if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) { /* First chip probe. 
See if we get same results by 32-bit access */ @@ -394,18 +378,12 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) } ident; void __iomem *docptr = doc->virtadr; - doc200x_hwcontrol(this, NAND_CMD_READID, - NAND_CTRL_CLE | NAND_CTRL_CHANGE); - doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); - doc200x_hwcontrol(this, NAND_CMD_NONE, - NAND_NCE | NAND_CTRL_CHANGE); - - udelay(50); + doc200x_readid(this, nr, NULL); ident.dword = readl(docptr + DoC_2k_CDSN_IO); if (((ident.byte[0] << 8) | ident.byte[1]) == ret) { pr_info("DiskOnChip 2000 responds to DWORD access\n"); - this->legacy.read_buf = &doc2000_readbuf_dword; + doc->supports_32b_reads = true; } } @@ -434,20 +412,6 @@ static void __init doc2000_count_chips(struct mtd_info *mtd) pr_debug("Detected %d chips per floor.\n", i); } -static int doc200x_wait(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - - int status; - - DoC_WaitReady(doc); - nand_status_op(this, NULL); - DoC_WaitReady(doc); - status = (int)this->legacy.read_byte(this); - - return status; -} - static void doc2001_write_byte(struct nand_chip *this, u_char datum) { struct doc_priv *doc = nand_get_controller_data(this); @@ -458,19 +422,6 @@ static void doc2001_write_byte(struct nand_chip *this, u_char datum) WriteDOC(datum, docptr, WritePipeTerm); } -static u_char doc2001_read_byte(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - - //ReadDOC(docptr, CDSNSlowIO); - /* 11.4.5 -- delay twice to allow extended length cycle */ - DoC_Delay(doc, 2); - ReadDOC(docptr, ReadPipeInit); - //return ReadDOC(docptr, Mil_CDSN_IO); - return ReadDOC(docptr, LastDataRead); -} - static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len) { struct doc_priv *doc = nand_get_controller_data(this); @@ -499,20 +450,6 @@ static void doc2001_readbuf(struct nand_chip *this, u_char *buf, int len) buf[i] = ReadDOC(docptr, LastDataRead); } -static u_char doc2001plus_read_byte(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - u_char ret; - - ReadDOC(docptr, Mplus_ReadPipeInit); - ReadDOC(docptr, Mplus_ReadPipeInit); - ret = ReadDOC(docptr, Mplus_LastDataRead); - if (debug) - printk("read_byte returns %02x\n", ret); - return ret; -} - static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len) { struct doc_priv *doc = nand_get_controller_data(this); @@ -550,9 +487,12 @@ static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len) } /* Terminate read pipeline */ - buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead); - if (debug && i < 16) - printk("%02x ", buf[len - 2]); + if (len >= 2) { + buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead); + if (debug && i < 16) + printk("%02x ", buf[len - 2]); + } + buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead); if (debug && i < 16) printk("%02x ", buf[len - 1]); @@ -560,226 +500,163 @@ static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len) printk("\n"); } -static void doc2001plus_select_chip(struct nand_chip *this, int chip) +static void doc200x_write_control(struct doc_priv *doc, u8 value) +{ + WriteDOC(value, doc->virtadr, CDSNControl); + /* 11.4.3 -- 4 NOPs after CSDNControl write */ + DoC_Delay(doc, 4); +} + +static void doc200x_exec_instr(struct nand_chip *this, + const struct nand_op_instr *instr) { struct doc_priv *doc = nand_get_controller_data(this); - void 
__iomem *docptr = doc->virtadr; - int floor = 0; + unsigned int i; - if (debug) - printk("select chip (%d)\n", chip); + switch (instr->type) { + case NAND_OP_CMD_INSTR: + doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_CLE); + doc2000_write_byte(this, instr->ctx.cmd.opcode); + break; - if (chip == -1) { - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - return; + case NAND_OP_ADDR_INSTR: + doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_ALE); + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + u8 addr = instr->ctx.addr.addrs[i]; + + if (DoC_is_2000(doc)) + doc2000_write_byte(this, addr); + else + doc2001_write_byte(this, addr); + } + break; + + case NAND_OP_DATA_IN_INSTR: + doc200x_write_control(doc, CDSN_CTRL_CE); + if (DoC_is_2000(doc)) + doc2000_readbuf(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + else + doc2001_readbuf(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + doc200x_write_control(doc, CDSN_CTRL_CE); + if (DoC_is_2000(doc)) + doc2000_writebuf(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + else + doc2001_writebuf(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + DoC_WaitReady(doc); + break; } - floor = chip / doc->chips_per_floor; - chip -= (floor * doc->chips_per_floor); + if (instr->delay_ns) + ndelay(instr->delay_ns); +} + +static int doc200x_exec_op(struct nand_chip *this, + const struct nand_operation *op, + bool check_only) +{ + struct doc_priv *doc = nand_get_controller_data(this); + unsigned int i; + + if (check_only) + return true; + + doc->curchip = op->cs % doc->chips_per_floor; + doc->curfloor = op->cs / doc->chips_per_floor; + + WriteDOC(doc->curfloor, doc->virtadr, FloorSelect); + WriteDOC(doc->curchip, doc->virtadr, CDSNDeviceSelect); + + /* Assert CE pin */ + doc200x_write_control(doc, CDSN_CTRL_CE); + + for (i = 0; i < op->ninstrs; i++) + doc200x_exec_instr(this, &op->instrs[i]); + + /* De-assert CE pin */ + doc200x_write_control(doc, 0); + + return 0; +} + +static void doc2001plus_write_pipe_term(struct doc_priv *doc) +{ + WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm); + WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm); +} + +static void doc2001plus_exec_instr(struct nand_chip *this, + const struct nand_op_instr *instr) +{ + struct doc_priv *doc = nand_get_controller_data(this); + unsigned int i; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + WriteDOC(instr->ctx.cmd.opcode, doc->virtadr, Mplus_FlashCmd); + doc2001plus_write_pipe_term(doc); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + u8 addr = instr->ctx.addr.addrs[i]; + + WriteDOC(addr, doc->virtadr, Mplus_FlashAddress); + } + doc2001plus_write_pipe_term(doc); + /* deassert ALE */ + WriteDOC(0, doc->virtadr, Mplus_FlashControl); + break; + + case NAND_OP_DATA_IN_INSTR: + doc2001plus_readbuf(this, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + case NAND_OP_DATA_OUT_INSTR: + doc2001plus_writebuf(this, instr->ctx.data.buf.out, + instr->ctx.data.len); + doc2001plus_write_pipe_term(doc); + break; + case NAND_OP_WAITRDY_INSTR: + DoC_WaitReady(doc); + break; + } + + if (instr->delay_ns) + ndelay(instr->delay_ns); +} + +static int doc2001plus_exec_op(struct nand_chip *this, + const struct nand_operation *op, + bool check_only) +{ + struct doc_priv *doc = nand_get_controller_data(this); + unsigned int i; + + if (check_only) + return true; + + doc->curchip = op->cs % doc->chips_per_floor; + doc->curfloor 
= op->cs / doc->chips_per_floor; /* Assert ChipEnable and deassert WriteProtect */ - WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect); - nand_reset_op(this); + WriteDOC(DOC_FLASH_CE, doc->virtadr, Mplus_FlashSelect); - doc->curchip = chip; - doc->curfloor = floor; -} + for (i = 0; i < op->ninstrs; i++) + doc2001plus_exec_instr(this, &op->instrs[i]); -static void doc200x_select_chip(struct nand_chip *this, int chip) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - int floor = 0; + /* De-assert ChipEnable */ + WriteDOC(0, doc->virtadr, Mplus_FlashSelect); - if (debug) - printk("select chip (%d)\n", chip); - - if (chip == -1) - return; - - floor = chip / doc->chips_per_floor; - chip -= (floor * doc->chips_per_floor); - - /* 11.4.4 -- deassert CE before changing chip */ - doc200x_hwcontrol(this, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE); - - WriteDOC(floor, docptr, FloorSelect); - WriteDOC(chip, docptr, CDSNDeviceSelect); - - doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); - - doc->curchip = chip; - doc->curfloor = floor; -} - -#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE) - -static void doc200x_hwcontrol(struct nand_chip *this, int cmd, - unsigned int ctrl) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - - if (ctrl & NAND_CTRL_CHANGE) { - doc->CDSNControl &= ~CDSN_CTRL_MSK; - doc->CDSNControl |= ctrl & CDSN_CTRL_MSK; - if (debug) - printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl); - WriteDOC(doc->CDSNControl, docptr, CDSNControl); - /* 11.4.3 -- 4 NOPs after CSDNControl write */ - DoC_Delay(doc, 4); - } - if (cmd != NAND_CMD_NONE) { - if (DoC_is_2000(doc)) - doc2000_write_byte(this, cmd); - else - doc2001_write_byte(this, cmd); - } -} - -static void doc2001plus_command(struct nand_chip *this, unsigned command, - int column, int page_addr) -{ - struct mtd_info *mtd = nand_to_mtd(this); - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - - /* - * Must terminate write pipeline before sending any commands - * to the device. - */ - if (command == NAND_CMD_PAGEPROG) { - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - } - - /* - * Write out the command to the device. 
- */ - if (command == NAND_CMD_SEQIN) { - int readcmd; - - if (column >= mtd->writesize) { - /* OOB area */ - column -= mtd->writesize; - readcmd = NAND_CMD_READOOB; - } else if (column < 256) { - /* First 256 bytes --> READ0 */ - readcmd = NAND_CMD_READ0; - } else { - column -= 256; - readcmd = NAND_CMD_READ1; - } - WriteDOC(readcmd, docptr, Mplus_FlashCmd); - } - WriteDOC(command, docptr, Mplus_FlashCmd); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - - if (column != -1 || page_addr != -1) { - /* Serially input address */ - if (column != -1) { - /* Adjust columns for 16 bit buswidth */ - if (this->options & NAND_BUSWIDTH_16 && - !nand_opcode_8bits(command)) - column >>= 1; - WriteDOC(column, docptr, Mplus_FlashAddress); - } - if (page_addr != -1) { - WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress); - WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); - if (this->options & NAND_ROW_ADDR_3) { - WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); - printk("high density\n"); - } - } - WriteDOC(0, docptr, Mplus_WritePipeTerm); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - /* deassert ALE */ - if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 || - command == NAND_CMD_READOOB || command == NAND_CMD_READID) - WriteDOC(0, docptr, Mplus_FlashControl); - } - - /* - * program and erase have their own busy handlers - * status and sequential in needs no delay - */ - switch (command) { - - case NAND_CMD_PAGEPROG: - case NAND_CMD_ERASE1: - case NAND_CMD_ERASE2: - case NAND_CMD_SEQIN: - case NAND_CMD_STATUS: - return; - - case NAND_CMD_RESET: - if (this->legacy.dev_ready) - break; - udelay(this->legacy.chip_delay); - WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - WriteDOC(0, docptr, Mplus_WritePipeTerm); - while (!(this->legacy.read_byte(this) & 0x40)) ; - return; - - /* This applies to read commands */ - default: - /* - * If we don't have access to the busy pin, we apply the given - * command delay - */ - if (!this->legacy.dev_ready) { - udelay(this->legacy.chip_delay); - return; - } - } - - /* Apply this short delay always to ensure that we do wait tWB in - * any case on any machine. */ - ndelay(100); - /* wait until command is processed */ - while (!this->legacy.dev_ready(this)) ; -} - -static int doc200x_dev_ready(struct nand_chip *this) -{ - struct doc_priv *doc = nand_get_controller_data(this); - void __iomem *docptr = doc->virtadr; - - if (DoC_is_MillenniumPlus(doc)) { - /* 11.4.2 -- must NOP four times before checking FR/B# */ - DoC_Delay(doc, 4); - if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) { - if (debug) - printk("not ready\n"); - return 0; - } - if (debug) - printk("was ready\n"); - return 1; - } else { - /* 11.4.2 -- must NOP four times before checking FR/B# */ - DoC_Delay(doc, 4); - if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { - if (debug) - printk("not ready\n"); - return 0; - } - /* 11.4.2 -- Must NOP twice if it's ready */ - DoC_Delay(doc, 2); - if (debug) - printk("was ready\n"); - return 1; - } -} - -static int doc200x_block_bad(struct nand_chip *this, loff_t ofs) -{ - /* This is our last resort if we couldn't find or create a BBT. Just - pretend all blocks are good. 
*/ return 0; } @@ -1344,9 +1221,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd) struct nand_chip *this = mtd_to_nand(mtd); struct doc_priv *doc = nand_get_controller_data(this); - this->legacy.read_byte = doc2000_read_byte; - this->legacy.write_buf = doc2000_writebuf; - this->legacy.read_buf = doc2000_readbuf; doc->late_init = nftl_scan_bbt; doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO; @@ -1360,10 +1234,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd) struct nand_chip *this = mtd_to_nand(mtd); struct doc_priv *doc = nand_get_controller_data(this); - this->legacy.read_byte = doc2001_read_byte; - this->legacy.write_buf = doc2001_writebuf; - this->legacy.read_buf = doc2001_readbuf; - ReadDOC(doc->virtadr, ChipID); ReadDOC(doc->virtadr, ChipID); ReadDOC(doc->virtadr, ChipID); @@ -1390,13 +1260,7 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd) struct nand_chip *this = mtd_to_nand(mtd); struct doc_priv *doc = nand_get_controller_data(this); - this->legacy.read_byte = doc2001plus_read_byte; - this->legacy.write_buf = doc2001plus_writebuf; - this->legacy.read_buf = doc2001plus_readbuf; doc->late_init = inftl_scan_bbt; - this->legacy.cmd_ctrl = NULL; - this->legacy.select_chip = doc2001plus_select_chip; - this->legacy.cmdfunc = doc2001plus_command; this->ecc.hwctl = doc2001plus_enable_hwecc; doc->chips_per_floor = 1; @@ -1405,6 +1269,14 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd) return 1; } +static const struct nand_controller_ops doc200x_ops = { + .exec_op = doc200x_exec_op, +}; + +static const struct nand_controller_ops doc2001plus_ops = { + .exec_op = doc2001plus_exec_op, +}; + static int __init doc_probe(unsigned long physadr) { struct nand_chip *nand = NULL; @@ -1548,7 +1420,6 @@ static int __init doc_probe(unsigned long physadr) goto fail; } - /* * Allocate a RS codec instance * @@ -1566,6 +1437,12 @@ static int __init doc_probe(unsigned long physadr) goto fail; } + nand_controller_init(&doc->base); + if (ChipID == DOC_ChipID_DocMilPlus16) + doc->base.ops = &doc2001plus_ops; + else + doc->base.ops = &doc200x_ops; + mtd = nand_to_mtd(nand); nand->bbt_td = (struct nand_bbt_descr *) (doc + 1); nand->bbt_md = nand->bbt_td + 1; @@ -1573,12 +1450,8 @@ static int __init doc_probe(unsigned long physadr) mtd->owner = THIS_MODULE; mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops); + nand->controller = &doc->base; nand_set_controller_data(nand, doc); - nand->legacy.select_chip = doc200x_select_chip; - nand->legacy.cmd_ctrl = doc200x_hwcontrol; - nand->legacy.dev_ready = doc200x_dev_ready; - nand->legacy.waitfunc = doc200x_wait; - nand->legacy.block_bad = doc200x_block_bad; nand->ecc.hwctl = doc200x_enable_hwecc; nand->ecc.calculate = doc200x_calculate_ecc; nand->ecc.correct = doc200x_correct_data; @@ -1590,7 +1463,7 @@ static int __init doc_probe(unsigned long physadr) nand->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; nand->bbt_options = NAND_BBT_USE_FLASH; /* Skip the automatic BBT scan so we can run it manually */ - nand->options |= NAND_SKIP_BBTSCAN; + nand->options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK; doc->physadr = physadr; doc->virtadr = virtadr; @@ -1609,13 +1482,10 @@ static int __init doc_probe(unsigned long physadr) numchips = doc2001_init(mtd); if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) { - /* DBB note: i believe nand_release is necessary here, as + /* DBB note: i believe nand_cleanup is necessary here, as buffers may have been allocated in nand_base. Check with Thomas. 
FIX ME! */ - /* nand_release will call mtd_device_unregister, but we - haven't yet added it. This is handled without incident by - mtd_device_unregister, as far as I can tell. */ - nand_release(nand); + nand_cleanup(nand); goto fail; } @@ -1644,13 +1514,16 @@ static void release_nanddoc(void) struct mtd_info *mtd, *nextmtd; struct nand_chip *nand; struct doc_priv *doc; + int ret; for (mtd = doclist; mtd; mtd = nextmtd) { nand = mtd_to_nand(mtd); doc = nand_get_controller_data(nand); nextmtd = doc->nextdoc; - nand_release(nand); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(nand); iounmap(doc->virtadr); release_mem_region(doc->physadr, DOC_IOREMAP_LEN); free_rs(doc->rs_decoder); diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c index e1dc675b12bb..088692b2e27a 100644 --- a/drivers/mtd/nand/raw/fsl_elbc_nand.c +++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c @@ -956,8 +956,13 @@ static int fsl_elbc_nand_remove(struct platform_device *pdev) { struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand; struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev); + struct nand_chip *chip = &priv->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - nand_release(&priv->chip); fsl_elbc_chip_remove(priv); mutex_lock(&fsl_elbc_nand_mutex); diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index 2af09edf405b..00ae7a910b03 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -1093,8 +1093,13 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) static int fsl_ifc_nand_remove(struct platform_device *dev) { struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev); + struct nand_chip *chip = &priv->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - nand_release(&priv->chip); fsl_ifc_chip_remove(priv); mutex_lock(&fsl_ifc_nand_mutex); diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index f31fae3a4c68..627deb26db51 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -317,10 +317,13 @@ static int fun_probe(struct platform_device *ofdev) static int fun_remove(struct platform_device *ofdev) { struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev); - struct mtd_info *mtd = nand_to_mtd(&fun->chip); - int i; + struct nand_chip *chip = &fun->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret, i; - nand_release(&fun->chip); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(chip); kfree(mtd->name); for (i = 0; i < fun->mchip_count; i++) { diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index a6964feeec77..3909752b14c5 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -608,6 +608,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op, unsigned int op_id; int i; + if (check_only) + return 0; + pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); for (op_id = 0; op_id < op->ninstrs; op_id++) { @@ -691,7 +694,7 @@ static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf, for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { nand_read_page_op(chip, page, s * eccsize, NULL, 0); chip->ecc.hwctl(chip, NAND_ECC_READ); - ret = nand_read_data_op(chip, p, eccsize, false); + ret = nand_read_data_op(chip, p, eccsize, false, false); if (ret) return ret; @@ 
-809,11 +812,12 @@ static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat, i = 0; while (num_err--) { - change_bit(0, (unsigned long *)&err_idx[i]); - change_bit(1, (unsigned long *)&err_idx[i]); + err_idx[i] ^= 3; if (err_idx[i] < chip->ecc.size * 8) { - change_bit(err_idx[i], (unsigned long *)dat); + int err = err_idx[i]; + + dat[err >> 3] ^= BIT(err & 7); i++; } } @@ -1132,7 +1136,12 @@ static int fsmc_nand_remove(struct platform_device *pdev) struct fsmc_nand_data *host = platform_get_drvdata(pdev); if (host) { - nand_release(&host->nand); + struct nand_chip *chip = &host->nand; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); fsmc_nand_disable(host); if (host->mode == USE_DMA_ACCESS) { diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c index f6b12354024f..938077e5c6a9 100644 --- a/drivers/mtd/nand/raw/gpio.c +++ b/drivers/mtd/nand/raw/gpio.c @@ -190,8 +190,12 @@ gpio_nand_get_io_sync(struct platform_device *pdev) static int gpio_nand_remove(struct platform_device *pdev) { struct gpiomtd *gpiomtd = platform_get_drvdata(pdev); + struct nand_chip *chip = &gpiomtd->nand_chip; + int ret; - nand_release(&gpiomtd->nand_chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); /* Enable write protection and disable the chip */ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 53b00c841aec..061a8ddda275 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -540,8 +540,10 @@ static int bch_set_geometry(struct gpmi_nand_data *this) return ret; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(this->dev); return ret; + } /* * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this @@ -834,158 +836,6 @@ static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, return false; } -/** - * gpmi_copy_bits - copy bits from one memory region to another - * @dst: destination buffer - * @dst_bit_off: bit offset we're starting to write at - * @src: source buffer - * @src_bit_off: bit offset we're starting to read from - * @nbits: number of bits to copy - * - * This functions copies bits from one memory region to another, and is used by - * the GPMI driver to copy ECC sections which are not guaranteed to be byte - * aligned. - * - * src and dst should not overlap. - * - */ -static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src, - size_t src_bit_off, size_t nbits) -{ - size_t i; - size_t nbytes; - u32 src_buffer = 0; - size_t bits_in_src_buffer = 0; - - if (!nbits) - return; - - /* - * Move src and dst pointers to the closest byte pointer and store bit - * offsets within a byte. - */ - src += src_bit_off / 8; - src_bit_off %= 8; - - dst += dst_bit_off / 8; - dst_bit_off %= 8; - - /* - * Initialize the src_buffer value with bits available in the first - * byte of data so that we end up with a byte aligned src pointer. - */ - if (src_bit_off) { - src_buffer = src[0] >> src_bit_off; - if (nbits >= (8 - src_bit_off)) { - bits_in_src_buffer += 8 - src_bit_off; - } else { - src_buffer &= GENMASK(nbits - 1, 0); - bits_in_src_buffer += nbits; - } - nbits -= bits_in_src_buffer; - src++; - } - - /* Calculate the number of bytes that can be copied from src to dst. */ - nbytes = nbits / 8; - - /* Try to align dst to a byte boundary. 
*/ - if (dst_bit_off) { - if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) { - src_buffer |= src[0] << bits_in_src_buffer; - bits_in_src_buffer += 8; - src++; - nbytes--; - } - - if (bits_in_src_buffer >= (8 - dst_bit_off)) { - dst[0] &= GENMASK(dst_bit_off - 1, 0); - dst[0] |= src_buffer << dst_bit_off; - src_buffer >>= (8 - dst_bit_off); - bits_in_src_buffer -= (8 - dst_bit_off); - dst_bit_off = 0; - dst++; - if (bits_in_src_buffer > 7) { - bits_in_src_buffer -= 8; - dst[0] = src_buffer; - dst++; - src_buffer >>= 8; - } - } - } - - if (!bits_in_src_buffer && !dst_bit_off) { - /* - * Both src and dst pointers are byte aligned, thus we can - * just use the optimized memcpy function. - */ - if (nbytes) - memcpy(dst, src, nbytes); - } else { - /* - * src buffer is not byte aligned, hence we have to copy each - * src byte to the src_buffer variable before extracting a byte - * to store in dst. - */ - for (i = 0; i < nbytes; i++) { - src_buffer |= src[i] << bits_in_src_buffer; - dst[i] = src_buffer; - src_buffer >>= 8; - } - } - /* Update dst and src pointers */ - dst += nbytes; - src += nbytes; - - /* - * nbits is the number of remaining bits. It should not exceed 8 as - * we've already copied as much bytes as possible. - */ - nbits %= 8; - - /* - * If there's no more bits to copy to the destination and src buffer - * was already byte aligned, then we're done. - */ - if (!nbits && !bits_in_src_buffer) - return; - - /* Copy the remaining bits to src_buffer */ - if (nbits) - src_buffer |= (*src & GENMASK(nbits - 1, 0)) << - bits_in_src_buffer; - bits_in_src_buffer += nbits; - - /* - * In case there were not enough bits to get a byte aligned dst buffer - * prepare the src_buffer variable to match the dst organization (shift - * src_buffer by dst_bit_off and retrieve the least significant bits - * from dst). - */ - if (dst_bit_off) - src_buffer = (src_buffer << dst_bit_off) | - (*dst & GENMASK(dst_bit_off - 1, 0)); - bits_in_src_buffer += dst_bit_off; - - /* - * Keep most significant bits from dst if we end up with an unaligned - * number of bits. - */ - nbytes = bits_in_src_buffer / 8; - if (bits_in_src_buffer % 8) { - src_buffer |= (dst[nbytes] & - GENMASK(7, bits_in_src_buffer % 8)) << - (nbytes * 8); - nbytes++; - } - - /* Copy the remaining bytes to dst */ - for (i = 0; i < nbytes; i++) { - dst[i] = src_buffer; - src_buffer >>= 8; - } -} - /* add our owner bbt descriptor */ static uint8_t scan_ff_pattern[] = { 0xff }; static struct nand_bbt_descr gpmi_bbt_descr = { @@ -1713,7 +1563,7 @@ static int gpmi_ecc_write_oob(struct nand_chip *chip, int page) * inline (interleaved with payload DATA), and do not align data chunk on * byte boundaries. * We thus need to take care moving the payload data and ECC bits stored in the - * page into the provided buffers, which is why we're using gpmi_copy_bits. + * page into the provided buffers, which is why we're using nand_extract_bits(). * * See set_geometry_by_ecc_info inline comments to have a full description * of the layout used by the GPMI controller. 
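The removed gpmi_copy_bits() helper is superseded by the generic
nand_extract_bits() introduced further down in this series (see the
nand_base.c hunk). A minimal usage sketch, assuming the helper is declared
in <linux/mtd/rawnand.h> and using made-up chunk geometry rather than the
GPMI driver's real layout:

	#include <linux/mtd/rawnand.h>

	/*
	 * Unpack one chunk of a layout in which a byte-aligned payload area
	 * is immediately followed by a non-byte-aligned ECC area.
	 */
	static void demo_unpack_chunk(u8 *data, u8 *oob, const u8 *tmp_buf,
				      unsigned int eccsize,
				      unsigned int eccbits)
	{
		unsigned int src_bit_off = 0;

		/* Payload bits land byte-aligned in the destination buffer... */
		nand_extract_bits(data, 0, tmp_buf, src_bit_off, eccsize * 8);
		src_bit_off += eccsize * 8;

		/* ...while the trailing ECC bits usually do not. */
		nand_extract_bits(oob, 0, tmp_buf, src_bit_off, eccbits);
	}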
@@ -1762,9 +1612,8 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf, /* Extract interleaved payload data and ECC bits */ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) { if (buf) - gpmi_copy_bits(buf, step * eccsize * 8, - tmp_buf, src_bit_off, - eccsize * 8); + nand_extract_bits(buf, step * eccsize, tmp_buf, + src_bit_off, eccsize * 8); src_bit_off += eccsize * 8; /* Align last ECC block to align a byte boundary */ @@ -1773,9 +1622,8 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf, eccbits += 8 - ((oob_bit_off + eccbits) % 8); if (oob_required) - gpmi_copy_bits(oob, oob_bit_off, - tmp_buf, src_bit_off, - eccbits); + nand_extract_bits(oob, oob_bit_off, tmp_buf, + src_bit_off, eccbits); src_bit_off += eccbits; oob_bit_off += eccbits; @@ -1800,7 +1648,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf, * inline (interleaved with payload DATA), and do not align data chunk on * byte boundaries. * We thus need to take care moving the OOB area at the right place in the - * final page, which is why we're using gpmi_copy_bits. + * final page, which is why we're using nand_extract_bits(). * * See set_geometry_by_ecc_info inline comments to have a full description * of the layout used by the GPMI controller. @@ -1839,8 +1687,8 @@ static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf, /* Interleave payload data and ECC bits */ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) { if (buf) - gpmi_copy_bits(tmp_buf, dst_bit_off, - buf, step * eccsize * 8, eccsize * 8); + nand_extract_bits(tmp_buf, dst_bit_off, buf, + step * eccsize * 8, eccsize * 8); dst_bit_off += eccsize * 8; /* Align last ECC block to align a byte boundary */ @@ -1849,8 +1697,8 @@ static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf, eccbits += 8 - ((oob_bit_off + eccbits) % 8); if (oob_required) - gpmi_copy_bits(tmp_buf, dst_bit_off, - oob, oob_bit_off, eccbits); + nand_extract_bits(tmp_buf, dst_bit_off, oob, + oob_bit_off, eccbits); dst_bit_off += eccbits; oob_bit_off += eccbits; @@ -2408,6 +2256,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, struct completion *completion; unsigned long to; + if (check_only) + return 0; + this->ntransfers = 0; for (i = 0; i < GPMI_MAX_TRANSFERS; i++) this->transfers[i].direction = DMA_NONE; @@ -2658,7 +2509,7 @@ static int gpmi_nand_probe(struct platform_device *pdev) ret = __gpmi_enable_clk(this, true); if (ret) - goto exit_nfc_init; + goto exit_acquire_resources; pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); @@ -2693,11 +2544,15 @@ static int gpmi_nand_probe(struct platform_device *pdev) static int gpmi_nand_remove(struct platform_device *pdev) { struct gpmi_nand_data *this = platform_get_drvdata(pdev); + struct nand_chip *chip = &this->nand; + int ret; pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - nand_release(&this->nand); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); gpmi_free_dma_buffer(this); release_resources(this); return 0; diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c index 0b48be54ba6f..b84238e2268a 100644 --- a/drivers/mtd/nand/raw/hisi504_nand.c +++ b/drivers/mtd/nand/raw/hisi504_nand.c @@ -806,8 +806,12 @@ static int hisi_nfc_probe(struct platform_device *pdev) static int hisi_nfc_remove(struct platform_device *pdev) { struct hinfc_host *host = platform_get_drvdata(pdev); + struct nand_chip 
*chip = &host->chip; + int ret; - nand_release(&host->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); return 0; } diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c index 935c4902ada7..69423bb29adb 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c +++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c @@ -27,9 +27,6 @@ #define DRV_NAME "ingenic-nand" -/* Command delay when there is no R/B pin. */ -#define RB_DELAY_US 100 - struct jz_soc_info { unsigned long data_offset; unsigned long addr_offset; @@ -49,7 +46,6 @@ struct ingenic_nfc { struct nand_controller controller; unsigned int num_banks; struct list_head chips; - int selected; struct ingenic_nand_cs cs[]; }; @@ -102,7 +98,7 @@ static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section, return 0; } -const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = { +static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = { .ecc = qi_lb60_ooblayout_ecc, .free = qi_lb60_ooblayout_free, }; @@ -142,51 +138,6 @@ static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = { .free = jz4725b_ooblayout_free, }; -static void ingenic_nand_select_chip(struct nand_chip *chip, int chipnr) -{ - struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); - struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller); - struct ingenic_nand_cs *cs; - - /* Ensure the currently selected chip is deasserted. */ - if (chipnr == -1 && nfc->selected >= 0) { - cs = &nfc->cs[nfc->selected]; - jz4780_nemc_assert(nfc->dev, cs->bank, false); - } - - nfc->selected = chipnr; -} - -static void ingenic_nand_cmd_ctrl(struct nand_chip *chip, int cmd, - unsigned int ctrl) -{ - struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); - struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller); - struct ingenic_nand_cs *cs; - - if (WARN_ON(nfc->selected < 0)) - return; - - cs = &nfc->cs[nfc->selected]; - - jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE); - - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_ALE) - writeb(cmd, cs->base + nfc->soc_info->addr_offset); - else if (ctrl & NAND_CLE) - writeb(cmd, cs->base + nfc->soc_info->cmd_offset); -} - -static int ingenic_nand_dev_ready(struct nand_chip *chip) -{ - struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); - - return !gpiod_get_value_cansleep(nand->busy_gpio); -} - static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode) { struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); @@ -298,8 +249,91 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip) return 0; } +static int ingenic_nand_exec_instr(struct nand_chip *chip, + struct ingenic_nand_cs *cs, + const struct nand_op_instr *instr) +{ + struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); + struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller); + unsigned int i; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb(instr->ctx.cmd.opcode, + cs->base + nfc->soc_info->cmd_offset); + return 0; + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + writeb(instr->ctx.addr.addrs[i], + cs->base + nfc->soc_info->addr_offset); + return 0; + case NAND_OP_DATA_IN_INSTR: + if (instr->ctx.data.force_8bit || + !(chip->options & NAND_BUSWIDTH_16)) + ioread8_rep(cs->base + nfc->soc_info->data_offset, + instr->ctx.data.buf.in, + instr->ctx.data.len); + else + ioread16_rep(cs->base + nfc->soc_info->data_offset, + instr->ctx.data.buf.in, + 
instr->ctx.data.len); + return 0; + case NAND_OP_DATA_OUT_INSTR: + if (instr->ctx.data.force_8bit || + !(chip->options & NAND_BUSWIDTH_16)) + iowrite8_rep(cs->base + nfc->soc_info->data_offset, + instr->ctx.data.buf.out, + instr->ctx.data.len); + else + iowrite16_rep(cs->base + nfc->soc_info->data_offset, + instr->ctx.data.buf.out, + instr->ctx.data.len); + return 0; + case NAND_OP_WAITRDY_INSTR: + if (!nand->busy_gpio) + return nand_soft_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); + + return nand_gpio_waitrdy(chip, nand->busy_gpio, + instr->ctx.waitrdy.timeout_ms); + default: + break; + } + + return -EINVAL; +} + +static int ingenic_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip)); + struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller); + struct ingenic_nand_cs *cs; + unsigned int i; + int ret = 0; + + if (check_only) + return 0; + + cs = &nfc->cs[op->cs]; + jz4780_nemc_assert(nfc->dev, cs->bank, true); + for (i = 0; i < op->ninstrs; i++) { + ret = ingenic_nand_exec_instr(chip, cs, &op->instrs[i]); + if (ret) + break; + + if (op->instrs[i].delay_ns) + ndelay(op->instrs[i].delay_ns); + } + jz4780_nemc_assert(nfc->dev, cs->bank, false); + + return ret; +} + static const struct nand_controller_ops ingenic_nand_controller_ops = { .attach_chip = ingenic_nand_attach_chip, + .exec_op = ingenic_nand_exec_op, }; static int ingenic_nand_init_chip(struct platform_device *pdev, @@ -339,10 +373,20 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, ret = PTR_ERR(nand->busy_gpio); dev_err(dev, "failed to request busy GPIO: %d\n", ret); return ret; - } else if (nand->busy_gpio) { - nand->chip.legacy.dev_ready = ingenic_nand_dev_ready; } + /* + * The rb-gpios semantics was undocumented and qi,lb60 (along with + * the ingenic driver) got it wrong. The active state encodes the + * NAND ready state, which is high level. Since there's no signal + * inverter on this board, it should be active-high. Let's fix that + * here for older DTs so we can re-use the generic nand_gpio_waitrdy() + * helper, and be consistent with what other drivers do. 
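+ *
+ * In practice this means new device trees should describe the pin as
+ * GPIO_ACTIVE_HIGH (ready == high level); the polarity toggle below only
+ * papers over the historical qi,lb60 description.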
+ */ + if (of_machine_is_compatible("qi,lb60") && + gpiod_is_active_low(nand->busy_gpio)) + gpiod_toggle_active_low(nand->busy_gpio); + nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW); if (IS_ERR(nand->wp_gpio)) { @@ -359,12 +403,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, return -ENOMEM; mtd->dev.parent = dev; - chip->legacy.IO_ADDR_R = cs->base + nfc->soc_info->data_offset; - chip->legacy.IO_ADDR_W = cs->base + nfc->soc_info->data_offset; - chip->legacy.chip_delay = RB_DELAY_US; chip->options = NAND_NO_SUBPAGE_WRITE; - chip->legacy.select_chip = ingenic_nand_select_chip; - chip->legacy.cmd_ctrl = ingenic_nand_cmd_ctrl; chip->ecc.mode = NAND_ECC_HW; chip->controller = &nfc->controller; nand_set_flash_node(chip, np); @@ -376,7 +415,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, ret = mtd_device_register(mtd, NULL, 0); if (ret) { - nand_release(chip); + nand_cleanup(chip); return ret; } @@ -387,13 +426,18 @@ static int ingenic_nand_init_chip(struct platform_device *pdev, static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc) { - struct ingenic_nand *chip; + struct ingenic_nand *ingenic_chip; + struct nand_chip *chip; + int ret; while (!list_empty(&nfc->chips)) { - chip = list_first_entry(&nfc->chips, - struct ingenic_nand, chip_list); - nand_release(&chip->chip); - list_del(&chip->chip_list); + ingenic_chip = list_first_entry(&nfc->chips, + struct ingenic_nand, chip_list); + chip = &ingenic_chip->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&ingenic_chip->chip_list); } } diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 9d0caadf940e..03866b0aadea 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -75,6 +75,9 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops; extern const struct nand_manufacturer_ops samsung_nand_manuf_ops; extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; +/* MLC pairing schemes */ +extern const struct mtd_pairing_scheme dist3_pairing_scheme; + /* Core functions */ const struct nand_manufacturer *nand_get_manufacturer(u8 id); int nand_bbm_get_next_page(struct nand_chip *chip, int page); @@ -106,6 +109,15 @@ static inline bool nand_has_exec_op(struct nand_chip *chip) return true; } +static inline int nand_check_op(struct nand_chip *chip, + const struct nand_operation *op) +{ + if (!nand_has_exec_op(chip)) + return 0; + + return chip->controller->ops->exec_op(chip, op, true); +} + static inline int nand_exec_op(struct nand_chip *chip, const struct nand_operation *op) { diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c index 241b58b83240..7521038af2ef 100644 --- a/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -826,8 +826,13 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) static int lpc32xx_nand_remove(struct platform_device *pdev) { struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->nand_chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - nand_release(&host->nand_chip); free_irq(host->irq, host); if (use_dma) dma_release_channel(host->dma_chan); diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c index 163f976353f8..b151fd000815 100644 --- a/drivers/mtd/nand/raw/lpc32xx_slc.c +++ b/drivers/mtd/nand/raw/lpc32xx_slc.c @@ -947,8 
+947,12 @@ static int lpc32xx_nand_remove(struct platform_device *pdev) { uint32_t tmp; struct lpc32xx_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->nand_chip; + int ret; - nand_release(&host->nand_chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); dma_release_channel(host->dma_chan); /* Force CE high */ diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 179f0ca585f8..260a0430313e 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -707,7 +707,7 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms) * In case the interrupt was not served in the required time frame, * check if the ISR was not served or if something went actually wrong. */ - if (ret && !pending) { + if (!ret && !pending) { dev_err(nfc->dev, "Timeout waiting for RB signal\n"); return -ETIMEDOUT; } @@ -932,14 +932,14 @@ static void marvell_nfc_check_empty_chunk(struct nand_chip *chip, } /* - * Check a chunk is correct or not according to hardware ECC engine. + * Check if a chunk is correct or not according to the hardware ECC engine. * mtd->ecc_stats.corrected is updated, as well as max_bitflips, however * mtd->ecc_stats.failure is not, the function will instead return a non-zero * value indicating that a check on the emptyness of the subpage must be - * performed before declaring the subpage corrupted. + * performed before actually declaring the subpage as "corrupted". */ -static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip, - unsigned int *max_bitflips) +static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip, + unsigned int *max_bitflips) { struct mtd_info *mtd = nand_to_mtd(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); @@ -1053,7 +1053,7 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf, marvell_nfc_enable_hw_ecc(chip); marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false, page); - ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips); + ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips); marvell_nfc_disable_hw_ecc(chip); if (!ret) @@ -1224,12 +1224,12 @@ static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf, /* Read spare bytes */ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk), - spare_len, false); + spare_len, false, false); /* Read ECC bytes */ nand_read_data_op(chip, oob + ecc_offset + (ALIGN(lt->ecc_bytes, 32) * chunk), - ecc_len, false); + ecc_len, false, false); } return 0; @@ -1336,7 +1336,7 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip, /* Read the chunk and detect number of bitflips */ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len, spare, spare_len, page); - ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips); + ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips); if (ret) failure_mask |= BIT(chunk); @@ -1358,10 +1358,9 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip, */ /* - * In case there is any subpage read error reported by ->correct(), we - * usually re-read only ECC bytes in raw mode and check if the whole - * page is empty. In this case, it is normal that the ECC check failed - * and we just ignore the error. + * In case there is any subpage read error, we usually re-read only ECC + * bytes in raw mode and check if the whole page is empty. 
In this case, + * it is normal that the ECC check failed and we just ignore the error. * * However, it has been empirically observed that for some layouts (e.g * 2k page, 8b strength per 512B chunk), the controller tries to correct @@ -2107,7 +2106,8 @@ static int marvell_nfc_exec_op(struct nand_chip *chip, { struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); - marvell_nfc_select_target(chip, op->cs); + if (!check_only) + marvell_nfc_select_target(chip, op->cs); if (nfc->caps->is_nfcv2) return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser, @@ -2166,8 +2166,8 @@ static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = { .free = marvell_nand_ooblayout_free, }; -static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, - struct nand_ecc_ctrl *ecc) +static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd, + struct nand_ecc_ctrl *ecc) { struct nand_chip *chip = mtd_to_nand(mtd); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); @@ -2261,7 +2261,7 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd, switch (ecc->mode) { case NAND_ECC_HW: - ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc); + ret = marvell_nand_hw_ecc_controller_init(mtd, ecc); if (ret) return ret; break; @@ -2664,7 +2664,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(chip); + nand_cleanup(chip); return ret; } @@ -2673,6 +2673,21 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, return 0; } +static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) +{ + struct marvell_nand_chip *entry, *temp; + struct nand_chip *chip; + int ret; + + list_for_each_entry_safe(entry, temp, &nfc->chips, node) { + chip = &entry->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&entry->node); + } +} + static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) { struct device_node *np = dev->of_node; @@ -2707,21 +2722,16 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc) ret = marvell_nand_chip_init(dev, nfc, nand_np); if (ret) { of_node_put(nand_np); - return ret; + goto cleanup_chips; } } return 0; -} -static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc) -{ - struct marvell_nand_chip *entry, *temp; +cleanup_chips: + marvell_nand_chips_cleanup(nfc); - list_for_each_entry_safe(entry, temp, &nfc->chips, node) { - nand_release(&entry->chip); - list_del(&entry->node); - } + return ret; } static int marvell_nfc_init_dma(struct marvell_nfc *nfc) @@ -2854,7 +2864,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc) static int marvell_nfc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *r; struct marvell_nfc *nfc; int ret; int irq; @@ -2869,8 +2878,7 @@ static int marvell_nfc_probe(struct platform_device *pdev) nfc->controller.ops = &marvell_nand_controller_ops; INIT_LIST_HEAD(&nfc->chips); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nfc->regs = devm_ioremap_resource(dev, r); + nfc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(nfc->regs)) return PTR_ERR(nfc->regs); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index f6fb5c0e6255..3f376471f3f7 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -899,6 +899,9 @@ static int meson_nfc_exec_op(struct 
nand_chip *nand, u32 op_id, delay_idle, cmd; int i; + if (check_only) + return 0; + meson_nfc_select_chip(nand, op->cs); for (op_id = 0; op_id < op->ninstrs; op_id++) { instr = &op->instrs[op_id]; @@ -1266,7 +1269,7 @@ meson_nfc_nand_chip_init(struct device *dev, nand_set_flash_node(nand, np); nand_set_controller_data(nand, nfc); - nand->options |= NAND_USE_BOUNCE_BUFFER; + nand->options |= NAND_USES_DMA; mtd = nand_to_mtd(nand); mtd->owner = THIS_MODULE; mtd->dev.parent = dev; diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index a2fcb739e5f8..18ecb096a32d 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -805,8 +805,11 @@ static int mpc5121_nfc_remove(struct platform_device *op) { struct device *dev = &op->dev; struct mtd_info *mtd = dev_get_drvdata(dev); + int ret; - nand_release(mtd_to_nand(mtd)); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(mtd_to_nand(mtd)); mpc5121_nfc_free(dev, mtd); return 0; diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index ef149e8b26d0..c1a6e31aabb8 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1380,7 +1380,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, nand_set_flash_node(nand, np); nand_set_controller_data(nand, nfc); - nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ; + nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ; nand->legacy.dev_ready = mtk_nfc_dev_ready; nand->legacy.select_chip = mtk_nfc_select_chip; nand->legacy.write_byte = mtk_nfc_write_byte; @@ -1419,7 +1419,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "mtd parse partition error\n"); - nand_release(nand); + nand_cleanup(nand); return ret; } @@ -1578,13 +1578,18 @@ static int mtk_nfc_probe(struct platform_device *pdev) static int mtk_nfc_remove(struct platform_device *pdev) { struct mtk_nfc *nfc = platform_get_drvdata(pdev); - struct mtk_nfc_nand_chip *chip; + struct mtk_nfc_nand_chip *mtk_chip; + struct nand_chip *chip; + int ret; while (!list_empty(&nfc->chips)) { - chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, - node); - nand_release(&chip->nand); - list_del(&chip->node); + mtk_chip = list_first_entry(&nfc->chips, + struct mtk_nfc_nand_chip, node); + chip = &mtk_chip->nand; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&mtk_chip->node); } mtk_ecc_release(nfc->ecc); diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 59554c187e01..09dacb83cb5a 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -1919,8 +1919,12 @@ static int mxcnd_probe(struct platform_device *pdev) static int mxcnd_remove(struct platform_device *pdev) { struct mxc_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->nand; + int ret; - nand_release(&host->nand); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); if (host->clk_act) clk_disable_unprepare(host->clk); diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index ed7a4e021bf5..57f36721f4c6 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -393,6 +393,9 @@ static int mxic_nfc_exec_op(struct nand_chip *chip, int ret = 0; unsigned int op_id; + if (check_only) + return 0; + 
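+ /*
+  * Past this point the controller is actually driven (CS enable, IRQ
+  * completion, register writes); the early return above lets the core
+  * probe operation support (check_only) without touching the hardware.
+  */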
mxic_nfc_cs_enable(nfc); init_completion(&nfc->complete); for (op_id = 0; op_id < op->ninstrs; op_id++) { @@ -553,8 +556,13 @@ static int mxic_nfc_probe(struct platform_device *pdev) static int mxic_nfc_remove(struct platform_device *pdev) { struct mxic_nand_ctlr *nfc = platform_get_drvdata(pdev); + struct nand_chip *chip = &nfc->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - nand_release(&nfc->chip); mxic_nfc_clk_disable(nfc); return 0; } diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index c24e5e2ba130..45124dbb1835 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -205,6 +205,56 @@ static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { .free = nand_ooblayout_free_lp_hamming, }; +static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page, + struct mtd_pairing_info *info) +{ + int lastpage = (mtd->erasesize / mtd->writesize) - 1; + int dist = 3; + + if (page == lastpage) + dist = 2; + + if (!page || (page & 1)) { + info->group = 0; + info->pair = (page + 1) / 2; + } else { + info->group = 1; + info->pair = (page + 1 - dist) / 2; + } + + return 0; +} + +static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd, + const struct mtd_pairing_info *info) +{ + int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2; + int page = info->pair * 2; + int dist = 3; + + if (!info->group && !info->pair) + return 0; + + if (info->pair == lastpair && info->group) + dist = 2; + + if (!info->group) + page--; + else if (info->pair) + page += dist - 1; + + if (page >= mtd->erasesize / mtd->writesize) + return -EINVAL; + + return page; +} + +const struct mtd_pairing_scheme dist3_pairing_scheme = { + .ngroups = 2, + .get_info = nand_pairing_dist3_get_info, + .get_wunit = nand_pairing_dist3_get_wunit, +}; + static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len) { int ret = 0; @@ -224,6 +274,50 @@ static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len) return ret; } +/** + * nand_extract_bits - Copy unaligned bits from one buffer to another one + * @dst: destination buffer + * @dst_off: bit offset at which the writing starts + * @src: source buffer + * @src_off: bit offset at which the reading starts + * @nbits: number of bits to copy from @src to @dst + * + * Copy bits from one memory region to another (overlap authorized). + */ +void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, + unsigned int src_off, unsigned int nbits) +{ + unsigned int tmp, n; + + dst += dst_off / 8; + dst_off %= 8; + src += src_off / 8; + src_off %= 8; + + while (nbits) { + n = min3(8 - dst_off, 8 - src_off, nbits); + + tmp = (*src >> src_off) & GENMASK(n - 1, 0); + *dst &= ~GENMASK(n - 1 + dst_off, dst_off); + *dst |= tmp << dst_off; + + dst_off += n; + if (dst_off >= 8) { + dst++; + dst_off -= 8; + } + + src_off += n; + if (src_off >= 8) { + src++; + src_off -= 8; + } + + nbits -= n; + } +} +EXPORT_SYMBOL_GPL(nand_extract_bits); + /** * nand_select_target() - Select a NAND target (A.K.A. 
die) * @chip: NAND chip object @@ -345,6 +439,9 @@ static int nand_block_bad(struct nand_chip *chip, loff_t ofs) static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) { + if (chip->options & NAND_NO_BBM_QUIRK) + return 0; + if (chip->legacy.block_bad) return chip->legacy.block_bad(chip, ofs); @@ -690,7 +787,8 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) */ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; do { - ret = nand_read_data_op(chip, &status, sizeof(status), true); + ret = nand_read_data_op(chip, &status, sizeof(status), true, + false); if (ret) break; @@ -736,8 +834,14 @@ EXPORT_SYMBOL_GPL(nand_soft_waitrdy); int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms) { - /* Wait until R/B pin indicates chip is ready or timeout occurs */ - timeout_ms = jiffies + msecs_to_jiffies(timeout_ms); + + /* + * Wait until R/B pin indicates chip is ready or timeout occurs. + * +1 below is necessary because if we are now in the last fraction + * of jiffy and msecs_to_jiffies is 1 then we will wait only that + * small jiffy fraction - possibly leading to false timeout. + */ + timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1; do { if (gpiod_get_value_cansleep(gpiod)) return 0; @@ -770,7 +874,7 @@ void panic_nand_wait(struct nand_chip *chip, unsigned long timeo) u8 status; ret = nand_read_data_op(chip, &status, sizeof(status), - true); + true, false); if (ret) return; @@ -1868,6 +1972,8 @@ EXPORT_SYMBOL_GPL(nand_reset_op); * @buf: buffer used to store the data * @len: length of the buffer * @force_8bit: force 8-bit bus access + * @check_only: do not actually run the command, only checks if the + * controller driver supports it * * This function does a raw data read on the bus. Usually used after launching * another NAND operation like nand_read_page_op(). @@ -1876,7 +1982,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op); * Returns 0 on success, a negative error code otherwise. 
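 *
 * A hedged example of the dry-run mode added by @check_only (the length
 * value is illustrative): report, without driving the bus, whether a
 * plain 4096-byte data-in cycle is supported:
 *
 *	ret = nand_read_data_op(chip, buf, 4096, false, true);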
*/ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, - bool force_8bit) + bool force_8bit, bool check_only) { if (!len || !buf) return -EINVAL; @@ -1889,9 +1995,15 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, instrs[0].ctx.data.force_8bit = force_8bit; + if (check_only) + return nand_check_op(chip, &op); + return nand_exec_op(chip, &op); } + if (check_only) + return 0; + if (force_8bit) { u8 *p = buf; unsigned int i; @@ -2112,7 +2224,7 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx) char *prefix = " "; unsigned int i; - pr_debug("executing subop:\n"); + pr_debug("executing subop (CS%d):\n", ctx->subop.cs); for (i = 0; i < ctx->ninstrs; i++) { instr = &ctx->instrs[i]; @@ -2176,6 +2288,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { struct nand_op_parser_ctx ctx = { + .subop.cs = op->cs, .subop.instrs = op->instrs, .instrs = op->instrs, .ninstrs = op->ninstrs, @@ -2620,7 +2733,7 @@ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, if (oob_required) { ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, - false); + false, false); if (ret) return ret; } @@ -2629,6 +2742,47 @@ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, } EXPORT_SYMBOL(nand_read_page_raw); +/** + * nand_monolithic_read_page_raw - Monolithic page read in raw mode + * @chip: NAND chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * This is a raw page read, ie. without any error detection/correction. + * Monolithic means we are requesting all the relevant data (main plus + * eventually OOB) to be loaded in the NAND cache and sent over the + * bus (from the NAND chip to the NAND controller) in a single + * operation. This is an alternative to nand_read_page_raw(), which + * first reads the main data, and if the OOB data is requested too, + * then reads more data on the bus. 
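+ *
+ * A hedged usage sketch (the dma_avail flag is an assumption of the
+ * example, not something the core checks):
+ *
+ *	if (nfc->dma_avail)
+ *		chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ *	else
+ *		chip->ecc.read_page_raw = nand_read_page_raw;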
+ */ +int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int size = mtd->writesize; + u8 *read_buf = buf; + int ret; + + if (oob_required) { + size += mtd->oobsize; + + if (buf != chip->data_buf) + read_buf = nand_get_data_buf(chip); + } + + ret = nand_read_page_op(chip, page, 0, read_buf, size); + if (ret) + return ret; + + if (buf != chip->data_buf) + memcpy(buf, read_buf, mtd->writesize); + + return 0; +} +EXPORT_SYMBOL(nand_monolithic_read_page_raw); + /** * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc * @chip: nand chip info structure @@ -2652,7 +2806,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, return ret; for (steps = chip->ecc.steps; steps > 0; steps--) { - ret = nand_read_data_op(chip, buf, eccsize, false); + ret = nand_read_data_op(chip, buf, eccsize, false, false); if (ret) return ret; @@ -2660,14 +2814,14 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, if (chip->ecc.prepad) { ret = nand_read_data_op(chip, oob, chip->ecc.prepad, - false); + false, false); if (ret) return ret; oob += chip->ecc.prepad; } - ret = nand_read_data_op(chip, oob, eccbytes, false); + ret = nand_read_data_op(chip, oob, eccbytes, false, false); if (ret) return ret; @@ -2675,7 +2829,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, if (chip->ecc.postpad) { ret = nand_read_data_op(chip, oob, chip->ecc.postpad, - false); + false, false); if (ret) return ret; @@ -2685,7 +2839,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf, size = mtd->oobsize - (oob - chip->oob_poi); if (size) { - ret = nand_read_data_op(chip, oob, size, false); + ret = nand_read_data_op(chip, oob, size, false, false); if (ret) return ret; } @@ -2878,14 +3032,15 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf, for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { chip->ecc.hwctl(chip, NAND_ECC_READ); - ret = nand_read_data_op(chip, p, eccsize, false); + ret = nand_read_data_op(chip, p, eccsize, false, false); if (ret) return ret; chip->ecc.calculate(chip, p, &ecc_calc[i]); } - ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false); + ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, + false); if (ret) return ret; @@ -2920,76 +3075,6 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf, return max_bitflips; } -/** - * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first - * @chip: nand chip info structure - * @buf: buffer to store read data - * @oob_required: caller requires OOB data read to chip->oob_poi - * @page: page number to read - * - * Hardware ECC for large page chips, require OOB to be read first. For this - * ECC mode, the write_page method is re-used from ECC_HW. These methods - * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with - * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from - * the data area, by overwriting the NAND manufacturer bad block markings. 
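Illustrative sketch only (not part of the patch): a controller driver whose DMA engine always transfers the main area and the OOB area in one shot could point its raw accessors at the monolithic read helper above and at its write counterpart added further down. The driver name and hook below are hypothetical, and the helpers are assumed to be exposed to drivers through the raw NAND header.

        #include <linux/mtd/rawnand.h>

        static int foo_nfc_attach_chip(struct nand_chip *chip)
        {
                /* Raw accessors matching a whole-page (data + OOB) transfer */
                chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
                chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

                return 0;
        }

This is also why nand_set_ecc_soft_ops() and the Micron code further down stop overwriting read_page_raw/write_page_raw when the driver has already provided them.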
- */ -static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf, - int oob_required, int page) -{ - struct mtd_info *mtd = nand_to_mtd(chip); - int i, eccsize = chip->ecc.size, ret; - int eccbytes = chip->ecc.bytes; - int eccsteps = chip->ecc.steps; - uint8_t *p = buf; - uint8_t *ecc_code = chip->ecc.code_buf; - uint8_t *ecc_calc = chip->ecc.calc_buf; - unsigned int max_bitflips = 0; - - /* Read the OOB area first */ - ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize); - if (ret) - return ret; - - ret = nand_read_page_op(chip, page, 0, NULL, 0); - if (ret) - return ret; - - ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, - chip->ecc.total); - if (ret) - return ret; - - for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { - int stat; - - chip->ecc.hwctl(chip, NAND_ECC_READ); - - ret = nand_read_data_op(chip, p, eccsize, false); - if (ret) - return ret; - - chip->ecc.calculate(chip, p, &ecc_calc[i]); - - stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); - if (stat == -EBADMSG && - (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { - /* check for empty pages with bitflips */ - stat = nand_check_erased_ecc_chunk(p, eccsize, - &ecc_code[i], eccbytes, - NULL, 0, - chip->ecc.strength); - } - - if (stat < 0) { - mtd->ecc_stats.failed++; - } else { - mtd->ecc_stats.corrected += stat; - max_bitflips = max_t(unsigned int, max_bitflips, stat); - } - } - return max_bitflips; -} - /** * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read * @chip: nand chip info structure @@ -3021,13 +3106,13 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, chip->ecc.hwctl(chip, NAND_ECC_READ); - ret = nand_read_data_op(chip, p, eccsize, false); + ret = nand_read_data_op(chip, p, eccsize, false, false); if (ret) return ret; if (chip->ecc.prepad) { ret = nand_read_data_op(chip, oob, chip->ecc.prepad, - false); + false, false); if (ret) return ret; @@ -3036,7 +3121,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, chip->ecc.hwctl(chip, NAND_ECC_READSYN); - ret = nand_read_data_op(chip, oob, eccbytes, false); + ret = nand_read_data_op(chip, oob, eccbytes, false, false); if (ret) return ret; @@ -3046,7 +3131,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, if (chip->ecc.postpad) { ret = nand_read_data_op(chip, oob, chip->ecc.postpad, - false); + false, false); if (ret) return ret; @@ -3074,7 +3159,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf, /* Calculate remaining oob bytes */ i = mtd->oobsize - (oob - chip->oob_poi); if (i) { - ret = nand_read_data_op(chip, oob, i, false); + ret = nand_read_data_op(chip, oob, i, false, false); if (ret) return ret; } @@ -3166,7 +3251,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, uint32_t max_oobsize = mtd_oobavail(mtd, ops); uint8_t *bufpoi, *oob, *buf; - int use_bufpoi; + int use_bounce_buf; unsigned int max_bitflips = 0; int retry_mode = 0; bool ecc_fail = false; @@ -3184,25 +3269,25 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, oob_required = oob ? 
1 : 0; while (1) { - unsigned int ecc_failures = mtd->ecc_stats.failed; + struct mtd_ecc_stats ecc_stats = mtd->ecc_stats; bytes = min(mtd->writesize - col, readlen); aligned = (bytes == mtd->writesize); if (!aligned) - use_bufpoi = 1; - else if (chip->options & NAND_USE_BOUNCE_BUFFER) - use_bufpoi = !virt_addr_valid(buf) || - !IS_ALIGNED((unsigned long)buf, - chip->buf_align); + use_bounce_buf = 1; + else if (chip->options & NAND_USES_DMA) + use_bounce_buf = !virt_addr_valid(buf) || + !IS_ALIGNED((unsigned long)buf, + chip->buf_align); else - use_bufpoi = 0; + use_bounce_buf = 0; /* Is the current page in the buffer? */ if (realpage != chip->pagecache.page || oob) { - bufpoi = use_bufpoi ? chip->data_buf : buf; + bufpoi = use_bounce_buf ? chip->data_buf : buf; - if (use_bufpoi && aligned) + if (use_bounce_buf && aligned) pr_debug("%s: using read bounce buffer for buf@%p\n", __func__, buf); @@ -3223,16 +3308,19 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, ret = chip->ecc.read_page(chip, bufpoi, oob_required, page); if (ret < 0) { - if (use_bufpoi) + if (use_bounce_buf) /* Invalidate page cache */ chip->pagecache.page = -1; break; } - /* Transfer not aligned data */ - if (use_bufpoi) { + /* + * Copy back the data in the initial buffer when reading + * partial pages or when a bounce buffer is required. + */ + if (use_bounce_buf) { if (!NAND_HAS_SUBPAGE_READ(chip) && !oob && - !(mtd->ecc_stats.failed - ecc_failures) && + !(mtd->ecc_stats.failed - ecc_stats.failed) && (ops->mode != MTD_OPS_RAW)) { chip->pagecache.page = realpage; chip->pagecache.bitflips = ret; @@ -3240,7 +3328,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, /* Invalidate page cache */ chip->pagecache.page = -1; } - memcpy(buf, chip->data_buf + col, bytes); + memcpy(buf, bufpoi + col, bytes); } if (unlikely(oob)) { @@ -3255,7 +3343,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, nand_wait_readrdy(chip); - if (mtd->ecc_stats.failed - ecc_failures) { + if (mtd->ecc_stats.failed - ecc_stats.failed) { if (retry_mode + 1 < chip->read_retries) { retry_mode++; ret = nand_setup_read_retry(chip, @@ -3263,8 +3351,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, if (ret < 0) break; - /* Reset failures; retry */ - mtd->ecc_stats.failed = ecc_failures; + /* Reset ecc_stats; retry */ + mtd->ecc_stats = ecc_stats; goto read_retry; } else { /* No more retry modes; real failure */ @@ -3373,7 +3461,7 @@ static int nand_read_oob_syndrome(struct nand_chip *chip, int page) sndrnd = 1; toread = min_t(int, length, chunk); - ret = nand_read_data_op(chip, bufpoi, toread, false); + ret = nand_read_data_op(chip, bufpoi, toread, false, false); if (ret) return ret; @@ -3381,7 +3469,7 @@ static int nand_read_oob_syndrome(struct nand_chip *chip, int page) length -= toread; } if (length > 0) { - ret = nand_read_data_op(chip, bufpoi, length, false); + ret = nand_read_data_op(chip, bufpoi, length, false, false); if (ret) return ret; } @@ -3633,6 +3721,42 @@ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, } EXPORT_SYMBOL(nand_write_page_raw); +/** + * nand_monolithic_write_page_raw - Monolithic page write in raw mode + * @chip: NAND chip info structure + * @buf: data buffer to write + * @oob_required: must write chip->oob_poi to OOB + * @page: page number to write + * + * This is a raw page write, ie. without any error detection/correction. 
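The use_bufpoi -> use_bounce_buf rename goes together with the NAND_USE_BOUNCE_BUFFER -> NAND_USES_DMA rename: a bounce buffer is needed either for partial-page accesses or when a DMA-capable controller is handed a buffer it cannot DMA to/from. A minimal sketch of that decision, factored as a hypothetical helper (not part of the patch):

        #include <linux/kernel.h>
        #include <linux/mm.h>
        #include <linux/mtd/rawnand.h>

        static bool example_needs_bounce_buf(struct nand_chip *chip,
                                             const void *buf, bool partial_access)
        {
                if (partial_access)
                        return true;

                if (!(chip->options & NAND_USES_DMA))
                        return false;

                /* DMA path: the user buffer must be DMA-able and aligned */
                return !virt_addr_valid(buf) ||
                       !IS_ALIGNED((unsigned long)buf, chip->buf_align);
        }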
+ * Monolithic means we are requesting all the relevant data (main plus + * eventually OOB) to be sent over the bus and effectively programmed + * into the NAND chip arrays in a single operation. This is an + * alternative to nand_write_page_raw(), which first sends the main + * data, then eventually send the OOB data by latching more data + * cycles on the NAND bus, and finally sends the program command to + * synchronyze the NAND chip cache. + */ +int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int size = mtd->writesize; + u8 *write_buf = (u8 *)buf; + + if (oob_required) { + size += mtd->oobsize; + + if (buf != chip->data_buf) { + write_buf = nand_get_data_buf(chip); + memcpy(write_buf, buf, mtd->writesize); + } + } + + return nand_prog_page_op(chip, page, 0, write_buf, size); +} +EXPORT_SYMBOL(nand_monolithic_write_page_raw); + /** * nand_write_page_raw_syndrome - [INTERN] raw page write function * @chip: nand chip info structure @@ -4012,20 +4136,23 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to, while (1) { int bytes = mtd->writesize; uint8_t *wbuf = buf; - int use_bufpoi; + int use_bounce_buf; int part_pagewr = (column || writelen < mtd->writesize); if (part_pagewr) - use_bufpoi = 1; - else if (chip->options & NAND_USE_BOUNCE_BUFFER) - use_bufpoi = !virt_addr_valid(buf) || - !IS_ALIGNED((unsigned long)buf, - chip->buf_align); + use_bounce_buf = 1; + else if (chip->options & NAND_USES_DMA) + use_bounce_buf = !virt_addr_valid(buf) || + !IS_ALIGNED((unsigned long)buf, + chip->buf_align); else - use_bufpoi = 0; + use_bounce_buf = 0; - /* Partial page write?, or need to use bounce buffer */ - if (use_bufpoi) { + /* + * Copy the data from the initial buffer when doing partial page + * writes or when a bounce buffer is required. + */ + if (use_bounce_buf) { pr_debug("%s: using write bounce buffer for buf@%p\n", __func__, buf); if (part_pagewr) @@ -4883,7 +5010,6 @@ static const char * const nand_ecc_modes[] = { [NAND_ECC_SOFT] = "soft", [NAND_ECC_HW] = "hw", [NAND_ECC_HW_SYNDROME] = "hw_syndrome", - [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first", [NAND_ECC_ON_DIE] = "on-die", }; @@ -4896,14 +5022,14 @@ static int of_get_nand_ecc_mode(struct device_node *np) if (err < 0) return err; - for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++) + for (i = NAND_ECC_NONE; i < ARRAY_SIZE(nand_ecc_modes); i++) if (!strcasecmp(pm, nand_ecc_modes[i])) return i; /* * For backward compatibility we support few obsoleted values that don't - * have their mappings into nand_ecc_modes_t anymore (they were merged - * with other enums). + * have their mappings into the nand_ecc_mode enum anymore (they were + * merged with other enums). 
*/ if (!strcasecmp(pm, "soft_bch")) return NAND_ECC_SOFT; @@ -4917,17 +5043,20 @@ static const char * const nand_ecc_algos[] = { [NAND_ECC_RS] = "rs", }; -static int of_get_nand_ecc_algo(struct device_node *np) +static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np) { + enum nand_ecc_algo ecc_algo; const char *pm; - int err, i; + int err; err = of_property_read_string(np, "nand-ecc-algo", &pm); if (!err) { - for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++) - if (!strcasecmp(pm, nand_ecc_algos[i])) - return i; - return -ENODEV; + for (ecc_algo = NAND_ECC_HAMMING; + ecc_algo < ARRAY_SIZE(nand_ecc_algos); + ecc_algo++) { + if (!strcasecmp(pm, nand_ecc_algos[ecc_algo])) + return ecc_algo; + } } /* @@ -4935,15 +5064,14 @@ static int of_get_nand_ecc_algo(struct device_node *np) * for some obsoleted values that were specifying ECC algorithm. */ err = of_property_read_string(np, "nand-ecc-mode", &pm); - if (err < 0) - return err; + if (!err) { + if (!strcasecmp(pm, "soft")) + return NAND_ECC_HAMMING; + else if (!strcasecmp(pm, "soft_bch")) + return NAND_ECC_BCH; + } - if (!strcasecmp(pm, "soft")) - return NAND_ECC_HAMMING; - else if (!strcasecmp(pm, "soft_bch")) - return NAND_ECC_BCH; - - return -ENODEV; + return NAND_ECC_UNKNOWN; } static int of_get_nand_ecc_step_size(struct device_node *np) @@ -4988,7 +5116,8 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np) static int nand_dt_init(struct nand_chip *chip) { struct device_node *dn = nand_get_flash_node(chip); - int ecc_mode, ecc_algo, ecc_strength, ecc_step; + enum nand_ecc_algo ecc_algo; + int ecc_mode, ecc_strength, ecc_step; if (!dn) return 0; @@ -5010,7 +5139,7 @@ static int nand_dt_init(struct nand_chip *chip) if (ecc_mode >= 0) chip->ecc.mode = ecc_mode; - if (ecc_algo >= 0) + if (ecc_algo != NAND_ECC_UNKNOWN) chip->ecc.algo = ecc_algo; if (ecc_strength >= 0) @@ -5140,8 +5269,10 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) ecc->read_page = nand_read_page_swecc; ecc->read_subpage = nand_read_subpage; ecc->write_page = nand_write_page_swecc; - ecc->read_page_raw = nand_read_page_raw; - ecc->write_page_raw = nand_write_page_raw; + if (!ecc->read_page_raw) + ecc->read_page_raw = nand_read_page_raw; + if (!ecc->write_page_raw) + ecc->write_page_raw = nand_write_page_raw; ecc->read_oob = nand_read_oob_std; ecc->write_oob = nand_write_oob_std; if (!ecc->size) @@ -5163,8 +5294,10 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) ecc->read_page = nand_read_page_swecc; ecc->read_subpage = nand_read_subpage; ecc->write_page = nand_write_page_swecc; - ecc->read_page_raw = nand_read_page_raw; - ecc->write_page_raw = nand_write_page_raw; + if (!ecc->read_page_raw) + ecc->read_page_raw = nand_read_page_raw; + if (!ecc->write_page_raw) + ecc->write_page_raw = nand_write_page_raw; ecc->read_oob = nand_read_oob_std; ecc->write_oob = nand_write_oob_std; @@ -5628,16 +5761,6 @@ static int nand_scan_tail(struct nand_chip *chip) */ switch (ecc->mode) { - case NAND_ECC_HW_OOB_FIRST: - /* Similar to NAND_ECC_HW, but a separate read_page handle */ - if (!ecc->calculate || !ecc->correct || !ecc->hwctl) { - WARN(1, "No ECC functions supplied; hardware ECC not possible\n"); - ret = -EINVAL; - goto err_nand_manuf_cleanup; - } - if (!ecc->read_page) - ecc->read_page = nand_read_page_hwecc_oob_first; - fallthrough; case NAND_ECC_HW: /* Use standard hwecc read page function? 
*/ if (!ecc->read_page) @@ -5781,8 +5904,10 @@ static int nand_scan_tail(struct nand_chip *chip) /* ECC sanity check: warn if it's too weak */ if (!nand_ecc_strength_good(chip)) - pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", - mtd->name); + pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n", + mtd->name, chip->ecc.strength, chip->ecc.size, + chip->base.eccreq.strength, + chip->base.eccreq.step_size); /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) { @@ -5975,18 +6100,6 @@ void nand_cleanup(struct nand_chip *chip) EXPORT_SYMBOL_GPL(nand_cleanup); -/** - * nand_release - [NAND Interface] Unregister the MTD device and free resources - * held by the NAND device - * @chip: NAND chip object - */ -void nand_release(struct nand_chip *chip) -{ - mtd_device_unregister(nand_to_mtd(chip)); - nand_cleanup(chip); -} -EXPORT_SYMBOL_GPL(nand_release); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steven J. Hill "); MODULE_AUTHOR("Thomas Gleixner "); diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c index 17527310c3a1..d5af8c5fd02f 100644 --- a/drivers/mtd/nand/raw/nand_bch.c +++ b/drivers/mtd/nand/raw/nand_bch.c @@ -41,7 +41,7 @@ int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf, unsigned int i; memset(code, 0, chip->ecc.bytes); - encode_bch(nbc->bch, buf, chip->ecc.size, code); + bch_encode(nbc->bch, buf, chip->ecc.size, code); /* apply mask so that an erased page is a valid codeword */ for (i = 0; i < chip->ecc.bytes; i++) @@ -67,7 +67,7 @@ int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf, unsigned int *errloc = nbc->errloc; int i, count; - count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, + count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, NULL, errloc); if (count > 0) { for (i = 0; i < count; i++) { @@ -130,7 +130,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) if (!nbc) goto fail; - nbc->bch = init_bch(m, t, 0); + nbc->bch = bch_init(m, t, 0, false); if (!nbc->bch) goto fail; @@ -182,7 +182,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) goto fail; memset(erased_page, 0xff, eccsize); - encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask); + bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask); kfree(erased_page); for (i = 0; i < eccbytes; i++) @@ -205,7 +205,7 @@ EXPORT_SYMBOL(nand_bch_init); void nand_bch_free(struct nand_bch_control *nbc) { if (nbc) { - free_bch(nbc->bch); + bch_free(nbc->bch); kfree(nbc->errloc); kfree(nbc->eccmask); kfree(nbc); diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c index 9b540e76f84f..b15c42f48755 100644 --- a/drivers/mtd/nand/raw/nand_jedec.c +++ b/drivers/mtd/nand/raw/nand_jedec.c @@ -16,6 +16,8 @@ #include "internals.h" +#define JEDEC_PARAM_PAGES 3 + /* * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise. 
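The nand_bch.c hunks above switch to the renamed software BCH API, whose bch_init() gained a fourth 'swap_bits' argument (false preserves the former bit ordering). A minimal sketch of the new calls, with hypothetical wrapper names:

        #include <linux/bch.h>

        static struct bch_control *example_bch_setup(unsigned int m, unsigned int t)
        {
                /* 0 selects the default primitive polynomial for GF(2^m) */
                return bch_init(m, t, 0, false);
        }

        static void example_bch_teardown(struct bch_control *bch)
        {
                bch_free(bch);
        }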
*/ @@ -25,9 +27,11 @@ int nand_jedec_detect(struct nand_chip *chip) struct nand_memory_organization *memorg; struct nand_jedec_params *p; struct jedec_ecc_info *ecc; + bool use_datain = false; int jedec_version = 0; char id[5]; int i, val, ret; + u16 crc; memorg = nanddev_get_memorg(&chip->base); @@ -41,25 +45,31 @@ int nand_jedec_detect(struct nand_chip *chip) if (!p) return -ENOMEM; - ret = nand_read_param_page_op(chip, 0x40, NULL, 0); - if (ret) { - ret = 0; - goto free_jedec_param_page; - } + if (!nand_has_exec_op(chip) || + !nand_read_data_op(chip, p, sizeof(*p), true, true)) + use_datain = true; - for (i = 0; i < 3; i++) { - ret = nand_read_data_op(chip, p, sizeof(*p), true); + for (i = 0; i < JEDEC_PARAM_PAGES; i++) { + if (!i) + ret = nand_read_param_page_op(chip, 0x40, p, + sizeof(*p)); + else if (use_datain) + ret = nand_read_data_op(chip, p, sizeof(*p), true, + false); + else + ret = nand_change_read_column_op(chip, sizeof(*p) * i, + p, sizeof(*p), true); if (ret) { ret = 0; goto free_jedec_param_page; } - if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) == - le16_to_cpu(p->crc)) + crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510); + if (crc == le16_to_cpu(p->crc)) break; } - if (i == 3) { + if (i == JEDEC_PARAM_PAGES) { pr_err("Could not find valid JEDEC parameter page; aborting\n"); goto free_jedec_param_page; } diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index f91e92e1b972..d64791c06a97 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -225,7 +225,8 @@ static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo) do { u8 status; - ret = nand_read_data_op(chip, &status, sizeof(status), true); + ret = nand_read_data_op(chip, &status, sizeof(status), true, + false); if (ret) return; @@ -552,7 +553,8 @@ static int nand_wait(struct nand_chip *chip) break; } else { ret = nand_read_data_op(chip, &status, - sizeof(status), true); + sizeof(status), true, + false); if (ret) return ret; @@ -563,7 +565,7 @@ static int nand_wait(struct nand_chip *chip) } while (time_before(jiffies, timeo)); } - ret = nand_read_data_op(chip, &status, sizeof(status), true); + ret = nand_read_data_op(chip, &status, sizeof(status), true, false); if (ret) return ret; diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 56654030ec7f..3589b4fce0d4 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -192,6 +192,7 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status, struct micron_nand *micron = nand_get_manufacturer_data(chip); struct mtd_info *mtd = nand_to_mtd(chip); unsigned int step, max_bitflips = 0; + bool use_datain = false; int ret; if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) { @@ -211,8 +212,27 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status, * in non-raw mode, even if the user did not request those bytes. */ if (!oob_required) { - ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, - false); + /* + * We first check which operation is supported by the controller + * before running it. This trick makes it possible to support + * all controllers, even the most constraints, without almost + * any performance hit. + * + * TODO: could be enhanced to avoid repeating the same check + * over and over in the fast path. 
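This "check before use" trick appears in the JEDEC, ONFI and Micron paths of this patch. A minimal sketch of the pattern, written as a hypothetical helper living next to the core code (so the internal headers are in scope):

        #include "internals.h"

        static int example_read_at(struct nand_chip *chip, unsigned int offset,
                                   void *buf, unsigned int len)
        {
                /*
                 * Legacy controllers always accept a plain DATA_IN;
                 * ->exec_op() controllers are asked first (check_only ==
                 * true), without actually running the operation.
                 */
                if (!nand_has_exec_op(chip) ||
                    !nand_read_data_op(chip, buf, len, false, true))
                        return nand_read_data_op(chip, buf, len, false, false);

                /* Constrained controllers: reposition with CHANGE READ COLUMN */
                return nand_change_read_column_op(chip, offset, buf, len, false);
        }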
+ */ + if (!nand_has_exec_op(chip) || + !nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, + true)) + use_datain = true; + + if (use_datain) + ret = nand_read_data_op(chip, chip->oob_poi, + mtd->oobsize, false, false); + else + ret = nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, + mtd->oobsize, false); if (ret) return ret; } @@ -285,6 +305,7 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); + bool use_datain = false; u8 status; int ret, max_bitflips = 0; @@ -300,14 +321,36 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf, if (ret) goto out; - ret = nand_exit_status_op(chip); - if (ret) - goto out; + /* + * We first check which operation is supported by the controller before + * running it. This trick makes it possible to support all controllers, + * even the most constraints, without almost any performance hit. + * + * TODO: could be enhanced to avoid repeating the same check over and + * over in the fast path. + */ + if (!nand_has_exec_op(chip) || + !nand_read_data_op(chip, buf, mtd->writesize, false, true)) + use_datain = true; - ret = nand_read_data_op(chip, buf, mtd->writesize, false); - if (!ret && oob_required) - ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, + if (use_datain) { + ret = nand_exit_status_op(chip); + if (ret) + goto out; + + ret = nand_read_data_op(chip, buf, mtd->writesize, false, false); + if (!ret && oob_required) + ret = nand_read_data_op(chip, chip->oob_poi, + mtd->oobsize, false, false); + } else { + ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize, + false); + if (!ret && oob_required) + ret = nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, + mtd->oobsize, false); + } if (chip->ecc.strength == 4) max_bitflips = micron_nand_on_die_ecc_status_4(chip, status, @@ -508,8 +551,10 @@ static int micron_nand_init(struct nand_chip *chip) chip->ecc.read_page_raw = nand_read_page_raw_notsupp; chip->ecc.write_page_raw = nand_write_page_raw_notsupp; } else { - chip->ecc.read_page_raw = nand_read_page_raw; - chip->ecc.write_page_raw = nand_write_page_raw; + if (!chip->ecc.read_page_raw) + chip->ecc.read_page_raw = nand_read_page_raw; + if (!chip->ecc.write_page_raw) + chip->ecc.write_page_raw = nand_write_page_raw; } } diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 0b879bd0a68c..be3456627288 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -16,6 +16,8 @@ #include "internals.h" +#define ONFI_PARAM_PAGES 3 + u16 onfi_crc16(u16 crc, u8 const *p, size_t len) { int i; @@ -45,12 +47,10 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip, if (!ep) return -ENOMEM; - /* Send our own NAND_CMD_PARAM. */ - ret = nand_read_param_page_op(chip, 0, NULL, 0); - if (ret) - goto ext_out; - - /* Use the Change Read Column command to skip the ONFI param pages. */ + /* + * Use the Change Read Column command to skip the ONFI param pages and + * ensure we read at the right location. 
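For reference, every parameter-page copy read by the loops in this patch is validated the same way: a CRC-16 over the first 254 bytes for ONFI (510 for JEDEC, as in the nand_jedec.c hunk above), compared against the little-endian CRC stored in the page. A sketch, assuming it sits next to the core code so onfi_crc16() and struct nand_onfi_params are visible:

        #include "internals.h"

        static bool example_onfi_page_valid(const struct nand_onfi_params *p)
        {
                return onfi_crc16(ONFI_CRC_BASE, (const u8 *)p, 254) ==
                       le16_to_cpu(p->crc);
        }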
+ */ ret = nand_change_read_column_op(chip, sizeof(*p) * p->num_of_param_pages, ep, len, true); @@ -141,11 +141,13 @@ int nand_onfi_detect(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); struct nand_memory_organization *memorg; - struct nand_onfi_params *p; + struct nand_onfi_params *p = NULL, *pbuf; struct onfi_params *onfi; + bool use_datain = false; int onfi_version = 0; char id[4]; int i, ret, val; + u16 crc; memorg = nanddev_get_memorg(&chip->base); @@ -155,43 +157,54 @@ int nand_onfi_detect(struct nand_chip *chip) return 0; /* ONFI chip: allocate a buffer to hold its parameter page */ - p = kzalloc((sizeof(*p) * 3), GFP_KERNEL); - if (!p) + pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL); + if (!pbuf) return -ENOMEM; - ret = nand_read_param_page_op(chip, 0, NULL, 0); - if (ret) { - ret = 0; - goto free_onfi_param_page; - } + if (!nand_has_exec_op(chip) || + !nand_read_data_op(chip, &pbuf[0], sizeof(*pbuf), true, true)) + use_datain = true; - for (i = 0; i < 3; i++) { - ret = nand_read_data_op(chip, &p[i], sizeof(*p), true); + for (i = 0; i < ONFI_PARAM_PAGES; i++) { + if (!i) + ret = nand_read_param_page_op(chip, 0, &pbuf[i], + sizeof(*pbuf)); + else if (use_datain) + ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf), + true, false); + else + ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i, + &pbuf[i], sizeof(*pbuf), + true); if (ret) { ret = 0; goto free_onfi_param_page; } - if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) == - le16_to_cpu(p->crc)) { - if (i) - memcpy(p, &p[i], sizeof(*p)); + crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254); + if (crc == le16_to_cpu(pbuf[i].crc)) { + p = &pbuf[i]; break; } } - if (i == 3) { - const void *srcbufs[3] = {p, p + 1, p + 2}; + if (i == ONFI_PARAM_PAGES) { + const void *srcbufs[ONFI_PARAM_PAGES]; + unsigned int j; + + for (j = 0; j < ONFI_PARAM_PAGES; j++) + srcbufs[j] = pbuf + j; pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n"); - nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p, - sizeof(*p)); + nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf, + sizeof(*pbuf)); - if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) != - le16_to_cpu(p->crc)) { + crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254); + if (crc != le16_to_cpu(pbuf->crc)) { pr_err("ONFI parameter recovery failed, aborting\n"); goto free_onfi_param_page; } + p = pbuf; } if (chip->manufacturer.desc && chip->manufacturer.desc->ops && @@ -299,14 +312,14 @@ int nand_onfi_detect(struct nand_chip *chip) chip->parameters.onfi = onfi; /* Identification done, free the full ONFI parameter page and exit */ - kfree(p); + kfree(pbuf); return 1; free_model: kfree(chip->parameters.model); free_onfi_param_page: - kfree(p); + kfree(pbuf); return ret; } diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index f64b06a71dfa..36d21be3dfe5 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -16,6 +16,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 0 */ { .type = NAND_SDR_IFACE, + .timings.mode = 0, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -58,6 +59,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 1 */ { .type = NAND_SDR_IFACE, + .timings.mode = 1, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -100,6 +102,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 2 */ { .type = NAND_SDR_IFACE, + .timings.mode = 2, 
.timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -142,6 +145,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 3 */ { .type = NAND_SDR_IFACE, + .timings.mode = 3, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -184,6 +188,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 4 */ { .type = NAND_SDR_IFACE, + .timings.mode = 4, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -226,6 +231,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = { /* Mode 5 */ { .type = NAND_SDR_IFACE, + .timings.mode = 5, .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, @@ -314,10 +320,9 @@ int onfi_fill_data_interface(struct nand_chip *chip, /* microseconds -> picoseconds */ timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX; timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX; - timings->tR_max = 1000000ULL * 200000000ULL; - /* nanoseconds -> picoseconds */ - timings->tCCS_min = 1000UL * 500000; + timings->tR_max = 200000000; + timings->tCCS_min = 500000; } return 0; diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c index f3dcd695b5db..ae069905d7e4 100644 --- a/drivers/mtd/nand/raw/nand_toshiba.c +++ b/drivers/mtd/nand/raw/nand_toshiba.c @@ -194,6 +194,17 @@ static void toshiba_nand_decode_id(struct nand_chip *chip) } } +static int tc58teg5dclta00_init(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + chip->onfi_timing_mode_default = 5; + chip->options |= NAND_NEED_SCRAMBLING; + mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme); + + return 0; +} + static int toshiba_nand_init(struct nand_chip *chip) { if (nand_is_slc(chip)) @@ -204,6 +215,9 @@ static int toshiba_nand_init(struct nand_chip *chip) chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) toshiba_nand_benand_init(chip); + if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model)) + tc58teg5dclta00_init(chip); + return 0; } diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index 1de03bb34e84..0a5cb77966cc 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -353,6 +353,9 @@ struct nandsim { void *file_buf; struct page *held_pages[NS_MAX_HELD_PAGES]; int held_cnt; + + /* debugfs entry */ + struct dentry *dent; }; /* @@ -432,7 +435,7 @@ static unsigned long total_wear = 0; /* MTD structure for NAND controller */ static struct mtd_info *nsmtd; -static int nandsim_show(struct seq_file *m, void *private) +static int ns_show(struct seq_file *m, void *private) { unsigned long wmin = -1, wmax = 0, avg; unsigned long deciles[10], decile_max[10], tot = 0; @@ -483,19 +486,18 @@ static int nandsim_show(struct seq_file *m, void *private) return 0; } -DEFINE_SHOW_ATTRIBUTE(nandsim); +DEFINE_SHOW_ATTRIBUTE(ns); /** - * nandsim_debugfs_create - initialize debugfs - * @dev: nandsim device description object + * ns_debugfs_create - initialize debugfs + * @ns: nandsim device description object * * This function creates all debugfs files for UBI device @ubi. Returns zero in * case of success and a negative error code in case of failure. 
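Each entry of onfi_sdr_timings[] now records its ONFI mode in timings.mode, so consumers no longer have to infer the mode from individual timing values. A minimal sketch of reading the field back (hypothetical helper, not part of the patch):

        #include <linux/errno.h>
        #include <linux/mtd/rawnand.h>

        static int example_onfi_mode(const struct nand_data_interface *iface)
        {
                if (iface->type != NAND_SDR_IFACE)
                        return -EINVAL;

                return iface->timings.mode;
        }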
*/ -static int nandsim_debugfs_create(struct nandsim *dev) +static int ns_debugfs_create(struct nandsim *ns) { struct dentry *root = nsmtd->dbg.dfs_dir; - struct dentry *dent; /* * Just skip debugfs initialization when the debugfs directory is @@ -508,9 +510,9 @@ static int nandsim_debugfs_create(struct nandsim *dev) return 0; } - dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, - root, dev, &nandsim_fops); - if (IS_ERR_OR_NULL(dent)) { + ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns, + &ns_fops); + if (IS_ERR_OR_NULL(ns->dent)) { NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n"); return -1; } @@ -518,13 +520,18 @@ static int nandsim_debugfs_create(struct nandsim *dev) return 0; } +static void ns_debugfs_remove(struct nandsim *ns) +{ + debugfs_remove_recursive(ns->dent); +} + /* * Allocate array of page pointers, create slab allocation for an array * and initialize the array by NULL pointers. * * RETURNS: 0 if success, -ENOMEM if memory alloc fails. */ -static int __init alloc_device(struct nandsim *ns) +static int __init ns_alloc_device(struct nandsim *ns) { struct file *cfile; int i, err; @@ -536,12 +543,12 @@ static int __init alloc_device(struct nandsim *ns) if (!(cfile->f_mode & FMODE_CAN_READ)) { NS_ERR("alloc_device: cache file not readable\n"); err = -EINVAL; - goto err_close; + goto err_close_filp; } if (!(cfile->f_mode & FMODE_CAN_WRITE)) { NS_ERR("alloc_device: cache file not writeable\n"); err = -EINVAL; - goto err_close; + goto err_close_filp; } ns->pages_written = vzalloc(array_size(sizeof(unsigned long), @@ -549,16 +556,24 @@ static int __init alloc_device(struct nandsim *ns) if (!ns->pages_written) { NS_ERR("alloc_device: unable to allocate pages written array\n"); err = -ENOMEM; - goto err_close; + goto err_close_filp; } ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL); if (!ns->file_buf) { NS_ERR("alloc_device: unable to allocate file buf\n"); err = -ENOMEM; - goto err_free; + goto err_free_pw; } ns->cfile = cfile; + return 0; + +err_free_pw: + vfree(ns->pages_written); +err_close_filp: + filp_close(cfile, NULL); + + return err; } ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum)); @@ -573,22 +588,22 @@ static int __init alloc_device(struct nandsim *ns) ns->geom.pgszoob, 0, 0, NULL); if (!ns->nand_pages_slab) { NS_ERR("cache_create: unable to create kmem_cache\n"); - return -ENOMEM; + err = -ENOMEM; + goto err_free_pg; } return 0; -err_free: - vfree(ns->pages_written); -err_close: - filp_close(cfile, NULL); +err_free_pg: + vfree(ns->pages); + return err; } /* * Free any allocated pages, and free the array of page pointers. */ -static void free_device(struct nandsim *ns) +static void ns_free_device(struct nandsim *ns) { int i; @@ -610,7 +625,7 @@ static void free_device(struct nandsim *ns) } } -static char __init *get_partition_name(int i) +static char __init *ns_get_partition_name(int i) { return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i); } @@ -620,7 +635,7 @@ static char __init *get_partition_name(int i) * * RETURNS: 0 if success, -ERRNO if failure. 
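Storing the dentry in the new nandsim 'dent' field is what makes the explicit removal in ns_debugfs_remove() possible. A generic sketch of the same pattern, with hypothetical names:

        #include <linux/debugfs.h>
        #include <linux/err.h>
        #include <linux/errno.h>
        #include <linux/fs.h>

        struct example_dbg {
                struct dentry *dent;
        };

        static int example_debugfs_create(struct example_dbg *d,
                                          struct dentry *root,
                                          const struct file_operations *fops)
        {
                d->dent = debugfs_create_file("example_report", 0400, root, d,
                                              fops);
                return IS_ERR_OR_NULL(d->dent) ? -ENODEV : 0;
        }

        static void example_debugfs_remove(struct example_dbg *d)
        {
                debugfs_remove_recursive(d->dent);
        }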
*/ -static int __init init_nandsim(struct mtd_info *mtd) +static int __init ns_init(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); struct nandsim *ns = nand_get_controller_data(chip); @@ -693,7 +708,7 @@ static int __init init_nandsim(struct mtd_info *mtd) NS_ERR("bad partition size.\n"); return -EINVAL; } - ns->partitions[i].name = get_partition_name(i); + ns->partitions[i].name = ns_get_partition_name(i); if (!ns->partitions[i].name) { NS_ERR("unable to allocate memory.\n"); return -ENOMEM; @@ -707,12 +722,14 @@ static int __init init_nandsim(struct mtd_info *mtd) if (remains) { if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) { NS_ERR("too many partitions.\n"); - return -EINVAL; + ret = -EINVAL; + goto free_partition_names; } - ns->partitions[i].name = get_partition_name(i); + ns->partitions[i].name = ns_get_partition_name(i); if (!ns->partitions[i].name) { NS_ERR("unable to allocate memory.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto free_partition_names; } ns->partitions[i].offset = next_offset; ns->partitions[i].size = remains; @@ -739,33 +756,48 @@ static int __init init_nandsim(struct mtd_info *mtd) printk("sector address bytes: %u\n", ns->geom.secaddrbytes); printk("options: %#x\n", ns->options); - if ((ret = alloc_device(ns)) != 0) - return ret; + ret = ns_alloc_device(ns); + if (ret) + goto free_partition_names; /* Allocate / initialize the internal buffer */ ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL); if (!ns->buf.byte) { NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n", ns->geom.pgszoob); - return -ENOMEM; + ret = -ENOMEM; + goto free_device; } memset(ns->buf.byte, 0xFF, ns->geom.pgszoob); return 0; + +free_device: + ns_free_device(ns); +free_partition_names: + for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i) + kfree(ns->partitions[i].name); + + return ret; } /* * Free the nandsim structure. 
*/ -static void free_nandsim(struct nandsim *ns) +static void ns_free(struct nandsim *ns) { + int i; + + for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i) + kfree(ns->partitions[i].name); + kfree(ns->buf.byte); - free_device(ns); + ns_free_device(ns); return; } -static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd) +static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd) { char *w; int zero_ok; @@ -793,7 +825,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd) return 0; } -static int parse_weakblocks(void) +static int ns_parse_weakblocks(void) { char *w; int zero_ok; @@ -830,7 +862,7 @@ static int parse_weakblocks(void) return 0; } -static int erase_error(unsigned int erase_block_no) +static int ns_erase_error(unsigned int erase_block_no) { struct weak_block *wb; @@ -844,7 +876,7 @@ static int erase_error(unsigned int erase_block_no) return 0; } -static int parse_weakpages(void) +static int ns_parse_weakpages(void) { char *w; int zero_ok; @@ -881,7 +913,7 @@ static int parse_weakpages(void) return 0; } -static int write_error(unsigned int page_no) +static int ns_write_error(unsigned int page_no) { struct weak_page *wp; @@ -895,7 +927,7 @@ static int write_error(unsigned int page_no) return 0; } -static int parse_gravepages(void) +static int ns_parse_gravepages(void) { char *g; int zero_ok; @@ -932,7 +964,7 @@ static int parse_gravepages(void) return 0; } -static int read_error(unsigned int page_no) +static int ns_read_error(unsigned int page_no) { struct grave_page *gp; @@ -946,25 +978,7 @@ static int read_error(unsigned int page_no) return 0; } -static void free_lists(void) -{ - struct list_head *pos, *n; - list_for_each_safe(pos, n, &weak_blocks) { - list_del(pos); - kfree(list_entry(pos, struct weak_block, list)); - } - list_for_each_safe(pos, n, &weak_pages) { - list_del(pos); - kfree(list_entry(pos, struct weak_page, list)); - } - list_for_each_safe(pos, n, &grave_pages) { - list_del(pos); - kfree(list_entry(pos, struct grave_page, list)); - } - kfree(erase_block_wear); -} - -static int setup_wear_reporting(struct mtd_info *mtd) +static int ns_setup_wear_reporting(struct mtd_info *mtd) { size_t mem; @@ -982,7 +996,7 @@ static int setup_wear_reporting(struct mtd_info *mtd) return 0; } -static void update_wear(unsigned int erase_block_no) +static void ns_update_wear(unsigned int erase_block_no) { if (!erase_block_wear) return; @@ -1001,7 +1015,7 @@ static void update_wear(unsigned int erase_block_no) /* * Returns the string representation of 'state' state. */ -static char *get_state_name(uint32_t state) +static char *ns_get_state_name(uint32_t state) { switch (NS_STATE(state)) { case STATE_CMD_READ0: @@ -1061,7 +1075,7 @@ static char *get_state_name(uint32_t state) * * RETURNS: 1 if wrong command, 0 if right. */ -static int check_command(int cmd) +static int ns_check_command(int cmd) { switch (cmd) { @@ -1088,7 +1102,7 @@ static int check_command(int cmd) /* * Returns state after command is accepted by command number. */ -static uint32_t get_state_by_command(unsigned command) +static uint32_t ns_get_state_by_command(unsigned command) { switch (command) { case NAND_CMD_READ0: @@ -1126,7 +1140,7 @@ static uint32_t get_state_by_command(unsigned command) /* * Move an address byte to the correspondent internal register. 
*/ -static inline void accept_addr_byte(struct nandsim *ns, u_char bt) +static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt) { uint byte = (uint)bt; @@ -1144,9 +1158,10 @@ static inline void accept_addr_byte(struct nandsim *ns, u_char bt) /* * Switch to STATE_READY state. */ -static inline void switch_to_ready_state(struct nandsim *ns, u_char status) +static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status) { - NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY)); + NS_DBG("switch_to_ready_state: switch to %s state\n", + ns_get_state_name(STATE_READY)); ns->state = STATE_READY; ns->nxstate = STATE_UNKNOWN; @@ -1203,7 +1218,7 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status) * -1 - several matches. * 0 - operation is found. */ -static int find_operation(struct nandsim *ns, uint32_t flag) +static int ns_find_operation(struct nandsim *ns, uint32_t flag) { int opsfound = 0; int i, j, idx = 0; @@ -1256,7 +1271,8 @@ static int find_operation(struct nandsim *ns, uint32_t flag) ns->state = ns->op[ns->stateidx]; ns->nxstate = ns->op[ns->stateidx + 1]; NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n", - idx, get_state_name(ns->state), get_state_name(ns->nxstate)); + idx, ns_get_state_name(ns->state), + ns_get_state_name(ns->nxstate)); return 0; } @@ -1264,13 +1280,13 @@ static int find_operation(struct nandsim *ns, uint32_t flag) /* Nothing was found. Try to ignore previous commands (if any) and search again */ if (ns->npstates != 0) { NS_DBG("find_operation: no operation found, try again with state %s\n", - get_state_name(ns->state)); + ns_get_state_name(ns->state)); ns->npstates = 0; - return find_operation(ns, 0); + return ns_find_operation(ns, 0); } NS_DBG("find_operation: no operations found\n"); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return -2; } @@ -1287,7 +1303,7 @@ static int find_operation(struct nandsim *ns, uint32_t flag) return -1; } -static void put_pages(struct nandsim *ns) +static void ns_put_pages(struct nandsim *ns) { int i; @@ -1296,7 +1312,8 @@ static void put_pages(struct nandsim *ns) } /* Get page cache pages in advance to provide NOFS memory allocation */ -static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos) +static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count, + loff_t pos) { pgoff_t index, start_index, end_index; struct page *page; @@ -1316,7 +1333,7 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t page = find_or_create_page(mapping, index, GFP_NOFS); } if (page == NULL) { - put_pages(ns); + ns_put_pages(ns); return -ENOMEM; } unlock_page(page); @@ -1326,35 +1343,37 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t return 0; } -static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos) +static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf, + size_t count, loff_t pos) { ssize_t tx; int err; unsigned int noreclaim_flag; - err = get_pages(ns, file, count, pos); + err = ns_get_pages(ns, file, count, pos); if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); tx = kernel_read(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); - put_pages(ns); + ns_put_pages(ns); return tx; } -static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t 
pos) +static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf, + size_t count, loff_t pos) { ssize_t tx; int err; unsigned int noreclaim_flag; - err = get_pages(ns, file, count, pos); + err = ns_get_pages(ns, file, count, pos); if (err) return err; noreclaim_flag = memalloc_noreclaim_save(); tx = kernel_write(file, buf, count, &pos); memalloc_noreclaim_restore(noreclaim_flag); - put_pages(ns); + ns_put_pages(ns); return tx; } @@ -1374,11 +1393,11 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns) return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off; } -static int do_read_error(struct nandsim *ns, int num) +static int ns_do_read_error(struct nandsim *ns, int num) { unsigned int page_no = ns->regs.row; - if (read_error(page_no)) { + if (ns_read_error(page_no)) { prandom_bytes(ns->buf.byte, num); NS_WARN("simulating read error in page %u\n", page_no); return 1; @@ -1386,7 +1405,7 @@ static int do_read_error(struct nandsim *ns, int num) return 0; } -static void do_bit_flips(struct nandsim *ns, int num) +static void ns_do_bit_flips(struct nandsim *ns, int num) { if (bitflips && prandom_u32() < (1 << 22)) { int flips = 1; @@ -1406,7 +1425,7 @@ static void do_bit_flips(struct nandsim *ns, int num) /* * Fill the NAND buffer with data read from the specified page. */ -static void read_page(struct nandsim *ns, int num) +static void ns_read_page(struct nandsim *ns, int num) { union ns_mem *mypage; @@ -1420,15 +1439,16 @@ static void read_page(struct nandsim *ns, int num) NS_DBG("read_page: page %d written, reading from %d\n", ns->regs.row, ns->regs.column + ns->regs.off); - if (do_read_error(ns, num)) + if (ns_do_read_error(ns, num)) return; pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off; - tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos); + tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num, + pos); if (tx != num) { NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); return; } - do_bit_flips(ns, num); + ns_do_bit_flips(ns, num); } return; } @@ -1440,17 +1460,17 @@ static void read_page(struct nandsim *ns, int num) } else { NS_DBG("read_page: page %d allocated, reading from %d\n", ns->regs.row, ns->regs.column + ns->regs.off); - if (do_read_error(ns, num)) + if (ns_do_read_error(ns, num)) return; memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num); - do_bit_flips(ns, num); + ns_do_bit_flips(ns, num); } } /* * Erase all pages in the specified sector. */ -static void erase_sector(struct nandsim *ns) +static void ns_erase_sector(struct nandsim *ns) { union ns_mem *mypage; int i; @@ -1478,7 +1498,7 @@ static void erase_sector(struct nandsim *ns) /* * Program the specified page with the contents from the NAND buffer. 
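ns_read_file() and ns_write_file() above keep the pre-existing trick of pinning the cache-file pages with GFP_NOFS up front and then doing the VFS I/O with reclaim disabled, which avoids memory-reclaim recursion from inside the simulated device's I/O path. A stripped-down sketch of the I/O half (hypothetical helper, page pinning omitted):

        #include <linux/fs.h>
        #include <linux/sched/mm.h>

        static ssize_t example_noreclaim_read(struct file *file, void *buf,
                                              size_t count, loff_t pos)
        {
                unsigned int noreclaim_flag;
                ssize_t tx;

                noreclaim_flag = memalloc_noreclaim_save();
                tx = kernel_read(file, buf, count, &pos);
                memalloc_noreclaim_restore(noreclaim_flag);

                return tx;
        }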
*/ -static int prog_page(struct nandsim *ns, int num) +static int ns_prog_page(struct nandsim *ns, int num) { int i; union ns_mem *mypage; @@ -1497,7 +1517,7 @@ static int prog_page(struct nandsim *ns, int num) memset(ns->file_buf, 0xff, ns->geom.pgszoob); } else { all = 0; - tx = read_file(ns, ns->cfile, pg_off, num, off); + tx = ns_read_file(ns, ns->cfile, pg_off, num, off); if (tx != num) { NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; @@ -1507,14 +1527,15 @@ static int prog_page(struct nandsim *ns, int num) pg_off[i] &= ns->buf.byte[i]; if (all) { loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob; - tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos); + tx = ns_write_file(ns, ns->cfile, ns->file_buf, + ns->geom.pgszoob, pos); if (tx != ns->geom.pgszoob) { NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; } __set_bit(ns->regs.row, ns->pages_written); } else { - tx = write_file(ns, ns->cfile, pg_off, num, off); + tx = ns_write_file(ns, ns->cfile, pg_off, num, off); if (tx != num) { NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx); return -1; @@ -1552,7 +1573,7 @@ static int prog_page(struct nandsim *ns, int num) * * RETURNS: 0 if success, -1 if error. */ -static int do_state_action(struct nandsim *ns, uint32_t action) +static int ns_do_state_action(struct nandsim *ns, uint32_t action) { int num; int busdiv = ns->busw == 8 ? 1 : 2; @@ -1579,7 +1600,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action) break; } num = ns->geom.pgszoob - ns->regs.off - ns->regs.column; - read_page(ns, num); + ns_read_page(ns, num); NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n", num, NS_RAW_OFFSET(ns) + ns->regs.off); @@ -1622,14 +1643,14 @@ static int do_state_action(struct nandsim *ns, uint32_t action) ns->regs.row, NS_RAW_OFFSET(ns)); NS_LOG("erase sector %u\n", erase_block_no); - erase_sector(ns); + ns_erase_sector(ns); NS_MDELAY(erase_delay); if (erase_block_wear) - update_wear(erase_block_no); + ns_update_wear(erase_block_no); - if (erase_error(erase_block_no)) { + if (ns_erase_error(erase_block_no)) { NS_WARN("simulating erase failure in erase block %u\n", erase_block_no); return -1; } @@ -1653,7 +1674,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action) return -1; } - if (prog_page(ns, num) == -1) + if (ns_prog_page(ns, num) == -1) return -1; page_no = ns->regs.row; @@ -1665,7 +1686,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action) NS_UDELAY(programm_delay); NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv); - if (write_error(page_no)) { + if (ns_write_error(page_no)) { NS_WARN("simulating write failure in page %u\n", page_no); return -1; } @@ -1702,7 +1723,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action) /* * Switch simulator's state. 
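The write path kept by ns_prog_page() models a key property of real NAND: a program operation can only clear bits, so new data is ANDed into whatever the page already contains. In miniature (hypothetical helper):

        #include <linux/types.h>

        static void example_program(u8 *stored, const u8 *incoming, size_t len)
        {
                size_t i;

                /* Programming can only turn 1s into 0s, never back */
                for (i = 0; i < len; i++)
                        stored[i] &= incoming[i];
        }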
*/ -static void switch_state(struct nandsim *ns) +static void ns_switch_state(struct nandsim *ns) { if (ns->op) { /* @@ -1716,11 +1737,13 @@ static void switch_state(struct nandsim *ns) NS_DBG("switch_state: operation is known, switch to the next state, " "state: %s, nxstate: %s\n", - get_state_name(ns->state), get_state_name(ns->nxstate)); + ns_get_state_name(ns->state), + ns_get_state_name(ns->nxstate)); /* See, whether we need to do some action */ - if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + if ((ns->state & ACTION_MASK) && + ns_do_state_action(ns, ns->state) < 0) { + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } @@ -1734,15 +1757,16 @@ static void switch_state(struct nandsim *ns) * The only event causing the switch_state function to * be called with yet unknown operation is new command. */ - ns->state = get_state_by_command(ns->regs.command); + ns->state = ns_get_state_by_command(ns->regs.command); NS_DBG("switch_state: operation is unknown, try to find it\n"); - if (find_operation(ns, 0) != 0) + if (!ns_find_operation(ns, 0)) return; - if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + if ((ns->state & ACTION_MASK) && + ns_do_state_action(ns, ns->state) < 0) { + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } } @@ -1770,7 +1794,7 @@ static void switch_state(struct nandsim *ns) NS_DBG("switch_state: operation complete, switch to STATE_READY state\n"); - switch_to_ready_state(ns, status); + ns_switch_to_ready_state(ns, status); return; } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) { @@ -1784,7 +1808,8 @@ static void switch_state(struct nandsim *ns) NS_DBG("switch_state: the next state is data I/O, switch, " "state: %s, nxstate: %s\n", - get_state_name(ns->state), get_state_name(ns->nxstate)); + ns_get_state_name(ns->state), + ns_get_state_name(ns->nxstate)); /* * Set the internal register to the count of bytes which @@ -1862,8 +1887,8 @@ static u_char ns_nand_read_byte(struct nand_chip *chip) return outb; } if (!(ns->state & STATE_DATAOUT_MASK)) { - NS_WARN("read_byte: unexpected data output cycle, state is %s " - "return %#x\n", get_state_name(ns->state), (uint)outb); + NS_WARN("read_byte: unexpected data output cycle, state is %s return %#x\n", + ns_get_state_name(ns->state), (uint)outb); return outb; } @@ -1902,7 +1927,7 @@ static u_char ns_nand_read_byte(struct nand_chip *chip) NS_DBG("read_byte: all bytes were read\n"); if (NS_STATE(ns->nxstate) == STATE_READY) - switch_state(ns); + ns_switch_state(ns); } return outb; @@ -1929,12 +1954,12 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) if (byte == NAND_CMD_RESET) { NS_LOG("reset chip\n"); - switch_to_ready_state(ns, NS_STATUS_OK(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_OK(ns)); return; } /* Check that the command byte is correct */ - if (check_command(byte)) { + if (ns_check_command(byte)) { NS_ERR("write_byte: unknown command %#x\n", (uint)byte); return; } @@ -1943,7 +1968,7 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) || NS_STATE(ns->state) == STATE_DATAOUT) { int row = ns->regs.row; - switch_state(ns); + ns_switch_state(ns); if (byte == NAND_CMD_RNDOUT) ns->regs.row = row; } @@ -1958,16 +1983,17 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) * was expected but command was input. 
In this case ignore * previous command(s)/state(s) and accept the last one. */ - NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, " - "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate)); + NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n", + (uint)byte, + ns_get_state_name(ns->nxstate)); } - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); } NS_DBG("command byte corresponding to %s state accepted\n", - get_state_name(get_state_by_command(byte))); + ns_get_state_name(ns_get_state_by_command(byte))); ns->regs.command = byte; - switch_state(ns); + ns_switch_state(ns); } else if (ns->lines.ale == 1) { /* @@ -1978,11 +2004,13 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) NS_DBG("write_byte: operation isn't known yet, identify it\n"); - if (find_operation(ns, 1) < 0) + if (ns_find_operation(ns, 1) < 0) return; - if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) { - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + if ((ns->state & ACTION_MASK) && + ns_do_state_action(ns, ns->state) < 0) { + ns_switch_to_ready_state(ns, + NS_STATUS_FAILED(ns)); return; } @@ -2004,20 +2032,20 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) /* Check that chip is expecting address */ if (!(ns->nxstate & STATE_ADDR_MASK)) { - NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, " - "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate)); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n", + (uint)byte, ns_get_state_name(ns->nxstate)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } /* Check if this is expected byte */ if (ns->regs.count == ns->regs.num) { NS_ERR("write_byte: no more address bytes expected\n"); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } - accept_addr_byte(ns, byte); + ns_accept_addr_byte(ns, byte); ns->regs.count += 1; @@ -2026,7 +2054,7 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) if (ns->regs.count == ns->regs.num) { NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column); - switch_state(ns); + ns_switch_state(ns); } } else { @@ -2036,10 +2064,10 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte) /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { - NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, " - "switch to %s\n", (uint)byte, - get_state_name(ns->state), get_state_name(STATE_READY)); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n", + (uint)byte, ns_get_state_name(ns->state), + ns_get_state_name(STATE_READY)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } @@ -2069,16 +2097,16 @@ static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf, /* Check that chip is expecting data input */ if (!(ns->state & STATE_DATAIN_MASK)) { - NS_ERR("write_buf: data input isn't expected, state is %s, " - "switch to STATE_READY\n", get_state_name(ns->state)); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n", + ns_get_state_name(ns->state)); + ns_switch_to_ready_state(ns, 
NS_STATUS_FAILED(ns)); return; } /* Check if these are expected bytes */ if (ns->regs.count + len > ns->regs.num) { NS_ERR("write_buf: too many input bytes\n"); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } @@ -2105,7 +2133,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len) } if (!(ns->state & STATE_DATAOUT_MASK)) { NS_WARN("read_buf: unexpected data output cycle, current state is %s\n", - get_state_name(ns->state)); + ns_get_state_name(ns->state)); return; } @@ -2121,7 +2149,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len) /* Check if these are expected bytes */ if (ns->regs.count + len > ns->regs.num) { NS_ERR("read_buf: too many bytes to read\n"); - switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); + ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns)); return; } @@ -2130,7 +2158,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len) if (ns->regs.count == ns->regs.num) { if (NS_STATE(ns->nxstate) == STATE_READY) - switch_state(ns); + ns_switch_state(ns); } return; @@ -2144,6 +2172,9 @@ static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op, const struct nand_op_instr *instr = NULL; struct nandsim *ns = nand_get_controller_data(chip); + if (check_only) + return 0; + ns->lines.ce = 1; for (op_id = 0; op_id < op->ninstrs; op_id++) { @@ -2224,9 +2255,10 @@ static const struct nand_controller_ops ns_controller_ops = { */ static int __init ns_init_module(void) { + struct list_head *pos, *n; struct nand_chip *chip; struct nandsim *ns; - int retval = -ENOMEM, i; + int ret; if (bus_width != 8 && bus_width != 16) { NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width); @@ -2259,8 +2291,8 @@ static int __init ns_init_module(void) break; default: NS_ERR("bbt has to be 0..2\n"); - retval = -EINVAL; - goto error; + ret = -EINVAL; + goto free_ns_struct; } /* * Perform minimum nandsim structure initialization to handle @@ -2285,23 +2317,26 @@ static int __init ns_init_module(void) nsmtd->owner = THIS_MODULE; - if ((retval = parse_weakblocks()) != 0) - goto error; + ret = ns_parse_weakblocks(); + if (ret) + goto free_ns_struct; - if ((retval = parse_weakpages()) != 0) - goto error; + ret = ns_parse_weakpages(); + if (ret) + goto free_wb_list; - if ((retval = parse_gravepages()) != 0) - goto error; + ret = ns_parse_gravepages(); + if (ret) + goto free_wp_list; nand_controller_init(&ns->base); ns->base.ops = &ns_controller_ops; chip->controller = &ns->base; - retval = nand_scan(chip, 1); - if (retval) { + ret = nand_scan(chip, 1); + if (ret) { NS_ERR("Could not scan NAND Simulator device\n"); - goto error; + goto free_gp_list; } if (overridesize) { @@ -2313,8 +2348,8 @@ static int __init ns_init_module(void) if (new_size >> overridesize != nsmtd->erasesize) { NS_ERR("overridesize is too big\n"); - retval = -EINVAL; - goto err_exit; + ret = -EINVAL; + goto cleanup_nand; } /* N.B. 
This relies on nand_scan not doing anything with the size before we change it */ @@ -2325,39 +2360,60 @@ static int __init ns_init_module(void) chip->pagemask = (targetsize >> chip->page_shift) - 1; } - if ((retval = setup_wear_reporting(nsmtd)) != 0) - goto err_exit; + ret = ns_setup_wear_reporting(nsmtd); + if (ret) + goto cleanup_nand; - if ((retval = init_nandsim(nsmtd)) != 0) - goto err_exit; + ret = ns_init(nsmtd); + if (ret) + goto free_ebw; - if ((retval = nand_create_bbt(chip)) != 0) - goto err_exit; + ret = nand_create_bbt(chip); + if (ret) + goto free_ns_object; - if ((retval = parse_badblocks(ns, nsmtd)) != 0) - goto err_exit; + ret = ns_parse_badblocks(ns, nsmtd); + if (ret) + goto free_ns_object; /* Register NAND partitions */ - retval = mtd_device_register(nsmtd, &ns->partitions[0], - ns->nbparts); - if (retval != 0) - goto err_exit; + ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts); + if (ret) + goto free_ns_object; - if ((retval = nandsim_debugfs_create(ns)) != 0) - goto err_exit; + ret = ns_debugfs_create(ns); + if (ret) + goto unregister_mtd; return 0; -err_exit: - free_nandsim(ns); - nand_release(chip); - for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) - kfree(ns->partitions[i].name); -error: +unregister_mtd: + WARN_ON(mtd_device_unregister(nsmtd)); +free_ns_object: + ns_free(ns); +free_ebw: + kfree(erase_block_wear); +cleanup_nand: + nand_cleanup(chip); +free_gp_list: + list_for_each_safe(pos, n, &grave_pages) { + list_del(pos); + kfree(list_entry(pos, struct grave_page, list)); + } +free_wp_list: + list_for_each_safe(pos, n, &weak_pages) { + list_del(pos); + kfree(list_entry(pos, struct weak_page, list)); + } +free_wb_list: + list_for_each_safe(pos, n, &weak_blocks) { + list_del(pos); + kfree(list_entry(pos, struct weak_block, list)); + } +free_ns_struct: kfree(ns); - free_lists(); - return retval; + return ret; } module_init(ns_init_module); @@ -2369,14 +2425,30 @@ static void __exit ns_cleanup_module(void) { struct nand_chip *chip = mtd_to_nand(nsmtd); struct nandsim *ns = nand_get_controller_data(chip); - int i; + struct list_head *pos, *n; - free_nandsim(ns); /* Free nandsim private resources */ - nand_release(chip); /* Unregister driver */ - for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i) - kfree(ns->partitions[i].name); - kfree(ns); /* Free other structures */ - free_lists(); + ns_debugfs_remove(ns); + WARN_ON(mtd_device_unregister(nsmtd)); + ns_free(ns); + kfree(erase_block_wear); + nand_cleanup(chip); + + list_for_each_safe(pos, n, &grave_pages) { + list_del(pos); + kfree(list_entry(pos, struct grave_page, list)); + } + + list_for_each_safe(pos, n, &weak_pages) { + list_del(pos); + kfree(list_entry(pos, struct weak_page, list)); + } + + list_for_each_safe(pos, n, &weak_blocks) { + list_del(pos); + kfree(list_entry(pos, struct weak_block, list)); + } + + kfree(ns); } module_exit(ns_cleanup_module); diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c index d324396ab7ff..ed38338c1383 100644 --- a/drivers/mtd/nand/raw/ndfc.c +++ b/drivers/mtd/nand/raw/ndfc.c @@ -244,9 +244,13 @@ static int ndfc_probe(struct platform_device *ofdev) static int ndfc_remove(struct platform_device *ofdev) { struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); - struct mtd_info *mtd = nand_to_mtd(&ndfc->chip); + struct nand_chip *chip = &ndfc->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; - nand_release(&ndfc->chip); + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(chip); kfree(mtd->name); return 0; diff --git 
a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index ad77c112a78a..eb7fcfd9276b 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -2283,14 +2283,18 @@ static int omap_nand_remove(struct platform_device *pdev) struct mtd_info *mtd = platform_get_drvdata(pdev); struct nand_chip *nand_chip = mtd_to_nand(mtd); struct omap_nand_info *info = mtd_to_omap(mtd); + int ret; + if (nand_chip->ecc.priv) { nand_bch_free(nand_chip->ecc.priv); nand_chip->ecc.priv = NULL; } if (info->dma) dma_release_channel(info->dma); - nand_release(nand_chip); - return 0; + ret = mtd_device_unregister(mtd); + WARN_ON(ret); + nand_cleanup(nand_chip); + return ret; } static const struct of_device_id omap_nand_ids[] = { diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 3fa0e2cbbe53..078b1022ac2a 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -411,6 +411,7 @@ static int elm_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (pm_runtime_get_sync(&pdev->dev) < 0) { ret = -EINVAL; + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); dev_err(&pdev->dev, "can't enable clock\n"); return ret; diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index d27b39a7223c..880b54ca1b41 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -180,7 +180,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) mtd->name = "orion_nand"; ret = mtd_device_register(mtd, board->parts, board->nr_parts); if (ret) { - nand_release(nc); + nand_cleanup(nc); goto no_dev; } @@ -195,8 +195,12 @@ static int orion_nand_remove(struct platform_device *pdev) { struct orion_nand_info *info = platform_get_drvdata(pdev); struct nand_chip *chip = &info->chip; + int ret; - nand_release(chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + + nand_cleanup(chip); clk_disable_unprepare(info->clk); diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c index c43cb4d92d3d..8d0d76ad319d 100644 --- a/drivers/mtd/nand/raw/oxnas_nand.c +++ b/drivers/mtd/nand/raw/oxnas_nand.c @@ -32,6 +32,7 @@ struct oxnas_nand_ctrl { void __iomem *io_base; struct clk *clk; struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS]; + unsigned int nchips; }; static uint8_t oxnas_nand_read_byte(struct nand_chip *chip) @@ -79,9 +80,9 @@ static int oxnas_nand_probe(struct platform_device *pdev) struct nand_chip *chip; struct mtd_info *mtd; struct resource *res; - int nchips = 0; int count = 0; int err = 0; + int i; /* Allocate memory for the device structure (and zero it) */ oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas), @@ -140,17 +141,15 @@ static int oxnas_nand_probe(struct platform_device *pdev) goto err_release_child; err = mtd_device_register(mtd, NULL, 0); - if (err) { - nand_release(chip); - goto err_release_child; - } + if (err) + goto err_cleanup_nand; - oxnas->chips[nchips] = chip; - ++nchips; + oxnas->chips[oxnas->nchips] = chip; + ++oxnas->nchips; } /* Exit if no chips found */ - if (!nchips) { + if (!oxnas->nchips) { err = -ENODEV; goto err_clk_unprepare; } @@ -159,8 +158,17 @@ static int oxnas_nand_probe(struct platform_device *pdev) return 0; +err_cleanup_nand: + nand_cleanup(chip); err_release_child: of_node_put(nand_np); + + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); + nand_cleanup(chip); + } + err_clk_unprepare: 
clk_disable_unprepare(oxnas->clk); return err; @@ -169,9 +177,14 @@ static int oxnas_nand_probe(struct platform_device *pdev) static int oxnas_nand_remove(struct platform_device *pdev) { struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev); + struct nand_chip *chip; + int i; - if (oxnas->chips[0]) - nand_release(oxnas->chips[0]); + for (i = 0; i < oxnas->nchips; i++) { + chip = oxnas->chips[i]; + WARN_ON(mtd_device_unregister(nand_to_mtd(chip))); + nand_cleanup(chip); + } clk_disable_unprepare(oxnas->clk); diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 9cfe7395172a..d8eca8c3fdcd 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -146,7 +146,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { dev_err(dev, "Unable to register MTD device\n"); err = -ENODEV; - goto out_lpc; + goto out_cleanup_nand; } dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, @@ -154,6 +154,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev) return 0; + out_cleanup_nand: + nand_cleanup(chip); out_lpc: release_region(lpcctl, 4); out_ior: @@ -167,6 +169,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) static int pasemi_nand_remove(struct platform_device *ofdev) { struct nand_chip *chip; + int ret; if (!pasemi_nand_mtd) return 0; @@ -174,7 +177,9 @@ static int pasemi_nand_remove(struct platform_device *ofdev) chip = mtd_to_nand(pasemi_nand_mtd); /* Release resources, unregister device */ - nand_release(chip); + ret = mtd_device_unregister(pasemi_nand_mtd); + WARN_ON(ret); + nand_cleanup(chip); release_region(lpcctl, 4); diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c index dc0f3074ddbf..556182f26057 100644 --- a/drivers/mtd/nand/raw/plat_nand.c +++ b/drivers/mtd/nand/raw/plat_nand.c @@ -92,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev) if (!err) return err; - nand_release(&data->chip); + nand_cleanup(&data->chip); out: if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); @@ -106,8 +106,12 @@ static int plat_nand_remove(struct platform_device *pdev) { struct plat_nand_data *data = platform_get_drvdata(pdev); struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev); + struct nand_chip *chip = &data->chip; + int ret; - nand_release(&data->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 5b11c7061497..f1daf330951b 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2836,7 +2836,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, chip->legacy.block_markbad = qcom_nandc_block_markbad; chip->controller = &nandc->controller; - chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER | + chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_SKIP_BBTSCAN; /* set up initial status value */ @@ -3005,10 +3005,15 @@ static int qcom_nandc_remove(struct platform_device *pdev) struct qcom_nand_controller *nandc = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct qcom_nand_host *host; + struct nand_chip *chip; + int ret; - list_for_each_entry(host, &nandc->host_list, node) - nand_release(&host->chip); - + list_for_each_entry(host, &nandc->host_list, node) { + 
chip = &host->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + } qcom_nandc_unalloc(nandc); diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index 77774250fb11..f865e3a47b01 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -651,7 +651,8 @@ static int r852_register_nand_device(struct r852_device *dev) dev->card_registered = 1; return 0; error3: - nand_release(dev->chip); + WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip))); + nand_cleanup(dev->chip); error1: /* Force card redetect */ dev->card_detected = 0; @@ -670,7 +671,8 @@ static void r852_unregister_nand_device(struct r852_device *dev) return; device_remove_file(&mtd->dev, &dev_attr_media_type); - nand_release(dev->chip); + WARN_ON(mtd_device_unregister(mtd)); + nand_cleanup(dev->chip); r852_engine_disable(dev); dev->card_registered = 0; } diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index 0009c1820e21..f86dff311464 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -779,7 +779,8 @@ static int s3c24xx_nand_remove(struct platform_device *pdev) for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); - nand_release(&ptr->chip); + WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip))); + nand_cleanup(&ptr->chip); } } diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c index 058e99d0cbcf..a661b8bb2dd5 100644 --- a/drivers/mtd/nand/raw/sh_flctl.c +++ b/drivers/mtd/nand/raw/sh_flctl.c @@ -1204,9 +1204,13 @@ static int flctl_probe(struct platform_device *pdev) static int flctl_remove(struct platform_device *pdev) { struct sh_flctl *flctl = platform_get_drvdata(pdev); + struct nand_chip *chip = &flctl->chip; + int ret; flctl_release_dma(flctl); - nand_release(&flctl->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); pm_runtime_disable(&pdev->dev); return 0; diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index b47a9eaff89b..51286f7acf54 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) return 0; err_add: - nand_release(this); + nand_cleanup(this); err_scan: iounmap(sharpsl->io); @@ -199,13 +199,19 @@ static int sharpsl_nand_probe(struct platform_device *pdev) static int sharpsl_nand_remove(struct platform_device *pdev) { struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev); + struct nand_chip *chip = &sharpsl->chip; + int ret; - /* Release resources, unregister device */ - nand_release(&sharpsl->chip); + /* Unregister device */ + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + + /* Release resources */ + nand_cleanup(chip); iounmap(sharpsl->io); - /* Free the MTD device structure */ + /* Free the driver's structure */ kfree(sharpsl); return 0; diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c index 20f40c0e812c..243b34cfbc1b 100644 --- a/drivers/mtd/nand/raw/socrates_nand.c +++ b/drivers/mtd/nand/raw/socrates_nand.c @@ -169,7 +169,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) if (!res) return res; - nand_release(nand_chip); + nand_cleanup(nand_chip); out: iounmap(host->io_base); @@ -182,8 +182,12 @@ static int socrates_nand_probe(struct platform_device *ofdev) static int socrates_nand_remove(struct platform_device *ofdev) { struct 
socrates_nand_host *host = dev_get_drvdata(&ofdev->dev); + struct nand_chip *chip = &host->nand_chip; + int ret; - nand_release(&host->nand_chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); iounmap(host->io_base); diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index b6d45cd911ae..65c9d17b25a3 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -4,6 +4,7 @@ * Author: Christophe Kerello */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> @@ -37,8 +38,7 @@ /* Max ECC buffer length */ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) -#define FMC2_TIMEOUT_US 1000 -#define FMC2_TIMEOUT_MS 1000 +#define FMC2_TIMEOUT_MS 5000 /* Timings */ #define FMC2_THIZ 1 @@ -85,20 +85,16 @@ /* Register: FMC2_PCR */ #define FMC2_PCR_PWAITEN BIT(1) #define FMC2_PCR_PBKEN BIT(2) -#define FMC2_PCR_PWID_MASK GENMASK(5, 4) -#define FMC2_PCR_PWID(x) (((x) & 0x3) << 4) +#define FMC2_PCR_PWID GENMASK(5, 4) #define FMC2_PCR_PWID_BUSWIDTH_8 0 #define FMC2_PCR_PWID_BUSWIDTH_16 1 #define FMC2_PCR_ECCEN BIT(6) #define FMC2_PCR_ECCALG BIT(8) -#define FMC2_PCR_TCLR_MASK GENMASK(12, 9) -#define FMC2_PCR_TCLR(x) (((x) & 0xf) << 9) +#define FMC2_PCR_TCLR GENMASK(12, 9) #define FMC2_PCR_TCLR_DEFAULT 0xf -#define FMC2_PCR_TAR_MASK GENMASK(16, 13) -#define FMC2_PCR_TAR(x) (((x) & 0xf) << 13) +#define FMC2_PCR_TAR GENMASK(16, 13) #define FMC2_PCR_TAR_DEFAULT 0xf -#define FMC2_PCR_ECCSS_MASK GENMASK(19, 17) -#define FMC2_PCR_ECCSS(x) (((x) & 0x7) << 17) +#define FMC2_PCR_ECCSS GENMASK(19, 17) #define FMC2_PCR_ECCSS_512 1 #define FMC2_PCR_ECCSS_2048 3 #define FMC2_PCR_BCHECC BIT(24) @@ -108,17 +104,17 @@ #define FMC2_SR_NWRF BIT(6) /* Register: FMC2_PMEM */ -#define FMC2_PMEM_MEMSET(x) (((x) & 0xff) << 0) -#define FMC2_PMEM_MEMWAIT(x) (((x) & 0xff) << 8) -#define FMC2_PMEM_MEMHOLD(x) (((x) & 0xff) << 16) -#define FMC2_PMEM_MEMHIZ(x) (((x) & 0xff) << 24) +#define FMC2_PMEM_MEMSET GENMASK(7, 0) +#define FMC2_PMEM_MEMWAIT GENMASK(15, 8) +#define FMC2_PMEM_MEMHOLD GENMASK(23, 16) +#define FMC2_PMEM_MEMHIZ GENMASK(31, 24) #define FMC2_PMEM_DEFAULT 0x0a0a0a0a /* Register: FMC2_PATT */ -#define FMC2_PATT_ATTSET(x) (((x) & 0xff) << 0) -#define FMC2_PATT_ATTWAIT(x) (((x) & 0xff) << 8) -#define FMC2_PATT_ATTHOLD(x) (((x) & 0xff) << 16) -#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) +#define FMC2_PATT_ATTSET GENMASK(7, 0) +#define FMC2_PATT_ATTWAIT GENMASK(15, 8) +#define FMC2_PATT_ATTHOLD GENMASK(23, 16) +#define FMC2_PATT_ATTHIZ GENMASK(31, 24) #define FMC2_PATT_DEFAULT 0x0a0a0a0a /* Register: FMC2_ISR */ @@ -133,9 +129,9 @@ /* Register: FMC2_CSQCFGR1 */ #define FMC2_CSQCFGR1_CMD2EN BIT(1) #define FMC2_CSQCFGR1_DMADEN BIT(2) -#define FMC2_CSQCFGR1_ACYNBR(x) (((x) & 0x7) << 4) -#define FMC2_CSQCFGR1_CMD1(x) (((x) & 0xff) << 8) -#define FMC2_CSQCFGR1_CMD2(x) (((x) & 0xff) << 16) +#define FMC2_CSQCFGR1_ACYNBR GENMASK(6, 4) +#define FMC2_CSQCFGR1_CMD1 GENMASK(15, 8) +#define FMC2_CSQCFGR1_CMD2 GENMASK(23, 16) #define FMC2_CSQCFGR1_CMD1T BIT(24) #define FMC2_CSQCFGR1_CMD2T BIT(25) @@ -143,13 +139,13 @@ #define FMC2_CSQCFGR2_SQSDTEN BIT(0) #define FMC2_CSQCFGR2_RCMD2EN BIT(1) #define FMC2_CSQCFGR2_DMASEN BIT(2) -#define FMC2_CSQCFGR2_RCMD1(x) (((x) & 0xff) << 8) -#define FMC2_CSQCFGR2_RCMD2(x) (((x) & 0xff) << 16) +#define FMC2_CSQCFGR2_RCMD1 GENMASK(15, 8) +#define FMC2_CSQCFGR2_RCMD2 GENMASK(23, 16) #define FMC2_CSQCFGR2_RCMD1T BIT(24) #define FMC2_CSQCFGR2_RCMD2T BIT(25) /* Register: FMC2_CSQCFGR3 */ 
-#define FMC2_CSQCFGR3_SNBR(x) (((x) & 0x1f) << 8) +#define FMC2_CSQCFGR3_SNBR GENMASK(13, 8) #define FMC2_CSQCFGR3_AC1T BIT(16) #define FMC2_CSQCFGR3_AC2T BIT(17) #define FMC2_CSQCFGR3_AC3T BIT(18) @@ -160,15 +156,15 @@ #define FMC2_CSQCFGR3_RAC2T BIT(23) /* Register: FMC2_CSQCAR1 */ -#define FMC2_CSQCAR1_ADDC1(x) (((x) & 0xff) << 0) -#define FMC2_CSQCAR1_ADDC2(x) (((x) & 0xff) << 8) -#define FMC2_CSQCAR1_ADDC3(x) (((x) & 0xff) << 16) -#define FMC2_CSQCAR1_ADDC4(x) (((x) & 0xff) << 24) +#define FMC2_CSQCAR1_ADDC1 GENMASK(7, 0) +#define FMC2_CSQCAR1_ADDC2 GENMASK(15, 8) +#define FMC2_CSQCAR1_ADDC3 GENMASK(23, 16) +#define FMC2_CSQCAR1_ADDC4 GENMASK(31, 24) /* Register: FMC2_CSQCAR2 */ -#define FMC2_CSQCAR2_ADDC5(x) (((x) & 0xff) << 0) -#define FMC2_CSQCAR2_NANDCEN(x) (((x) & 0x3) << 10) -#define FMC2_CSQCAR2_SAO(x) (((x) & 0xffff) << 16) +#define FMC2_CSQCAR2_ADDC5 GENMASK(7, 0) +#define FMC2_CSQCAR2_NANDCEN GENMASK(11, 10) +#define FMC2_CSQCAR2_SAO GENMASK(31, 16) /* Register: FMC2_CSQIER */ #define FMC2_CSQIER_TCIE BIT(0) @@ -189,28 +185,23 @@ /* Register: FMC2_BCHDSR0 */ #define FMC2_BCHDSR0_DUE BIT(0) #define FMC2_BCHDSR0_DEF BIT(1) -#define FMC2_BCHDSR0_DEN_MASK GENMASK(7, 4) -#define FMC2_BCHDSR0_DEN_SHIFT 4 +#define FMC2_BCHDSR0_DEN GENMASK(7, 4) /* Register: FMC2_BCHDSR1 */ -#define FMC2_BCHDSR1_EBP1_MASK GENMASK(12, 0) -#define FMC2_BCHDSR1_EBP2_MASK GENMASK(28, 16) -#define FMC2_BCHDSR1_EBP2_SHIFT 16 +#define FMC2_BCHDSR1_EBP1 GENMASK(12, 0) +#define FMC2_BCHDSR1_EBP2 GENMASK(28, 16) /* Register: FMC2_BCHDSR2 */ -#define FMC2_BCHDSR2_EBP3_MASK GENMASK(12, 0) -#define FMC2_BCHDSR2_EBP4_MASK GENMASK(28, 16) -#define FMC2_BCHDSR2_EBP4_SHIFT 16 +#define FMC2_BCHDSR2_EBP3 GENMASK(12, 0) +#define FMC2_BCHDSR2_EBP4 GENMASK(28, 16) /* Register: FMC2_BCHDSR3 */ -#define FMC2_BCHDSR3_EBP5_MASK GENMASK(12, 0) -#define FMC2_BCHDSR3_EBP6_MASK GENMASK(28, 16) -#define FMC2_BCHDSR3_EBP6_SHIFT 16 +#define FMC2_BCHDSR3_EBP5 GENMASK(12, 0) +#define FMC2_BCHDSR3_EBP6 GENMASK(28, 16) /* Register: FMC2_BCHDSR4 */ -#define FMC2_BCHDSR4_EBP7_MASK GENMASK(12, 0) -#define FMC2_BCHDSR4_EBP8_MASK GENMASK(28, 16) -#define FMC2_BCHDSR4_EBP8_SHIFT 16 +#define FMC2_BCHDSR4_EBP7 GENMASK(12, 0) +#define FMC2_BCHDSR4_EBP8 GENMASK(28, 16) enum stm32_fmc2_ecc { FMC2_ECC_HAM = 1, @@ -281,43 +272,41 @@ static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base) return container_of(base, struct stm32_fmc2_nfc, base); } -/* Timings configuration */ -static void stm32_fmc2_timings_init(struct nand_chip *chip) +static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); struct stm32_fmc2_timings *timings = &nand->timings; - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); u32 pmem, patt; /* Set tclr/tar timings */ - pcr &= ~FMC2_PCR_TCLR_MASK; - pcr |= FMC2_PCR_TCLR(timings->tclr); - pcr &= ~FMC2_PCR_TAR_MASK; - pcr |= FMC2_PCR_TAR(timings->tar); + pcr &= ~FMC2_PCR_TCLR; + pcr |= FIELD_PREP(FMC2_PCR_TCLR, timings->tclr); + pcr &= ~FMC2_PCR_TAR; + pcr |= FIELD_PREP(FMC2_PCR_TAR, timings->tar); /* Set tset/twait/thold/thiz timings in common bank */ - pmem = FMC2_PMEM_MEMSET(timings->tset_mem); - pmem |= FMC2_PMEM_MEMWAIT(timings->twait); - pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem); - pmem |= FMC2_PMEM_MEMHIZ(timings->thiz); + pmem = FIELD_PREP(FMC2_PMEM_MEMSET, 
timings->tset_mem); + pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait); + pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem); + pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz); /* Set tset/twait/thold/thiz timings in attribut bank */ - patt = FMC2_PATT_ATTSET(timings->tset_att); - patt |= FMC2_PATT_ATTWAIT(timings->twait); - patt |= FMC2_PATT_ATTHOLD(timings->thold_att); - patt |= FMC2_PATT_ATTHIZ(timings->thiz); + patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att); + patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait); + patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att); + patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz); - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); - writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM); - writel_relaxed(patt, fmc2->io_base + FMC2_PATT); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); + writel_relaxed(pmem, nfc->io_base + FMC2_PMEM); + writel_relaxed(patt, nfc->io_base + FMC2_PATT); } -/* Controller configuration */ -static void stm32_fmc2_setup(struct nand_chip *chip) +static void stm32_fmc2_nfc_setup(struct nand_chip *chip) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); /* Configure ECC algorithm (default configuration is Hamming) */ pcr &= ~FMC2_PCR_ECCALG; @@ -330,195 +319,182 @@ static void stm32_fmc2_setup(struct nand_chip *chip) } /* Set buswidth */ - pcr &= ~FMC2_PCR_PWID_MASK; + pcr &= ~FMC2_PCR_PWID; if (chip->options & NAND_BUSWIDTH_16) - pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); + pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16); /* Set ECC sector size */ - pcr &= ~FMC2_PCR_ECCSS_MASK; - pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512); + pcr &= ~FMC2_PCR_ECCSS; + pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512); - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); } -/* Select target */ -static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr) +static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); struct dma_slave_config dma_cfg; int ret; - if (nand->cs_used[chipnr] == fmc2->cs_sel) + if (nand->cs_used[chipnr] == nfc->cs_sel) return 0; - fmc2->cs_sel = nand->cs_used[chipnr]; + nfc->cs_sel = nand->cs_used[chipnr]; + stm32_fmc2_nfc_setup(chip); + stm32_fmc2_nfc_timings_init(chip); - /* FMC2 setup routine */ - stm32_fmc2_setup(chip); - - /* Apply timings */ - stm32_fmc2_timings_init(chip); - - if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) { + if (nfc->dma_tx_ch && nfc->dma_rx_ch) { memset(&dma_cfg, 0, sizeof(dma_cfg)); - dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel]; - dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel]; + dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel]; + dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel]; dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; dma_cfg.src_maxburst = 32; dma_cfg.dst_maxburst = 32; - ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg); + ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg); if (ret) { - dev_err(fmc2->dev, "tx DMA engine slave config failed\n"); + dev_err(nfc->dev, "tx DMA engine slave config failed\n"); return ret; } - ret = 
dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg); + ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg); if (ret) { - dev_err(fmc2->dev, "rx DMA engine slave config failed\n"); + dev_err(nfc->dev, "rx DMA engine slave config failed\n"); return ret; } } - if (fmc2->dma_ecc_ch) { + if (nfc->dma_ecc_ch) { /* * Hamming: we read HECCR register * BCH4/BCH8: we read BCHDSRSx registers */ memset(&dma_cfg, 0, sizeof(dma_cfg)); - dma_cfg.src_addr = fmc2->io_phys_addr; + dma_cfg.src_addr = nfc->io_phys_addr; dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ? FMC2_HECCR : FMC2_BCHDSR0; dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg); + ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg); if (ret) { - dev_err(fmc2->dev, "ECC DMA engine slave config failed\n"); + dev_err(nfc->dev, "ECC DMA engine slave config failed\n"); return ret; } /* Calculate ECC length needed for one sector */ - fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ? - FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN; + nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ? + FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN; } return 0; } -/* Set bus width to 16-bit or 8-bit */ -static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set) +static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set) { - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); - pcr &= ~FMC2_PCR_PWID_MASK; + pcr &= ~FMC2_PCR_PWID; if (set) - pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); } -/* Enable/disable ECC */ -static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable) +static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable) { - u32 pcr = readl(fmc2->io_base + FMC2_PCR); + u32 pcr = readl(nfc->io_base + FMC2_PCR); pcr &= ~FMC2_PCR_ECCEN; if (enable) pcr |= FMC2_PCR_ECCEN; - writel(pcr, fmc2->io_base + FMC2_PCR); + writel(pcr, nfc->io_base + FMC2_PCR); } -/* Enable irq sources in case of the sequencer is used */ -static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc) { - u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); + u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER); csqier |= FMC2_CSQIER_TCIE; - fmc2->irq_state = FMC2_IRQ_SEQ; + nfc->irq_state = FMC2_IRQ_SEQ; - writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); + writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER); } -/* Disable irq sources in case of the sequencer is used */ -static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc) { - u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); + u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER); csqier &= ~FMC2_CSQIER_TCIE; - writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); + writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER); - fmc2->irq_state = FMC2_IRQ_UNKNOWN; + nfc->irq_state = FMC2_IRQ_UNKNOWN; } -/* Clear irq sources in case of the sequencer is used */ -static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc) { - writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR); + 
writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, nfc->io_base + FMC2_CSQICR); } -/* Enable irq sources in case of bch is used */ -static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2, - int mode) +static inline void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, + int mode) { - u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); + u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER); if (mode == NAND_ECC_WRITE) bchier |= FMC2_BCHIER_EPBRIE; else bchier |= FMC2_BCHIER_DERIE; - fmc2->irq_state = FMC2_IRQ_BCH; + nfc->irq_state = FMC2_IRQ_BCH; - writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); + writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER); } -/* Disable irq sources in case of bch is used */ -static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc) { - u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); + u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER); bchier &= ~FMC2_BCHIER_DERIE; bchier &= ~FMC2_BCHIER_EPBRIE; - writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); + writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER); - fmc2->irq_state = FMC2_IRQ_UNKNOWN; + nfc->irq_state = FMC2_IRQ_UNKNOWN; } -/* Clear irq sources in case of bch is used */ -static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2) +static inline void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc) { - writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR); + writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, nfc->io_base + FMC2_BCHICR); } /* * Enable ECC logic and reset syndrome/parity bits previously calculated * Syndrome/parity bits is cleared by setting the ECCEN bit to 0 */ -static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode) +static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); - stm32_fmc2_set_ecc(fmc2, false); + stm32_fmc2_nfc_set_ecc(nfc, false); if (chip->ecc.strength != FMC2_ECC_HAM) { - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); if (mode == NAND_ECC_WRITE) pcr |= FMC2_PCR_WEN; else pcr &= ~FMC2_PCR_WEN; - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); - reinit_completion(&fmc2->complete); - stm32_fmc2_clear_bch_irq(fmc2); - stm32_fmc2_enable_bch_irq(fmc2, mode); + reinit_completion(&nfc->complete); + stm32_fmc2_nfc_clear_bch_irq(nfc); + stm32_fmc2_nfc_enable_bch_irq(nfc, mode); } - stm32_fmc2_set_ecc(fmc2, true); + stm32_fmc2_nfc_set_ecc(nfc, true); } /* @@ -526,40 +502,37 @@ static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode) * ECC is 3 bytes for 512 bytes of data (supports error correction up to * max of 1-bit) */ -static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc) +static inline void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc) { ecc[0] = ecc_sta; ecc[1] = ecc_sta >> 8; ecc[2] = ecc_sta >> 16; } -static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data, - u8 *ecc) +static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data, + u8 *ecc) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); u32 sr, heccr; int ret; - ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR, - sr, sr & FMC2_SR_NWRF, 10, - FMC2_TIMEOUT_MS); + ret = 
readl_relaxed_poll_timeout(nfc->io_base + FMC2_SR, + sr, sr & FMC2_SR_NWRF, 1, + 1000 * FMC2_TIMEOUT_MS); if (ret) { - dev_err(fmc2->dev, "ham timeout\n"); + dev_err(nfc->dev, "ham timeout\n"); return ret; } - heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR); - - stm32_fmc2_ham_set_ecc(heccr, ecc); - - /* Disable ECC */ - stm32_fmc2_set_ecc(fmc2, false); + heccr = readl_relaxed(nfc->io_base + FMC2_HECCR); + stm32_fmc2_nfc_ham_set_ecc(heccr, ecc); + stm32_fmc2_nfc_set_ecc(nfc, false); return 0; } -static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat, - u8 *read_ecc, u8 *calc_ecc) +static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) { u8 bit_position = 0, b0, b1, b2; u32 byte_addr = 0, b; @@ -615,28 +588,28 @@ static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat, * ECC is 7/13 bytes for 512 bytes of data (supports error correction up to * max of 4-bit/8-bit) */ -static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data, - u8 *ecc) +static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data, + u8 *ecc) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); u32 bchpbr; /* Wait until the BCH code is ready */ - if (!wait_for_completion_timeout(&fmc2->complete, + if (!wait_for_completion_timeout(&nfc->complete, msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "bch timeout\n"); - stm32_fmc2_disable_bch_irq(fmc2); + dev_err(nfc->dev, "bch timeout\n"); + stm32_fmc2_nfc_disable_bch_irq(nfc); return -ETIMEDOUT; } /* Read parity bits */ - bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1); + bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR1); ecc[0] = bchpbr; ecc[1] = bchpbr >> 8; ecc[2] = bchpbr >> 16; ecc[3] = bchpbr >> 24; - bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2); + bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR2); ecc[4] = bchpbr; ecc[5] = bchpbr >> 8; ecc[6] = bchpbr >> 16; @@ -644,24 +617,22 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data, if (chip->ecc.strength == FMC2_ECC_BCH8) { ecc[7] = bchpbr >> 24; - bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3); + bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR3); ecc[8] = bchpbr; ecc[9] = bchpbr >> 8; ecc[10] = bchpbr >> 16; ecc[11] = bchpbr >> 24; - bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4); + bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR4); ecc[12] = bchpbr; } - /* Disable ECC */ - stm32_fmc2_set_ecc(fmc2, false); + stm32_fmc2_nfc_set_ecc(nfc, false); return 0; } -/* BCH algorithm correction */ -static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) +static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) { u32 bchdsr0 = ecc_sta[0]; u32 bchdsr1 = ecc_sta[1]; @@ -680,16 +651,16 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE)) return -EBADMSG; - pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK; - pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT; - pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK; - pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT; - pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK; - pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT; - pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK; - pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT; + pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1); + pos[1] = 
FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1); + pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2); + pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2); + pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3); + pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3); + pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4); + pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4); - den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT; + den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0); for (i = 0; i < den; i++) { if (pos[i] < eccsize * 8) { change_bit(pos[i], (unsigned long *)dat); @@ -700,34 +671,33 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) return nb_errs; } -static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat, - u8 *read_ecc, u8 *calc_ecc) +static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); u32 ecc_sta[5]; /* Wait until the decoding error is ready */ - if (!wait_for_completion_timeout(&fmc2->complete, + if (!wait_for_completion_timeout(&nfc->complete, msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "bch timeout\n"); - stm32_fmc2_disable_bch_irq(fmc2); + dev_err(nfc->dev, "bch timeout\n"); + stm32_fmc2_nfc_disable_bch_irq(nfc); return -ETIMEDOUT; } - ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0); - ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1); - ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2); - ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3); - ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4); + ecc_sta[0] = readl_relaxed(nfc->io_base + FMC2_BCHDSR0); + ecc_sta[1] = readl_relaxed(nfc->io_base + FMC2_BCHDSR1); + ecc_sta[2] = readl_relaxed(nfc->io_base + FMC2_BCHDSR2); + ecc_sta[3] = readl_relaxed(nfc->io_base + FMC2_BCHDSR3); + ecc_sta[4] = readl_relaxed(nfc->io_base + FMC2_BCHDSR4); - /* Disable ECC */ - stm32_fmc2_set_ecc(fmc2, false); + stm32_fmc2_nfc_set_ecc(nfc, false); - return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta); + return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta); } -static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf, - int oob_required, int page) +static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf, + int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); int ret, i, s, stat, eccsize = chip->ecc.size; @@ -789,21 +759,21 @@ static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf, } /* Sequencer read/write configuration */ -static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, - int raw, bool write_data) +static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, + int raw, bool write_data) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct mtd_info *mtd = nand_to_mtd(chip); u32 csqcfgr1, csqcfgr2, csqcfgr3; u32 csqar1, csqar2; u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN; - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); if (write_data) pcr |= FMC2_PCR_WEN; else pcr &= ~FMC2_PCR_WEN; - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); /* * - Set Program Page/Page Read command @@ -812,11 +782,11 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, */ csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T; if (write_data) - 
csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN); + csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN); else - csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) | + csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) | FMC2_CSQCFGR1_CMD2EN | - FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) | + FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) | FMC2_CSQCFGR1_CMD2T; /* @@ -826,11 +796,12 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, * - Set timings */ if (write_data) - csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN); + csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN); else - csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) | + csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) | FMC2_CSQCFGR2_RCMD2EN | - FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) | + FIELD_PREP(FMC2_CSQCFGR2_RCMD2, + NAND_CMD_RNDOUTSTART) | FMC2_CSQCFGR2_RCMD1T | FMC2_CSQCFGR2_RCMD2T; if (!raw) { @@ -842,7 +813,7 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, * - Set the number of sectors to be written * - Set timings */ - csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1); + csqcfgr3 = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1); if (write_data) { csqcfgr3 |= FMC2_CSQCFGR3_RAC2T; if (chip->options & NAND_ROW_ADDR_3) @@ -856,8 +827,8 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, * Byte 1 and byte 2 => column, we start at 0x0 * Byte 3 and byte 4 => page */ - csqar1 = FMC2_CSQCAR1_ADDC3(page); - csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8); + csqar1 = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page); + csqar1 |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8); /* * - Set chip enable number @@ -865,43 +836,44 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, * - Calculate the number of address cycles to be issued * - Set byte 5 of address cycle if needed */ - csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel); + csqar2 = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel); if (chip->options & NAND_BUSWIDTH_16) - csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1); + csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1); else - csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset); + csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset); if (chip->options & NAND_ROW_ADDR_3) { - csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5); - csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16); + csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5); + csqar2 |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16); } else { - csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4); + csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4); } - writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1); - writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2); - writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3); - writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1); - writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2); + writel_relaxed(csqcfgr1, nfc->io_base + FMC2_CSQCFGR1); + writel_relaxed(csqcfgr2, nfc->io_base + FMC2_CSQCFGR2); + writel_relaxed(csqcfgr3, nfc->io_base + FMC2_CSQCFGR3); + writel_relaxed(csqar1, nfc->io_base + FMC2_CSQAR1); + writel_relaxed(csqar2, nfc->io_base + FMC2_CSQAR2); } -static void stm32_fmc2_dma_callback(void *arg) +static void stm32_fmc2_nfc_dma_callback(void *arg) { complete((struct completion *)arg); } /* Read/write data from/to a page */ -static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, - int raw, bool write_data) +static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, + int raw, bool write_data) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct 
stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct dma_async_tx_descriptor *desc_data, *desc_ecc; struct scatterlist *sg; - struct dma_chan *dma_ch = fmc2->dma_rx_ch; + struct dma_chan *dma_ch = nfc->dma_rx_ch; enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE; enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM; - u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR); + u32 csqcr = readl_relaxed(nfc->io_base + FMC2_CSQCR); int eccsteps = chip->ecc.steps; int eccsize = chip->ecc.size; + unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS); const u8 *p = buf; int s, ret; @@ -909,20 +881,20 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, if (write_data) { dma_data_dir = DMA_TO_DEVICE; dma_transfer_dir = DMA_MEM_TO_DEV; - dma_ch = fmc2->dma_tx_ch; + dma_ch = nfc->dma_tx_ch; } - for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) { + for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) { sg_set_buf(sg, p, eccsize); p += eccsize; } - ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl, + ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir); if (ret < 0) return ret; - desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl, + desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl, eccsteps, dma_transfer_dir, DMA_PREP_INTERRUPT); if (!desc_data) { @@ -930,10 +902,10 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, goto err_unmap_data; } - reinit_completion(&fmc2->dma_data_complete); - reinit_completion(&fmc2->complete); - desc_data->callback = stm32_fmc2_dma_callback; - desc_data->callback_param = &fmc2->dma_data_complete; + reinit_completion(&nfc->dma_data_complete); + reinit_completion(&nfc->complete); + desc_data->callback = stm32_fmc2_nfc_dma_callback; + desc_data->callback_param = &nfc->dma_data_complete; ret = dma_submit_error(dmaengine_submit(desc_data)); if (ret) goto err_unmap_data; @@ -942,19 +914,19 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, if (!write_data && !raw) { /* Configure DMA ECC status */ - p = fmc2->ecc_buf; - for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) { - sg_set_buf(sg, p, fmc2->dma_ecc_len); - p += fmc2->dma_ecc_len; + p = nfc->ecc_buf; + for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) { + sg_set_buf(sg, p, nfc->dma_ecc_len); + p += nfc->dma_ecc_len; } - ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl, + ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl, eccsteps, dma_data_dir); if (ret < 0) goto err_unmap_data; - desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch, - fmc2->dma_ecc_sg.sgl, + desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch, + nfc->dma_ecc_sg.sgl, eccsteps, dma_transfer_dir, DMA_PREP_INTERRUPT); if (!desc_ecc) { @@ -962,76 +934,73 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, goto err_unmap_ecc; } - reinit_completion(&fmc2->dma_ecc_complete); - desc_ecc->callback = stm32_fmc2_dma_callback; - desc_ecc->callback_param = &fmc2->dma_ecc_complete; + reinit_completion(&nfc->dma_ecc_complete); + desc_ecc->callback = stm32_fmc2_nfc_dma_callback; + desc_ecc->callback_param = &nfc->dma_ecc_complete; ret = dma_submit_error(dmaengine_submit(desc_ecc)); if (ret) goto err_unmap_ecc; - dma_async_issue_pending(fmc2->dma_ecc_ch); + dma_async_issue_pending(nfc->dma_ecc_ch); } - stm32_fmc2_clear_seq_irq(fmc2); - stm32_fmc2_enable_seq_irq(fmc2); + stm32_fmc2_nfc_clear_seq_irq(nfc); + stm32_fmc2_nfc_enable_seq_irq(nfc); /* Start the transfer */ csqcr |= FMC2_CSQCR_CSQSTART; - writel_relaxed(csqcr, fmc2->io_base + 
FMC2_CSQCR); + writel_relaxed(csqcr, nfc->io_base + FMC2_CSQCR); /* Wait end of sequencer transfer */ - if (!wait_for_completion_timeout(&fmc2->complete, - msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "seq timeout\n"); - stm32_fmc2_disable_seq_irq(fmc2); + if (!wait_for_completion_timeout(&nfc->complete, timeout)) { + dev_err(nfc->dev, "seq timeout\n"); + stm32_fmc2_nfc_disable_seq_irq(nfc); dmaengine_terminate_all(dma_ch); if (!write_data && !raw) - dmaengine_terminate_all(fmc2->dma_ecc_ch); + dmaengine_terminate_all(nfc->dma_ecc_ch); ret = -ETIMEDOUT; goto err_unmap_ecc; } /* Wait DMA data transfer completion */ - if (!wait_for_completion_timeout(&fmc2->dma_data_complete, - msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "data DMA timeout\n"); + if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) { + dev_err(nfc->dev, "data DMA timeout\n"); dmaengine_terminate_all(dma_ch); ret = -ETIMEDOUT; } /* Wait DMA ECC transfer completion */ if (!write_data && !raw) { - if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete, - msecs_to_jiffies(FMC2_TIMEOUT_MS))) { - dev_err(fmc2->dev, "ECC DMA timeout\n"); - dmaengine_terminate_all(fmc2->dma_ecc_ch); + if (!wait_for_completion_timeout(&nfc->dma_ecc_complete, + timeout)) { + dev_err(nfc->dev, "ECC DMA timeout\n"); + dmaengine_terminate_all(nfc->dma_ecc_ch); ret = -ETIMEDOUT; } } err_unmap_ecc: if (!write_data && !raw) - dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl, + dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl, eccsteps, dma_data_dir); err_unmap_data: - dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir); + dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir); return ret; } -static int stm32_fmc2_sequencer_write(struct nand_chip *chip, - const u8 *buf, int oob_required, - int page, int raw) +static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf, + int oob_required, int page, int raw) { struct mtd_info *mtd = nand_to_mtd(chip); int ret; /* Configure the sequencer */ - stm32_fmc2_rw_page_init(chip, page, raw, true); + stm32_fmc2_nfc_rw_page_init(chip, page, raw, true); /* Write the page */ - ret = stm32_fmc2_xfer(chip, buf, raw, true); + ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true); if (ret) return ret; @@ -1047,55 +1016,50 @@ static int stm32_fmc2_sequencer_write(struct nand_chip *chip, return nand_prog_page_end_op(chip); } -static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip, - const u8 *buf, - int oob_required, - int page) +static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) { int ret; - /* Select the target */ - ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs); if (ret) return ret; - return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false); + return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false); } -static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip, - const u8 *buf, - int oob_required, - int page) +static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) { int ret; - /* Select the target */ - ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs); if (ret) return ret; - return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true); + return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true); } /* Get a status indicating which sectors have errors 
*/ -static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2) +static inline u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc) { - u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR); + u32 csqemsr = readl_relaxed(nfc->io_base + FMC2_CSQEMSR); return csqemsr & FMC2_CSQEMSR_SEM; } -static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, - u8 *read_ecc, u8 *calc_ecc) +static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) { struct mtd_info *mtd = nand_to_mtd(chip); - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); int eccbytes = chip->ecc.bytes; int eccsteps = chip->ecc.steps; int eccstrength = chip->ecc.strength; int i, s, eccsize = chip->ecc.size; - u32 *ecc_sta = (u32 *)fmc2->ecc_buf; - u16 sta_map = stm32_fmc2_get_mapping_status(fmc2); + u32 *ecc_sta = (u32 *)nfc->ecc_buf; + u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc); unsigned int max_bitflips = 0; for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) { @@ -1104,10 +1068,11 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, if (eccstrength == FMC2_ECC_HAM) { /* Ecc_sta = FMC2_HECCR */ if (sta_map & BIT(s)) { - stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]); - stat = stm32_fmc2_ham_correct(chip, dat, - &read_ecc[i], - &calc_ecc[i]); + stm32_fmc2_nfc_ham_set_ecc(*ecc_sta, + &calc_ecc[i]); + stat = stm32_fmc2_nfc_ham_correct(chip, dat, + &read_ecc[i], + &calc_ecc[i]); } ecc_sta++; } else { @@ -1119,8 +1084,8 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, * Ecc_sta[4] = FMC2_BCHDSR4 */ if (sta_map & BIT(s)) - stat = stm32_fmc2_bch_decode(eccsize, dat, - ecc_sta); + stat = stm32_fmc2_nfc_bch_decode(eccsize, dat, + ecc_sta); ecc_sta += 5; } @@ -1143,30 +1108,29 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, return max_bitflips; } -static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf, - int oob_required, int page) +static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf, + int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); u8 *ecc_calc = chip->ecc.calc_buf; u8 *ecc_code = chip->ecc.code_buf; u16 sta_map; int ret; - /* Select the target */ - ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs); if (ret) return ret; /* Configure the sequencer */ - stm32_fmc2_rw_page_init(chip, page, 0, false); + stm32_fmc2_nfc_rw_page_init(chip, page, 0, false); /* Read the page */ - ret = stm32_fmc2_xfer(chip, buf, 0, false); + ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false); if (ret) return ret; - sta_map = stm32_fmc2_get_mapping_status(fmc2); + sta_map = stm32_fmc2_nfc_get_mapping_status(nfc); /* Check if errors happen */ if (likely(!sta_map)) { @@ -1193,22 +1157,21 @@ static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf, return chip->ecc.correct(chip, buf, ecc_code, ecc_calc); } -static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf, - int oob_required, int page) +static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf, + int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); int ret; - /* Select the target */ - ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + 
ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs); if (ret) return ret; /* Configure the sequencer */ - stm32_fmc2_rw_page_init(chip, page, 1, false); + stm32_fmc2_nfc_rw_page_init(chip, page, 1, false); /* Read the page */ - ret = stm32_fmc2_xfer(chip, buf, 1, false); + ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false); if (ret) return ret; @@ -1221,31 +1184,31 @@ static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf, return 0; } -static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id) +static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id) { - struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id; + struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id; - if (fmc2->irq_state == FMC2_IRQ_SEQ) + if (nfc->irq_state == FMC2_IRQ_SEQ) /* Sequencer is used */ - stm32_fmc2_disable_seq_irq(fmc2); - else if (fmc2->irq_state == FMC2_IRQ_BCH) + stm32_fmc2_nfc_disable_seq_irq(nfc); + else if (nfc->irq_state == FMC2_IRQ_BCH) /* BCH is used */ - stm32_fmc2_disable_bch_irq(fmc2); + stm32_fmc2_nfc_disable_bch_irq(nfc); - complete(&fmc2->complete); + complete(&nfc->complete); return IRQ_HANDLED; } -static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf, - unsigned int len, bool force_8bit) +static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf, + unsigned int len, bool force_8bit) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); - void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel]; + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); + void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel]; if (force_8bit && chip->options & NAND_BUSWIDTH_16) /* Reconfigure bus width to 8-bit */ - stm32_fmc2_set_buswidth_16(fmc2, false); + stm32_fmc2_nfc_set_buswidth_16(nfc, false); if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) { if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) { @@ -1281,18 +1244,18 @@ static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf, if (force_8bit && chip->options & NAND_BUSWIDTH_16) /* Reconfigure bus width to 16-bit */ - stm32_fmc2_set_buswidth_16(fmc2, true); + stm32_fmc2_nfc_set_buswidth_16(nfc, true); } -static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, - unsigned int len, bool force_8bit) +static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf, + unsigned int len, bool force_8bit) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); - void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel]; + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); + void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel]; if (force_8bit && chip->options & NAND_BUSWIDTH_16) /* Reconfigure bus width to 8-bit */ - stm32_fmc2_set_buswidth_16(fmc2, false); + stm32_fmc2_nfc_set_buswidth_16(nfc, false); if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) { if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) { @@ -1328,48 +1291,49 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, if (force_8bit && chip->options & NAND_BUSWIDTH_16) /* Reconfigure bus width to 16-bit */ - stm32_fmc2_set_buswidth_16(fmc2, true); + stm32_fmc2_nfc_set_buswidth_16(nfc, true); } -static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) +static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip, + unsigned long timeout_ms) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); const struct nand_sdr_timings *timings; u32 isr, sr; /* Check if 
there is no pending requests to the NAND flash */ - if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, + if (readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_SR, sr, sr & FMC2_SR_NWRF, 1, - FMC2_TIMEOUT_US)) - dev_warn(fmc2->dev, "Waitrdy timeout\n"); + 1000 * FMC2_TIMEOUT_MS)) + dev_warn(nfc->dev, "Waitrdy timeout\n"); /* Wait tWB before R/B# signal is low */ timings = nand_get_sdr_timings(&chip->data_interface); ndelay(PSEC_TO_NSEC(timings->tWB_max)); /* R/B# signal is low, clear high level flag */ - writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR); + writel_relaxed(FMC2_ICR_CIHLF, nfc->io_base + FMC2_ICR); /* Wait R/B# signal is high */ - return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR, + return readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_ISR, isr, isr & FMC2_ISR_IHLF, 5, 1000 * timeout_ms); } -static int stm32_fmc2_exec_op(struct nand_chip *chip, - const struct nand_operation *op, - bool check_only) +static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); const struct nand_op_instr *instr = NULL; - unsigned int op_id, i; + unsigned int op_id, i, timeout; int ret; - ret = stm32_fmc2_select_chip(chip, op->cs); - if (ret) - return ret; - if (check_only) + return 0; + + ret = stm32_fmc2_nfc_select_chip(chip, op->cs); + if (ret) return ret; for (op_id = 0; op_id < op->ninstrs; op_id++) { @@ -1378,30 +1342,30 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, switch (instr->type) { case NAND_OP_CMD_INSTR: writeb_relaxed(instr->ctx.cmd.opcode, - fmc2->cmd_base[fmc2->cs_sel]); + nfc->cmd_base[nfc->cs_sel]); break; case NAND_OP_ADDR_INSTR: for (i = 0; i < instr->ctx.addr.naddrs; i++) writeb_relaxed(instr->ctx.addr.addrs[i], - fmc2->addr_base[fmc2->cs_sel]); + nfc->addr_base[nfc->cs_sel]); break; case NAND_OP_DATA_IN_INSTR: - stm32_fmc2_read_data(chip, instr->ctx.data.buf.in, - instr->ctx.data.len, - instr->ctx.data.force_8bit); + stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in, + instr->ctx.data.len, + instr->ctx.data.force_8bit); break; case NAND_OP_DATA_OUT_INSTR: - stm32_fmc2_write_data(chip, instr->ctx.data.buf.out, - instr->ctx.data.len, - instr->ctx.data.force_8bit); + stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out, + instr->ctx.data.len, + instr->ctx.data.force_8bit); break; case NAND_OP_WAITRDY_INSTR: - ret = stm32_fmc2_waitrdy(chip, - instr->ctx.waitrdy.timeout_ms); + timeout = instr->ctx.waitrdy.timeout_ms; + ret = stm32_fmc2_nfc_waitrdy(chip, timeout); break; } } @@ -1409,21 +1373,20 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, return ret; } -/* Controller initialization */ -static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2) +static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc) { - u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); - u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1); + u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); + u32 bcr1 = readl_relaxed(nfc->io_base + FMC2_BCR1); /* Set CS used to undefined */ - fmc2->cs_sel = -1; + nfc->cs_sel = -1; /* Enable wait feature and nand flash memory bank */ pcr |= FMC2_PCR_PWAITEN; pcr |= FMC2_PCR_PBKEN; /* Set buswidth to 8 bits mode for identification */ - pcr &= ~FMC2_PCR_PWID_MASK; + pcr &= ~FMC2_PCR_PWID; /* ECC logic is disabled */ pcr &= ~FMC2_PCR_ECCEN; @@ -1434,32 +1397,31 @@ static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2) pcr 
&= ~FMC2_PCR_WEN; /* Set default ECC sector size */ - pcr &= ~FMC2_PCR_ECCSS_MASK; - pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048); + pcr &= ~FMC2_PCR_ECCSS; + pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048); /* Set default tclr/tar timings */ - pcr &= ~FMC2_PCR_TCLR_MASK; - pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT); - pcr &= ~FMC2_PCR_TAR_MASK; - pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT); + pcr &= ~FMC2_PCR_TCLR; + pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT); + pcr &= ~FMC2_PCR_TAR; + pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT); /* Enable FMC2 controller */ bcr1 |= FMC2_BCR1_FMC2EN; - writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1); - writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); - writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM); - writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT); + writel_relaxed(bcr1, nfc->io_base + FMC2_BCR1); + writel_relaxed(pcr, nfc->io_base + FMC2_PCR); + writel_relaxed(FMC2_PMEM_DEFAULT, nfc->io_base + FMC2_PMEM); + writel_relaxed(FMC2_PATT_DEFAULT, nfc->io_base + FMC2_PATT); } -/* Controller timings */ -static void stm32_fmc2_calc_timings(struct nand_chip *chip, - const struct nand_sdr_timings *sdrt) +static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip, + const struct nand_sdr_timings *sdrt) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); struct stm32_fmc2_timings *tims = &nand->timings; - unsigned long hclk = clk_get_rate(fmc2->clk); + unsigned long hclk = clk_get_rate(nfc->clk); unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000); unsigned long timing, tar, tclr, thiz, twait; unsigned long tset_mem, tset_att, thold_mem, thold_att; @@ -1583,8 +1545,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip, tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK); } -static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf) { const struct nand_sdr_timings *sdrt; @@ -1595,71 +1557,67 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) return 0; - stm32_fmc2_calc_timings(chip, sdrt); - - /* Apply timings */ - stm32_fmc2_timings_init(chip); + stm32_fmc2_nfc_calc_timings(chip, sdrt); + stm32_fmc2_nfc_timings_init(chip); return 0; } -/* DMA configuration */ -static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2) +static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc) { int ret = 0; - fmc2->dma_tx_ch = dma_request_chan(fmc2->dev, "tx"); - if (IS_ERR(fmc2->dma_tx_ch)) { - ret = PTR_ERR(fmc2->dma_tx_ch); + nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx"); + if (IS_ERR(nfc->dma_tx_ch)) { + ret = PTR_ERR(nfc->dma_tx_ch); if (ret != -ENODEV) - dev_err(fmc2->dev, + dev_err(nfc->dev, "failed to request tx DMA channel: %d\n", ret); - fmc2->dma_tx_ch = NULL; + nfc->dma_tx_ch = NULL; goto err_dma; } - fmc2->dma_rx_ch = dma_request_chan(fmc2->dev, "rx"); - if (IS_ERR(fmc2->dma_rx_ch)) { - ret = PTR_ERR(fmc2->dma_rx_ch); + nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx"); + if (IS_ERR(nfc->dma_rx_ch)) { + ret = PTR_ERR(nfc->dma_rx_ch); if (ret != -ENODEV) - dev_err(fmc2->dev, + dev_err(nfc->dev, "failed to request rx DMA channel: %d\n", ret); - fmc2->dma_rx_ch = NULL; + nfc->dma_rx_ch = NULL; goto err_dma; } - 
fmc2->dma_ecc_ch = dma_request_chan(fmc2->dev, "ecc"); - if (IS_ERR(fmc2->dma_ecc_ch)) { - ret = PTR_ERR(fmc2->dma_ecc_ch); + nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc"); + if (IS_ERR(nfc->dma_ecc_ch)) { + ret = PTR_ERR(nfc->dma_ecc_ch); if (ret != -ENODEV) - dev_err(fmc2->dev, + dev_err(nfc->dev, "failed to request ecc DMA channel: %d\n", ret); - fmc2->dma_ecc_ch = NULL; + nfc->dma_ecc_ch = NULL; goto err_dma; } - ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL); + ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL); if (ret) return ret; /* Allocate a buffer to store ECC status registers */ - fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN, - GFP_KERNEL); - if (!fmc2->ecc_buf) + nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL); + if (!nfc->ecc_buf) return -ENOMEM; - ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL); + ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL); if (ret) return ret; - init_completion(&fmc2->dma_data_complete); - init_completion(&fmc2->dma_ecc_complete); + init_completion(&nfc->dma_data_complete); + init_completion(&nfc->dma_ecc_complete); return 0; err_dma: if (ret == -ENODEV) { - dev_warn(fmc2->dev, + dev_warn(nfc->dev, "DMAs not defined in the DT, polling mode is used\n"); ret = 0; } @@ -1667,35 +1625,34 @@ static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2) return ret; } -/* NAND callbacks setup */ -static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip) +static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); /* * Specific callbacks to read/write a page depending on * the mode (polling/sequencer) and the algo used (Hamming, BCH). */ - if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) { + if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) { /* DMA => use sequencer mode callbacks */ - chip->ecc.correct = stm32_fmc2_sequencer_correct; - chip->ecc.write_page = stm32_fmc2_sequencer_write_page; - chip->ecc.read_page = stm32_fmc2_sequencer_read_page; - chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw; - chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw; + chip->ecc.correct = stm32_fmc2_nfc_seq_correct; + chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page; + chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page; + chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw; + chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw; } else { /* No DMA => use polling mode callbacks */ - chip->ecc.hwctl = stm32_fmc2_hwctl; + chip->ecc.hwctl = stm32_fmc2_nfc_hwctl; if (chip->ecc.strength == FMC2_ECC_HAM) { /* Hamming is used */ - chip->ecc.calculate = stm32_fmc2_ham_calculate; - chip->ecc.correct = stm32_fmc2_ham_correct; + chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate; + chip->ecc.correct = stm32_fmc2_nfc_ham_correct; chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK; } else { /* BCH is used */ - chip->ecc.calculate = stm32_fmc2_bch_calculate; - chip->ecc.correct = stm32_fmc2_bch_correct; - chip->ecc.read_page = stm32_fmc2_read_page; + chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate; + chip->ecc.correct = stm32_fmc2_nfc_bch_correct; + chip->ecc.read_page = stm32_fmc2_nfc_read_page; } } @@ -1708,9 +1665,8 @@ static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip) chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 
8 : 7; } -/* FMC2 layout */ -static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) +static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; @@ -1724,8 +1680,8 @@ static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section, return 0; } -static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section, - struct mtd_oob_region *oobregion) +static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) { struct nand_chip *chip = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; @@ -1739,13 +1695,12 @@ static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section, return 0; } -static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = { - .ecc = stm32_fmc2_nand_ooblayout_ecc, - .free = stm32_fmc2_nand_ooblayout_free, +static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = { + .ecc = stm32_fmc2_nfc_ooblayout_ecc, + .free = stm32_fmc2_nfc_ooblayout_free, }; -/* FMC2 caps */ -static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength) +static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength) { /* Hamming */ if (strength == FMC2_ECC_HAM) @@ -1759,14 +1714,13 @@ static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength) return 8; } -NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes, +NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes, FMC2_ECC_STEP_SIZE, FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8); -/* FMC2 controller ops */ -static int stm32_fmc2_attach_chip(struct nand_chip *chip) +static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip) { - struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct mtd_info *mtd = nand_to_mtd(chip); int ret; @@ -1778,49 +1732,45 @@ static int stm32_fmc2_attach_chip(struct nand_chip *chip) * ECC sector size = 512 */ if (chip->ecc.mode != NAND_ECC_HW) { - dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n"); + dev_err(nfc->dev, "nand_ecc_mode is not well defined in the DT\n"); return -EINVAL; } - ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps, + ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps, mtd->oobsize - FMC2_BBM_LEN); if (ret) { - dev_err(fmc2->dev, "no valid ECC settings set\n"); + dev_err(nfc->dev, "no valid ECC settings set\n"); return ret; } if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) { - dev_err(fmc2->dev, "nand page size is not supported\n"); + dev_err(nfc->dev, "nand page size is not supported\n"); return -EINVAL; } if (chip->bbt_options & NAND_BBT_USE_FLASH) chip->bbt_options |= NAND_BBT_NO_OOB; - /* NAND callbacks setup */ - stm32_fmc2_nand_callbacks_setup(chip); + stm32_fmc2_nfc_nand_callbacks_setup(chip); - /* Define ECC layout */ - mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops); + mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops); - /* Configure bus width to 16-bit */ if (chip->options & NAND_BUSWIDTH_16) - stm32_fmc2_set_buswidth_16(fmc2, true); + stm32_fmc2_nfc_set_buswidth_16(nfc, true); return 0; } -static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = { - .attach_chip = stm32_fmc2_attach_chip, - .exec_op = stm32_fmc2_exec_op, - .setup_data_interface = stm32_fmc2_setup_interface, +static const struct 
nand_controller_ops stm32_fmc2_nfc_controller_ops = { + .attach_chip = stm32_fmc2_nfc_attach_chip, + .exec_op = stm32_fmc2_nfc_exec_op, + .setup_data_interface = stm32_fmc2_nfc_setup_interface, }; -/* FMC2 probe */ -static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, - struct device_node *dn) +static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, + struct device_node *dn) { - struct stm32_fmc2_nand *nand = &fmc2->nand; + struct stm32_fmc2_nand *nand = &nfc->nand; u32 cs; int ret, i; @@ -1829,29 +1779,29 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, nand->ncs /= sizeof(u32); if (!nand->ncs) { - dev_err(fmc2->dev, "invalid reg property size\n"); + dev_err(nfc->dev, "invalid reg property size\n"); return -EINVAL; } for (i = 0; i < nand->ncs; i++) { ret = of_property_read_u32_index(dn, "reg", i, &cs); if (ret) { - dev_err(fmc2->dev, "could not retrieve reg property: %d\n", + dev_err(nfc->dev, "could not retrieve reg property: %d\n", ret); return ret; } if (cs > FMC2_MAX_CE) { - dev_err(fmc2->dev, "invalid reg value: %d\n", cs); + dev_err(nfc->dev, "invalid reg value: %d\n", cs); return -EINVAL; } - if (fmc2->cs_assigned & BIT(cs)) { - dev_err(fmc2->dev, "cs already assigned: %d\n", cs); + if (nfc->cs_assigned & BIT(cs)) { + dev_err(nfc->dev, "cs already assigned: %d\n", cs); return -EINVAL; } - fmc2->cs_assigned |= BIT(cs); + nfc->cs_assigned |= BIT(cs); nand->cs_used[i] = cs; } @@ -1860,25 +1810,25 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, return 0; } -static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2) +static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc) { - struct device_node *dn = fmc2->dev->of_node; + struct device_node *dn = nfc->dev->of_node; struct device_node *child; int nchips = of_get_child_count(dn); int ret = 0; if (!nchips) { - dev_err(fmc2->dev, "NAND chip not defined\n"); + dev_err(nfc->dev, "NAND chip not defined\n"); return -EINVAL; } if (nchips > 1) { - dev_err(fmc2->dev, "too many NAND chips defined\n"); + dev_err(nfc->dev, "too many NAND chips defined\n"); return -EINVAL; } for_each_child_of_node(dn, child) { - ret = stm32_fmc2_parse_child(fmc2, child); + ret = stm32_fmc2_nfc_parse_child(nfc, child); if (ret < 0) { of_node_put(child); return ret; @@ -1888,106 +1838,108 @@ static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2) return ret; } -static int stm32_fmc2_probe(struct platform_device *pdev) +static int stm32_fmc2_nfc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct reset_control *rstc; - struct stm32_fmc2_nfc *fmc2; + struct stm32_fmc2_nfc *nfc; struct stm32_fmc2_nand *nand; struct resource *res; struct mtd_info *mtd; struct nand_chip *chip; int chip_cs, mem_region, ret, irq; - fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL); - if (!fmc2) + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) return -ENOMEM; - fmc2->dev = dev; - nand_controller_init(&fmc2->base); - fmc2->base.ops = &stm32_fmc2_nand_controller_ops; + nfc->dev = dev; + nand_controller_init(&nfc->base); + nfc->base.ops = &stm32_fmc2_nfc_controller_ops; - ret = stm32_fmc2_parse_dt(fmc2); + ret = stm32_fmc2_nfc_parse_dt(nfc); if (ret) return ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - fmc2->io_base = devm_ioremap_resource(dev, res); - if (IS_ERR(fmc2->io_base)) - return PTR_ERR(fmc2->io_base); + nfc->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->io_base)) + return PTR_ERR(nfc->io_base); - fmc2->io_phys_addr = res->start; + nfc->io_phys_addr 
= res->start; for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE; chip_cs++, mem_region += 3) { - if (!(fmc2->cs_assigned & BIT(chip_cs))) + if (!(nfc->cs_assigned & BIT(chip_cs))) continue; res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region); - fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res); - if (IS_ERR(fmc2->data_base[chip_cs])) - return PTR_ERR(fmc2->data_base[chip_cs]); + nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->data_base[chip_cs])) + return PTR_ERR(nfc->data_base[chip_cs]); - fmc2->data_phys_addr[chip_cs] = res->start; + nfc->data_phys_addr[chip_cs] = res->start; res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region + 1); - fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res); - if (IS_ERR(fmc2->cmd_base[chip_cs])) - return PTR_ERR(fmc2->cmd_base[chip_cs]); + nfc->cmd_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->cmd_base[chip_cs])) + return PTR_ERR(nfc->cmd_base[chip_cs]); res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region + 2); - fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res); - if (IS_ERR(fmc2->addr_base[chip_cs])) - return PTR_ERR(fmc2->addr_base[chip_cs]); + nfc->addr_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->addr_base[chip_cs])) + return PTR_ERR(nfc->addr_base[chip_cs]); } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; - ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0, - dev_name(dev), fmc2); + ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0, + dev_name(dev), nfc); if (ret) { dev_err(dev, "failed to request irq\n"); return ret; } - init_completion(&fmc2->complete); + init_completion(&nfc->complete); - fmc2->clk = devm_clk_get(dev, NULL); - if (IS_ERR(fmc2->clk)) - return PTR_ERR(fmc2->clk); + nfc->clk = devm_clk_get(dev, NULL); + if (IS_ERR(nfc->clk)) + return PTR_ERR(nfc->clk); - ret = clk_prepare_enable(fmc2->clk); + ret = clk_prepare_enable(nfc->clk); if (ret) { dev_err(dev, "can not enable the clock\n"); return ret; } rstc = devm_reset_control_get(dev, NULL); - if (!IS_ERR(rstc)) { + if (IS_ERR(rstc)) { + ret = PTR_ERR(rstc); + if (ret == -EPROBE_DEFER) + goto err_clk_disable; + } else { reset_control_assert(rstc); reset_control_deassert(rstc); } - /* DMA setup */ - ret = stm32_fmc2_dma_setup(fmc2); + ret = stm32_fmc2_nfc_dma_setup(nfc); if (ret) - return ret; + goto err_release_dma; - /* FMC2 init routine */ - stm32_fmc2_init(fmc2); + stm32_fmc2_nfc_init(nfc); - nand = &fmc2->nand; + nand = &nfc->nand; chip = &nand->chip; mtd = nand_to_mtd(chip); mtd->dev.parent = dev; - chip->controller = &fmc2->base; + chip->controller = &nfc->base; chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE | - NAND_USE_BOUNCE_BUFFER; + NAND_USES_DMA; /* Default ECC settings */ chip->ecc.mode = NAND_ECC_HW; @@ -1997,86 +1949,91 @@ static int stm32_fmc2_probe(struct platform_device *pdev) /* Scan to find existence of the device */ ret = nand_scan(chip, nand->ncs); if (ret) - goto err_scan; + goto err_release_dma; ret = mtd_device_register(mtd, NULL, 0); if (ret) - goto err_device_register; + goto err_nand_cleanup; - platform_set_drvdata(pdev, fmc2); + platform_set_drvdata(pdev, nfc); return 0; -err_device_register: +err_nand_cleanup: nand_cleanup(chip); -err_scan: - if (fmc2->dma_ecc_ch) - dma_release_channel(fmc2->dma_ecc_ch); - if (fmc2->dma_tx_ch) - dma_release_channel(fmc2->dma_tx_ch); - if (fmc2->dma_rx_ch) - dma_release_channel(fmc2->dma_rx_ch); +err_release_dma: + if (nfc->dma_ecc_ch) + 
dma_release_channel(nfc->dma_ecc_ch); + if (nfc->dma_tx_ch) + dma_release_channel(nfc->dma_tx_ch); + if (nfc->dma_rx_ch) + dma_release_channel(nfc->dma_rx_ch); - sg_free_table(&fmc2->dma_data_sg); - sg_free_table(&fmc2->dma_ecc_sg); + sg_free_table(&nfc->dma_data_sg); + sg_free_table(&nfc->dma_ecc_sg); - clk_disable_unprepare(fmc2->clk); +err_clk_disable: + clk_disable_unprepare(nfc->clk); return ret; } -static int stm32_fmc2_remove(struct platform_device *pdev) +static int stm32_fmc2_nfc_remove(struct platform_device *pdev) { - struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev); - struct stm32_fmc2_nand *nand = &fmc2->nand; + struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev); + struct stm32_fmc2_nand *nand = &nfc->nand; + struct nand_chip *chip = &nand->chip; + int ret; - nand_release(&nand->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); - if (fmc2->dma_ecc_ch) - dma_release_channel(fmc2->dma_ecc_ch); - if (fmc2->dma_tx_ch) - dma_release_channel(fmc2->dma_tx_ch); - if (fmc2->dma_rx_ch) - dma_release_channel(fmc2->dma_rx_ch); + if (nfc->dma_ecc_ch) + dma_release_channel(nfc->dma_ecc_ch); + if (nfc->dma_tx_ch) + dma_release_channel(nfc->dma_tx_ch); + if (nfc->dma_rx_ch) + dma_release_channel(nfc->dma_rx_ch); - sg_free_table(&fmc2->dma_data_sg); - sg_free_table(&fmc2->dma_ecc_sg); + sg_free_table(&nfc->dma_data_sg); + sg_free_table(&nfc->dma_ecc_sg); - clk_disable_unprepare(fmc2->clk); + clk_disable_unprepare(nfc->clk); return 0; } -static int __maybe_unused stm32_fmc2_suspend(struct device *dev) +static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev) { - struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev); + struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev); - clk_disable_unprepare(fmc2->clk); + clk_disable_unprepare(nfc->clk); pinctrl_pm_select_sleep_state(dev); return 0; } -static int __maybe_unused stm32_fmc2_resume(struct device *dev) +static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev) { - struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev); - struct stm32_fmc2_nand *nand = &fmc2->nand; + struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev); + struct stm32_fmc2_nand *nand = &nfc->nand; int chip_cs, ret; pinctrl_pm_select_default_state(dev); - ret = clk_prepare_enable(fmc2->clk); + ret = clk_prepare_enable(nfc->clk); if (ret) { dev_err(dev, "can not enable the clock\n"); return ret; } - stm32_fmc2_init(fmc2); + stm32_fmc2_nfc_init(nfc); for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) { - if (!(fmc2->cs_assigned & BIT(chip_cs))) + if (!(nfc->cs_assigned & BIT(chip_cs))) continue; nand_reset(&nand->chip, chip_cs); @@ -2085,27 +2042,27 @@ static int __maybe_unused stm32_fmc2_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend, - stm32_fmc2_resume); +static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend, + stm32_fmc2_nfc_resume); -static const struct of_device_id stm32_fmc2_match[] = { +static const struct of_device_id stm32_fmc2_nfc_match[] = { {.compatible = "st,stm32mp15-fmc2"}, {} }; -MODULE_DEVICE_TABLE(of, stm32_fmc2_match); +MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match); -static struct platform_driver stm32_fmc2_driver = { - .probe = stm32_fmc2_probe, - .remove = stm32_fmc2_remove, +static struct platform_driver stm32_fmc2_nfc_driver = { + .probe = stm32_fmc2_nfc_probe, + .remove = stm32_fmc2_nfc_remove, .driver = { - .name = "stm32_fmc2_nand", - .of_match_table = stm32_fmc2_match, - .pm = &stm32_fmc2_pm_ops, + .name = 
"stm32_fmc2_nfc", + .of_match_table = stm32_fmc2_nfc_match, + .pm = &stm32_fmc2_nfc_pm_ops, }, }; -module_platform_driver(stm32_fmc2_driver); +module_platform_driver(stm32_fmc2_nfc_driver); -MODULE_ALIAS("platform:stm32_fmc2_nand"); +MODULE_ALIAS("platform:stm32_fmc2_nfc"); MODULE_AUTHOR("Christophe Kerello "); -MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver"); +MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 5f3e40b79fb1..ffbc1651fadc 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1698,7 +1698,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma; ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma; ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma; - nand->options |= NAND_USE_BOUNCE_BUFFER; + nand->options |= NAND_USES_DMA; } else { ecc->read_page = sunxi_nfc_hw_ecc_read_page; ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; @@ -1907,7 +1907,8 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand, struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); const struct nand_op_parser *parser; - sunxi_nfc_select_chip(nand, op->cs); + if (!check_only) + sunxi_nfc_select_chip(nand, op->cs); if (sunxi_nand->sels[op->cs].rb >= 0) parser = &sunxi_nfc_op_parser; @@ -2003,7 +2004,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(nand); + nand_cleanup(nand); return ret; } @@ -2038,13 +2039,18 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc) static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) { struct sunxi_nand_chip *sunxi_nand; + struct nand_chip *chip; + int ret; while (!list_empty(&nfc->chips)) { sunxi_nand = list_first_entry(&nfc->chips, struct sunxi_nand_chip, node); - nand_release(&sunxi_nand->nand); - sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc); + chip = &sunxi_nand->nand; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + sunxi_nand_ecc_cleanup(&chip->ecc); list_del(&sunxi_nand->node); } } diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index 9acf2de37ee0..246871e01027 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -568,7 +568,7 @@ static int chip_init(struct device *dev, struct device_node *np) chip->legacy.select_chip = tango_select_chip; chip->legacy.cmd_ctrl = tango_cmd_ctrl; chip->legacy.dev_ready = tango_dev_ready; - chip->options = NAND_USE_BOUNCE_BUFFER | + chip->options = NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE | NAND_WAIT_TCCS; chip->controller = &nfc->hw; @@ -600,14 +600,19 @@ static int chip_init(struct device *dev, struct device_node *np) static int tango_nand_remove(struct platform_device *pdev) { - int cs; struct tango_nfc *nfc = platform_get_drvdata(pdev); + struct nand_chip *chip; + int cs, ret; dma_release_channel(nfc->chan); for (cs = 0; cs < MAX_CS; ++cs) { - if (nfc->chips[cs]) - nand_release(&nfc->chips[cs]->nand_chip); + if (nfc->chips[cs]) { + chip = &nfc->chips[cs]->nand_chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + } } return 0; diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index 3cc9a4c41443..f9d046b2cd3b 100644 --- 
a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -467,7 +467,9 @@ static int tegra_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { - tegra_nand_select_target(chip, op->cs); + if (!check_only) + tegra_nand_select_target(chip, op->cs); + return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op, check_only); } @@ -1113,7 +1115,7 @@ static int tegra_nand_chips_init(struct device *dev, if (!mtd->name) mtd->name = "tegra_nand"; - chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER; + chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA; ret = nand_scan(chip, 1); if (ret) diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index db030f1701ee..843a8683b737 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev) if (!retval) return retval; - nand_release(nand_chip); + nand_cleanup(nand_chip); err_irq: tmio_hw_stop(dev, tmio); @@ -458,8 +458,12 @@ static int tmio_probe(struct platform_device *dev) static int tmio_remove(struct platform_device *dev) { struct tmio_nand *tmio = platform_get_drvdata(dev); + struct nand_chip *chip = &tmio->chip; + int ret; - nand_release(&tmio->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); tmio_hw_stop(dev, tmio); return 0; } diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c index 2642d5bb3241..47d966871445 100644 --- a/drivers/mtd/nand/raw/txx9ndfmc.c +++ b/drivers/mtd/nand/raw/txx9ndfmc.c @@ -371,7 +371,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev) static int __exit txx9ndfmc_remove(struct platform_device *dev) { struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev); - int i; + int ret, i; if (!drvdata) return 0; @@ -385,7 +385,9 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev) chip = mtd_to_nand(mtd); txx9_priv = nand_get_controller_data(chip); - nand_release(chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); kfree(txx9_priv->mtdname); kfree(txx9_priv); } diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c index 6b399a75f9ae..7248c5901183 100644 --- a/drivers/mtd/nand/raw/vf610_nfc.c +++ b/drivers/mtd/nand/raw/vf610_nfc.c @@ -502,7 +502,9 @@ static int vf610_nfc_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { - vf610_nfc_select_target(chip, op->cs); + if (!check_only) + vf610_nfc_select_target(chip, op->cs); + return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op, check_only); } @@ -915,8 +917,12 @@ static int vf610_nfc_probe(struct platform_device *pdev) static int vf610_nfc_remove(struct platform_device *pdev) { struct vf610_nfc *nfc = platform_get_drvdata(pdev); + struct nand_chip *chip = &nfc->chip; + int ret; - nand_release(&nfc->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); clk_disable_unprepare(nfc->clk); return 0; } diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index 834f794816a9..94bfba994326 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -210,7 +210,7 @@ static int xway_nand_probe(struct platform_device *pdev) err = mtd_device_register(mtd, NULL, 0); if (err) - nand_release(&data->chip); + nand_cleanup(&data->chip); return err; } @@ -221,8 +221,12 @@ static int xway_nand_probe(struct 
platform_device *pdev) static int xway_nand_remove(struct platform_device *pdev) { struct xway_nand_data *data = platform_get_drvdata(pdev); + struct nand_chip *chip = &data->chip; + int ret; - nand_release(&data->chip); + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); return 0; } diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c index c86f2db8c882..af712f1519c5 100644 --- a/drivers/mtd/parsers/cmdlinepart.c +++ b/drivers/mtd/parsers/cmdlinepart.c @@ -9,7 +9,7 @@ * * mtdparts=<mtddef>[;<mtddef] * <mtddef> := <mtd-id>:<partdef>[,<partdef>] - * <partdef> := <size>[@<offset>][<name>][ro][lk] + * <partdef> := <size>[@<offset>][<name>][ro][lk][slc] * <mtd-id> := unique name used in mapping driver/device (mtd->name) * <size> := standard linux memsize OR "-" to denote all remaining space * size is automatically truncated at end of device @@ -92,7 +92,7 @@ static struct mtd_partition * newpart(char *s, int name_len; unsigned char *extra_mem; char delim; - unsigned int mask_flags; + unsigned int mask_flags, add_flags; /* fetch the partition size */ if (*s == '-') { @@ -109,6 +109,7 @@ static struct mtd_partition * newpart(char *s, /* fetch partition name and flags */ mask_flags = 0; /* this is going to be a regular partition */ + add_flags = 0; delim = 0; /* check for offset */ @@ -152,6 +153,12 @@ static struct mtd_partition * newpart(char *s, s += 2; } + /* if slc is found use emulated SLC mode on this partition*/ + if (!strncmp(s, "slc", 3)) { + add_flags |= MTD_SLC_ON_MLC_EMULATION; + s += 3; + } + /* test if more partitions are following */ if (*s == ',') { if (size == SIZE_REMAINING) { @@ -184,6 +191,7 @@ static struct mtd_partition * newpart(char *s, parts[this_part].size = size; parts[this_part].offset = offset; parts[this_part].mask_flags = mask_flags; + parts[this_part].add_flags = add_flags; if (name) strlcpy(extra_mem, name, name_len + 1); else diff --git a/drivers/mtd/parsers/ofpart.c b/drivers/mtd/parsers/ofpart.c index 3caeabf27987..daf507c123e6 100644 --- a/drivers/mtd/parsers/ofpart.c +++ b/drivers/mtd/parsers/ofpart.c @@ -117,6 +117,9 @@ static int parse_fixed_partitions(struct mtd_info *master, if (of_get_property(pp, "lock", &len)) parts[i].mask_flags |= MTD_POWERUP_LOCK; + if (of_property_read_bool(pp, "slc-mode")) + parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION; + i++; } diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 12c02342149c..e85b04e9716b 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -867,8 +867,11 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes. * MLC NAND is different and needs special care, otherwise UBI or UBIFS * will die soon and you will lose all your data. + * Relax this rule if the partition we're attaching to operates in SLC + * mode. 
*/ - if (mtd->type == MTD_MLCNANDFLASH) { + if (mtd->type == MTD_MLCNANDFLASH && + !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) { pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n", mtd->index); return -EINVAL; diff --git a/include/linux/bch.h b/include/linux/bch.h index aa765af85c38..85fdce83d4e2 100644 --- a/include/linux/bch.h +++ b/include/linux/bch.h @@ -33,6 +33,7 @@ * @cache: log-based polynomial representation buffer * @elp: error locator polynomial * @poly_2t: temporary polynomials of degree 2t + * @swap_bits: swap bits within data and syndrome bytes */ struct bch_control { unsigned int m; @@ -51,16 +52,18 @@ struct bch_control { int *cache; struct gf_poly *elp; struct gf_poly *poly_2t[4]; + bool swap_bits; }; -struct bch_control *init_bch(int m, int t, unsigned int prim_poly); +struct bch_control *bch_init(int m, int t, unsigned int prim_poly, + bool swap_bits); -void free_bch(struct bch_control *bch); +void bch_free(struct bch_control *bch); -void encode_bch(struct bch_control *bch, const uint8_t *data, +void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc); -int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, +int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc); diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 886e30441c90..d890805f5494 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -98,7 +98,7 @@ struct nand_bbt_descr { /* * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr - * was allocated dynamicaly and must be freed in nand_release(). Has no meaning + * was allocated dynamicaly and must be freed in nand_cleanup(). Has no meaning * in nand_chip.bbt_options. */ #define NAND_BBT_DYNAMICSTRUCT 0x80000000 diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 2d1f4a61f4ac..157357ec1441 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -200,6 +200,8 @@ struct mtd_debug_info { * * @node: list node used to add an MTD partition to the parent partition list * @offset: offset of the partition relatively to the parent offset + * @size: partition size. Should be equal to mtd->size unless + * MTD_SLC_ON_MLC_EMULATION is set * @flags: original flags (before the mtdpart logic decided to tweak them based * on flash constraints, like eraseblock/pagesize alignment) * @@ -209,6 +211,7 @@ struct mtd_debug_info { struct mtd_part { struct list_head node; u64 offset; + u64 size; u32 flags; }; @@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) static inline int mtd_wunit_per_eb(struct mtd_info *mtd) { - return mtd->erasesize / mtd->writesize; + struct mtd_info *master = mtd_get_master(mtd); + + return master->erasesize / mtd->writesize; } static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs) diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index e545c050d3e8..b74a539ec581 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -37,6 +37,7 @@ * master MTD flag set for the corresponding MTD partition. * For example, to force a read-only partition, simply adding * MTD_WRITEABLE to the mask_flags will do the trick. + * add_flags: contains flags to add to the parent flags * * Note: writeable partitions require their size and offset be * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). 
@@ -48,6 +49,7 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + uint32_t add_flags; /* flags to add to the partition */ struct device_node *of_node; }; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 1e76196f9829..65b1c1c18b41 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -83,14 +83,14 @@ struct nand_chip; /* * Constants for ECC_MODES */ -typedef enum { +enum nand_ecc_mode { + NAND_ECC_INVALID, NAND_ECC_NONE, NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, - NAND_ECC_HW_OOB_FIRST, NAND_ECC_ON_DIE, -} nand_ecc_modes_t; +}; enum nand_ecc_algo { NAND_ECC_UNKNOWN, @@ -118,86 +118,74 @@ enum nand_ecc_algo { #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) #define NAND_ECC_MAXIMIZE BIT(1) +/* + * Option constants for bizarre disfunctionality and real + * features. + */ + +/* Buswidth is 16 bit */ +#define NAND_BUSWIDTH_16 BIT(1) + /* * When using software implementation of Hamming, we can specify which byte * ordering should be used. */ #define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2) -/* - * Option constants for bizarre disfunctionality and real - * features. - */ -/* Buswidth is 16 bit */ -#define NAND_BUSWIDTH_16 0x00000002 /* Chip has cache program function */ -#define NAND_CACHEPRG 0x00000008 +#define NAND_CACHEPRG BIT(3) +/* Options valid for Samsung large page devices */ +#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG + /* * Chip requires ready check on read (for auto-incremented sequential read). * True only for small page devices; large page devices do not support * autoincrement. */ -#define NAND_NEED_READRDY 0x00000100 +#define NAND_NEED_READRDY BIT(8) /* Chip does not allow subpage writes */ -#define NAND_NO_SUBPAGE_WRITE 0x00000200 +#define NAND_NO_SUBPAGE_WRITE BIT(9) /* Device is one of 'new' xD cards that expose fake nand command set */ -#define NAND_BROKEN_XD 0x00000400 +#define NAND_BROKEN_XD BIT(10) /* Device behaves just like nand, but is readonly */ -#define NAND_ROM 0x00000800 +#define NAND_ROM BIT(11) /* Device supports subpage reads */ -#define NAND_SUBPAGE_READ 0x00001000 +#define NAND_SUBPAGE_READ BIT(12) +/* Macros to identify the above */ +#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) /* * Some MLC NANDs need data scrambling to limit bitflips caused by repeated * patterns. */ -#define NAND_NEED_SCRAMBLING 0x00002000 +#define NAND_NEED_SCRAMBLING BIT(13) /* Device needs 3rd row address cycle */ -#define NAND_ROW_ADDR_3 0x00004000 - -/* Options valid for Samsung large page devices */ -#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG - -/* Macros to identify the above */ -#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) - -/* - * There are different places where the manufacturer stores the factory bad - * block markers. - * - * Position within the block: Each of these pages needs to be checked for a - * bad block marking pattern. - */ -#define NAND_BBM_FIRSTPAGE 0x01000000 -#define NAND_BBM_SECONDPAGE 0x02000000 -#define NAND_BBM_LASTPAGE 0x04000000 - -/* Position within the OOB data of the page */ -#define NAND_BBM_POS_SMALL 5 -#define NAND_BBM_POS_LARGE 0 +#define NAND_ROW_ADDR_3 BIT(14) /* Non chip related options */ /* This option skips the bbt scan during initialization. 
*/ -#define NAND_SKIP_BBTSCAN 0x00010000 +#define NAND_SKIP_BBTSCAN BIT(16) /* Chip may not exist, so silence any errors in scan */ -#define NAND_SCAN_SILENT_NODEV 0x00040000 +#define NAND_SCAN_SILENT_NODEV BIT(18) + /* * Autodetect nand buswidth with readid/onfi. * This suppose the driver will configure the hardware in 8 bits mode * when calling nand_scan_ident, and update its configuration * before calling nand_scan_tail. */ -#define NAND_BUSWIDTH_AUTO 0x00080000 +#define NAND_BUSWIDTH_AUTO BIT(19) + /* * This option could be defined by controller drivers to protect against * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers */ -#define NAND_USE_BOUNCE_BUFFER 0x00100000 +#define NAND_USES_DMA BIT(20) /* * In case your controller is implementing ->legacy.cmd_ctrl() and is relying @@ -207,26 +195,49 @@ enum nand_ecc_algo { * If your controller already takes care of this delay, you don't need to set * this flag. */ -#define NAND_WAIT_TCCS 0x00200000 +#define NAND_WAIT_TCCS BIT(21) /* * Whether the NAND chip is a boot medium. Drivers might use this information * to select ECC algorithms supported by the boot ROM or similar restrictions. */ -#define NAND_IS_BOOT_MEDIUM 0x00400000 +#define NAND_IS_BOOT_MEDIUM BIT(22) /* * Do not try to tweak the timings at runtime. This is needed when the * controller initializes the timings on itself or when it relies on * configuration done by the bootloader. */ -#define NAND_KEEP_TIMINGS 0x00800000 +#define NAND_KEEP_TIMINGS BIT(23) + +/* + * There are different places where the manufacturer stores the factory bad + * block markers. + * + * Position within the block: Each of these pages needs to be checked for a + * bad block marking pattern. + */ +#define NAND_BBM_FIRSTPAGE BIT(24) +#define NAND_BBM_SECONDPAGE BIT(25) +#define NAND_BBM_LASTPAGE BIT(26) + +/* + * Some controllers with pipelined ECC engines override the BBM marker with + * data or ECC bytes, thus making bad block detection through bad block marker + * impossible. Let's flag those chips so the core knows it shouldn't check the + * BBM and consider all blocks good. + */ +#define NAND_NO_BBM_QUIRK BIT(27) /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 #define NAND_CI_CELLTYPE_MSK 0x0C #define NAND_CI_CELLTYPE_SHIFT 2 +/* Position within the OOB data of the page */ +#define NAND_BBM_POS_SMALL 5 +#define NAND_BBM_POS_LARGE 0 + /** * struct nand_parameters - NAND generic parameters from the parameter page * @model: Model name @@ -351,7 +362,7 @@ static const struct nand_ecc_caps __name = { \ * @write_oob: function to write chip OOB data */ struct nand_ecc_ctrl { - nand_ecc_modes_t mode; + enum nand_ecc_mode mode; enum nand_ecc_algo algo; int steps; int size; @@ -491,13 +502,17 @@ enum nand_data_interface_type { /** * struct nand_data_interface - NAND interface timing * @type: type of the timing - * @timings: The timing, type according to @type + * @timings: The timing information + * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. 
*/ struct nand_data_interface { enum nand_data_interface_type type; - union { - struct nand_sdr_timings sdr; + struct nand_timings { + unsigned int mode; + union { + struct nand_sdr_timings sdr; + }; } timings; }; @@ -694,6 +709,7 @@ struct nand_op_instr { /** * struct nand_subop - a sub operation + * @cs: the CS line to select for this NAND sub-operation * @instrs: array of instructions * @ninstrs: length of the @instrs array * @first_instr_start_off: offset to start from for the first instruction @@ -709,6 +725,7 @@ struct nand_op_instr { * controller driver. */ struct nand_subop { + unsigned int cs; const struct nand_op_instr *instrs; unsigned int ninstrs; unsigned int first_instr_start_off; @@ -1321,13 +1338,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page); int nand_get_set_features_notsupp(struct nand_chip *chip, int addr, u8 *subfeature_param); -/* Default read_page_raw implementation */ +/* read_page_raw implementations */ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, int page); +int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf, + int oob_required, int page); -/* Default write_page_raw implementation */ +/* write_page_raw implementations */ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page); +int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf, + int oob_required, int page); /* Reset and initialize a NAND device */ int nand_reset(struct nand_chip *chip, int chipnr); @@ -1356,7 +1377,7 @@ int nand_change_write_column_op(struct nand_chip *chip, unsigned int offset_in_page, const void *buf, unsigned int len, bool force_8bit); int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, - bool force_8bit); + bool force_8bit, bool check_only); int nand_write_data_op(struct nand_chip *chip, const void *buf, unsigned int len, bool force_8bit); @@ -1377,8 +1398,6 @@ void nand_wait_ready(struct nand_chip *chip); * sucessful nand_scan(). */ void nand_cleanup(struct nand_chip *chip); -/* Unregister the MTD device and calls nand_cleanup() */ -void nand_release(struct nand_chip *chip); /* * External helper for controller drivers that have to implement the WAITRDY @@ -1393,6 +1412,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, void nand_select_target(struct nand_chip *chip, unsigned int cs); void nand_deselect_target(struct nand_chip *chip); +/* Bitops */ +void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, + unsigned int src_off, unsigned int nbits); + /** * nand_get_data_buf() - Get the internal page buffer * @chip: NAND chip object diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h index 08e639e047e5..03e92c71b3fa 100644 --- a/include/linux/platform_data/mtd-davinci.h +++ b/include/linux/platform_data/mtd-davinci.h @@ -68,7 +68,7 @@ struct davinci_nand_pdata { /* platform_data */ * Newer ones also support 4-bit ECC, but are awkward * using it with large page chips. */ - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; u8 ecc_bits; /* e.g. 
NAND_BUSWIDTH_16 */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index deb849bcf0ec..08675b16f9e1 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -49,7 +49,7 @@ struct s3c2410_platform_nand { unsigned int ignore_unset_ecc:1; - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; int nr_sets; struct s3c2410_nand_set *sets; diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index 47ffe3208c27..4b48fbf7d343 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -104,6 +104,7 @@ struct mtd_write_req { #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ #define MTD_NO_ERASE 0x1000 /* No erase necessary */ #define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */ +#define MTD_SLC_ON_MLC_EMULATION 0x4000 /* Emulate SLC behavior on MLC NANDs */ /* Some common devices / combinations of capabilities */ #define MTD_CAP_ROM 0 diff --git a/lib/bch.c b/lib/bch.c index 052d3fb753a0..7c031ee8b93b 100644 --- a/lib/bch.c +++ b/lib/bch.c @@ -23,15 +23,15 @@ * This library provides runtime configurable encoding/decoding of binary * Bose-Chaudhuri-Hocquenghem (BCH) codes. * - * Call init_bch to get a pointer to a newly allocated bch_control structure for + * Call bch_init to get a pointer to a newly allocated bch_control structure for * the given m (Galois field order), t (error correction capability) and * (optional) primitive polynomial parameters. * - * Call encode_bch to compute and store ecc parity bytes to a given buffer. - * Call decode_bch to detect and locate errors in received data. + * Call bch_encode to compute and store ecc parity bytes to a given buffer. + * Call bch_decode to detect and locate errors in received data. * * On systems supporting hw BCH features, intermediate results may be provided - * to decode_bch in order to skip certain steps. See decode_bch() documentation + * to bch_decode in order to skip certain steps. See bch_decode() documentation * for details. 
* * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of @@ -114,10 +114,53 @@ struct gf_poly_deg1 { unsigned int c[2]; }; +static u8 swap_bits_table[] = { + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +}; + +static u8 swap_bits(struct bch_control *bch, u8 in) +{ + if (!bch->swap_bits) + return in; + + return swap_bits_table[in]; +} + /* - * same as encode_bch(), but process input data one byte at a time + * same as bch_encode(), but process input data one byte at a time */ -static void encode_bch_unaligned(struct bch_control *bch, +static void bch_encode_unaligned(struct bch_control *bch, const unsigned char *data, unsigned int len, uint32_t *ecc) { @@ -126,7 +169,9 @@ static void encode_bch_unaligned(struct bch_control *bch, const int l = BCH_ECC_WORDS(bch)-1; while (len--) { - p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff); + u8 tmp = swap_bits(bch, *data++); + + p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff); for (i = 0; i < l; i++) ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++); @@ -145,10 +190,16 @@ static void load_ecc8(struct bch_control *bch, uint32_t *dst, unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++, src += 4) - dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3]; + dst[i] = ((u32)swap_bits(bch, src[0]) << 24) | + ((u32)swap_bits(bch, src[1]) << 16) | + ((u32)swap_bits(bch, src[2]) << 8) | + swap_bits(bch, src[3]); memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords); - dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3]; + dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) | + ((u32)swap_bits(bch, pad[1]) << 16) | + ((u32)swap_bits(bch, pad[2]) << 8) | + swap_bits(bch, pad[3]); } /* @@ -161,20 +212,20 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst, unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++) { - *dst++ = (src[i] >> 24); - *dst++ = (src[i] >> 16) & 0xff; - *dst++ = (src[i] >> 8) & 0xff; - *dst++ = (src[i] >> 0) & 0xff; + 
*dst++ = swap_bits(bch, src[i] >> 24); + *dst++ = swap_bits(bch, src[i] >> 16); + *dst++ = swap_bits(bch, src[i] >> 8); + *dst++ = swap_bits(bch, src[i]); } - pad[0] = (src[nwords] >> 24); - pad[1] = (src[nwords] >> 16) & 0xff; - pad[2] = (src[nwords] >> 8) & 0xff; - pad[3] = (src[nwords] >> 0) & 0xff; + pad[0] = swap_bits(bch, src[nwords] >> 24); + pad[1] = swap_bits(bch, src[nwords] >> 16); + pad[2] = swap_bits(bch, src[nwords] >> 8); + pad[3] = swap_bits(bch, src[nwords]); memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords); } /** - * encode_bch - calculate BCH ecc parity of data + * bch_encode - calculate BCH ecc parity of data * @bch: BCH control structure * @data: data to encode * @len: data length in bytes @@ -187,7 +238,7 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst, * The exact number of computed ecc parity bits is given by member @ecc_bits of * @bch; it may be less than m*t for large values of t. */ -void encode_bch(struct bch_control *bch, const uint8_t *data, +void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc) { const unsigned int l = BCH_ECC_WORDS(bch)-1; @@ -215,7 +266,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, m = ((unsigned long)data) & 3; if (m) { mlen = (len < (4-m)) ? len : 4-m; - encode_bch_unaligned(bch, data, mlen, bch->ecc_buf); + bch_encode_unaligned(bch, data, mlen, bch->ecc_buf); data += mlen; len -= mlen; } @@ -240,7 +291,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, */ while (mlen--) { /* input data is read in big-endian format */ - w = r[0]^cpu_to_be32(*pdata++); + w = cpu_to_be32(*pdata++); + if (bch->swap_bits) + w = (u32)swap_bits(bch, w) | + ((u32)swap_bits(bch, w >> 8) << 8) | + ((u32)swap_bits(bch, w >> 16) << 16) | + ((u32)swap_bits(bch, w >> 24) << 24); + w ^= r[0]; p0 = tab0 + (l+1)*((w >> 0) & 0xff); p1 = tab1 + (l+1)*((w >> 8) & 0xff); p2 = tab2 + (l+1)*((w >> 16) & 0xff); @@ -255,13 +312,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data, /* process last unaligned bytes */ if (len) - encode_bch_unaligned(bch, data, len, bch->ecc_buf); + bch_encode_unaligned(bch, data, len, bch->ecc_buf); /* store ecc parity bytes into original parity buffer */ if (ecc) store_ecc8(bch, ecc, bch->ecc_buf); } -EXPORT_SYMBOL_GPL(encode_bch); +EXPORT_SYMBOL_GPL(bch_encode); static inline int modulo(struct bch_control *bch, unsigned int v) { @@ -952,7 +1009,7 @@ static int chien_search(struct bch_control *bch, unsigned int len, #endif /* USE_CHIEN_SEARCH */ /** - * decode_bch - decode received codeword and find bit error locations + * bch_decode - decode received codeword and find bit error locations * @bch: BCH control structure * @data: received data, ignored if @calc_ecc is provided * @len: data length in bytes, must always be provided @@ -966,22 +1023,22 @@ static int chien_search(struct bch_control *bch, unsigned int len, * invalid parameters were provided * * Depending on the available hw BCH support and the need to compute @calc_ecc - * separately (using encode_bch()), this function should be called with one of + * separately (using bch_encode()), this function should be called with one of * the following parameter configurations - * * by providing @data and @recv_ecc only: - * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) + * bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) * * by providing @recv_ecc and @calc_ecc: - * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc) + * bch_decode(@bch, NULL, @len, 
@recv_ecc, @calc_ecc, NULL, @errloc) * * by providing ecc = recv_ecc XOR calc_ecc: - * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc) + * bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc) * * by providing syndrome results @syn: - * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc) + * bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc) * - * Once decode_bch() has successfully returned with a positive value, error + * Once bch_decode() has successfully returned with a positive value, error * locations returned in array @errloc should be interpreted as follows - * * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for @@ -993,7 +1050,7 @@ static int chien_search(struct bch_control *bch, unsigned int len, * Note that this function does not perform any data correction by itself, it * merely indicates error locations. */ -int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, +int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc) { @@ -1012,7 +1069,7 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, /* compute received data ecc into an internal buffer */ if (!data || !recv_ecc) return -EINVAL; - encode_bch(bch, data, len, NULL); + bch_encode(bch, data, len, NULL); } else { /* load provided calculated ecc */ load_ecc8(bch, bch->ecc_buf, calc_ecc); @@ -1048,12 +1105,14 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, break; } errloc[i] = nbits-1-errloc[i]; - errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7)); + if (!bch->swap_bits) + errloc[i] = (errloc[i] & ~7) | + (7-(errloc[i] & 7)); } } return (err >= 0) ? err : -EBADMSG; } -EXPORT_SYMBOL_GPL(decode_bch); +EXPORT_SYMBOL_GPL(bch_decode); /* * generate Galois field lookup tables @@ -1236,27 +1295,29 @@ static uint32_t *compute_generator_polynomial(struct bch_control *bch) } /** - * init_bch - initialize a BCH encoder/decoder + * bch_init - initialize a BCH encoder/decoder * @m: Galois field order, should be in the range 5-15 * @t: maximum error correction capability, in bits * @prim_poly: user-provided primitive polynomial (or 0 to use default) + * @swap_bits: swap bits within data and syndrome bytes * * Returns: * a newly allocated BCH control structure if successful, NULL otherwise * * This initialization can take some time, as lookup tables are built for fast * encoding/decoding; make sure not to call this function from a time critical - * path. Usually, init_bch() should be called on module/driver init and - * free_bch() should be called to release memory on exit. + * path. Usually, bch_init() should be called on module/driver init and + * bch_free() should be called to release memory on exit. * * You may provide your own primitive polynomial of degree @m in argument - * @prim_poly, or let init_bch() use its default polynomial. + * @prim_poly, or let bch_init() use its default polynomial. * - * Once init_bch() has successfully returned a pointer to a newly allocated + * Once bch_init() has successfully returned a pointer to a newly allocated * BCH control structure, ecc length in bytes is given by member @ecc_bytes of * the structure. 
*/ -struct bch_control *init_bch(int m, int t, unsigned int prim_poly) +struct bch_control *bch_init(int m, int t, unsigned int prim_poly, + bool swap_bits) { int err = 0; unsigned int i, words; @@ -1321,6 +1382,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly) bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err); bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err); bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err); + bch->swap_bits = swap_bits; for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err); @@ -1347,16 +1409,16 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly) return bch; fail: - free_bch(bch); + bch_free(bch); return NULL; } -EXPORT_SYMBOL_GPL(init_bch); +EXPORT_SYMBOL_GPL(bch_init); /** - * free_bch - free the BCH control structure + * bch_free - free the BCH control structure * @bch: BCH control structure to release */ -void free_bch(struct bch_control *bch) +void bch_free(struct bch_control *bch) { unsigned int i; @@ -1377,7 +1439,7 @@ void free_bch(struct bch_control *bch) kfree(bch); } } -EXPORT_SYMBOL_GPL(free_bch); +EXPORT_SYMBOL_GPL(bch_free); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ivan Djelic ");
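The driver conversions above all follow the same pattern. A minimal sketch of the teardown sequence that replaces the removed nand_release() helper in the converted ->remove() paths (the MTD device must be unregistered before the NAND resources are freed); "chip" stands for whichever driver-private struct nand_chip instance is being torn down:

static void example_nand_teardown(struct nand_chip *chip)
{
        int ret;

        /* Unregister the MTD device first, then release the NAND resources */
        ret = mtd_device_unregister(nand_to_mtd(chip));
        WARN_ON(ret);
        nand_cleanup(chip);
}

Likewise, a hedged sketch of how a caller might use the renamed lib/bch API together with the new swap_bits parameter. The field order (13), correction strength (4), default primitive polynomial (0) and the choice of swap_bits = true are hypothetical example values, not taken from any driver in this series; the errloc handling follows the bch_decode() kernel-doc above:

#include <linux/bch.h>
#include <linux/bitops.h>
#include <linux/types.h>

static int example_bch_correct(u8 *data, unsigned int len, u8 *read_ecc)
{
        struct bch_control *bch;
        unsigned int errloc[4];         /* at most t = 4 error locations */
        int i, nerr;

        /* swap_bits = true reverses the bit order within data and ECC bytes */
        bch = bch_init(13, 4, 0, true);
        if (!bch)
                return -EINVAL;

        /* Detect errors from the data and the ECC bytes read back from flash */
        nerr = bch_decode(bch, data, len, read_ecc, NULL, NULL, errloc);
        for (i = 0; i < nerr; i++) {
                /* Locations at or beyond 8 * len point into the ECC bytes */
                if (errloc[i] < 8 * len)
                        data[errloc[i] >> 3] ^= BIT(errloc[i] & 7);
        }

        bch_free(bch);
        return nerr;    /* corrected bitflips, or -EBADMSG/-EINVAL on failure */
}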