Merge tag 'mtd/for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux

Pull MTD updates from Miquel Raynal:
 "A big core MTD change is the introduction of a new class to always
  register a master device. This addresses a problem that has been
  there forever: the "master" device was not always present, depending
  on heuristics such as the presence of fixed partitions and the
  absence of a Kconfig symbol to force its presence. This was a problem
  for runtime PM operations, which might not have the "master" device
  available in all situations.

  The SPI NAND subsystem has seen the introduction of DTR operations
  (the equivalent of DDR transfers), which involved quite a few
  preparation patches for clarifying macro names.

  In the raw NAND subsystem, the brcmnand driver has been "fixed" for
  legacy SoCs with an update of the ->exec_op() hook; there has been
  the introduction of a new controller driver named Loongson-1, and the
  Qualcomm driver has received quite a few misc fixes as well as a new
  compatible.

  Finally, Macronix SPI NOR entries have been cleaned up and some SFDP
  table fixups for Macronix MX25L3255E have been merged.

  Aside from this, there is the usual load of misc improvements, fixes,
  and YAML conversions"

* tag 'mtd/for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux: (42 commits)
  mtd: rawnand: brcmnand: legacy exec_op implementation
  mtd: rawnand: sunxi: Add randomizer configuration in sunxi_nfc_hw_ecc_write_chunk
  mtd: nand: brcmnand: fix NAND timeout when accessing eMMC
  mtd: nand: sunxi: Add randomizer configuration before randomizer enable
  mtd: spinand: esmt: fix id code for F50D1G41LB
  mtd: rawnand: brcmnand: remove unused parameters
  mtd: core: always create master device
  mtd: rawnand: loongson1: Fix inconsistent refcounting in ls1x_nand_chip_init()
  mtd: rawnand: loongson1: Fix error code in ls1x_nand_dma_transfer()
  mtd: rawnand: qcom: Fix read len for onfi param page
  mtd: rawnand: qcom: Fix last codeword read in qcom_param_page_type_exec()
  mtd: rawnand: qcom: Pass 18 bit offset from NANDc base to BAM base
  dt-bindings: mtd: qcom,nandc: Document the SDX75 NAND controller
  mtd: bcm47xxnflash: Add error handling for bcm47xxnflash_ops_bcm4706_ctl_cmd()
  mtd: rawnand: Use non-hybrid PCI devres API
  mtd: nand: ecc-mxic: Fix use of uninitialized variable ret
  mtd: spinand: winbond: Add support for W35N02JW and W35N04JW chips
  mtd: spinand: winbond: Add octal support
  mtd: spinand: winbond: Add support for W35N01JW in single mode
  mtd: spinand: winbond: Rename DTR variants
  ...
Linus Torvalds 2025-06-02 11:08:17 -07:00
commit 4c3b7df784
38 changed files with 1774 additions and 414 deletions
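
For context on the headline core change (the "always create master device" commit in the list above, implemented in the mtdcore.c hunks below): every flash device now has a registered top-level struct device regardless of partitioning. A minimal sketch of why that matters for runtime PM; the consumer function is hypothetical, while mtd_get_master() and pm_runtime_resume_and_get() are real kernel APIs:

#include <linux/mtd/mtd.h>
#include <linux/pm_runtime.h>

/* Hypothetical consumer: any MTD handle, partitions included, can be
 * walked up to a master whose device is now guaranteed to exist. */
static int example_wake_flash_controller(struct mtd_info *mtd)
{
        struct mtd_info *master = mtd_get_master(mtd);

        /*
         * master->dev.parent is the physical controller device. Before
         * this series the master itself was only registered under
         * certain heuristics (fixed partitions, a Kconfig symbol), so
         * there was not always a device here to anchor runtime PM on.
         */
        return pm_runtime_resume_and_get(master->dev.parent);
}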


@@ -0,0 +1,89 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/fsl,vf610-nfc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale's NAND flash controller (NFC)
description:
This variant of the Freescale NAND flash controller (NFC) can be found on
Vybrid (vf610), MPC5125, MCF54418 and Kinetis K70.
maintainers:
- Frank Li <Frank.Li@nxp.com>
properties:
compatible:
enum:
- fsl,vf610-nfc
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
clock-names:
items:
- const: nfc
patternProperties:
"^nand@[a-f0-9]$":
type: object
$ref: raw-nand-chip.yaml
properties:
compatible:
const: fsl,vf610-nfc-nandcs
reg:
const: 0
nand-ecc-strength:
enum: [24, 32]
nand-ecc-step-size:
const: 2048
unevaluatedProperties: false
required:
- compatible
- reg
- interrupts
allOf:
- $ref: nand-controller.yaml
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/vf610-clock.h>
nand-controller@400e0000 {
compatible = "fsl,vf610-nfc";
reg = <0x400e0000 0x4000>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_NFC>;
clock-names = "nfc";
assigned-clocks = <&clks VF610_CLK_NFC>;
assigned-clock-rates = <33000000>;
nand@0 {
compatible = "fsl,vf610-nfc-nandcs";
reg = <0>;
nand-bus-width = <8>;
nand-ecc-mode = "hw";
nand-ecc-strength = <32>;
nand-ecc-step-size = <2048>;
nand-on-flash-bbt;
};
};


@@ -0,0 +1,72 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/loongson,ls1b-nand-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Loongson-1 NAND Controller
maintainers:
- Keguang Zhang <keguang.zhang@gmail.com>
description:
The Loongson-1 NAND controller abstracts all supported operations,
meaning it does not support low-level access to raw NAND flash chips.
Moreover, the controller is paired with the DMA engine to perform
READ and PROGRAM functions.
allOf:
- $ref: nand-controller.yaml
properties:
compatible:
oneOf:
- enum:
- loongson,ls1b-nand-controller
- loongson,ls1c-nand-controller
- items:
- enum:
- loongson,ls1a-nand-controller
- const: loongson,ls1b-nand-controller
reg:
maxItems: 2
reg-names:
items:
- const: nand
- const: nand-dma
dmas:
maxItems: 1
dma-names:
const: rxtx
required:
- compatible
- reg
- reg-names
- dmas
- dma-names
unevaluatedProperties: false
examples:
- |
nand-controller@1fe78000 {
compatible = "loongson,ls1b-nand-controller";
reg = <0x1fe78000 0x24>, <0x1fe78040 0x4>;
reg-names = "nand", "nand-dma";
dmas = <&dma 0>;
dma-names = "rxtx";
#address-cells = <1>;
#size-cells = <0>;
nand@0 {
reg = <0>;
label = "ls1x-nand";
nand-use-soft-ecc-engine;
nand-ecc-algo = "hamming";
};
};


@@ -11,12 +11,18 @@ maintainers:
properties:
compatible:
enum:
- qcom,ipq806x-nand
- qcom,ipq4019-nand
- qcom,ipq6018-nand
- qcom,ipq8074-nand
- qcom,sdx55-nand
oneOf:
- items:
- enum:
- qcom,sdx75-nand
- const: qcom,sdx55-nand
- items:
- enum:
- qcom,ipq806x-nand
- qcom,ipq4019-nand
- qcom,ipq6018-nand
- qcom,ipq8074-nand
- qcom,sdx55-nand
reg:
maxItems: 1
@@ -95,6 +101,18 @@ allOf:
items:
- const: rxtx
- if:
properties:
compatible:
contains:
enum:
- qcom,sdx75-nand
then:
properties:
iommus:
maxItems: 1
- if:
properties:
compatible:


@@ -1,59 +0,0 @@
Freescale's NAND flash controller (NFC)
This variant of the Freescale NAND flash controller (NFC) can be found on
Vybrid (vf610), MPC5125, MCF54418 and Kinetis K70.
Required properties:
- compatible: Should be set to "fsl,vf610-nfc".
- reg: address range of the NFC.
- interrupts: interrupt of the NFC.
- #address-cells: shall be set to 1. Encode the nand CS.
- #size-cells : shall be set to 0.
- assigned-clocks: main clock from the SoC, for Vybrid <&clks VF610_CLK_NFC>;
- assigned-clock-rates: The NAND bus timing is derived from this clock
rate and should not exceed maximum timing for any NAND memory chip
in a board stuffing. Typical NAND memory timings derived from this
clock are found in the SoC hardware reference manual. Furthermore,
there might be restrictions on maximum rates when using hardware ECC.
- #address-cells, #size-cells : Must be present if the device has sub-nodes
representing partitions.
Required children nodes:
Children nodes represent the available nand chips. Currently the driver can
only handle one NAND chip.
Required properties:
- compatible: Should be set to "fsl,vf610-nfc-cs".
- nand-bus-width: see nand-controller.yaml
- nand-ecc-mode: see nand-controller.yaml
Required properties for hardware ECC:
- nand-ecc-strength: supported strengths are 24 and 32 bit (see nand-controller.yaml)
- nand-ecc-step-size: step size equals page size, currently only 2k pages are
supported
- nand-on-flash-bbt: see nand-controller.yaml
Example:
nfc: nand@400e0000 {
compatible = "fsl,vf610-nfc";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x400e0000 0x4000>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks VF610_CLK_NFC>;
clock-names = "nfc";
assigned-clocks = <&clks VF610_CLK_NFC>;
assigned-clock-rates = <33000000>;
nand@0 {
compatible = "fsl,vf610-nfc-nandcs";
reg = <0>;
nand-bus-width = <8>;
nand-ecc-mode = "hw";
nand-ecc-strength = <32>;
nand-ecc-step-size = <2048>;
nand-on-flash-bbt;
};
};


@@ -16589,6 +16589,7 @@ F: Documentation/devicetree/bindings/*/loongson,ls1*.yaml
F: arch/mips/include/asm/mach-loongson32/
F: arch/mips/loongson32/
F: drivers/*/*loongson1*
F: drivers/mtd/nand/raw/loongson1-nand-controller.c
F: drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
F: sound/soc/loongson/loongson1_ac97.c


@@ -98,7 +98,7 @@ config MTD_MCHP48L640
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
depends on PLAT_SPEAR || COMPILE_TEST
default y
default PLAT_SPEAR
help
This enable SNOR support on SPEAR platforms using SMI controller


@@ -559,7 +559,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
/* Sanitize user input */
p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
return mtd_add_partition(mtd, p.devname, p.start, p.length);
return mtd_add_partition(mtd, p.devname, p.start, p.length, NULL);
case BLKPG_DEL_PARTITION:
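
As the hunk above shows, mtd_add_partition() grew an output parameter that optionally returns the newly created child; callers that do not need it, like this ioctl, pass NULL. A hedged sketch of a caller that does want the handle back (function and partition name are invented):

static int example_add_named_partition(struct mtd_info *master)
{
        struct mtd_info *child;
        int ret;

        /* The final argument may be NULL, as in the BLKPG path above */
        ret = mtd_add_partition(master, "example-part", 0,
                                MTDPART_SIZ_FULL, &child);
        if (ret)
                return ret;

        pr_info("created %s as mtd%d\n", child->name, child->index);
        return 0;
}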


@@ -68,7 +68,13 @@ static struct class mtd_class = {
.pm = MTD_CLS_PM_OPS,
};
static struct class mtd_master_class = {
.name = "mtd_master",
.pm = MTD_CLS_PM_OPS,
};
static DEFINE_IDR(mtd_idr);
static DEFINE_IDR(mtd_master_idr);
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -83,8 +89,9 @@ EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
#define MTD_MASTER_DEVS 255
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
static dev_t mtd_master_devt;
/* REVISIT once MTD uses the driver model better, whoever allocates
* the mtd_info will probably want to use the release() hook...
@@ -104,6 +111,17 @@ static void mtd_release(struct device *dev)
device_destroy(&mtd_class, index + 1);
}
static void mtd_master_release(struct device *dev)
{
struct mtd_info *mtd = dev_get_drvdata(dev);
idr_remove(&mtd_master_idr, mtd->index);
of_node_put(mtd_get_of_node(mtd));
if (mtd_is_partition(mtd))
release_mtd_partition(mtd);
}
static void mtd_device_release(struct kref *kref)
{
struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
@@ -367,6 +385,11 @@ static const struct device_type mtd_devtype = {
.release = mtd_release,
};
static const struct device_type mtd_master_devtype = {
.name = "mtd_master",
.release = mtd_master_release,
};
static bool mtd_expert_analysis_mode;
#ifdef CONFIG_DEBUG_FS
@@ -634,13 +657,13 @@ exit_parent:
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
* @partitioned: create partitioned device
*
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or non-zero on failure.
*/
int add_mtd_device(struct mtd_info *mtd)
int add_mtd_device(struct mtd_info *mtd, bool partitioned)
{
struct device_node *np = mtd_get_of_node(mtd);
struct mtd_info *master = mtd_get_master(mtd);
@@ -687,10 +710,17 @@ int add_mtd_device(struct mtd_info *mtd)
ofidx = -1;
if (np)
ofidx = of_alias_get_id(np, "mtd");
if (ofidx >= 0)
i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
else
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
if (partitioned) {
if (ofidx >= 0)
i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
else
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
} else {
if (ofidx >= 0)
i = idr_alloc(&mtd_master_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
else
i = idr_alloc(&mtd_master_idr, mtd, 0, 0, GFP_KERNEL);
}
if (i < 0) {
error = i;
goto fail_locked;
@@ -738,10 +768,18 @@ int add_mtd_device(struct mtd_info *mtd)
/* Caller should have set dev.parent to match the
* physical device, if appropriate.
*/
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
error = dev_set_name(&mtd->dev, "mtd%d", i);
if (partitioned) {
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
error = dev_set_name(&mtd->dev, "mtd%d", i);
} else {
mtd->dev.type = &mtd_master_devtype;
mtd->dev.class = &mtd_master_class;
mtd->dev.devt = MKDEV(MAJOR(mtd_master_devt), i);
error = dev_set_name(&mtd->dev, "mtd_master%d", i);
}
if (error)
goto fail_devname;
dev_set_drvdata(&mtd->dev, mtd);
@@ -749,6 +787,7 @@ int add_mtd_device(struct mtd_info *mtd)
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error) {
pr_err("mtd: %s device_register fail %d\n", mtd->name, error);
put_device(&mtd->dev);
goto fail_added;
}
@@ -760,10 +799,13 @@ int add_mtd_device(struct mtd_info *mtd)
mtd_debugfs_populate(mtd);
device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
"mtd%dro", i);
if (partitioned) {
device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
"mtd%dro", i);
}
pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
pr_debug("mtd: Giving out %spartitioned device %d to %s\n",
partitioned ? "" : "un-", i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
@@ -771,13 +813,16 @@ int add_mtd_device(struct mtd_info *mtd)
mutex_unlock(&mtd_table_mutex);
if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
if (IS_BUILTIN(CONFIG_MTD)) {
pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
} else {
pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
mtd->index, mtd->name);
if (partitioned) {
if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
if (IS_BUILTIN(CONFIG_MTD)) {
pr_info("mtd: setting mtd%d (%s) as root device\n",
mtd->index, mtd->name);
ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
} else {
pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
mtd->index, mtd->name);
}
}
}
@@ -793,7 +838,10 @@ fail_nvmem_add:
fail_added:
of_node_put(mtd_get_of_node(mtd));
fail_devname:
idr_remove(&mtd_idr, i);
if (partitioned)
idr_remove(&mtd_idr, i);
else
idr_remove(&mtd_master_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
return error;
@@ -811,12 +859,14 @@ fail_locked:
int del_mtd_device(struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
struct idr *idr;
int ret;
mutex_lock(&mtd_table_mutex);
if (idr_find(&mtd_idr, mtd->index) != mtd) {
idr = mtd->dev.class == &mtd_class ? &mtd_idr : &mtd_master_idr;
if (idr_find(idr, mtd->index) != mtd) {
ret = -ENODEV;
goto out_error;
}
@@ -1056,6 +1106,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
struct mtd_info *parent;
int ret, err;
mtd_set_dev_defaults(mtd);
@@ -1064,25 +1115,30 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
if (ret)
goto out;
ret = add_mtd_device(mtd, false);
if (ret)
goto out;
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
ret = add_mtd_device(mtd);
ret = mtd_add_partition(mtd, mtd->name, 0, MTDPART_SIZ_FULL, &parent);
if (ret)
goto out;
} else {
parent = mtd;
}
/* Prefer parsed partitions over driver-provided fallback */
ret = parse_mtd_partitions(mtd, types, parser_data);
ret = parse_mtd_partitions(parent, types, parser_data);
if (ret == -EPROBE_DEFER)
goto out;
if (ret > 0)
ret = 0;
else if (nr_parts)
ret = add_mtd_partitions(mtd, parts, nr_parts);
else if (!device_is_registered(&mtd->dev))
ret = add_mtd_device(mtd);
else
ret = 0;
ret = add_mtd_partitions(parent, parts, nr_parts);
else if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
ret = mtd_add_partition(parent, mtd->name, 0, MTDPART_SIZ_FULL, NULL);
if (ret)
goto out;
@@ -1102,13 +1158,14 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
register_reboot_notifier(&mtd->reboot_notifier);
}
return 0;
out:
if (ret) {
nvmem_unregister(mtd->otp_user_nvmem);
nvmem_unregister(mtd->otp_factory_nvmem);
}
nvmem_unregister(mtd->otp_user_nvmem);
nvmem_unregister(mtd->otp_factory_nvmem);
if (ret && device_is_registered(&mtd->dev)) {
del_mtd_partitions(mtd);
if (device_is_registered(&mtd->dev)) {
err = del_mtd_device(mtd);
if (err)
pr_err("Error when deleting MTD device (%d)\n", err);
@@ -1267,8 +1324,7 @@ int __get_mtd_device(struct mtd_info *mtd)
mtd = mtd->parent;
}
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
kref_get(&master->refcnt);
kref_get(&master->refcnt);
return 0;
}
@@ -1362,8 +1418,7 @@ void __put_mtd_device(struct mtd_info *mtd)
mtd = parent;
}
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
kref_put(&master->refcnt, mtd_device_release);
kref_put(&master->refcnt, mtd_device_release);
module_put(master->owner);
@@ -2530,6 +2585,16 @@ static int __init init_mtd(void)
if (ret)
goto err_reg;
ret = class_register(&mtd_master_class);
if (ret)
goto err_reg2;
ret = alloc_chrdev_region(&mtd_master_devt, 0, MTD_MASTER_DEVS, "mtd_master");
if (ret < 0) {
pr_err("unable to allocate char dev region\n");
goto err_chrdev;
}
mtd_bdi = mtd_bdi_init("mtd");
if (IS_ERR(mtd_bdi)) {
ret = PTR_ERR(mtd_bdi);
@@ -2554,6 +2619,10 @@ out_procfs:
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
err_bdi:
unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS);
err_chrdev:
class_unregister(&mtd_master_class);
err_reg2:
class_unregister(&mtd_class);
err_reg:
pr_err("Error registering mtd class or bdi: %d\n", ret);
@@ -2567,9 +2636,12 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
class_unregister(&mtd_master_class);
unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS);
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
idr_destroy(&mtd_idr);
idr_destroy(&mtd_master_idr);
}
module_init(init_mtd);


@@ -8,7 +8,7 @@ extern struct mutex mtd_table_mutex;
extern struct backing_dev_info *mtd_bdi;
struct mtd_info *__mtd_next_device(int i);
int __must_check add_mtd_device(struct mtd_info *mtd);
int __must_check add_mtd_device(struct mtd_info *mtd, bool partitioned);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);


@@ -86,8 +86,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
* parent conditional on that option. Note, this is a way to
* distinguish between the parent and its partitions in sysfs.
*/
child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
&parent->dev : parent->dev.parent;
child->dev.parent = &parent->dev;
child->dev.of_node = part->of_node;
child->parent = parent;
child->part.offset = part->offset;
@@ -243,7 +242,7 @@ }
}
int mtd_add_partition(struct mtd_info *parent, const char *name,
long long offset, long long length)
long long offset, long long length, struct mtd_info **out)
{
struct mtd_info *master = mtd_get_master(parent);
u64 parent_size = mtd_is_partition(parent) ?
@@ -276,12 +275,15 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
ret = add_mtd_device(child);
ret = add_mtd_device(child, true);
if (ret)
goto err_remove_part;
mtd_add_partition_attrs(child);
if (out)
*out = child;
return 0;
err_remove_part:
@@ -413,7 +415,7 @@ int add_mtd_partitions(struct mtd_info *parent,
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
ret = add_mtd_device(child);
ret = add_mtd_device(child, true);
if (ret) {
mutex_lock(&master->master.partitions_lock);
list_del(&child->part.node);
@@ -590,9 +592,6 @@ static int mtd_part_of_parse(struct mtd_info *master,
int ret, err = 0;
dev = &master->dev;
/* Use parent device (controller) if the top level MTD is not registered */
if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master))
dev = master->dev.parent;
np = mtd_get_of_node(master);
if (mtd_is_partition(master))
@@ -711,6 +710,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
if (ret < 0 && !err)
err = ret;
}
return err;
}


@@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
int nents, step, ret;
int nents, step, ret = 0;
if (req->mode == MTD_OPS_RAW)
return 0;


@@ -236,21 +236,21 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
int i, ret;
struct bam_cmd_element *bam_ce_buffer;
struct bam_transaction *bam_txn = nandc->bam_txn;
u32 offset;
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
/* fill the command desc */
for (i = 0; i < size; i++) {
offset = nandc->props->bam_offset + reg_off + 4 * i;
if (read)
bam_prep_ce(&bam_ce_buffer[i],
nandc_reg_phys(nandc, reg_off + 4 * i),
BAM_READ_COMMAND,
offset, BAM_READ_COMMAND,
reg_buf_dma_addr(nandc,
(__le32 *)vaddr + i));
else
bam_prep_ce_le32(&bam_ce_buffer[i],
nandc_reg_phys(nandc, reg_off + 4 * i),
BAM_WRITE_COMMAND,
offset, BAM_WRITE_COMMAND,
*((__le32 *)vaddr + i));
}
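
My reading of the hunk above: BAM command elements used to carry a full physical register address from nandc_reg_phys(); they now carry an offset computed from a fixed per-SoC bam_offset (0x30000 in the props tables added further down in this series) plus the register's own offset, matching the 18-bit offset the BAM hardware expects. A condensed sketch of just that computation, with a hypothetical helper name:

/* Offset of the i-th command-element register as seen from the BAM
 * base: fixed BAM-to-NANDc distance plus the register offset. */
static u32 example_bam_ce_offset(const struct qcom_nandc_props *props,
                                 u32 reg_off, unsigned int i)
{
        return props->bam_offset + reg_off + 4 * i;
}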


@@ -34,7 +34,7 @@ config MTD_NAND_DENALI_DT
config MTD_NAND_AMS_DELTA
tristate "Amstrad E3 NAND controller"
depends on MACH_AMS_DELTA || COMPILE_TEST
default y
default MACH_AMS_DELTA
help
Support for NAND flash on Amstrad E3 (Delta).
@@ -462,6 +462,13 @@ config MTD_NAND_NUVOTON_MA35
Enables support for the NAND controller found on
the Nuvoton MA35 series SoCs.
config MTD_NAND_LOONGSON1
tristate "Loongson1 NAND controller"
depends on LOONGSON1_APB_DMA || COMPILE_TEST
select REGMAP_MMIO
help
Enables support for NAND controller on Loongson1 SoCs.
comment "Misc"
config MTD_SM_COMMON


@@ -59,6 +59,7 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o


@@ -171,6 +171,7 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip,
{
struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
u32 code = 0;
int rc;
if (cmd == NAND_CMD_NONE)
return;
@@ -182,7 +183,9 @@
if (cmd != NAND_CMD_RESET)
code |= NCTL_CSA;
bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
rc = bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
if (rc)
pr_err("ctl_cmd didn't work with error %d\n", rc);
}
/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */


@@ -65,6 +65,7 @@ module_param(wp_on, int, 0444);
#define CMD_PARAMETER_READ 0x0e
#define CMD_PARAMETER_CHANGE_COL 0x0f
#define CMD_LOW_LEVEL_OP 0x10
#define CMD_NOT_SUPPORTED 0xff
struct brcm_nand_dma_desc {
u32 next_desc;
@@ -101,7 +102,7 @@ struct brcm_nand_dma_desc {
#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
#define NAND_POLL_STATUS_TIMEOUT_MS 100
#define NAND_POLL_STATUS_TIMEOUT_MS 500
#define EDU_CMD_WRITE 0x00
#define EDU_CMD_READ 0x01
@@ -199,6 +200,30 @@ static const u16 flash_dma_regs_v4[] = {
[FLASH_DMA_CURRENT_DESC_EXT] = 0x34,
};
/* Native command conversion for legacy controllers (< v5.0) */
static const u8 native_cmd_conv[] = {
[NAND_CMD_READ0] = CMD_NOT_SUPPORTED,
[NAND_CMD_READ1] = CMD_NOT_SUPPORTED,
[NAND_CMD_RNDOUT] = CMD_PARAMETER_CHANGE_COL,
[NAND_CMD_PAGEPROG] = CMD_NOT_SUPPORTED,
[NAND_CMD_READOOB] = CMD_NOT_SUPPORTED,
[NAND_CMD_ERASE1] = CMD_BLOCK_ERASE,
[NAND_CMD_STATUS] = CMD_NOT_SUPPORTED,
[NAND_CMD_SEQIN] = CMD_NOT_SUPPORTED,
[NAND_CMD_RNDIN] = CMD_NOT_SUPPORTED,
[NAND_CMD_READID] = CMD_DEVICE_ID_READ,
[NAND_CMD_ERASE2] = CMD_NULL,
[NAND_CMD_PARAM] = CMD_PARAMETER_READ,
[NAND_CMD_GET_FEATURES] = CMD_NOT_SUPPORTED,
[NAND_CMD_SET_FEATURES] = CMD_NOT_SUPPORTED,
[NAND_CMD_RESET] = CMD_NOT_SUPPORTED,
[NAND_CMD_READSTART] = CMD_NOT_SUPPORTED,
[NAND_CMD_READCACHESEQ] = CMD_NOT_SUPPORTED,
[NAND_CMD_READCACHEEND] = CMD_NOT_SUPPORTED,
[NAND_CMD_RNDOUTSTART] = CMD_NULL,
[NAND_CMD_CACHEDPROG] = CMD_NOT_SUPPORTED,
};
/* Controller feature flags */
enum {
BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -237,6 +262,12 @@ struct brcmnand_controller {
/* List of NAND hosts (one for each chip-select) */
struct list_head host_list;
/* Functions to be called from exec_op */
int (*check_instr)(struct nand_chip *chip,
const struct nand_operation *op);
int (*exec_instr)(struct nand_chip *chip,
const struct nand_operation *op);
/* EDU info, per-transaction */
const u16 *edu_offsets;
void __iomem *edu_base;
@@ -310,9 +341,6 @@ struct brcmnand_host {
struct platform_device *pdev;
int cs;
unsigned int last_cmd;
unsigned int last_byte;
u64 last_addr;
struct brcmnand_cfg hwcfg;
struct brcmnand_controller *ctrl;
};
@@ -2233,14 +2261,11 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
u64 addr = (u64)page << chip->page_shift;
host->last_addr = addr;
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
return brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
(u32 *)buf, oob);
}
static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
@@ -2252,11 +2277,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
int ret;
u64 addr = (u64)page << chip->page_shift;
host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
ret = brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT,
(u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
}
@@ -2363,13 +2386,10 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
u64 addr = (u64)page << chip->page_shift;
host->last_addr = addr;
return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
return brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
}
static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
@@ -2381,9 +2401,8 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
u64 addr = (u64)page << chip->page_shift;
int ret = 0;
host->last_addr = addr;
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
ret = brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
return ret;
@@ -2490,18 +2509,190 @@ static int brcmnand_op_is_reset(const struct nand_operation *op)
return 0;
}
static int brcmnand_check_instructions(struct nand_chip *chip,
const struct nand_operation *op)
{
return 0;
}
static int brcmnand_exec_instructions(struct nand_chip *chip,
const struct nand_operation *op)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
unsigned int i;
int ret = 0;
for (i = 0; i < op->ninstrs; i++) {
ret = brcmnand_exec_instr(host, i, op);
if (ret)
break;
}
return ret;
}
static int brcmnand_check_instructions_legacy(struct nand_chip *chip,
const struct nand_operation *op)
{
const struct nand_op_instr *instr;
unsigned int i;
u8 cmd;
for (i = 0; i < op->ninstrs; i++) {
instr = &op->instrs[i];
switch (instr->type) {
case NAND_OP_CMD_INSTR:
cmd = native_cmd_conv[instr->ctx.cmd.opcode];
if (cmd == CMD_NOT_SUPPORTED)
return -EOPNOTSUPP;
break;
case NAND_OP_ADDR_INSTR:
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_WAITRDY_INSTR:
break;
default:
return -EOPNOTSUPP;
}
}
return 0;
}
static int brcmnand_exec_instructions_legacy(struct nand_chip *chip,
const struct nand_operation *op)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
const struct nand_op_instr *instr;
unsigned int i, j;
u8 cmd = CMD_NULL, last_cmd = CMD_NULL;
int ret = 0;
u64 last_addr;
for (i = 0; i < op->ninstrs; i++) {
instr = &op->instrs[i];
if (instr->type == NAND_OP_CMD_INSTR) {
cmd = native_cmd_conv[instr->ctx.cmd.opcode];
if (cmd == CMD_NOT_SUPPORTED) {
dev_err(ctrl->dev, "unsupported cmd=%d\n",
instr->ctx.cmd.opcode);
ret = -EOPNOTSUPP;
break;
}
} else if (instr->type == NAND_OP_ADDR_INSTR) {
u64 addr = 0;
if (cmd == CMD_NULL)
continue;
if (instr->ctx.addr.naddrs > 8) {
dev_err(ctrl->dev, "unsupported naddrs=%u\n",
instr->ctx.addr.naddrs);
ret = -EOPNOTSUPP;
break;
}
for (j = 0; j < instr->ctx.addr.naddrs; j++)
addr |= (instr->ctx.addr.addrs[j]) << (j << 3);
if (cmd == CMD_BLOCK_ERASE)
addr <<= chip->page_shift;
else if (cmd == CMD_PARAMETER_CHANGE_COL)
addr &= ~((u64)(FC_BYTES - 1));
brcmnand_set_cmd_addr(mtd, addr);
brcmnand_send_cmd(host, cmd);
last_addr = addr;
last_cmd = cmd;
cmd = CMD_NULL;
brcmnand_waitfunc(chip);
if (last_cmd == CMD_PARAMETER_READ ||
last_cmd == CMD_PARAMETER_CHANGE_COL) {
/* Copy flash cache word-wise */
u32 *flash_cache = (u32 *)ctrl->flash_cache;
brcmnand_soc_data_bus_prepare(ctrl->soc, true);
/*
* Must cache the FLASH_CACHE now, since changes in
* SECTOR_SIZE_1K may invalidate it
*/
for (j = 0; j < FC_WORDS; j++)
/*
* Flash cache is big endian for parameter pages, at
* least on STB SoCs
*/
flash_cache[j] = be32_to_cpu(brcmnand_read_fc(ctrl, j));
brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
}
} else if (instr->type == NAND_OP_DATA_IN_INSTR) {
u8 *in = instr->ctx.data.buf.in;
if (last_cmd == CMD_DEVICE_ID_READ) {
u32 val;
if (instr->ctx.data.len > 8) {
dev_err(ctrl->dev, "unsupported len=%u\n",
instr->ctx.data.len);
ret = -EOPNOTSUPP;
break;
}
for (j = 0; j < instr->ctx.data.len; j++) {
if (j == 0)
val = brcmnand_read_reg(ctrl, BRCMNAND_ID);
else if (j == 4)
val = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT);
in[j] = (val >> (24 - ((j % 4) << 3))) & 0xff;
}
} else if (last_cmd == CMD_PARAMETER_READ ||
last_cmd == CMD_PARAMETER_CHANGE_COL) {
u64 addr;
u32 offs;
for (j = 0; j < instr->ctx.data.len; j++) {
addr = last_addr + j;
offs = addr & (FC_BYTES - 1);
if (j > 0 && offs == 0)
nand_change_read_column_op(chip, addr, NULL, 0,
false);
in[j] = ctrl->flash_cache[offs];
}
}
} else if (instr->type == NAND_OP_WAITRDY_INSTR) {
ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
if (ret)
break;
} else {
dev_err(ctrl->dev, "unsupported instruction type: %d\n", instr->type);
ret = -EOPNOTSUPP;
break;
}
}
return ret;
}
static int brcmnand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_controller *ctrl = host->ctrl;
struct mtd_info *mtd = nand_to_mtd(chip);
u8 *status;
unsigned int i;
int ret = 0;
if (check_only)
return 0;
return ctrl->check_instr(chip, op);
if (brcmnand_op_is_status(op)) {
status = op->instrs[1].ctx.data.buf.in;
@@ -2525,11 +2716,7 @@ static int brcmnand_exec_op(struct nand_chip *chip,
if (op->deassert_wp)
brcmnand_wp(mtd, 0);
for (i = 0; i < op->ninstrs; i++) {
ret = brcmnand_exec_instr(host, i, op);
if (ret)
break;
}
ret = ctrl->exec_instr(chip, op);
if (op->deassert_wp)
brcmnand_wp(mtd, 1);
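
For context on the two new hooks: the rawnand core may invoke ->exec_op() with check_only=true to ask whether an operation is supported before anything is sent to the chip, which is where the legacy checker rejects opcodes mapped to CMD_NOT_SUPPORTED in native_cmd_conv[]. A minimal sketch of that contract as I understand it (the wrapper function is invented):

static int example_try_op(struct nand_chip *chip,
                          const struct nand_operation *op)
{
        const struct nand_controller_ops *ops = chip->controller->ops;
        int ret;

        /* Dry run: on pre-v5.0 controllers this lands in
         * brcmnand_check_instructions_legacy() and may return
         * -EOPNOTSUPP without touching the hardware. */
        ret = ops->exec_op(chip, op, true);
        if (ret)
                return ret;

        /* Real run: dispatched through ctrl->exec_instr(). */
        return ops->exec_op(chip, op, false);
}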
@@ -3142,6 +3329,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
if (ret)
goto err;
/* Only v5.0+ controllers have low level ops support */
if (ctrl->nand_version >= 0x0500) {
ctrl->check_instr = brcmnand_check_instructions;
ctrl->exec_instr = brcmnand_exec_instructions;
} else {
ctrl->check_instr = brcmnand_check_instructions_legacy;
ctrl->exec_instr = brcmnand_exec_instructions_legacy;
}
/*
* Most chips have this cache at a fixed offset within 'nand' block.
* Some must specify this region separately.


@@ -68,7 +68,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
denali->clk_rate = 50000000; /* 50 MHz */
denali->clk_x_rate = 200000000; /* 200 MHz */
ret = pci_request_regions(dev, DENALI_NAND_NAME);
ret = pcim_request_all_regions(dev, DENALI_NAND_NAME);
if (ret) {
dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
return ret;
@@ -77,20 +77,18 @@
denali->reg = devm_ioremap(denali->dev, csr_base, csr_len);
if (!denali->reg) {
dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
ret = -ENOMEM;
goto regions_release;
return -ENOMEM;
}
denali->host = devm_ioremap(denali->dev, mem_base, mem_len);
if (!denali->host) {
dev_err(&dev->dev, "Spectra: ioremap failed!");
ret = -ENOMEM;
goto regions_release;
return -ENOMEM;
}
ret = denali_init(denali);
if (ret)
goto regions_release;
return ret;
nsels = denali->nbanks;
@@ -118,8 +116,6 @@
out_remove_denali:
denali_remove(denali);
regions_release:
pci_release_regions(dev);
return ret;
}
@@ -127,7 +123,6 @@ static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_controller *denali = pci_get_drvdata(dev);
pci_release_regions(dev);
denali_remove(denali);
}
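
The denali_pci simplification works because pcim_request_all_regions() is a managed (devres) API: BAR reservations are dropped automatically when probe fails or the device is unbound, which is why every manual pci_release_regions() call and the regions_release label could go away. A minimal sketch of the pattern, with made-up driver names:

static int example_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
{
        int ret;

        ret = pcim_enable_device(pdev);        /* managed enable */
        if (ret)
                return ret;

        /* Managed BAR request: no matching release call is needed;
         * devres unwinds it on unbind or on later probe failure. */
        return pcim_request_all_regions(pdev, "example-driver");
}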


@@ -0,0 +1,836 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NAND Controller Driver for Loongson-1 SoC
*
* Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
/* Loongson-1 NAND Controller Registers */
#define LS1X_NAND_CMD 0x0
#define LS1X_NAND_ADDR1 0x4
#define LS1X_NAND_ADDR2 0x8
#define LS1X_NAND_TIMING 0xc
#define LS1X_NAND_IDL 0x10
#define LS1X_NAND_IDH_STATUS 0x14
#define LS1X_NAND_PARAM 0x18
#define LS1X_NAND_OP_NUM 0x1c
/* NAND Command Register Bits */
#define LS1X_NAND_CMD_OP_DONE BIT(10)
#define LS1X_NAND_CMD_OP_SPARE BIT(9)
#define LS1X_NAND_CMD_OP_MAIN BIT(8)
#define LS1X_NAND_CMD_STATUS BIT(7)
#define LS1X_NAND_CMD_RESET BIT(6)
#define LS1X_NAND_CMD_READID BIT(5)
#define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4)
#define LS1X_NAND_CMD_ERASE BIT(3)
#define LS1X_NAND_CMD_WRITE BIT(2)
#define LS1X_NAND_CMD_READ BIT(1)
#define LS1X_NAND_CMD_VALID BIT(0)
#define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0)
#define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8)
#define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8)
#define LS1X_NAND_COL_ADDR_CYC 2U
#define LS1X_NAND_MAX_ADDR_CYC 5U
#define BITS_PER_WORD (4 * BITS_PER_BYTE)
struct ls1x_nand_host;
struct ls1x_nand_op {
char addrs[LS1X_NAND_MAX_ADDR_CYC];
unsigned int naddrs;
unsigned int addrs_offset;
unsigned int aligned_offset;
unsigned int cmd_reg;
unsigned int row_start;
unsigned int rdy_timeout_ms;
unsigned int orig_len;
bool is_readid;
bool is_erase;
bool is_write;
bool is_read;
bool is_change_column;
size_t len;
char *buf;
};
struct ls1x_nand_data {
unsigned int status_field;
unsigned int op_scope_field;
unsigned int hold_cycle;
unsigned int wait_cycle;
void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op);
};
struct ls1x_nand_host {
struct device *dev;
struct nand_chip chip;
struct nand_controller controller;
const struct ls1x_nand_data *data;
void __iomem *reg_base;
struct regmap *regmap;
/* DMA Engine stuff */
dma_addr_t dma_base;
struct dma_chan *dma_chan;
dma_cookie_t dma_cookie;
struct completion dma_complete;
};
static const struct regmap_config ls1x_nand_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode)
{
struct ls1x_nand_host *host = nand_get_controller_data(chip);
op->row_start = chip->page_shift + 1;
/* The controller abstracts the following NAND operations. */
switch (opcode) {
case NAND_CMD_STATUS:
op->cmd_reg = LS1X_NAND_CMD_STATUS;
break;
case NAND_CMD_RESET:
op->cmd_reg = LS1X_NAND_CMD_RESET;
break;
case NAND_CMD_READID:
op->is_readid = true;
op->cmd_reg = LS1X_NAND_CMD_READID;
break;
case NAND_CMD_ERASE1:
op->is_erase = true;
op->addrs_offset = LS1X_NAND_COL_ADDR_CYC;
break;
case NAND_CMD_ERASE2:
if (!op->is_erase)
return -EOPNOTSUPP;
/* During erasing, row_start differs from the default value. */
op->row_start = chip->page_shift;
op->cmd_reg = LS1X_NAND_CMD_ERASE;
break;
case NAND_CMD_SEQIN:
op->is_write = true;
break;
case NAND_CMD_PAGEPROG:
if (!op->is_write)
return -EOPNOTSUPP;
op->cmd_reg = LS1X_NAND_CMD_WRITE;
break;
case NAND_CMD_READ0:
op->is_read = true;
break;
case NAND_CMD_READSTART:
if (!op->is_read)
return -EOPNOTSUPP;
op->cmd_reg = LS1X_NAND_CMD_READ;
break;
case NAND_CMD_RNDOUT:
op->is_change_column = true;
break;
case NAND_CMD_RNDOUTSTART:
if (!op->is_change_column)
return -EOPNOTSUPP;
op->cmd_reg = LS1X_NAND_CMD_READ;
break;
default:
dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
return -EOPNOTSUPP;
}
return 0;
}
static int ls1x_nand_parse_instructions(struct nand_chip *chip,
const struct nand_subop *subop, struct ls1x_nand_op *op)
{
unsigned int op_id;
int ret;
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
const struct nand_op_instr *instr = &subop->instrs[op_id];
unsigned int offset, naddrs;
const u8 *addrs;
switch (instr->type) {
case NAND_OP_CMD_INSTR:
ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
if (ret < 0)
return ret;
break;
case NAND_OP_ADDR_INSTR:
naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
if (naddrs > LS1X_NAND_MAX_ADDR_CYC)
return -EOPNOTSUPP;
op->naddrs = naddrs;
offset = nand_subop_get_addr_start_off(subop, op_id);
addrs = &instr->ctx.addr.addrs[offset];
memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
break;
case NAND_OP_DATA_IN_INSTR:
case NAND_OP_DATA_OUT_INSTR:
offset = nand_subop_get_data_start_off(subop, op_id);
op->orig_len = nand_subop_get_data_len(subop, op_id);
if (instr->type == NAND_OP_DATA_IN_INSTR)
op->buf = instr->ctx.data.buf.in + offset;
else if (instr->type == NAND_OP_DATA_OUT_INSTR)
op->buf = (void *)instr->ctx.data.buf.out + offset;
break;
case NAND_OP_WAITRDY_INSTR:
op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
break;
default:
break;
}
}
return 0;
}
static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
struct nand_chip *chip = &host->chip;
int i;
for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
int shift, mask, val;
if (i < LS1X_NAND_COL_ADDR_CYC) {
shift = i * BITS_PER_BYTE;
mask = (u32)0xff << shift;
mask &= GENMASK(chip->page_shift, 0);
val = (u32)op->addrs[i] << shift;
regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
} else if (!op->is_change_column) {
shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
mask = (u32)0xff << shift;
val = (u32)op->addrs[i] << shift;
regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
if (i == 4) {
mask = (u32)0xff >> (BITS_PER_WORD - shift);
val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
}
}
}
}
static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
int i;
for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
int shift, mask, val;
if (i < LS1X_NAND_COL_ADDR_CYC) {
shift = i * BITS_PER_BYTE;
mask = (u32)0xff << shift;
val = (u32)op->addrs[i] << shift;
regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
} else if (!op->is_change_column) {
shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
mask = (u32)0xff << shift;
val = (u32)op->addrs[i] << shift;
regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
}
}
}
static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
int col0 = op->addrs[0];
short col;
if (!IS_ALIGNED(col0, chip->buf_align)) {
col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
op->aligned_offset = op->addrs[0] - col0;
op->addrs[0] = col0;
}
if (host->data->set_addr)
host->data->set_addr(host, op);
/* set operation length */
if (op->is_write || op->is_read || op->is_change_column)
op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
else if (op->is_erase)
op->len = 1;
else
op->len = op->orig_len;
writel(op->len, host->reg_base + LS1X_NAND_OP_NUM);
/* set operation area and scope */
col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
if (op->orig_len && !op->is_readid) {
unsigned int op_scope = 0;
if (col < mtd->writesize) {
op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN;
op_scope = mtd->writesize;
}
op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE;
op_scope += mtd->oobsize;
op_scope <<= __ffs(host->data->op_scope_field);
regmap_update_bits(host->regmap, LS1X_NAND_PARAM,
host->data->op_scope_field, op_scope);
}
/* set command */
writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD);
/* trigger operation */
regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID);
}
static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
unsigned int val;
int ret = 0;
if (op->rdy_timeout_ms) {
ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD,
val, val & LS1X_NAND_CMD_OP_DONE,
0, op->rdy_timeout_ms * MSEC_PER_SEC);
if (ret)
dev_err(host->dev, "operation failed\n");
}
return ret;
}
static void ls1x_nand_dma_callback(void *data)
{
struct ls1x_nand_host *host = (struct ls1x_nand_host *)data;
struct dma_chan *chan = host->dma_chan;
struct device *dev = chan->device->dev;
enum dma_status status;
status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
if (likely(status == DMA_COMPLETE)) {
dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
complete(&host->dma_complete);
} else {
dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
}
}
static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
{
struct nand_chip *chip = &host->chip;
struct dma_chan *chan = host->dma_chan;
struct device *dev = chan->device->dev;
struct dma_async_tx_descriptor *desc;
enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
void *buf = op->buf;
char *dma_buf = NULL;
dma_addr_t dma_addr;
int ret;
if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
IS_ALIGNED(op->orig_len, chip->buf_align)) {
dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "failed to map DMA buffer\n");
return -ENXIO;
}
} else if (!op->is_write) {
dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
if (!dma_buf)
return -ENOMEM;
} else {
dev_err(dev, "subpage writing not supported\n");
return -EOPNOTSUPP;
}
desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "failed to prepare DMA descriptor\n");
ret = -ENOMEM;
goto err;
}
desc->callback = ls1x_nand_dma_callback;
desc->callback_param = host;
host->dma_cookie = dmaengine_submit(desc);
ret = dma_submit_error(host->dma_cookie);
if (ret) {
dev_err(dev, "failed to submit DMA descriptor\n");
goto err;
}
dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
dma_async_issue_pending(chan);
if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
dmaengine_terminate_sync(chan);
reinit_completion(&host->dma_complete);
ret = -ETIMEDOUT;
goto err;
}
if (dma_buf)
memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
err:
if (dma_buf)
dma_free_coherent(dev, op->len, dma_buf, dma_addr);
else
dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);
return ret;
}
static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
struct ls1x_nand_host *host = nand_get_controller_data(chip);
struct ls1x_nand_op op = {};
int ret;
ret = ls1x_nand_parse_instructions(chip, subop, &op);
if (ret)
return ret;
ls1x_nand_trigger_op(host, &op);
ret = ls1x_nand_dma_transfer(host, &op);
if (ret)
return ret;
return ls1x_nand_wait_for_op_done(host, &op);
}
static int ls1x_nand_misc_type_exec(struct nand_chip *chip,
const struct nand_subop *subop, struct ls1x_nand_op *op)
{
struct ls1x_nand_host *host = nand_get_controller_data(chip);
int ret;
ret = ls1x_nand_parse_instructions(chip, subop, op);
if (ret)
return ret;
ls1x_nand_trigger_op(host, op);
return ls1x_nand_wait_for_op_done(host, op);
}
static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
struct ls1x_nand_op op = {};
return ls1x_nand_misc_type_exec(chip, subop, &op);
}
static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
struct ls1x_nand_host *host = nand_get_controller_data(chip);
struct ls1x_nand_op op = {};
int i, ret;
union {
char ids[5];
struct {
int idl;
char idh;
};
} nand_id;
ret = ls1x_nand_misc_type_exec(chip, subop, &op);
if (ret)
return ret;
nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL);
nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS);
for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++)
op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i];
return ret;
}
static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
{
struct ls1x_nand_host *host = nand_get_controller_data(chip);
struct ls1x_nand_op op = {};
int val, ret;
ret = ls1x_nand_misc_type_exec(chip, subop, &op);
if (ret)
return ret;
val = readl(host->reg_base + LS1X_NAND_IDH_STATUS);
val &= ~host->data->status_field;
op.buf[0] = val << ffs(host->data->status_field);
return ret;
}
static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PATTERN(
ls1x_nand_read_id_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
NAND_OP_PARSER_PATTERN(
ls1x_nand_read_status_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
NAND_OP_PARSER_PATTERN(
ls1x_nand_zerolen_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
NAND_OP_PARSER_PATTERN(
ls1x_nand_zerolen_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
NAND_OP_PARSER_PATTERN(
ls1x_nand_data_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
NAND_OP_PARSER_PATTERN(
ls1x_nand_data_type_exec,
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
NAND_OP_PARSER_PAT_CMD_ELEM(false),
NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
);
static int ls1x_nand_is_valid_cmd(u8 opcode)
{
if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
return 0;
return -EOPNOTSUPP;
}
static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
{
if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
return 0;
if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
return 0;
if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
return 0;
if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
return 0;
return -EOPNOTSUPP;
}
static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
{
const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
int op_id;
for (op_id = 0; op_id < op->ninstrs; op_id++) {
const struct nand_op_instr *instr = &op->instrs[op_id];
if (instr->type == NAND_OP_CMD_INSTR) {
if (!instr1)
instr1 = instr;
else if (!instr2)
instr2 = instr;
else
break;
}
}
if (!instr1)
return -EOPNOTSUPP;
if (!instr2)
return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
}
static int ls1x_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only)
{
if (check_only)
return ls1x_nand_check_op(chip, op);
return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only);
}
static int ls1x_nand_attach_chip(struct nand_chip *chip)
{
struct ls1x_nand_host *host = nand_get_controller_data(chip);
u64 chipsize = nanddev_target_size(&chip->base);
int cell_size = 0;
switch (chipsize) {
case SZ_128M:
cell_size = 0x0;
break;
case SZ_256M:
cell_size = 0x1;
break;
case SZ_512M:
cell_size = 0x2;
break;
case SZ_1G:
cell_size = 0x3;
break;
case SZ_2G:
cell_size = 0x4;
break;
case SZ_4G:
cell_size = 0x5;
break;
case SZ_8G:
cell_size = 0x6;
break;
case SZ_16G:
cell_size = 0x7;
break;
default:
dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize);
return -EINVAL;
}
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
break;
default:
return -EINVAL;
}
/* set cell size */
regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK,
FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size));
regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK,
FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));
regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK,
FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));
chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
return 0;
}
static const struct nand_controller_ops ls1x_nand_controller_ops = {
.exec_op = ls1x_nand_exec_op,
.attach_chip = ls1x_nand_attach_chip,
};
static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host)
{
if (host->dma_chan)
dma_release_channel(host->dma_chan);
}
static int ls1x_nand_controller_init(struct ls1x_nand_host *host)
{
struct device *dev = host->dev;
struct dma_chan *chan;
struct dma_slave_config cfg = {};
int ret;
host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config);
if (IS_ERR(host->regmap))
return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");
chan = dma_request_chan(dev, "rxtx");
if (IS_ERR(chan))
return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
host->dma_chan = chan;
cfg.src_addr = host->dma_base;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr = host->dma_base;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(host->dma_chan, &cfg);
if (ret)
return dev_err_probe(dev, ret, "failed to config DMA channel\n");
init_completion(&host->dma_complete);
return 0;
}
static int ls1x_nand_chip_init(struct ls1x_nand_host *host)
{
struct device *dev = host->dev;
int nchips = of_get_child_count(dev->of_node);
struct device_node *chip_np;
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
if (nchips != 1)
return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n");
chip_np = of_get_next_child(dev->of_node, NULL);
if (!chip_np)
return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");
nand_set_flash_node(chip, chip_np);
of_node_put(chip_np);
if (!mtd->name)
return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");
nand_set_controller_data(chip, host);
chip->controller = &host->controller;
chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
chip->buf_align = 16;
mtd->dev.parent = dev;
mtd->owner = THIS_MODULE;
ret = nand_scan(chip, 1);
if (ret)
return dev_err_probe(dev, ret, "failed to scan NAND chip\n");
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
nand_cleanup(chip);
return dev_err_probe(dev, ret, "failed to register MTD device\n");
}
return 0;
}
static int ls1x_nand_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct ls1x_nand_data *data;
struct ls1x_nand_host *host;
struct resource *res;
int ret;
data = of_device_get_match_data(dev);
if (!data)
return -ENODEV;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->reg_base))
return PTR_ERR(host->reg_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
if (!res)
return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");
host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, host->dma_base))
return -ENXIO;
host->dev = dev;
host->data = data;
host->controller.ops = &ls1x_nand_controller_ops;
nand_controller_init(&host->controller);
ret = ls1x_nand_controller_init(host);
if (ret)
goto err;
ret = ls1x_nand_chip_init(host);
if (ret)
goto err;
platform_set_drvdata(pdev, host);
return 0;
err:
ls1x_nand_controller_cleanup(host);
return ret;
}
static void ls1x_nand_remove(struct platform_device *pdev)
{
struct ls1x_nand_host *host = platform_get_drvdata(pdev);
struct nand_chip *chip = &host->chip;
int ret;
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
ls1x_nand_controller_cleanup(host);
}
static const struct ls1x_nand_data ls1b_nand_data = {
.status_field = GENMASK(15, 8),
.hold_cycle = 0x2,
.wait_cycle = 0xc,
.set_addr = ls1b_nand_set_addr,
};
static const struct ls1x_nand_data ls1c_nand_data = {
.status_field = GENMASK(23, 16),
.op_scope_field = GENMASK(29, 16),
.hold_cycle = 0x2,
.wait_cycle = 0xc,
.set_addr = ls1c_nand_set_addr,
};
static const struct of_device_id ls1x_nand_match[] = {
{
.compatible = "loongson,ls1b-nand-controller",
.data = &ls1b_nand_data,
},
{
.compatible = "loongson,ls1c-nand-controller",
.data = &ls1c_nand_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls1x_nand_match);
static struct platform_driver ls1x_nand_driver = {
.probe = ls1x_nand_probe,
.remove = ls1x_nand_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = ls1x_nand_match,
},
};
module_platform_driver(ls1x_nand_driver);
MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver");
MODULE_LICENSE("GPL");


@@ -1863,7 +1863,12 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
const struct nand_op_instr *instr = NULL;
unsigned int op_id = 0;
unsigned int len = 0;
int ret;
int ret, reg_base;
reg_base = NAND_READ_LOCATION_0;
if (nandc->props->qpic_version2)
reg_base = NAND_READ_LOCATION_LAST_CW_0;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
@@ -1915,14 +1920,17 @@
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
nandc_set_read_loc(chip, 0, 0, 0, len, 1);
if (nandc->props->qpic_version2)
nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
else
nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
if (!nandc->props->qpic_version2) {
qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
nandc->buf_count = len;
nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
config_nand_single_cw_page_read(chip, false, 0);
@@ -2360,6 +2368,7 @@ static const struct qcom_nandc_props ipq806x_nandc_props = {
.supports_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
+.bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
@@ -2367,6 +2376,7 @@ static const struct qcom_nandc_props ipq4019_nandc_props = {
.supports_bam = true,
.nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x0,
+.bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
@@ -2374,6 +2384,7 @@ static const struct qcom_nandc_props ipq8074_nandc_props = {
.supports_bam = true,
.nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x7000,
+.bam_offset = 0x30000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
@@ -2382,6 +2393,7 @@ static const struct qcom_nandc_props sdx55_nandc_props = {
.nandc_part_of_qpic = true,
.qpic_version2 = true,
.dev_cmd_reg_start = 0x7000,
+.bam_offset = 0x30000,
};
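The new ->bam_offset field replaces what was previously a hardcoded BAM
register offset; a hypothetical helper (illustrative names, not the actual
driver code) shows how a BAM base would be derived from it:

static resource_size_t qcom_nandc_bam_base(struct resource *res,
					   const struct qcom_nandc_props *props)
{
	/* BAM registers sit at a fixed offset from the NANDc MMIO base */
	return res->start + props->bam_offset;
}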
/*


@@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (ret)
return ret;
+sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
@@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
if (ret)
return ret;
+sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);


@@ -17,20 +17,20 @@
#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int am_get_eccsize(struct mtd_info *mtd)
{


@@ -14,17 +14,17 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section,


@@ -22,7 +22,7 @@
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
-struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
int ret;
@@ -36,7 +36,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
-struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg,
spinand->scratchbuf);
*spinand->scratchbuf = val;
@@ -362,7 +362,7 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
static int spinand_write_enable_op(struct spinand_device *spinand)
{
-struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -372,7 +372,7 @@ static int spinand_load_page_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
-struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -519,7 +519,7 @@ static int spinand_program_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, &req->pos);
-struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -529,7 +529,7 @@ static int spinand_erase_op(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
unsigned int row = nanddev_pos_to_row(nand, pos);
-struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row);
return spi_mem_exec_op(spinand->spimem, &op);
}
@@ -549,8 +549,8 @@ static int spinand_erase_op(struct spinand_device *spinand,
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
unsigned long poll_delay_us, u8 *s)
{
-struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
-spinand->scratchbuf);
+struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS,
+spinand->scratchbuf);
u8 status;
int ret;
@@ -583,7 +583,7 @@ out:
static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
u8 ndummy, u8 *buf)
{
-struct spi_mem_op op = SPINAND_READID_OP(
+struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP(
naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
int ret;
@@ -596,7 +596,7 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
static int spinand_reset_op(struct spinand_device *spinand)
{
-struct spi_mem_op op = SPINAND_RESET_OP;
+struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP;
int ret;
ret = spi_mem_exec_op(spinand->spimem, &op);


@@ -18,18 +18,18 @@
(CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* OOB spare area map (64 bytes)
@@ -137,8 +137,8 @@ static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
-struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
-struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
u8 status;
int ret;
@@ -199,7 +199,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
-0x7f, 0x7f),
+0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,


@@ -12,18 +12,18 @@
#define SPINAND_MFR_FORESEE 0xCD
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)


@@ -24,44 +24,44 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP_3A(0, 0, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -185,7 +185,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
-struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
spinand->scratchbuf);
int ret;
@@ -228,7 +228,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
u8 status2;
-struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2,
spinand->scratchbuf);
int ret;


@@ -28,18 +28,18 @@ struct macronix_priv {
};
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -148,8 +148,8 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
static int macronix_set_read_retry(struct spinand_device *spinand,
unsigned int retry_mode)
{
-struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
-spinand->scratchbuf);
+struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+spinand->scratchbuf);
*spinand->scratchbuf = retry_mode;
return spi_mem_exec_op(spinand->spimem, &op);


@@ -35,33 +35,33 @@
(CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x4_update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(x1_update_cache_variants,
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -137,7 +137,7 @@ static const struct mtd_ooblayout_ops micron_4_ooblayout = {
static int micron_select_target(struct spinand_device *spinand,
unsigned int target)
{
-struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MICRON_DIE_SELECT_REG,
spinand->scratchbuf);
if (target > 1)
@@ -251,8 +251,8 @@ static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
size_t len)
{
-struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
-struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true);
+struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0);
u8 status;
int ret;


@@ -22,20 +22,20 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,


@@ -17,20 +17,20 @@
#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)


@@ -15,28 +15,28 @@
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_x4_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
/*
* Backward compatibility for 1st generation Serial NAND devices
* which don't support Quad Program Load operation.
*/
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
-struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:


@@ -23,34 +23,50 @@
* "X4" in the core is equivalent to "quad output" in the datasheets.
*/
-static SPINAND_OP_VARIANTS(read_cache_dtr_variants,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
-SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
-SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
+static SPINAND_OP_VARIANTS(read_cache_octal_variants,
+SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ),
+SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 86 * HZ_PER_MHZ),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+static SPINAND_OP_VARIANTS(write_cache_octal_variants,
+SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_8S_OP(0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+static SPINAND_OP_VARIANTS(update_cache_octal_variants,
+SPINAND_PROG_LOAD_1S_8S_8S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants,
+SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
@@ -141,12 +157,47 @@ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.free = w25n02kv_ooblayout_free,
};
+static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+struct mtd_oob_region *region)
+{
+if (section > 7)
+return -ERANGE;
+region->offset = (16 * section) + 12;
+region->length = 4;
+return 0;
+}
+static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+struct mtd_oob_region *region)
+{
+if (section > 7)
+return -ERANGE;
+region->offset = 16 * section;
+region->length = 12;
+/* Extract BBM */
+if (!section) {
+region->offset += 2;
+region->length -= 2;
+}
+return 0;
+}
+static const struct mtd_ooblayout_ops w35n01jw_ooblayout = {
+.ecc = w35n01jw_ooblayout_ecc,
+.free = w35n01jw_ooblayout_free,
+};
static int w25n02kv_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
-struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
+struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -213,7 +264,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
-SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
+SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
@@ -227,6 +278,33 @@ static const struct spinand_info winbond_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
+SPINAND_INFO("W35N01JW", /* 1.8V */
+SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc, 0x21),
+NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 1, 1),
+NAND_ECCREQ(1, 512),
+SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+&write_cache_octal_variants,
+&update_cache_octal_variants),
+0,
+SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+SPINAND_INFO("W35N02JW", /* 1.8V */
+SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
+NAND_MEMORG(1, 4096, 128, 64, 512, 10, 2, 1, 1),
+NAND_ECCREQ(1, 512),
+SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+&write_cache_octal_variants,
+&update_cache_octal_variants),
+0,
+SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
+SPINAND_INFO("W35N04JW", /* 1.8V */
+SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
+NAND_MEMORG(1, 4096, 128, 64, 512, 10, 4, 1, 1),
+NAND_ECCREQ(1, 512),
+SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
+&write_cache_octal_variants,
+&update_cache_octal_variants),
+0,
+SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
/* 2G-bit densities */
SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21),
@@ -242,7 +320,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
NAND_ECCREQ(1, 512),
-SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
+SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
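Worked example (not part of the patch): with the W35N01JW's 128-byte OOB area
split by the w35n01jw_ooblayout callbacks above into eight 16-byte sections,
section 1 resolves to ECC bytes 28..31 and free bytes 16..27. A sketch using
the generic helpers:

static void w35n01jw_layout_demo(struct mtd_info *mtd)
{
	struct mtd_oob_region region;

	if (!mtd_ooblayout_ecc(mtd, 1, &region))
		pr_info("ECC region 1: offset %u, length %u\n",
			region.offset, region.length);	/* 28, 4 */

	if (!mtd_ooblayout_free(mtd, 1, &region))
		pr_info("free region 1: offset %u, length %u\n",
			region.offset, region.length);	/* 16, 12 */
}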


@@ -23,20 +23,20 @@
#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
static SPINAND_OP_VARIANTS(read_cache_variants,
-SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
-SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-SPINAND_PROG_LOAD(true, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
static SPINAND_OP_VARIANTS(update_cache_variants,
-SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-SPINAND_PROG_LOAD(false, 0, NULL, 0));
+SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)


@@ -58,6 +58,31 @@ macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
return 0;
}
+static int
+mx25l3255e_late_init_fixups(struct spi_nor *nor)
+{
+struct spi_nor_flash_parameter *params = nor->params;
+/*
+* SFDP of MX25L3255E is JESD216, which does not include the Quad
+* Enable bit Requirement in BFPT. As a result, during BFPT parsing,
+* the quad_enable method is not set to spi_nor_sr1_bit6_quad_enable.
+* Therefore, it is necessary to correct this setting by late_init.
+*/
+params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+/*
+* In addition, MX25L3255E also supports 1-4-4 page program in 3-byte
+* address mode. However, since the 3-byte address 1-4-4 page program
+* is not defined in SFDP, it needs to be configured in late_init.
+*/
+params->hwcaps.mask |= SNOR_HWCAPS_PP_1_4_4;
+spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_4_4],
+SPINOR_OP_PP_1_4_4, SNOR_PROTO_1_4_4);
+return 0;
+}
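A simplified sketch of how such a hook is expected to be invoked (illustrative
only, not the exact core code): once SFDP parsing has finished, the SPI NOR
core runs the flash-specific ->late_init() fixup, which is what lets the hook
above override whatever BFPT parsing did (or did not) set:

static int spi_nor_run_late_init_fixups(struct spi_nor *nor)
{
	if (nor->info->fixups && nor->info->fixups->late_init)
		return nor->info->fixups->late_init(nor);

	return 0;
}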
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
@@ -67,6 +92,10 @@ static const struct spi_nor_fixups macronix_qpp4b_fixups = {
.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
};
+static const struct spi_nor_fixups mx25l3255e_fixups = {
+.late_init = mx25l3255e_late_init_fixups,
+};
static const struct flash_info macronix_nor_parts[] = {
{
.id = SNOR_ID(0xc2, 0x20, 0x10),
@@ -88,10 +117,8 @@ static const struct flash_info macronix_nor_parts[] = {
.name = "mx25l8005",
.size = SZ_1M,
}, {
+/* MX25L1606E */
.id = SNOR_ID(0xc2, 0x20, 0x15),
-.name = "mx25l1606e",
-.size = SZ_2M,
-.no_sfdp_flags = SECT_4K,
}, {
.id = SNOR_ID(0xc2, 0x20, 0x16),
.name = "mx25l3205d",
@@ -103,29 +130,21 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_8M,
.no_sfdp_flags = SECT_4K,
}, {
+/* MX25L12805D */
.id = SNOR_ID(0xc2, 0x20, 0x18),
-.name = "mx25l12805d",
-.size = SZ_16M,
.flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP,
-.no_sfdp_flags = SECT_4K,
}, {
+/* MX25L25635E, MX25L25645G */
.id = SNOR_ID(0xc2, 0x20, 0x19),
-.name = "mx25l25635e",
-.size = SZ_32M,
-.no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &mx25l25635_fixups
}, {
+/* MX66L51235F */
.id = SNOR_ID(0xc2, 0x20, 0x1a),
-.name = "mx66l51235f",
-.size = SZ_64M,
-.no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
+/* MX66L1G45G */
.id = SNOR_ID(0xc2, 0x20, 0x1b),
-.name = "mx66l1g45g",
-.size = SZ_128M,
-.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixups = &macronix_qpp4b_fixups,
}, {
/* MX66L2G45G */
@@ -167,29 +186,16 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_16M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+/* MX25U51245G */
.id = SNOR_ID(0xc2, 0x25, 0x3a),
-.name = "mx25u51245g",
-.size = SZ_64M,
-.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
-.fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
-.id = SNOR_ID(0xc2, 0x25, 0x3a),
-.name = "mx66u51235f",
-.size = SZ_64M,
-.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
-.fixup_flags = SPI_NOR_4B_OPCODES,
-.fixups = &macronix_qpp4b_fixups,
-}, {
+/* MX66U1G45G */
.id = SNOR_ID(0xc2, 0x25, 0x3b),
.fixups = &macronix_qpp4b_fixups,
}, {
/* MX66U2G45G */
.id = SNOR_ID(0xc2, 0x25, 0x3c),
-.name = "mx66u2g45g",
-.size = SZ_256M,
-.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
-.fixup_flags = SPI_NOR_4B_OPCODES,
.fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x26, 0x18),
@@ -215,15 +221,14 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_4M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+/* MX25UW51245G */
.id = SNOR_ID(0xc2, 0x81, 0x3a),
-.name = "mx25uw51245g",
.n_banks = 4,
.flags = SPI_NOR_RWW,
}, {
+/* MX25L3255E */
.id = SNOR_ID(0xc2, 0x9e, 0x16),
-.name = "mx25l3255e",
-.size = SZ_4M,
-.no_sfdp_flags = SECT_4K,
+.fixups = &mx25l3255e_fixups,
},
/*
* This spares us of adding new flash entries for flashes that can be


@@ -1616,6 +1616,7 @@ static void qcom_spi_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq9574_snandc_props = {
.dev_cmd_reg_start = 0x7000,
+.bam_offset = 0x30000,
.supports_bam = true,
};


@@ -199,9 +199,6 @@
*/
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-/* Returns the NAND register physical address */
-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
((chip)->reg_read_dma + \
@@ -454,6 +451,7 @@ struct qcom_nand_controller {
struct qcom_nandc_props {
u32 ecc_modes;
u32 dev_cmd_reg_start;
+u32 bam_offset;
bool supports_bam;
bool nandc_part_of_qpic;
bool qpic_version2;


@@ -108,7 +108,7 @@ extern void deregister_mtd_parser(struct mtd_part_parser *parser);
deregister_mtd_parser)
int mtd_add_partition(struct mtd_info *master, const char *name,
-long long offset, long long length);
+long long offset, long long length, struct mtd_info **part);
int mtd_del_partition(struct mtd_info *master, int partno);
uint64_t mtd_get_device_size(const struct mtd_info *mtd);
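Hypothetical caller of the updated prototype (illustrative only): the new out
parameter hands back the freshly created partition, so callers no longer need
to look it up afterwards:

static int example_add_boot_part(struct mtd_info *master)
{
	struct mtd_info *part;
	int err;

	err = mtd_add_partition(master, "boot", 0, SZ_1M, &part);
	if (err)
		return err;

	pr_info("created partition %s\n", part->name);
	return 0;
}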


@@ -20,174 +20,207 @@
* Standard SPI NAND flash operations
*/
-#define SPINAND_RESET_OP \
+#define SPINAND_RESET_1S_0_0_OP \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_WR_EN_DIS_OP(enable) \
+#define SPINAND_WR_EN_DIS_1S_0_0_OP(enable) \
SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
+#define SPINAND_READID_1S_1S_1S_OP(naddr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
SPI_MEM_OP_ADDR(naddr, 0, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
-#define SPINAND_SET_FEATURE_OP(reg, valptr) \
+#define SPINAND_SET_FEATURE_1S_1S_1S_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, valptr, 1))
-#define SPINAND_GET_FEATURE_OP(reg, valptr) \
+#define SPINAND_GET_FEATURE_1S_1S_1S_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_IN(1, valptr, 1))
-#define SPINAND_BLK_ERASE_OP(addr) \
+#define SPINAND_BLK_ERASE_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_OP(addr) \
+#define SPINAND_PAGE_READ_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_FROM_CACHE_OP(addr, ndummy, buf, len, ...) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, ...) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1), \
SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0))
-#define SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(addr, ndummy, buf, len) \
-SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
-#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
-#define SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
-#define SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(addr, ndummy, buf, len, freq) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \
SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(addr, ndummy, buf, len, freq) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \
SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
-SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
-SPI_MEM_OP_ADDR(2, addr, 1), \
-SPI_MEM_OP_DUMMY(ndummy, 1), \
-SPI_MEM_OP_DATA_IN(len, buf, 4))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
-SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
-SPI_MEM_OP_ADDR(3, addr, 1), \
-SPI_MEM_OP_DUMMY(ndummy, 1), \
-SPI_MEM_OP_DATA_IN(len, buf, 4))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(addr, ndummy, buf, len, freq) \
-SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
-SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
-SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
-SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
-SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(3, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
SPI_MEM_OP_DATA_IN(len, buf, 2))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(addr, ndummy, buf, len, freq) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \
SPI_MEM_DTR_OP_ADDR(2, addr, 2), \
SPI_MEM_DTR_OP_DUMMY(ndummy, 2), \
SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+SPI_MEM_OP_ADDR(2, addr, 1), \
+SPI_MEM_OP_DUMMY(ndummy, 1), \
+SPI_MEM_OP_DATA_IN(len, buf, 4))
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+SPI_MEM_OP_ADDR(3, addr, 1), \
+SPI_MEM_OP_DUMMY(ndummy, 1), \
+SPI_MEM_OP_DATA_IN(len, buf, 4))
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(addr, ndummy, buf, len, freq) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
+SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+SPI_MEM_OP_MAX_FREQ(freq))
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(3, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
SPI_MEM_OP_DATA_IN(len, buf, 4))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(addr, ndummy, buf, len, freq) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \
SPI_MEM_DTR_OP_ADDR(2, addr, 4), \
SPI_MEM_DTR_OP_DUMMY(ndummy, 4), \
SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PROG_EXEC_OP(addr) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(addr, ndummy, buf, len, freq) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(0x8b, 1), \
+SPI_MEM_OP_ADDR(2, addr, 1), \
+SPI_MEM_OP_DUMMY(ndummy, 1), \
+SPI_MEM_OP_DATA_IN(len, buf, 8), \
+SPI_MEM_OP_MAX_FREQ(freq))
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(addr, ndummy, buf, len, freq) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(0xcb, 1), \
+SPI_MEM_OP_ADDR(2, addr, 8), \
+SPI_MEM_OP_DUMMY(ndummy, 8), \
+SPI_MEM_OP_DATA_IN(len, buf, 8), \
+SPI_MEM_OP_MAX_FREQ(freq))
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(addr, ndummy, buf, len, freq) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(0x9d, 1), \
+SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+SPI_MEM_DTR_OP_DATA_IN(len, buf, 8), \
+SPI_MEM_OP_MAX_FREQ(freq))
+#define SPINAND_PROG_EXEC_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
+#define SPINAND_PROG_LOAD_1S_1S_1S_OP(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 1))
-#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
+#define SPINAND_PROG_LOAD_1S_1S_4S_OP(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 4))
+#define SPINAND_PROG_LOAD_1S_1S_8S_OP(addr, buf, len) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(0x82, 1), \
+SPI_MEM_OP_ADDR(2, addr, 1), \
+SPI_MEM_OP_NO_DUMMY, \
+SPI_MEM_OP_DATA_OUT(len, buf, 8))
+#define SPINAND_PROG_LOAD_1S_8S_8S_OP(reset, addr, buf, len) \
+SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0xc2 : 0xc4, 1), \
+SPI_MEM_OP_ADDR(2, addr, 8), \
+SPI_MEM_OP_NO_DUMMY, \
+SPI_MEM_OP_DATA_OUT(len, buf, 8))
/**
* Standard SPI NAND flash commands
*/
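The new suffixes follow the SPI NOR naming convention: the three groups give
the bus width of the command, address and data phases, and S vs D
distinguishes single from double transfer rate, so 1S_1S_4S means a 1-wire SDR
command, 1-wire SDR address and 4-wire SDR data phase. A minimal usage sketch
(buf and len are placeholders):

static int demo_read_from_cache_x4(struct spinand_device *spinand,
				   void *buf, size_t len)
{
	struct spi_mem_op op =
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, buf, len);

	return spi_mem_exec_op(spinand->spimem, &op);
}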