111 files changed, 6107 insertions(+), 1500 deletions(-)
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml b/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml index 28ff8c581837..9d764e654e1d 100644 --- a/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml +++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml @@ -9,9 +9,6 @@ title: Freescale General-Purpose Media Interface (GPMI) binding maintainers: - Han Xu <han.xu@nxp.com> -allOf: - - $ref: "nand-controller.yaml" - description: | The GPMI nand controller provides an interface to control the NAND flash chips. The device tree may optionally contain sub-nodes @@ -58,22 +55,10 @@ properties: clocks: minItems: 1 maxItems: 5 - items: - - description: SoC gpmi io clock - - description: SoC gpmi apb clock - - description: SoC gpmi bch clock - - description: SoC gpmi bch apb clock - - description: SoC per1 bch clock clock-names: minItems: 1 maxItems: 5 - items: - - const: gpmi_io - - const: gpmi_apb - - const: gpmi_bch - - const: gpmi_bch_apb - - const: per1_bch fsl,use-minimum-ecc: type: boolean @@ -107,6 +92,67 @@ required: unevaluatedProperties: false +allOf: + - $ref: "nand-controller.yaml" + + - if: + properties: + compatible: + contains: + enum: + - fsl,imx23-gpmi-nand + - fsl,imx28-gpmi-nand + then: + properties: + clocks: + items: + - description: SoC gpmi io clock + clock-names: + items: + - const: gpmi_io + + - if: + properties: + compatible: + contains: + enum: + - fsl,imx6q-gpmi-nand + - fsl,imx6sx-gpmi-nand + then: + properties: + clocks: + items: + - description: SoC gpmi io clock + - description: SoC gpmi apb clock + - description: SoC gpmi bch clock + - description: SoC gpmi bch apb clock + - description: SoC per1 bch clock + clock-names: + items: + - const: gpmi_io + - const: gpmi_apb + - const: gpmi_bch + - const: gpmi_bch_apb + - const: per1_bch + + - if: + properties: + compatible: + contains: + const: fsl,imx7d-gpmi-nand + then: + properties: + clocks: + items: + - description: SoC gpmi io clock + - description: SoC gpmi bch apb clock + clock-names: + minItems: 2 + maxItems: 2 + items: + - const: gpmi_io + - const: gpmi_bch_apb + examples: - | nand-controller@8000c000 { diff --git a/Documentation/devicetree/bindings/mtd/intel,lgm-nand.yaml b/Documentation/devicetree/bindings/mtd/intel,lgm-nand.yaml new file mode 100644 index 000000000000..30e0c66ab0eb --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/intel,lgm-nand.yaml @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/intel,lgm-nand.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Intel LGM SoC NAND Controller Device Tree Bindings + +allOf: + - $ref: "nand-controller.yaml" + +maintainers: + - Ramuthevar Vadivel Murugan <vadivel.muruganx.ramuthevar@linux.intel.com> + +properties: + compatible: + const: intel,lgm-nand + + reg: + maxItems: 6 + + reg-names: + items: + - const: ebunand + - const: hsnand + - const: nand_cs0 + - const: nand_cs1 + - const: addr_sel0 + - const: addr_sel1 + + clocks: + maxItems: 1 + + dmas: + maxItems: 2 + + dma-names: + items: + - const: tx + - const: rx + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + +patternProperties: + "^nand@[a-f0-9]+$": + type: object + properties: + reg: + minimum: 0 + maximum: 7 + + nand-ecc-mode: true + + nand-ecc-algo: + const: hw + + additionalProperties: false + +required: + - compatible + - reg + - reg-names + - clocks + - dmas + - dma-names + - "#address-cells" + - "#size-cells" + +additionalProperties: false + +examples: + - | 
+ nand-controller@e0f00000 { + compatible = "intel,lgm-nand"; + reg = <0xe0f00000 0x100>, + <0xe1000000 0x300>, + <0xe1400000 0x8000>, + <0xe1c00000 0x1000>, + <0x17400000 0x4>, + <0x17c00000 0x4>; + reg-names = "ebunand", "hsnand", "nand_cs0", "nand_cs1", + "addr_sel0", "addr_sel1"; + clocks = <&cgu0 125>; + dmas = <&dma0 8>, <&dma0 9>; + dma-names = "tx", "rx"; + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + reg = <0>; + nand-ecc-mode = "hw"; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml index b29050fd7470..d0e422f4b3e0 100644 --- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml +++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml @@ -46,15 +46,6 @@ patternProperties: description: Contains the native Ready/Busy IDs. - nand-ecc-mode: - description: - Desired ECC engine, either hardware (most of the time - embedded in the NAND controller) or software correction - (Linux will handle the calculations). soft_bch is deprecated - and should be replaced by soft and nand-ecc-algo. - $ref: /schemas/types.yaml#/definitions/string - enum: [none, soft, hw, hw_syndrome, hw_oob_first, on-die] - nand-ecc-engine: allOf: - $ref: /schemas/types.yaml#/definitions/phandle @@ -171,7 +162,7 @@ examples: nand@0 { reg = <0>; - nand-ecc-mode = "soft"; + nand-use-soft-ecc-engine; nand-ecc-algo = "bch"; /* controller specific properties */ diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt index 5c2fba4b30fe..5647913d8837 100644 --- a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt +++ b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt @@ -6,8 +6,12 @@ Required properties: SoC and it uses ADM DMA * "qcom,ipq4019-nand" - for QPIC NAND controller v1.4.0 being used in IPQ4019 SoC and it uses BAM DMA + * "qcom,ipq6018-nand" - for QPIC NAND controller v1.5.0 being used in + IPQ6018 SoC and it uses BAM DMA * "qcom,ipq8074-nand" - for QPIC NAND controller v1.5.0 being used in IPQ8074 SoC and it uses BAM DMA + * "qcom,sdx55-nand" - for QPIC NAND controller v2.0.0 being used in + SDX55 SoC and it uses BAM DMA - reg: MMIO address range - clocks: must contain core clock and always on clock diff --git a/Documentation/devicetree/bindings/mtd/rockchip,nand-controller.yaml b/Documentation/devicetree/bindings/mtd/rockchip,nand-controller.yaml new file mode 100644 index 000000000000..0922536b1811 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/rockchip,nand-controller.yaml @@ -0,0 +1,161 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/rockchip,nand-controller.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip SoCs NAND FLASH Controller (NFC) + +allOf: + - $ref: "nand-controller.yaml#" + +maintainers: + - Heiko Stuebner <heiko@sntech.de> + +properties: + compatible: + oneOf: + - const: rockchip,px30-nfc + - const: rockchip,rk2928-nfc + - const: rockchip,rv1108-nfc + - items: + - const: rockchip,rk3036-nfc + - const: rockchip,rk2928-nfc + - items: + - const: rockchip,rk3308-nfc + - const: rockchip,rv1108-nfc + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + items: + - description: Bus Clock + - description: Module Clock + + clock-names: + minItems: 1 + items: + - const: ahb + - const: nfc + + assigned-clocks: + maxItems: 1 + + assigned-clock-rates: + maxItems: 1 + + power-domains: 
+ maxItems: 1 + +patternProperties: + "^nand@[0-7]$": + type: object + properties: + reg: + minimum: 0 + maximum: 7 + + nand-ecc-mode: + const: hw + + nand-ecc-step-size: + const: 1024 + + nand-ecc-strength: + enum: [16, 24, 40, 60, 70] + description: | + The ECC configurations that can be supported are as follows. + NFC v600 ECC 16, 24, 40, 60 + RK2928, RK3066, RK3188 + + NFC v622 ECC 16, 24, 40, 60 + RK3036, RK3128 + + NFC v800 ECC 16 + RK3308, RV1108 + + NFC v900 ECC 16, 40, 60, 70 + RK3326, PX30 + + nand-bus-width: + const: 8 + + rockchip,boot-blks: + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 2 + default: 16 + description: + The NFC driver need this information to select ECC + algorithms supported by the boot ROM. + Only used in combination with 'nand-is-boot-medium'. + + rockchip,boot-ecc-strength: + enum: [16, 24, 40, 60, 70] + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + description: | + If specified it indicates that a different BCH/ECC setting is + supported by the boot ROM. + NFC v600 ECC 16, 24 + RK2928, RK3066, RK3188 + + NFC v622 ECC 16, 24, 40, 60 + RK3036, RK3128 + + NFC v800 ECC 16 + RK3308, RV1108 + + NFC v900 ECC 16, 70 + RK3326, PX30 + + Only used in combination with 'nand-is-boot-medium'. + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/rk3308-cru.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + nfc: nand-controller@ff4b0000 { + compatible = "rockchip,rk3308-nfc", + "rockchip,rv1108-nfc"; + reg = <0xff4b0000 0x4000>; + interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_NANDC>, <&cru SCLK_NANDC>; + clock-names = "ahb", "nfc"; + assigned-clocks = <&clks SCLK_NANDC>; + assigned-clock-rates = <150000000>; + + pinctrl-0 = <&flash_ale &flash_bus8 &flash_cle &flash_csn0 + &flash_rdn &flash_rdy &flash_wrn>; + pinctrl-names = "default"; + + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + reg = <0>; + label = "rk-nand"; + nand-bus-width = <8>; + nand-ecc-mode = "hw"; + nand-ecc-step-size = <1024>; + nand-ecc-strength = <16>; + nand-is-boot-medium; + rockchip,boot-blks = <8>; + rockchip,boot-ecc-strength = <16>; + }; + }; + +... diff --git a/Documentation/driver-api/mtd/nand_ecc.rst b/Documentation/driver-api/mtd/nand_ecc.rst index e8d3c53a5056..74347c14a70b 100644 --- a/Documentation/driver-api/mtd/nand_ecc.rst +++ b/Documentation/driver-api/mtd/nand_ecc.rst @@ -5,7 +5,7 @@ NAND Error-correction Code Introduction ============ -Having looked at the linux mtd/nand driver and more specific at nand_ecc.c +Having looked at the linux mtd/nand Hamming software ECC engine driver I felt there was room for optimisation. I bashed the code for a few hours performing tricks like table lookup removing superfluous code etc. After that the speed was increased by 35-40%. diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst index 0bf8d6ec3f54..ce77e024c4f1 100644 --- a/Documentation/driver-api/mtdnand.rst +++ b/Documentation/driver-api/mtdnand.rst @@ -972,9 +972,6 @@ hints" for an explanation. .. kernel-doc:: drivers/mtd/nand/raw/nand_base.c :export: -.. 
kernel-doc:: drivers/mtd/nand/raw/nand_ecc.c - :export: - Internal Functions Provided =========================== diff --git a/arch/arm/mach-s3c/common-smdk-s3c24xx.c b/arch/arm/mach-s3c/common-smdk-s3c24xx.c index f860d8bcba0e..6d124bbd384c 100644 --- a/arch/arm/mach-s3c/common-smdk-s3c24xx.c +++ b/arch/arm/mach-s3c/common-smdk-s3c24xx.c @@ -20,7 +20,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include <linux/io.h> diff --git a/arch/arm/mach-s3c/mach-anubis.c b/arch/arm/mach-s3c/mach-anubis.c index 90e3fd98a3ac..04147cc0adcc 100644 --- a/arch/arm/mach-s3c/mach-anubis.c +++ b/arch/arm/mach-s3c/mach-anubis.c @@ -34,7 +34,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include <net/ax88796.h> diff --git a/arch/arm/mach-s3c/mach-at2440evb.c b/arch/arm/mach-s3c/mach-at2440evb.c index 5fa49d4e2650..c6a5a51d84aa 100644 --- a/arch/arm/mach-s3c/mach-at2440evb.c +++ b/arch/arm/mach-s3c/mach-at2440evb.c @@ -35,7 +35,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include "devs.h" diff --git a/arch/arm/mach-s3c/mach-bast.c b/arch/arm/mach-s3c/mach-bast.c index 328f5d9ae9f9..27e8d5950228 100644 --- a/arch/arm/mach-s3c/mach-bast.c +++ b/arch/arm/mach-s3c/mach-bast.c @@ -24,7 +24,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include <linux/platform_data/asoc-s3c24xx_simtec.h> diff --git a/arch/arm/mach-s3c/mach-gta02.c b/arch/arm/mach-s3c/mach-gta02.c index 3c75c7d112ea..aec8b451c016 100644 --- a/arch/arm/mach-s3c/mach-gta02.c +++ b/arch/arm/mach-s3c/mach-gta02.c @@ -37,7 +37,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> diff --git a/arch/arm/mach-s3c/mach-jive.c b/arch/arm/mach-s3c/mach-jive.c index 2a29c3eca559..0785638a9069 100644 --- a/arch/arm/mach-s3c/mach-jive.c +++ b/arch/arm/mach-s3c/mach-jive.c @@ -40,7 +40,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include "gpio-cfg.h" diff --git a/arch/arm/mach-s3c/mach-mini2440.c b/arch/arm/mach-s3c/mach-mini2440.c index dc22ab839b95..4100905dfbd0 100644 --- a/arch/arm/mach-s3c/mach-mini2440.c +++ b/arch/arm/mach-s3c/mach-mini2440.c @@ -44,7 +44,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include "gpio-cfg.h" diff --git a/arch/arm/mach-s3c/mach-osiris.c b/arch/arm/mach-s3c/mach-osiris.c index 81744ca67d1d..3aefb9d22340 100644 --- a/arch/arm/mach-s3c/mach-osiris.c +++ b/arch/arm/mach-s3c/mach-osiris.c @@ -33,7 +33,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include "cpu.h" diff --git a/arch/arm/mach-s3c/mach-qt2410.c b/arch/arm/mach-s3c/mach-qt2410.c index 151e8e373d40..f88b961798fd 100644 --- 
a/arch/arm/mach-s3c/mach-qt2410.c +++ b/arch/arm/mach-s3c/mach-qt2410.c @@ -21,7 +21,7 @@ #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-s3c/mach-rx3715.c b/arch/arm/mach-s3c/mach-rx3715.c index a03662a47b38..9fd2d9dc3689 100644 --- a/arch/arm/mach-s3c/mach-rx3715.c +++ b/arch/arm/mach-s3c/mach-rx3715.c @@ -22,7 +22,7 @@ #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-s3c/mach-vstms.c b/arch/arm/mach-s3c/mach-vstms.c index 05f19f5ffabb..ec024af7b0ce 100644 --- a/arch/arm/mach-s3c/mach-vstms.c +++ b/arch/arm/mach-s3c/mach-vstms.c @@ -16,7 +16,7 @@ #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/partitions.h> #include <linux/memblock.h> diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 6ddab796216d..8bab6f8718a9 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -152,6 +152,7 @@ config SM_FTL tristate "SmartMedia/xD new translation layer" depends on BLOCK select MTD_BLKDEVS + select MTD_NAND_CORE select MTD_NAND_ECC_SW_HAMMING help This enables EXPERIMENTAL R/W support for SmartMedia/xD diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index a030792115bc..5b0ae5ddad74 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -816,7 +816,7 @@ static void doc_read_page_finish(struct docg3 *docg3) /** * calc_block_sector - Calculate blocks, pages and ofs. - + * * @from: offset in flash * @block0: first plane block index calculated * @block1: second plane block index calculated @@ -1783,10 +1783,9 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) /** * doc_probe_device - Check if a device is available - * @base: the io space where the device is probed + * @cascade: the cascade of chips this devices will belong to * @floor: the floor of the probed device * @dev: the device - * @cascade: the cascade of chips this devices will belong to * * Checks whether a device at the specified IO range, and floor is available. * diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 087b5e86d1bf..cfd170946ba4 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -1,19 +1,19 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de> * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de> * * Usage: * * one commend line parameter per device, each in the form: - * phram=<name>,<start>,<len> + * phram=<name>,<start>,<len>[,<erasesize>] * <name> may be up to 63 characters. - * <start> and <len> can be octal, decimal or hexadecimal. If followed + * <start>, <len>, and <erasesize> can be octal, decimal or hexadecimal. If followed * by "ki", "Mi" or "Gi", the numbers will be interpreted as kilo, mega or - * gigabytes. + * gigabytes. <erasesize> is optional and defaults to PAGE_SIZE. 
* * Example: - * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi + * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi,64Ki */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -26,6 +26,7 @@ #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> +#include <asm/div64.h> struct phram_mtd_list { struct mtd_info mtd; @@ -88,7 +89,7 @@ static void unregister_devices(void) } } -static int register_device(char *name, phys_addr_t start, size_t len) +static int register_device(char *name, phys_addr_t start, size_t len, uint32_t erasesize) { struct phram_mtd_list *new; int ret = -ENOMEM; @@ -115,7 +116,7 @@ static int register_device(char *name, phys_addr_t start, size_t len) new->mtd._write = phram_write; new->mtd.owner = THIS_MODULE; new->mtd.type = MTD_RAM; - new->mtd.erasesize = PAGE_SIZE; + new->mtd.erasesize = erasesize; new->mtd.writesize = 1; ret = -EAGAIN; @@ -204,22 +205,23 @@ static inline void kill_final_newline(char *str) static int phram_init_called; /* * This shall contain the module parameter if any. It is of the form: - * - phram=<device>,<address>,<size> for module case - * - phram.phram=<device>,<address>,<size> for built-in case - * We leave 64 bytes for the device name, 20 for the address and 20 for the - * size. - * Example: phram.phram=rootfs,0xa0000000,512Mi + * - phram=<device>,<address>,<size>[,<erasesize>] for module case + * - phram.phram=<device>,<address>,<size>[,<erasesize>] for built-in case + * We leave 64 bytes for the device name, 20 for the address , 20 for the + * size and 20 for the erasesize. + * Example: phram.phram=rootfs,0xa0000000,512Mi,65536 */ -static char phram_paramline[64 + 20 + 20]; +static char phram_paramline[64 + 20 + 20 + 20]; #endif static int phram_setup(const char *val) { - char buf[64 + 20 + 20], *str = buf; - char *token[3]; + char buf[64 + 20 + 20 + 20], *str = buf; + char *token[4]; char *name; uint64_t start; uint64_t len; + uint64_t erasesize = PAGE_SIZE; int i, ret; if (strnlen(val, sizeof(buf)) >= sizeof(buf)) @@ -228,7 +230,7 @@ static int phram_setup(const char *val) strcpy(str, val); kill_final_newline(str); - for (i = 0; i < 3; i++) + for (i = 0; i < 4; i++) token[i] = strsep(&str, ","); if (str) @@ -253,11 +255,25 @@ static int phram_setup(const char *val) goto error; } - ret = register_device(name, start, len); + if (token[3]) { + ret = parse_num64(&erasesize, token[3]); + if (ret) { + parse_err("illegal erasesize\n"); + goto error; + } + } + + if (len == 0 || erasesize == 0 || erasesize > len + || erasesize > UINT_MAX || do_div(len, (uint32_t)erasesize) != 0) { + parse_err("illegal erasesize or len\n"); + goto error; + } + + ret = register_device(name, start, len, (uint32_t)erasesize); if (ret) goto error; - pr_info("%s device: %#llx at %#llx\n", name, len, start); + pr_info("%s device: %#llx at %#llx for erasesize %#llx\n", name, len, start, erasesize); return 0; error: @@ -298,7 +314,7 @@ static int phram_param_call(const char *val, const struct kernel_param *kp) } module_param_call(phram, phram_param_call, NULL, NULL, 0200); -MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\""); +MODULE_PARM_DESC(phram, "Memory region to map. 
\"phram=<name>,<start>,<length>[,<erasesize>]\""); static int __init init_phram(void) diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 0b757d9ba2f6..6950a8764815 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -126,6 +126,7 @@ out: } /** + * powernv_flash_read * @mtd: the device * @from: the offset to read from * @len: the number of bytes to read @@ -142,6 +143,7 @@ static int powernv_flash_read(struct mtd_info *mtd, loff_t from, size_t len, } /** + * powernv_flash_write * @mtd: the device * @to: the offset to write to * @len: the number of bytes to write @@ -158,6 +160,7 @@ static int powernv_flash_write(struct mtd_info *mtd, loff_t to, size_t len, } /** + * powernv_flash_erase * @mtd: the device * @erase: the erase info * Returns 0 if erase successful or -ERRNO if an error occurred @@ -176,7 +179,7 @@ static int powernv_flash_erase(struct mtd_info *mtd, struct erase_info *erase) /** * powernv_flash_set_driver_info - Fill the mtd_info structure and docg3 - * structure @pdev: The platform device + * @dev: The device structure * @mtd: The structure to fill */ static int powernv_flash_set_driver_info(struct device *dev, diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c index 27cfe1c63a2e..a35450002284 100644 --- a/drivers/mtd/maps/physmap-bt1-rom.c +++ b/drivers/mtd/maps/physmap-bt1-rom.c @@ -31,12 +31,12 @@ static map_word __xipram bt1_rom_map_read(struct map_info *map, unsigned long ofs) { void __iomem *src = map->virt + ofs; - unsigned long shift; + unsigned int shift; map_word ret; u32 data; /* Read data within offset dword. */ - shift = (unsigned long)src & 0x3; + shift = (uintptr_t)src & 0x3; data = readl_relaxed(src - shift); if (!shift) { ret.x[0] = data; @@ -60,7 +60,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map, ssize_t len) { void __iomem *src = map->virt + from; - ssize_t shift, chunk; + unsigned int shift, chunk; u32 data; if (len <= 0 || from >= map->size) @@ -75,7 +75,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map, * up into the next three stages: unaligned head, aligned body, * unaligned tail. */ - shift = (ssize_t)src & 0x3; + shift = (uintptr_t)src & 0x3; if (shift) { chunk = min_t(ssize_t, 4 - shift, len); data = readl_relaxed(src - shift); diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 311742c78155..0bec7c791d17 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c @@ -177,8 +177,12 @@ static int platram_probe(struct platform_device *pdev) err = mtd_device_parse_register(info->mtd, pdata->probes, NULL, pdata->partitions, pdata->nr_partitions); - if (!err) - dev_info(&pdev->dev, "registered mtd device\n"); + if (err) { + dev_err(&pdev->dev, "failed to register mtd device\n"); + goto exit_free; + } + + dev_info(&pdev->dev, "registered mtd device\n"); if (pdata->nr_partitions) { /* add the whole device. 
*/ @@ -186,10 +190,11 @@ static int platram_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "failed to register the entire device\n"); + goto exit_free; } } - return err; + return 0; exit_free: platram_remove(pdev); diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index b40f46a43fc6..323035d4f2d0 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -881,7 +881,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) if (copy_from_user(&offs, argp, sizeof(loff_t))) return -EFAULT; return mtd_block_isbad(mtd, offs); - break; } case MEMSETBADBLOCK: @@ -891,7 +890,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) if (copy_from_user(&offs, argp, sizeof(loff_t))) return -EFAULT; return mtd_block_markbad(mtd, offs); - break; } case OTPSELECT: diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index e9e163ae9d86..2d6423d89a17 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -993,6 +993,8 @@ int __get_mtd_device(struct mtd_info *mtd) } } + master->usecount++; + while (mtd->parent) { mtd->usecount++; mtd = mtd->parent; @@ -1059,6 +1061,8 @@ void __put_mtd_device(struct mtd_info *mtd) mtd = mtd->parent; } + master->usecount--; + if (master->_put_device) master->_put_device(master); @@ -1578,7 +1582,7 @@ static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte, * ECC byte * @mtd: mtd info structure * @eccbyte: the byte we are searching for - * @sectionp: pointer where the section id will be stored + * @section: pointer where the section id will be stored * @oobregion: OOB region information * * Works like mtd_ooblayout_find_region() except it searches for a specific ECC diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index c3575b686f79..12ca4f19cb14 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(mtd_add_partition); /** * __mtd_del_partition - delete MTD partition * - * @priv: MTD structure to be deleted + * @mtd: MTD structure to be deleted * * This function must be called with the partitions mutex locked. */ diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 4a9aed4f0104..b40455234cbd 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -13,7 +13,38 @@ menu "ECC engine support" config MTD_NAND_ECC bool - depends on MTD_NAND_CORE + select MTD_NAND_CORE + +config MTD_NAND_ECC_SW_HAMMING + bool "Software Hamming ECC engine" + default y if MTD_RAW_NAND + select MTD_NAND_ECC + help + This enables support for software Hamming error + correction. This correction can correct up to 1 bit error + per chunk and detect up to 2 bit errors. While it used to be + widely used with old parts, newer NAND chips usually require + more strength correction and in this case BCH or RS will be + preferred. + +config MTD_NAND_ECC_SW_HAMMING_SMC + bool "NAND ECC Smart Media byte order" + depends on MTD_NAND_ECC_SW_HAMMING + default n + help + Software ECC according to the Smart Media Specification. + The original Linux implementation had byte 0 and 1 swapped. + +config MTD_NAND_ECC_SW_BCH + bool "Software BCH ECC engine" + select BCH + select MTD_NAND_ECC + default n + help + This enables support for software BCH error correction. Binary BCH + codes are more powerful and cpu intensive than traditional Hamming + ECC codes. They are used with NAND devices requiring more than 1 bit + of error correction. 
endmenu diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 981372953b56..1c0b46960eb1 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -8,3 +8,5 @@ obj-y += raw/ obj-y += spi/ nandcore-$(CONFIG_MTD_NAND_ECC) += ecc.o +nandcore-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += ecc-sw-hamming.o +nandcore-$(CONFIG_MTD_NAND_ECC_SW_BCH) += ecc-sw-bch.o diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c index b6de955ac8bf..5e13a03d2b32 100644 --- a/drivers/mtd/nand/core.c +++ b/drivers/mtd/nand/core.c @@ -208,6 +208,130 @@ int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len) EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks); /** + * nanddev_get_ecc_engine() - Find and get a suitable ECC engine + * @nand: NAND device + */ +static int nanddev_get_ecc_engine(struct nand_device *nand) +{ + int engine_type; + + /* Read the user desires in terms of ECC engine/configuration */ + of_get_nand_ecc_user_config(nand); + + engine_type = nand->ecc.user_conf.engine_type; + if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID) + engine_type = nand->ecc.defaults.engine_type; + + switch (engine_type) { + case NAND_ECC_ENGINE_TYPE_NONE: + return 0; + case NAND_ECC_ENGINE_TYPE_SOFT: + nand->ecc.engine = nand_ecc_get_sw_engine(nand); + break; + case NAND_ECC_ENGINE_TYPE_ON_DIE: + nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand); + break; + case NAND_ECC_ENGINE_TYPE_ON_HOST: + pr_err("On-host hardware ECC engines not supported yet\n"); + break; + default: + pr_err("Missing ECC engine type\n"); + } + + if (!nand->ecc.engine) + return -EINVAL; + + return 0; +} + +/** + * nanddev_put_ecc_engine() - Dettach and put the in-use ECC engine + * @nand: NAND device + */ +static int nanddev_put_ecc_engine(struct nand_device *nand) +{ + switch (nand->ecc.ctx.conf.engine_type) { + case NAND_ECC_ENGINE_TYPE_ON_HOST: + pr_err("On-host hardware ECC engines not supported yet\n"); + break; + case NAND_ECC_ENGINE_TYPE_NONE: + case NAND_ECC_ENGINE_TYPE_SOFT: + case NAND_ECC_ENGINE_TYPE_ON_DIE: + default: + break; + } + + return 0; +} + +/** + * nanddev_find_ecc_configuration() - Find a suitable ECC configuration + * @nand: NAND device + */ +static int nanddev_find_ecc_configuration(struct nand_device *nand) +{ + int ret; + + if (!nand->ecc.engine) + return -ENOTSUPP; + + ret = nand_ecc_init_ctx(nand); + if (ret) + return ret; + + if (!nand_ecc_is_strong_enough(nand)) + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", + nand->mtd.name); + + return 0; +} + +/** + * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip + * @nand: NAND device + */ +int nanddev_ecc_engine_init(struct nand_device *nand) +{ + int ret; + + /* Look for the ECC engine to use */ + ret = nanddev_get_ecc_engine(nand); + if (ret) { + pr_err("No ECC engine found\n"); + return ret; + } + + /* No ECC engine requested */ + if (!nand->ecc.engine) + return 0; + + /* Configure the engine: balance user input and chip requirements */ + ret = nanddev_find_ecc_configuration(nand); + if (ret) { + pr_err("No suitable ECC configuration\n"); + nanddev_put_ecc_engine(nand); + + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init); + +/** + * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations + * @nand: NAND device + */ +void nanddev_ecc_engine_cleanup(struct nand_device *nand) +{ + if (nand->ecc.engine) + nand_ecc_cleanup_ctx(nand); + + nanddev_put_ecc_engine(nand); +} 
+EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup); + +/** * nanddev_init() - Initialize a NAND device * @nand: NAND device * @ops: NAND device operations diff --git a/drivers/mtd/nand/ecc-sw-bch.c b/drivers/mtd/nand/ecc-sw-bch.c new file mode 100644 index 000000000000..0a0ac11d5725 --- /dev/null +++ b/drivers/mtd/nand/ecc-sw-bch.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file provides ECC correction for more than 1 bit per block of data, + * using binary BCH codes. It relies on the generic BCH library lib/bch.c. + * + * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> + */ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/bitops.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand-ecc-sw-bch.h> + +/** + * nand_ecc_sw_bch_calculate - Calculate the ECC corresponding to a data block + * @nand: NAND device + * @buf: Input buffer with raw data + * @code: Output buffer with ECC + */ +int nand_ecc_sw_bch_calculate(struct nand_device *nand, + const unsigned char *buf, unsigned char *code) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int i; + + memset(code, 0, engine_conf->code_size); + bch_encode(engine_conf->bch, buf, nand->ecc.ctx.conf.step_size, code); + + /* apply mask so that an erased page is a valid codeword */ + for (i = 0; i < engine_conf->code_size; i++) + code[i] ^= engine_conf->eccmask[i]; + + return 0; +} +EXPORT_SYMBOL(nand_ecc_sw_bch_calculate); + +/** + * nand_ecc_sw_bch_correct - Detect, correct and report bit error(s) + * @nand: NAND device + * @buf: Raw data read from the chip + * @read_ecc: ECC bytes from the chip + * @calc_ecc: ECC calculated from the raw data + * + * Detect and correct bit errors for a data block. + */ +int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int step_size = nand->ecc.ctx.conf.step_size; + unsigned int *errloc = engine_conf->errloc; + int i, count; + + count = bch_decode(engine_conf->bch, NULL, step_size, read_ecc, + calc_ecc, NULL, errloc); + if (count > 0) { + for (i = 0; i < count; i++) { + if (errloc[i] < (step_size * 8)) + /* The error is in the data area: correct it */ + buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); + + /* Otherwise the error is in the ECC area: nothing to do */ + pr_debug("%s: corrected bitflip %u\n", __func__, + errloc[i]); + } + } else if (count < 0) { + pr_err("ECC unrecoverable error\n"); + count = -EBADMSG; + } + + return count; +} +EXPORT_SYMBOL(nand_ecc_sw_bch_correct); + +/** + * nand_ecc_sw_bch_cleanup - Cleanup software BCH ECC resources + * @nand: NAND device + */ +static void nand_ecc_sw_bch_cleanup(struct nand_device *nand) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + + bch_free(engine_conf->bch); + kfree(engine_conf->errloc); + kfree(engine_conf->eccmask); +} + +/** + * nand_ecc_sw_bch_init - Initialize software BCH ECC engine + * @nand: NAND device + * + * Returns: a pointer to a new NAND BCH control structure, or NULL upon failure + * + * Initialize NAND BCH error correction. @nand.ecc parameters 'step_size' and + * 'bytes' are used to compute the following BCH parameters: + * m, the Galois field order + * t, the error correction capability + * 'bytes' should be equal to the number of bytes required to store m * t + * bits, where m is such that 2^m - 1 > step_size * 8. 
+ * + * Example: to configure 4 bit correction per 512 bytes, you should pass + * step_size = 512 (thus, m = 13 is the smallest integer such that 2^m - 1 > 512 * 8) + * bytes = 7 (7 bytes are required to store m * t = 13 * 4 = 52 bits) + */ +static int nand_ecc_sw_bch_init(struct nand_device *nand) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int eccsize = nand->ecc.ctx.conf.step_size; + unsigned int eccbytes = engine_conf->code_size; + unsigned int m, t, i; + unsigned char *erased_page; + int ret; + + m = fls(1 + (8 * eccsize)); + t = (eccbytes * 8) / m; + + engine_conf->bch = bch_init(m, t, 0, false); + if (!engine_conf->bch) + return -EINVAL; + + engine_conf->eccmask = kzalloc(eccbytes, GFP_KERNEL); + engine_conf->errloc = kmalloc_array(t, sizeof(*engine_conf->errloc), + GFP_KERNEL); + if (!engine_conf->eccmask || !engine_conf->errloc) { + ret = -ENOMEM; + goto cleanup; + } + + /* Compute and store the inverted ECC of an erased step */ + erased_page = kmalloc(eccsize, GFP_KERNEL); + if (!erased_page) { + ret = -ENOMEM; + goto cleanup; + } + + memset(erased_page, 0xff, eccsize); + bch_encode(engine_conf->bch, erased_page, eccsize, + engine_conf->eccmask); + kfree(erased_page); + + for (i = 0; i < eccbytes; i++) + engine_conf->eccmask[i] ^= 0xff; + + /* Verify that the number of code bytes has the expected value */ + if (engine_conf->bch->ecc_bytes != eccbytes) { + pr_err("Invalid number of ECC bytes: %u, expected: %u\n", + eccbytes, engine_conf->bch->ecc_bytes); + ret = -EINVAL; + goto cleanup; + } + + /* Sanity checks */ + if (8 * (eccsize + eccbytes) >= (1 << m)) { + pr_err("ECC step size is too large (%u)\n", eccsize); + ret = -EINVAL; + goto cleanup; + } + + return 0; + +cleanup: + nand_ecc_sw_bch_cleanup(nand); + + return ret; +} + +int nand_ecc_sw_bch_init_ctx(struct nand_device *nand) +{ + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + struct nand_ecc_sw_bch_conf *engine_conf; + unsigned int code_size = 0, nsteps; + int ret; + + /* Only large page NAND chips may use BCH */ + if (mtd->oobsize < 64) { + pr_err("BCH cannot be used with small page NAND chips\n"); + return -EINVAL; + } + + if (!mtd->ooblayout) + mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout()); + + conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + conf->algo = NAND_ECC_ALGO_BCH; + conf->step_size = nand->ecc.user_conf.step_size; + conf->strength = nand->ecc.user_conf.strength; + + /* + * Board driver should supply ECC size and ECC strength + * values to select how many bits are correctable. + * Otherwise, default to 512 bytes for large page devices and 256 for + * small page devices. 
+ */ + if (!conf->step_size) { + if (mtd->oobsize >= 64) + conf->step_size = 512; + else + conf->step_size = 256; + + conf->strength = 4; + } + + nsteps = mtd->writesize / conf->step_size; + + /* Maximize */ + if (nand->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) { + conf->step_size = 1024; + nsteps = mtd->writesize / conf->step_size; + /* Reserve 2 bytes for the BBM */ + code_size = (mtd->oobsize - 2) / nsteps; + conf->strength = code_size * 8 / fls(8 * conf->step_size); + } + + if (!code_size) + code_size = DIV_ROUND_UP(conf->strength * + fls(8 * conf->step_size), 8); + + if (!conf->strength) + conf->strength = (code_size * 8) / fls(8 * conf->step_size); + + if (!code_size && !conf->strength) { + pr_err("Missing ECC parameters\n"); + return -EINVAL; + } + + engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); + if (!engine_conf) + return -ENOMEM; + + ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand); + if (ret) + goto free_engine_conf; + + engine_conf->code_size = code_size; + engine_conf->nsteps = nsteps; + engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL); + engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL); + if (!engine_conf->calc_buf || !engine_conf->code_buf) { + ret = -ENOMEM; + goto free_bufs; + } + + nand->ecc.ctx.priv = engine_conf; + nand->ecc.ctx.total = nsteps * code_size; + + ret = nand_ecc_sw_bch_init(nand); + if (ret) + goto free_bufs; + + /* Verify the layout validity */ + if (mtd_ooblayout_count_eccbytes(mtd) != + engine_conf->nsteps * engine_conf->code_size) { + pr_err("Invalid ECC layout\n"); + ret = -EINVAL; + goto cleanup_bch_ctx; + } + + return 0; + +cleanup_bch_ctx: + nand_ecc_sw_bch_cleanup(nand); +free_bufs: + nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx); + kfree(engine_conf->calc_buf); + kfree(engine_conf->code_buf); +free_engine_conf: + kfree(engine_conf); + + return ret; +} +EXPORT_SYMBOL(nand_ecc_sw_bch_init_ctx); + +void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + + if (engine_conf) { + nand_ecc_sw_bch_cleanup(nand); + nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx); + kfree(engine_conf->calc_buf); + kfree(engine_conf->code_buf); + kfree(engine_conf); + } +} +EXPORT_SYMBOL(nand_ecc_sw_bch_cleanup_ctx); + +static int nand_ecc_sw_bch_prepare_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int eccbytes = engine_conf->code_size; + int eccsteps = engine_conf->nsteps; + int total = nand->ecc.ctx.total; + u8 *ecccalc = engine_conf->calc_buf; + const u8 *data; + int i; + + /* Nothing to do for a raw operation */ + if (req->mode == MTD_OPS_RAW) + return 0; + + /* This engine does not provide BBM/free OOB bytes protection */ + if (!req->datalen) + return 0; + + nand_ecc_tweak_req(&engine_conf->req_ctx, req); + + /* No more preparation for page read */ + if (req->type == NAND_PAGE_READ) + return 0; + + /* Preparation for page write: derive the ECC bytes and place them */ + for (i = 0, data = req->databuf.out; + eccsteps; + eccsteps--, i += eccbytes, data += eccsize) + nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]); + + return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out, + 0, total); +} + +static int nand_ecc_sw_bch_finish_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct nand_ecc_sw_bch_conf *engine_conf = 
nand->ecc.ctx.priv; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int total = nand->ecc.ctx.total; + int eccbytes = engine_conf->code_size; + int eccsteps = engine_conf->nsteps; + u8 *ecccalc = engine_conf->calc_buf; + u8 *ecccode = engine_conf->code_buf; + unsigned int max_bitflips = 0; + u8 *data = req->databuf.in; + int i, ret; + + /* Nothing to do for a raw operation */ + if (req->mode == MTD_OPS_RAW) + return 0; + + /* This engine does not provide BBM/free OOB bytes protection */ + if (!req->datalen) + return 0; + + /* No more preparation for page write */ + if (req->type == NAND_PAGE_WRITE) { + nand_ecc_restore_req(&engine_conf->req_ctx, req); + return 0; + } + + /* Finish a page read: retrieve the (raw) ECC bytes*/ + ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0, + total); + if (ret) + return ret; + + /* Calculate the ECC bytes */ + for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize) + nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]); + + /* Finish a page read: compare and correct */ + for (eccsteps = engine_conf->nsteps, i = 0, data = req->databuf.in; + eccsteps; + eccsteps--, i += eccbytes, data += eccsize) { + int stat = nand_ecc_sw_bch_correct(nand, data, + &ecccode[i], + &ecccalc[i]); + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + nand_ecc_restore_req(&engine_conf->req_ctx, req); + + return max_bitflips; +} + +static struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = { + .init_ctx = nand_ecc_sw_bch_init_ctx, + .cleanup_ctx = nand_ecc_sw_bch_cleanup_ctx, + .prepare_io_req = nand_ecc_sw_bch_prepare_io_req, + .finish_io_req = nand_ecc_sw_bch_finish_io_req, +}; + +static struct nand_ecc_engine nand_ecc_sw_bch_engine = { + .ops = &nand_ecc_sw_bch_engine_ops, +}; + +struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void) +{ + return &nand_ecc_sw_bch_engine; +} +EXPORT_SYMBOL(nand_ecc_sw_bch_get_engine); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); +MODULE_DESCRIPTION("NAND software BCH ECC support"); diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/ecc-sw-hamming.c index b6a46b1b7781..6334d1d7735d 100644 --- a/drivers/mtd/nand/raw/nand_ecc.c +++ b/drivers/mtd/nand/ecc-sw-hamming.c @@ -17,9 +17,9 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> +#include <linux/slab.h> #include <asm/byteorder.h> /* @@ -75,7 +75,7 @@ static const char bitsperbyte[256] = { * addressbits is a lookup table to filter out the bits from the xor-ed * ECC data that identify the faulty location. 
* this is only used for repairing parity - * see the comments in nand_correct_data for more details + * see the comments in nand_ecc_sw_hamming_correct for more details */ static const char addressbits[256] = { 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, @@ -112,30 +112,21 @@ static const char addressbits[256] = { 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f }; -/** - * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte - * block - * @buf: input buffer with raw data - * @eccsize: data bytes per ECC step (256 or 512) - * @code: output buffer with ECC - * @sm_order: Smart Media byte ordering - */ -void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, - unsigned char *code, bool sm_order) +int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size, + unsigned char *code, bool sm_order) { + const u32 *bp = (uint32_t *)buf; + const u32 eccsize_mult = (step_size == 256) ? 1 : 2; + /* current value in buffer */ + u32 cur; + /* rp0..rp17 are the various accumulated parities (per byte) */ + u32 rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7, rp8, rp9, rp10, rp11, rp12, + rp13, rp14, rp15, rp16, rp17; + /* Cumulative parity for all data */ + u32 par; + /* Cumulative parity at the end of the loop (rp12, rp14, rp16) */ + u32 tmppar; int i; - const uint32_t *bp = (uint32_t *)buf; - /* 256 or 512 bytes/ecc */ - const uint32_t eccsize_mult = eccsize >> 8; - uint32_t cur; /* current value in buffer */ - /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ - uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; - uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16; - uint32_t rp17; - uint32_t par; /* the cumulative parity for all data */ - uint32_t tmppar; /* the cumulative parity for this iteration; - for rp12, rp14 and rp16 at the end of the - loop */ par = 0; rp4 = 0; @@ -145,6 +136,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, rp12 = 0; rp14 = 0; rp16 = 0; + rp17 = 0; /* * The loop is unrolled a number of times; @@ -356,45 +348,35 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, (invparity[par & 0x55] << 2) | (invparity[rp17] << 1) | (invparity[rp16] << 0); + + return 0; } -EXPORT_SYMBOL(__nand_calculate_ecc); +EXPORT_SYMBOL(ecc_sw_hamming_calculate); /** - * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte - * block - * @chip: NAND chip object - * @buf: input buffer with raw data - * @code: output buffer with ECC + * nand_ecc_sw_hamming_calculate - Calculate 3-byte ECC for 256/512-byte block + * @nand: NAND device + * @buf: Input buffer with raw data + * @code: Output buffer with ECC */ -int nand_calculate_ecc(struct nand_chip *chip, const unsigned char *buf, - unsigned char *code) +int nand_ecc_sw_hamming_calculate(struct nand_device *nand, + const unsigned char *buf, unsigned char *code) { - bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER; + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int step_size = nand->ecc.ctx.conf.step_size; - __nand_calculate_ecc(buf, chip->ecc.size, code, sm_order); - - return 0; + return ecc_sw_hamming_calculate(buf, step_size, code, + engine_conf->sm_order); } -EXPORT_SYMBOL(nand_calculate_ecc); +EXPORT_SYMBOL(nand_ecc_sw_hamming_calculate); -/** - * __nand_correct_data - [NAND Interface] Detect and correct bit error(s) - * @buf: raw data read from the chip - * @read_ecc: ECC from the chip - * @calc_ecc: the ECC calculated from raw data - * @eccsize: 
data bytes per ECC step (256 or 512) - * @sm_order: Smart Media byte order - * - * Detect and correct a 1 bit error for eccsize byte block - */ -int __nand_correct_data(unsigned char *buf, - unsigned char *read_ecc, unsigned char *calc_ecc, - unsigned int eccsize, bool sm_order) +int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc, + unsigned char *calc_ecc, unsigned int step_size, + bool sm_order) { + const u32 eccsize_mult = step_size >> 8; unsigned char b0, b1, b2, bit_addr; unsigned int byte_addr; - /* 256 or 512 bytes/ecc */ - const uint32_t eccsize_mult = eccsize >> 8; /* * b0 to b2 indicate which bit is faulty (if any) @@ -458,27 +440,220 @@ int __nand_correct_data(unsigned char *buf, pr_err("%s: uncorrectable ECC error\n", __func__); return -EBADMSG; } -EXPORT_SYMBOL(__nand_correct_data); +EXPORT_SYMBOL(ecc_sw_hamming_correct); /** - * nand_correct_data - [NAND Interface] Detect and correct bit error(s) - * @chip: NAND chip object - * @buf: raw data read from the chip - * @read_ecc: ECC from the chip - * @calc_ecc: the ECC calculated from raw data + * nand_ecc_sw_hamming_correct - Detect and correct bit error(s) + * @nand: NAND device + * @buf: Raw data read from the chip + * @read_ecc: ECC bytes read from the chip + * @calc_ecc: ECC calculated from the raw data * - * Detect and correct a 1 bit error for 256/512 byte block + * Detect and correct up to 1 bit error per 256/512-byte block. */ -int nand_correct_data(struct nand_chip *chip, unsigned char *buf, - unsigned char *read_ecc, unsigned char *calc_ecc) +int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc) +{ + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int step_size = nand->ecc.ctx.conf.step_size; + + return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc, step_size, + engine_conf->sm_order); +} +EXPORT_SYMBOL(nand_ecc_sw_hamming_correct); + +int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand) +{ + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct nand_ecc_sw_hamming_conf *engine_conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int ret; + + if (!mtd->ooblayout) { + switch (mtd->oobsize) { + case 8: + case 16: + mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout()); + break; + case 64: + case 128: + mtd_set_ooblayout(mtd, + nand_get_large_page_hamming_ooblayout()); + break; + default: + return -ENOTSUPP; + } + } + + conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + conf->algo = NAND_ECC_ALGO_HAMMING; + conf->step_size = nand->ecc.user_conf.step_size; + conf->strength = 1; + + /* Use the strongest configuration by default */ + if (conf->step_size != 256 && conf->step_size != 512) + conf->step_size = 256; + + engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); + if (!engine_conf) + return -ENOMEM; + + ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand); + if (ret) + goto free_engine_conf; + + engine_conf->code_size = 3; + engine_conf->nsteps = mtd->writesize / conf->step_size; + engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL); + engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL); + if (!engine_conf->calc_buf || !engine_conf->code_buf) { + ret = -ENOMEM; + goto free_bufs; + } + + nand->ecc.ctx.priv = engine_conf; + nand->ecc.ctx.total = engine_conf->nsteps * engine_conf->code_size; + + return 0; + +free_bufs: + nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx); + kfree(engine_conf->calc_buf); + kfree(engine_conf->code_buf); +free_engine_conf: + 
kfree(engine_conf); + + return ret; +} +EXPORT_SYMBOL(nand_ecc_sw_hamming_init_ctx); + +void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand) +{ + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + + if (engine_conf) { + nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx); + kfree(engine_conf->calc_buf); + kfree(engine_conf->code_buf); + kfree(engine_conf); + } +} +EXPORT_SYMBOL(nand_ecc_sw_hamming_cleanup_ctx); + +static int nand_ecc_sw_hamming_prepare_io_req(struct nand_device *nand, + struct nand_page_io_req *req) { - bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER; + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int eccbytes = engine_conf->code_size; + int eccsteps = engine_conf->nsteps; + int total = nand->ecc.ctx.total; + u8 *ecccalc = engine_conf->calc_buf; + const u8 *data; + int i; + + /* Nothing to do for a raw operation */ + if (req->mode == MTD_OPS_RAW) + return 0; + + /* This engine does not provide BBM/free OOB bytes protection */ + if (!req->datalen) + return 0; - return __nand_correct_data(buf, read_ecc, calc_ecc, chip->ecc.size, - sm_order); + nand_ecc_tweak_req(&engine_conf->req_ctx, req); + + /* No more preparation for page read */ + if (req->type == NAND_PAGE_READ) + return 0; + + /* Preparation for page write: derive the ECC bytes and place them */ + for (i = 0, data = req->databuf.out; + eccsteps; + eccsteps--, i += eccbytes, data += eccsize) + nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]); + + return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out, + 0, total); +} + +static int nand_ecc_sw_hamming_finish_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int total = nand->ecc.ctx.total; + int eccbytes = engine_conf->code_size; + int eccsteps = engine_conf->nsteps; + u8 *ecccalc = engine_conf->calc_buf; + u8 *ecccode = engine_conf->code_buf; + unsigned int max_bitflips = 0; + u8 *data = req->databuf.in; + int i, ret; + + /* Nothing to do for a raw operation */ + if (req->mode == MTD_OPS_RAW) + return 0; + + /* This engine does not provide BBM/free OOB bytes protection */ + if (!req->datalen) + return 0; + + /* No more preparation for page write */ + if (req->type == NAND_PAGE_WRITE) { + nand_ecc_restore_req(&engine_conf->req_ctx, req); + return 0; + } + + /* Finish a page read: retrieve the (raw) ECC bytes*/ + ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0, + total); + if (ret) + return ret; + + /* Calculate the ECC bytes */ + for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize) + nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]); + + /* Finish a page read: compare and correct */ + for (eccsteps = engine_conf->nsteps, i = 0, data = req->databuf.in; + eccsteps; + eccsteps--, i += eccbytes, data += eccsize) { + int stat = nand_ecc_sw_hamming_correct(nand, data, + &ecccode[i], + &ecccalc[i]); + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + nand_ecc_restore_req(&engine_conf->req_ctx, req); + + return max_bitflips; +} + +static struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = { + .init_ctx = nand_ecc_sw_hamming_init_ctx, + .cleanup_ctx = 
nand_ecc_sw_hamming_cleanup_ctx, + .prepare_io_req = nand_ecc_sw_hamming_prepare_io_req, + .finish_io_req = nand_ecc_sw_hamming_finish_io_req, +}; + +static struct nand_ecc_engine nand_ecc_sw_hamming_engine = { + .ops = &nand_ecc_sw_hamming_engine_ops, +}; + +struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void) +{ + return &nand_ecc_sw_hamming_engine; } -EXPORT_SYMBOL(nand_correct_data); +EXPORT_SYMBOL(nand_ecc_sw_hamming_get_engine); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>"); -MODULE_DESCRIPTION("Generic NAND ECC support"); +MODULE_DESCRIPTION("NAND software Hamming ECC support"); diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c index 4a56e6c0da67..6c43dfda01d4 100644 --- a/drivers/mtd/nand/ecc.c +++ b/drivers/mtd/nand/ecc.c @@ -95,6 +95,7 @@ #include <linux/module.h> #include <linux/mtd/nand.h> +#include <linux/slab.h> /** * nand_ecc_init_ctx - Init the ECC engine context @@ -104,7 +105,7 @@ */ int nand_ecc_init_ctx(struct nand_device *nand) { - if (!nand->ecc.engine->ops->init_ctx) + if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx) return 0; return nand->ecc.engine->ops->init_ctx(nand); @@ -117,7 +118,7 @@ EXPORT_SYMBOL(nand_ecc_init_ctx); */ void nand_ecc_cleanup_ctx(struct nand_device *nand) { - if (nand->ecc.engine->ops->cleanup_ctx) + if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx) nand->ecc.engine->ops->cleanup_ctx(nand); } EXPORT_SYMBOL(nand_ecc_cleanup_ctx); @@ -130,7 +131,7 @@ EXPORT_SYMBOL(nand_ecc_cleanup_ctx); int nand_ecc_prepare_io_req(struct nand_device *nand, struct nand_page_io_req *req) { - if (!nand->ecc.engine->ops->prepare_io_req) + if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req) return 0; return nand->ecc.engine->ops->prepare_io_req(nand, req); @@ -145,7 +146,7 @@ EXPORT_SYMBOL(nand_ecc_prepare_io_req); int nand_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req) { - if (!nand->ecc.engine->ops->finish_io_req) + if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req) return 0; return nand->ecc.engine->ops->finish_io_req(nand, req); @@ -479,6 +480,137 @@ bool nand_ecc_is_strong_enough(struct nand_device *nand) } EXPORT_SYMBOL(nand_ecc_is_strong_enough); +/* ECC engine driver internal helpers */ +int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_device *nand) +{ + unsigned int total_buffer_size; + + ctx->nand = nand; + + /* Let the user decide the exact length of each buffer */ + if (!ctx->page_buffer_size) + ctx->page_buffer_size = nanddev_page_size(nand); + if (!ctx->oob_buffer_size) + ctx->oob_buffer_size = nanddev_per_page_oobsize(nand); + + total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size; + + ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL); + if (!ctx->spare_databuf) + return -ENOMEM; + + ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size; + + return 0; +} +EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking); + +void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx) +{ + kfree(ctx->spare_databuf); +} +EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking); + +/* + * Ensure data and OOB area is fully read/written otherwise the correction might + * not work as expected. 
+ */ +void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_page_io_req *req) +{ + struct nand_device *nand = ctx->nand; + struct nand_page_io_req *orig, *tweak; + + /* Save the original request */ + ctx->orig_req = *req; + ctx->bounce_data = false; + ctx->bounce_oob = false; + orig = &ctx->orig_req; + tweak = req; + + /* Ensure the request covers the entire page */ + if (orig->datalen < nanddev_page_size(nand)) { + ctx->bounce_data = true; + tweak->dataoffs = 0; + tweak->datalen = nanddev_page_size(nand); + tweak->databuf.in = ctx->spare_databuf; + memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size); + } + + if (orig->ooblen < nanddev_per_page_oobsize(nand)) { + ctx->bounce_oob = true; + tweak->ooboffs = 0; + tweak->ooblen = nanddev_per_page_oobsize(nand); + tweak->oobbuf.in = ctx->spare_oobbuf; + memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size); + } + + /* Copy the data that must be written in the bounce buffers, if needed */ + if (orig->type == NAND_PAGE_WRITE) { + if (ctx->bounce_data) + memcpy((void *)tweak->databuf.out + orig->dataoffs, + orig->databuf.out, orig->datalen); + + if (ctx->bounce_oob) + memcpy((void *)tweak->oobbuf.out + orig->ooboffs, + orig->oobbuf.out, orig->ooblen); + } +} +EXPORT_SYMBOL_GPL(nand_ecc_tweak_req); + +void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_page_io_req *req) +{ + struct nand_page_io_req *orig, *tweak; + + orig = &ctx->orig_req; + tweak = req; + + /* Restore the data read from the bounce buffers, if needed */ + if (orig->type == NAND_PAGE_READ) { + if (ctx->bounce_data) + memcpy(orig->databuf.in, + tweak->databuf.in + orig->dataoffs, + orig->datalen); + + if (ctx->bounce_oob) + memcpy(orig->oobbuf.in, + tweak->oobbuf.in + orig->ooboffs, + orig->ooblen); + } + + /* Ensure the original request is restored */ + *req = *orig; +} +EXPORT_SYMBOL_GPL(nand_ecc_restore_req); + +struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand) +{ + unsigned int algo = nand->ecc.user_conf.algo; + + if (algo == NAND_ECC_ALGO_UNKNOWN) + algo = nand->ecc.defaults.algo; + + switch (algo) { + case NAND_ECC_ALGO_HAMMING: + return nand_ecc_sw_hamming_get_engine(); + case NAND_ECC_ALGO_BCH: + return nand_ecc_sw_bch_get_engine(); + default: + break; + } + + return NULL; +} +EXPORT_SYMBOL(nand_ecc_get_sw_engine); + +struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand) +{ + return nand->ecc.ondie_engine; +} +EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>"); MODULE_DESCRIPTION("Generic ECC engine"); diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index 188b8061e1f7..a9fdea26ea46 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -132,7 +132,7 @@ static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = { .free = onenand_ooblayout_128_free, }; -/** +/* * onenand_oob_32_64 - oob info for large (2KB) page */ static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section, @@ -192,7 +192,7 @@ static const unsigned char ffchars[] = { /** * onenand_readw - [OneNAND Interface] Read OneNAND register - * @param addr address to read + * @addr: address to read * * Read OneNAND register */ @@ -203,8 +203,8 @@ static unsigned short onenand_readw(void __iomem *addr) /** * onenand_writew - [OneNAND Interface] Write OneNAND register with value - * @param value value to write - * @param addr address to
write + * @value: value to write + * @addr: address to write * * Write OneNAND register with value */ @@ -215,8 +215,8 @@ static void onenand_writew(unsigned short value, void __iomem *addr) /** * onenand_block_address - [DEFAULT] Get block address - * @param this onenand chip data structure - * @param block the block + * @this: onenand chip data structure + * @block: the block * @return translated block address if DDP, otherwise same * * Setup Start Address 1 Register (F100h) @@ -232,8 +232,8 @@ static int onenand_block_address(struct onenand_chip *this, int block) /** * onenand_bufferram_address - [DEFAULT] Get bufferram address - * @param this onenand chip data structure - * @param block the block + * @this: onenand chip data structure + * @block: the block * @return set DBS value if DDP, otherwise 0 * * Setup Start Address 2 Register (F101h) for DDP @@ -249,8 +249,8 @@ static int onenand_bufferram_address(struct onenand_chip *this, int block) /** * onenand_page_address - [DEFAULT] Get page address - * @param page the page address - * @param sector the sector address + * @page: the page address + * @sector: the sector address * @return combined page and sector address * * Setup Start Address 8 Register (F107h) @@ -268,10 +268,10 @@ static int onenand_page_address(int page, int sector) /** * onenand_buffer_address - [DEFAULT] Get buffer address - * @param dataram1 DataRAM index - * @param sectors the sector address - * @param count the number of sectors - * @return the start buffer value + * @dataram1: DataRAM index + * @sectors: the sector address + * @count: the number of sectors + * Return: the start buffer value * * Setup Start Buffer Register (F200h) */ @@ -295,8 +295,8 @@ static int onenand_buffer_address(int dataram1, int sectors, int count) /** * flexonenand_block- For given address return block number - * @param this - OneNAND device structure - * @param addr - Address for which block number is needed + * @this: - OneNAND device structure + * @addr: - Address for which block number is needed */ static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr) { @@ -359,7 +359,7 @@ EXPORT_SYMBOL(onenand_addr); /** * onenand_get_density - [DEFAULT] Get OneNAND density - * @param dev_id OneNAND device ID + * @dev_id: OneNAND device ID * * Get OneNAND density from device ID */ @@ -371,8 +371,8 @@ static inline int onenand_get_density(int dev_id) /** * flexonenand_region - [Flex-OneNAND] Return erase region of addr - * @param mtd MTD device structure - * @param addr address whose erase region needs to be identified + * @mtd: MTD device structure + * @addr: address whose erase region needs to be identified */ int flexonenand_region(struct mtd_info *mtd, loff_t addr) { @@ -387,10 +387,10 @@ EXPORT_SYMBOL(flexonenand_region); /** * onenand_command - [DEFAULT] Send command to OneNAND device - * @param mtd MTD device structure - * @param cmd the command to be sent - * @param addr offset to read from or write to - * @param len number of bytes to read or write + * @mtd: MTD device structure + * @cmd: the command to be sent + * @addr: offset to read from or write to + * @len: number of bytes to read or write * * Send command to OneNAND device. 
This function is used for middle/large page * devices (1KB/2KB Bytes per page) @@ -519,7 +519,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le /** * onenand_read_ecc - return ecc status - * @param this onenand chip structure + * @this: onenand chip structure */ static inline int onenand_read_ecc(struct onenand_chip *this) { @@ -543,8 +543,8 @@ static inline int onenand_read_ecc(struct onenand_chip *this) /** * onenand_wait - [DEFAULT] wait until the command is done - * @param mtd MTD device structure - * @param state state to select the max. timeout value + * @mtd: MTD device structure + * @state: state to select the max. timeout value * * Wait for command done. This applies to all OneNAND command * Read can take up to 30us, erase up to 2ms and program up to 350us @@ -625,8 +625,8 @@ static int onenand_wait(struct mtd_info *mtd, int state) /* * onenand_interrupt - [DEFAULT] onenand interrupt handler - * @param irq onenand interrupt number - * @param dev_id interrupt data + * @irq: onenand interrupt number + * @dev_id: interrupt data * * complete the work */ @@ -643,8 +643,8 @@ static irqreturn_t onenand_interrupt(int irq, void *data) /* * onenand_interrupt_wait - [DEFAULT] wait until the command is done - * @param mtd MTD device structure - * @param state state to select the max. timeout value + * @mtd: MTD device structure + * @state: state to select the max. timeout value * * Wait for command done. */ @@ -659,8 +659,8 @@ static int onenand_interrupt_wait(struct mtd_info *mtd, int state) /* * onenand_try_interrupt_wait - [DEFAULT] try interrupt wait - * @param mtd MTD device structure - * @param state state to select the max. timeout value + * @mtd: MTD device structure + * @state: state to select the max. timeout value * * Try interrupt based wait (It is used one-time) */ @@ -689,7 +689,7 @@ static int onenand_try_interrupt_wait(struct mtd_info *mtd, int state) /* * onenand_setup_wait - [OneNAND Interface] setup onenand wait method - * @param mtd MTD device structure + * @mtd: MTD device structure * * There's two method to wait onenand work * 1. polling - read interrupt status register @@ -724,8 +724,8 @@ static void onenand_setup_wait(struct mtd_info *mtd) /** * onenand_bufferram_offset - [DEFAULT] BufferRAM offset - * @param mtd MTD data structure - * @param area BufferRAM area + * @mtd: MTD data structure + * @area: BufferRAM area * @return offset given area * * Return BufferRAM offset given area @@ -747,11 +747,11 @@ static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area) /** * onenand_read_bufferram - [OneNAND Interface] Read the bufferram area - * @param mtd MTD data structure - * @param area BufferRAM area - * @param buffer the databuffer to put/get data - * @param offset offset to read from or write to - * @param count number of bytes to read/write + * @mtd: MTD data structure + * @area: BufferRAM area + * @buffer: the databuffer to put/get data + * @offset: offset to read from or write to + * @count: number of bytes to read/write * * Read the BufferRAM area */ @@ -783,11 +783,11 @@ static int onenand_read_bufferram(struct mtd_info *mtd, int area, /** * onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. 
Burst mode - * @param mtd MTD data structure - * @param area BufferRAM area - * @param buffer the databuffer to put/get data - * @param offset offset to read from or write to - * @param count number of bytes to read/write + * @mtd: MTD data structure + * @area: BufferRAM area + * @buffer: the databuffer to put/get data + * @offset: offset to read from or write to + * @count: number of bytes to read/write * * Read the BufferRAM area with Sync. Burst Mode */ @@ -823,11 +823,11 @@ static int onenand_sync_read_bufferram(struct mtd_info *mtd, int area, /** * onenand_write_bufferram - [OneNAND Interface] Write the bufferram area - * @param mtd MTD data structure - * @param area BufferRAM area - * @param buffer the databuffer to put/get data - * @param offset offset to read from or write to - * @param count number of bytes to read/write + * @mtd: MTD data structure + * @area: BufferRAM area + * @buffer: the databuffer to put/get data + * @offset: offset to read from or write to + * @count: number of bytes to read/write * * Write the BufferRAM area */ @@ -864,8 +864,8 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area, /** * onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode - * @param mtd MTD data structure - * @param addr address to check + * @mtd: MTD data structure + * @addr: address to check * @return blockpage address * * Get blockpage address at 2x program mode @@ -888,8 +888,8 @@ static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr) /** * onenand_check_bufferram - [GENERIC] Check BufferRAM information - * @param mtd MTD data structure - * @param addr address to check + * @mtd: MTD data structure + * @addr: address to check * @return 1 if there are valid data, otherwise 0 * * Check bufferram if there is data we required @@ -930,9 +930,9 @@ static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr) /** * onenand_update_bufferram - [GENERIC] Update BufferRAM information - * @param mtd MTD data structure - * @param addr address to update - * @param valid valid flag + * @mtd: MTD data structure + * @addr: address to update + * @valid: valid flag * * Update BufferRAM information */ @@ -963,9 +963,9 @@ static void onenand_update_bufferram(struct mtd_info *mtd, loff_t addr, /** * onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information - * @param mtd MTD data structure - * @param addr start address to invalidate - * @param len length to invalidate + * @mtd: MTD data structure + * @addr: start address to invalidate + * @len: length to invalidate * * Invalidate BufferRAM information */ @@ -986,8 +986,8 @@ static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr, /** * onenand_get_device - [GENERIC] Get chip for selected access - * @param mtd MTD device structure - * @param new_state the state which is requested + * @mtd: MTD device structure + * @new_state: the state which is requested * * Get the device and lock it for exclusive access */ @@ -1024,7 +1024,7 @@ static int onenand_get_device(struct mtd_info *mtd, int new_state) /** * onenand_release_device - [GENERIC] release chip - * @param mtd MTD device structure + * @mtd: MTD device structure * * Deselect, release chip lock and wake up anyone waiting on the device */ @@ -1043,10 +1043,10 @@ static void onenand_release_device(struct mtd_info *mtd) /** * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer - * @param mtd MTD device structure - * @param buf destination address - * @param column oob offset to read from - * @param thislen 
oob length to read + * @mtd: MTD device structure + * @buf: destination address + * @column: oob offset to read from + * @thislen: oob length to read */ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column, int thislen) @@ -1061,9 +1061,9 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col /** * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data - * @param mtd MTD device structure - * @param addr address to recover - * @param status return value from onenand_wait / onenand_bbt_wait + * @mtd: MTD device structure + * @addr: address to recover + * @status: return value from onenand_wait / onenand_bbt_wait * * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has * lower page address and MSB page has higher page address in paired pages. @@ -1104,9 +1104,9 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status) /** * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band - * @param mtd MTD device structure - * @param from offset to read from - * @param ops: oob operation description structure + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure * * MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram. * So, read-while-load is not present. @@ -1206,9 +1206,9 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, /** * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band - * @param mtd MTD device structure - * @param from offset to read from - * @param ops: oob operation description structure + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure * * OneNAND read main and/or out-of-band data */ @@ -1335,9 +1335,9 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, /** * onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band - * @param mtd MTD device structure - * @param from offset to read from - * @param ops: oob operation description structure + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure * * OneNAND read out-of-band data from the spare area */ @@ -1430,10 +1430,10 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from, /** * onenand_read_oob - [MTD Interface] Read main and/or out-of-band - * @param mtd: MTD device structure - * @param from: offset to read from - * @param ops: oob operation description structure - + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure + * * Read main and/or out-of-band */ static int onenand_read_oob(struct mtd_info *mtd, loff_t from, @@ -1466,8 +1466,8 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from, /** * onenand_bbt_wait - [DEFAULT] wait until the command is done - * @param mtd MTD device structure - * @param state state to select the max. timeout value + * @mtd: MTD device structure + * @state: state to select the max. timeout value * * Wait for command done. 
*/ @@ -1517,9 +1517,9 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state) /** * onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan - * @param mtd MTD device structure - * @param from offset to read from - * @param ops oob operation description structure + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure * * OneNAND read out-of-band data from the spare area for bbt scan */ @@ -1594,9 +1594,9 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE /** * onenand_verify_oob - [GENERIC] verify the oob contents after a write - * @param mtd MTD device structure - * @param buf the databuffer to verify - * @param to offset to read from + * @mtd: MTD device structure + * @buf: the databuffer to verify + * @to: offset to read from */ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to) { @@ -1622,10 +1622,10 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to /** * onenand_verify - [GENERIC] verify the chip contents after a write - * @param mtd MTD device structure - * @param buf the databuffer to verify - * @param addr offset to read from - * @param len number of bytes to read and compare + * @mtd: MTD device structure + * @buf: the databuffer to verify + * @addr: offset to read from + * @len: number of bytes to read and compare */ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len) { @@ -1684,11 +1684,11 @@ static void onenand_panic_wait(struct mtd_info *mtd) /** * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context - * @param mtd MTD device structure - * @param to offset to write to - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of written bytes - * @param buf the data to write + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write * * Write with ECC */ @@ -1762,11 +1762,11 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, /** * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer - * @param mtd MTD device structure - * @param oob_buf oob buffer - * @param buf source address - * @param column oob offset to write to - * @param thislen oob length to write + * @mtd: MTD device structure + * @oob_buf: oob buffer + * @buf: source address + * @column: oob offset to write to + * @thislen: oob length to write */ static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf, const u_char *buf, int column, int thislen) @@ -1776,9 +1776,9 @@ static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf, /** * onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band - * @param mtd MTD device structure - * @param to offset to write to - * @param ops oob operation description structure + * @mtd: MTD device structure + * @to: offset to write to + * @ops: oob operation description structure * * Write main and/or oob with ECC */ @@ -1957,12 +1957,9 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, /** * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band - * @param mtd MTD device structure - * @param to offset to write to - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of written bytes - * @param buf the data to 
write - * @param mode operation mode + * @mtd: MTD device structure + * @to: offset to write to + * @ops: oob operation description structure * * OneNAND write out-of-band */ @@ -2070,9 +2067,9 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to, /** * onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band - * @param mtd: MTD device structure - * @param to: offset to write - * @param ops: oob operation description structure + * @mtd: MTD device structure + * @to: offset to write + * @ops: oob operation description structure */ static int onenand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) @@ -2101,9 +2098,9 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to, /** * onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad - * @param mtd MTD device structure - * @param ofs offset from device start - * @param allowbbt 1, if its allowed to access the bbt area + * @mtd: MTD device structure + * @ofs: offset from device start + * @allowbbt: 1, if its allowed to access the bbt area * * Check, if the block is bad. Either by reading the bad block table or * calling of the scan function. @@ -2144,9 +2141,9 @@ static int onenand_multiblock_erase_verify(struct mtd_info *mtd, /** * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase - * @param mtd MTD device structure - * @param instr erase instruction - * @param region erase region + * @mtd: MTD device structure + * @instr: erase instruction + * @block_size: block size * * Erase one or more blocks up to 64 block at a time */ @@ -2254,10 +2251,10 @@ static int onenand_multiblock_erase(struct mtd_info *mtd, /** * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase - * @param mtd MTD device structure - * @param instr erase instruction - * @param region erase region - * @param block_size erase block size + * @mtd: MTD device structure + * @instr: erase instruction + * @region: erase region + * @block_size: erase block size * * Erase one or more blocks one block at a time */ @@ -2326,8 +2323,8 @@ static int onenand_block_by_block_erase(struct mtd_info *mtd, /** * onenand_erase - [MTD Interface] erase block(s) - * @param mtd MTD device structure - * @param instr erase instruction + * @mtd: MTD device structure + * @instr: erase instruction * * Erase one or more blocks */ @@ -2391,7 +2388,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) /** * onenand_sync - [MTD Interface] sync - * @param mtd MTD device structure + * @mtd: MTD device structure * * Sync is actually a wait for chip ready function */ @@ -2408,8 +2405,8 @@ static void onenand_sync(struct mtd_info *mtd) /** * onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad - * @param mtd MTD device structure - * @param ofs offset relative to mtd start + * @mtd: MTD device structure + * @ofs: offset relative to mtd start * * Check whether the block is bad */ @@ -2425,8 +2422,8 @@ static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs) /** * onenand_default_block_markbad - [DEFAULT] mark a block bad - * @param mtd MTD device structure - * @param ofs offset from device start + * @mtd: MTD device structure + * @ofs: offset from device start * * This is the default implementation, which can be overridden by * a hardware specific driver. 
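For context on the @ops parameter that now replaces the old @len/@retlen/@buf documentation of onenand_write_oob_nolock() above: callers reach these OneNAND paths through the generic MTD OOB API by filling in a struct mtd_oob_ops. A minimal sketch of such a caller follows; the function name and buffers are invented for illustration and are not part of this patch.

#include <linux/mtd/mtd.h>

/* Sketch only: write one page of data plus auto-placed OOB bytes. */
static int example_write_page_with_oob(struct mtd_info *mtd, loff_t to,
				       u8 *data, size_t len,
				       u8 *oob, size_t ooblen)
{
	struct mtd_oob_ops ops = { };

	ops.mode = MTD_OPS_AUTO_OOB;	/* place OOB bytes in the free areas */
	ops.datbuf = data;
	ops.len = len;
	ops.oobbuf = oob;
	ops.ooblen = ooblen;

	/* Dispatches to the device's write_oob handler, for OneNAND onenand_write_oob(). */
	return mtd_write_oob(mtd, to, &ops);
}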
@@ -2460,8 +2457,8 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) /** * onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad - * @param mtd MTD device structure - * @param ofs offset relative to mtd start + * @mtd: MTD device structure + * @ofs: offset relative to mtd start * * Mark the block as bad */ @@ -2486,10 +2483,10 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs) /** * onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s) - * @param mtd MTD device structure - * @param ofs offset relative to mtd start - * @param len number of bytes to lock or unlock - * @param cmd lock or unlock command + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + * @len: number of bytes to lock or unlock + * @cmd: lock or unlock command * * Lock or unlock one or more blocks */ @@ -2566,9 +2563,9 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int /** * onenand_lock - [MTD Interface] Lock block(s) - * @param mtd MTD device structure - * @param ofs offset relative to mtd start - * @param len number of bytes to unlock + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + * @len: number of bytes to unlock * * Lock one or more blocks */ @@ -2584,9 +2581,9 @@ static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) /** * onenand_unlock - [MTD Interface] Unlock block(s) - * @param mtd MTD device structure - * @param ofs offset relative to mtd start - * @param len number of bytes to unlock + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + * @len: number of bytes to unlock * * Unlock one or more blocks */ @@ -2602,7 +2599,7 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) /** * onenand_check_lock_status - [OneNAND Interface] Check lock status - * @param this onenand chip data structure + * @this: onenand chip data structure * * Check lock status */ @@ -2636,7 +2633,7 @@ static int onenand_check_lock_status(struct onenand_chip *this) /** * onenand_unlock_all - [OneNAND Interface] unlock all blocks - * @param mtd MTD device structure + * @mtd: MTD device structure * * Unlock all blocks */ @@ -2683,10 +2680,10 @@ static void onenand_unlock_all(struct mtd_info *mtd) /** * onenand_otp_command - Send OTP specific command to OneNAND device - * @param mtd MTD device structure - * @param cmd the command to be sent - * @param addr offset to read from or write to - * @param len number of bytes to read or write + * @mtd: MTD device structure + * @cmd: the command to be sent + * @addr: offset to read from or write to + * @len: number of bytes to read or write */ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len) @@ -2758,11 +2755,9 @@ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr, /** * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP - * @param mtd MTD device structure - * @param to offset to write to - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of written bytes - * @param buf the data to write + * @mtd: MTD device structure + * @to: offset to write to + * @ops: oob operation description structure * * OneNAND write out-of-band only for OTP */ @@ -2889,11 +2884,11 @@ typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len, /** * do_otp_read - [DEFAULT] Read OTP block area - * @param mtd MTD device structure - * @param from The offset to read - 
* @param len number of bytes to read - * @param retlen pointer to variable to store the number of readbytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to read + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of readbytes + * @buf: the databuffer to put/get data * * Read OTP block area. */ @@ -2926,11 +2921,11 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, /** * do_otp_write - [DEFAULT] Write OTP block area - * @param mtd MTD device structure - * @param to The offset to write - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of write bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @to: The offset to write + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of write bytes + * @buf: the databuffer to put/get data * * Write OTP block area. */ @@ -2970,11 +2965,11 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len, /** * do_otp_lock - [DEFAULT] Lock OTP block area - * @param mtd MTD device structure - * @param from The offset to lock - * @param len number of bytes to lock - * @param retlen pointer to variable to store the number of lock bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to lock + * @len: number of bytes to lock + * @retlen: pointer to variable to store the number of lock bytes + * @buf: the databuffer to put/get data * * Lock OTP block area. */ @@ -3018,13 +3013,13 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, /** * onenand_otp_walk - [DEFAULT] Handle OTP operation - * @param mtd MTD device structure - * @param from The offset to read/write - * @param len number of bytes to read/write - * @param retlen pointer to variable to store the number of read bytes - * @param buf the databuffer to put/get data - * @param action do given action - * @param mode specify user and factory + * @mtd: MTD device structure + * @from: The offset to read/write + * @len: number of bytes to read/write + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put/get data + * @action: do given action + * @mode: specify user and factory * * Handle OTP operation. */ @@ -3099,10 +3094,10 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, /** * onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info - * @param mtd MTD device structure - * @param len number of bytes to read - * @param retlen pointer to variable to store the number of read bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put/get data * * Read factory OTP info. 
*/ @@ -3115,11 +3110,11 @@ static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len, /** * onenand_read_fact_prot_reg - [MTD Interface] Read factory OTP area - * @param mtd MTD device structure - * @param from The offset to read - * @param len number of bytes to read - * @param retlen pointer to variable to store the number of read bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to read + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put/get data * * Read factory OTP area. */ @@ -3131,10 +3126,10 @@ static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, /** * onenand_get_user_prot_info - [MTD Interface] Read user OTP info - * @param mtd MTD device structure - * @param retlen pointer to variable to store the number of read bytes - * @param len number of bytes to read - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @retlen: pointer to variable to store the number of read bytes + * @len: number of bytes to read + * @buf: the databuffer to put/get data * * Read user OTP info. */ @@ -3147,11 +3142,11 @@ static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len, /** * onenand_read_user_prot_reg - [MTD Interface] Read user OTP area - * @param mtd MTD device structure - * @param from The offset to read - * @param len number of bytes to read - * @param retlen pointer to variable to store the number of read bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to read + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put/get data * * Read user OTP area. */ @@ -3163,11 +3158,11 @@ static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from, /** * onenand_write_user_prot_reg - [MTD Interface] Write user OTP area - * @param mtd MTD device structure - * @param from The offset to write - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of write bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to write + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of write bytes + * @buf: the databuffer to put/get data * * Write user OTP area. 
*/ @@ -3179,9 +3174,9 @@ static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from, /** * onenand_lock_user_prot_reg - [MTD Interface] Lock user OTP area - * @param mtd MTD device structure - * @param from The offset to lock - * @param len number of bytes to unlock + * @mtd: MTD device structure + * @from: The offset to lock + * @len: number of bytes to unlock * * Write lock mark on spare area in page 0 in OTP block */ @@ -3234,7 +3229,7 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, /** * onenand_check_features - Check and set OneNAND features - * @param mtd MTD data structure + * @mtd: MTD data structure * * Check and set OneNAND features * - lock scheme @@ -3324,8 +3319,8 @@ static void onenand_check_features(struct mtd_info *mtd) /** * onenand_print_device_info - Print device & version ID - * @param device device ID - * @param version version ID + * @device: device ID + * @version: version ID * * Print device & version ID */ @@ -3355,7 +3350,7 @@ static const struct onenand_manufacturers onenand_manuf_ids[] = { /** * onenand_check_maf - Check manufacturer ID - * @param manuf manufacturer ID + * @manuf: manufacturer ID * * Check manufacturer ID */ @@ -3380,9 +3375,9 @@ static int onenand_check_maf(int manuf) } /** -* flexonenand_get_boundary - Reads the SLC boundary -* @param onenand_info - onenand info structure -**/ + * flexonenand_get_boundary - Reads the SLC boundary + * @mtd: MTD data structure + */ static int flexonenand_get_boundary(struct mtd_info *mtd) { struct onenand_chip *this = mtd->priv; @@ -3422,7 +3417,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd) /** * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info * boundary[], diesize[], mtd->size, mtd->erasesize - * @param mtd - MTD device structure + * @mtd: - MTD device structure */ static void flexonenand_get_size(struct mtd_info *mtd) { @@ -3493,9 +3488,9 @@ static void flexonenand_get_size(struct mtd_info *mtd) /** * flexonenand_check_blocks_erased - Check if blocks are erased - * @param mtd_info - mtd info structure - * @param start - first erase block to check - * @param end - last erase block to check + * @mtd: mtd info structure + * @start: first erase block to check + * @end: last erase block to check * * Converting an unerased block from MLC to SLC * causes byte values to change. 
Since both data and its ECC @@ -3548,9 +3543,8 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int return 0; } -/** +/* * flexonenand_set_boundary - Writes the SLC boundary - * @param mtd - mtd info structure */ static int flexonenand_set_boundary(struct mtd_info *mtd, int die, int boundary, int lock) @@ -3640,7 +3634,7 @@ out: /** * onenand_chip_probe - [OneNAND Interface] The generic chip probe - * @param mtd MTD device structure + * @mtd: MTD device structure * * OneNAND detection method: * Compare the values from command with ones from register @@ -3688,7 +3682,7 @@ static int onenand_chip_probe(struct mtd_info *mtd) /** * onenand_probe - [OneNAND Interface] Probe the OneNAND device - * @param mtd MTD device structure + * @mtd: MTD device structure */ static int onenand_probe(struct mtd_info *mtd) { @@ -3783,7 +3777,7 @@ static int onenand_probe(struct mtd_info *mtd) /** * onenand_suspend - [MTD Interface] Suspend the OneNAND flash - * @param mtd MTD device structure + * @mtd: MTD device structure */ static int onenand_suspend(struct mtd_info *mtd) { @@ -3792,7 +3786,7 @@ static int onenand_suspend(struct mtd_info *mtd) /** * onenand_resume - [MTD Interface] Resume the OneNAND flash - * @param mtd MTD device structure + * @mtd: MTD device structure */ static void onenand_resume(struct mtd_info *mtd) { @@ -3807,8 +3801,8 @@ static void onenand_resume(struct mtd_info *mtd) /** * onenand_scan - [OneNAND Interface] Scan for the OneNAND device - * @param mtd MTD device structure - * @param maxchips Number of chips to scan for + * @mtd: MTD device structure + * @maxchips: Number of chips to scan for * * This fills out all the not initialized function pointers * with the defaults. @@ -3985,7 +3979,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) /** * onenand_release - [OneNAND Interface] Free resources held by the OneNAND device - * @param mtd MTD device structure + * @mtd: MTD device structure */ void onenand_release(struct mtd_info *mtd) { diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c index 57c31c81be18..def89f108007 100644 --- a/drivers/mtd/nand/onenand/onenand_bbt.c +++ b/drivers/mtd/nand/onenand/onenand_bbt.c @@ -18,10 +18,10 @@ /** * check_short_pattern - [GENERIC] check if a pattern is in the buffer - * @param buf the buffer to search - * @param len the length of buffer to search - * @param paglen the pagelength - * @param td search pattern descriptor + * @buf: the buffer to search + * @len: the length of buffer to search + * @paglen: the pagelength + * @td: search pattern descriptor * * Check for a pattern at the given place. Used to search bad block * tables and good / bad block identifiers. Same as check_pattern, but @@ -44,10 +44,10 @@ static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bb /** * create_bbt - [GENERIC] Create a bad block table by scanning the device - * @param mtd MTD device structure - * @param buf temporary buffer - * @param bd descriptor for the good/bad block search pattern - * @param chip create the table for a specific chip, -1 read all chips. + * @mtd: MTD device structure + * @buf: temporary buffer + * @bd: descriptor for the good/bad block search pattern + * @chip: create the table for a specific chip, -1 read all chips. 
* Applies only if NAND_BBT_PERCHIP option is set * * Create a bad block table by scanning the device @@ -122,8 +122,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr /** * onenand_memory_bbt - [GENERIC] create a memory based bad block table - * @param mtd MTD device structure - * @param bd descriptor for the good/bad block search pattern + * @mtd: MTD device structure + * @bd: descriptor for the good/bad block search pattern * * The function creates a memory based bbt by scanning the device * for manufacturer / software marked good / bad blocks @@ -137,9 +137,9 @@ static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_desc /** * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad - * @param mtd MTD device structure - * @param offs offset in the device - * @param allowbbt allow access to bad block table region + * @mtd: MTD device structure + * @offs: offset in the device + * @allowbbt: allow access to bad block table region */ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) { @@ -166,8 +166,8 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) /** * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s) - * @param mtd MTD device structure - * @param bd descriptor for the good/bad block search pattern + * @mtd: MTD device structure + * @bd: descriptor for the good/bad block search pattern * * The function checks, if a bad block table(s) is/are already * available. If not it scans the device for manufacturer @@ -221,7 +221,7 @@ static struct nand_bbt_descr largepage_memorybased = { /** * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device - * @param mtd MTD device structure + * @mtd: MTD device structure * * This function selects the default bad block table * support for the device and calls the onenand_scan_bbt function diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c index d8c0bd002c2b..12825eb97938 100644 --- a/drivers/mtd/nand/onenand/onenand_omap2.c +++ b/drivers/mtd/nand/onenand/onenand_omap2.c @@ -371,12 +371,12 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; /* - * If the buffer address is not DMA-able, len is not long enough to make - * DMA transfers profitable or panic_write() may be in an interrupt - * context fallback to PIO mode. + * If the buffer address is not DMA-able, len is not long enough to + * make DMA transfers profitable or if invoked from panic_write() + * fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress) + count < 384 || mtd->oops_panic_write) goto out_copy; xtra = count & 3; @@ -418,12 +418,12 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; /* - * If the buffer address is not DMA-able, len is not long enough to make - * DMA transfers profitable or panic_write() may be in an interrupt - * context fallback to PIO mode. + * If the buffer address is not DMA-able, len is not long enough to + * make DMA transfers profitable or if invoked from panic_write() + * fallback to PIO mode. 
*/ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress) + count < 384 || mtd->oops_panic_write) goto out_copy; dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE); diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 6c46f25b57e2..442a039b92f3 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -1,20 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only -config MTD_NAND_ECC_SW_HAMMING - tristate - -config MTD_NAND_ECC_SW_HAMMING_SMC - bool "NAND ECC Smart Media byte order" - depends on MTD_NAND_ECC_SW_HAMMING - default n - help - Software ECC according to the Smart Media Specification. - The original Linux implementation had byte 0 and 1 swapped. - menuconfig MTD_RAW_NAND tristate "Raw/Parallel NAND Device Support" select MTD_NAND_CORE select MTD_NAND_ECC - select MTD_NAND_ECC_SW_HAMMING help This enables support for accessing all type of raw/parallel NAND flash devices. For further information see @@ -22,16 +10,6 @@ menuconfig MTD_RAW_NAND if MTD_RAW_NAND -config MTD_NAND_ECC_SW_BCH - bool "Support software BCH ECC" - select BCH - default n - help - This enables support for software BCH error correction. Binary BCH - codes are more powerful and cpu intensive than traditional Hamming - ECC codes. They are used with NAND devices requiring more than 1 bit - of error correction. - comment "Raw/parallel NAND flash controllers" config MTD_NAND_DENALI @@ -93,6 +71,7 @@ config MTD_NAND_AU1550 config MTD_NAND_NDFC tristate "IBM/MCC 4xx NAND controller" depends on 4xx + select MTD_NAND_ECC_SW_HAMMING select MTD_NAND_ECC_SW_HAMMING_SMC help NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs @@ -313,7 +292,7 @@ config MTD_NAND_VF610_NFC config MTD_NAND_MXC tristate "Freescale MXC NAND controller" depends on ARCH_MXC || COMPILE_TEST - depends on HAS_IOMEM + depends on HAS_IOMEM && OF help This enables the driver for the NAND flash controller on the MXC processors. @@ -462,6 +441,26 @@ config MTD_NAND_ARASAN Enables the driver for the Arasan NAND flash controller on Zynq Ultrascale+ MPSoC. +config MTD_NAND_INTEL_LGM + tristate "Support for NAND controller on Intel LGM SoC" + depends on OF || COMPILE_TEST + depends on HAS_IOMEM + help + Enables support for NAND Flash chips on Intel's LGM SoC. + NAND flash controller interfaced through the External Bus Unit. + +config MTD_NAND_ROCKCHIP + tristate "Rockchip NAND controller" + depends on ARCH_ROCKCHIP && HAS_IOMEM + help + Enables support for NAND controller on Rockchip SoCs. 
+ There are four different versions of NAND FLASH Controllers, + including: + NFC v600: RK2928, RK3066, RK3188 + NFC v622: RK3036, RK3128 + NFC v800: RK3308, RV1108 + NFC v900: PX30, RK3326 + comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 2930f5b9015d..32475a28d8f8 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -1,8 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MTD_RAW_NAND) += nand.o -obj-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += nand_ecc.o -nand-$(CONFIG_MTD_NAND_ECC_SW_BCH) += nand_bch.o obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o @@ -58,6 +56,8 @@ obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o +obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o +obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index fbb4ea751be8..549aac00228e 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -118,6 +118,7 @@ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin * @len: Data transfer length * @read: Data transfer direction from the controller point of view + * @buf: Data buffer */ struct anfc_op { u32 pkt_reg; diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 7b6b354f2d39..99116896cfd6 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -3,6 +3,7 @@ * Copyright (C) 2004 Embedded Edge, LLC */ +#include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/interrupt.h> diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 2da39ab89286..659eaa6f0980 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -1846,7 +1846,7 @@ static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf, } } -/** +/* * Kick EDU engine */ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf, @@ -1937,7 +1937,7 @@ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf, return ret; } -/** +/* * Construct a FLASH_DMA descriptor as part of a linked list. You must know the * following ahead of time: * - Is this descriptor the beginning or end of a linked list? 
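Most of the OneNAND and raw NAND hunks in this patch convert "@param x description" lines to the "@x: description" form that kernel-doc expects, and comments that are not kernel-doc at all (such as the two brcmnand helpers just above) are demoted from "/**" to "/*". For reference, a complete kernel-doc block has this rough shape; the function and its parameters here are hypothetical:

/**
 * example_func - one-line summary of what the function does
 * @mtd: MTD device structure
 * @len: number of bytes to process
 *
 * Optional longer description of the behaviour.
 *
 * Return: 0 on success or a negative error code.
 */
static int example_func(struct mtd_info *mtd, size_t len);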
@@ -1970,7 +1970,7 @@ static int brcmnand_fill_dma_desc(struct brcmnand_host *host, return 0; } -/** +/* * Kick the FLASH_DMA engine, with a given DMA descriptor */ static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc) diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c index 2b94f385a1a8..d0e8ffd55c22 100644 --- a/drivers/mtd/nand/raw/cafe_nand.c +++ b/drivers/mtd/nand/raw/cafe_nand.c @@ -359,10 +359,10 @@ static int cafe_nand_read_oob(struct nand_chip *chip, int page) } /** * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read - * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data * @oob_required: caller expects OOB data read to chip->oob_poi + * @page: page number to read * * The hw generator calculates the error syndrome automatically. Therefore * we need a special oob layout and handling. diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index 282203debd0c..6edf78c16fc8 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -19,7 +19,6 @@ #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/iopoll.h> @@ -252,7 +251,7 @@ static int cs553x_attach_chip(struct nand_chip *chip) chip->ecc.bytes = 3; chip->ecc.hwctl = cs_enable_hwecc; chip->ecc.calculate = cs_calculate_ecc; - chip->ecc.correct = nand_correct_data; + chip->ecc.correct = rawnand_sw_hamming_correct; chip->ecc.strength = 1; return 0; diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index f8c36d19ab47..118da9944e3b 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -586,10 +586,10 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) return PTR_ERR(pdata); /* Use board-specific ECC config */ - info->chip.ecc.engine_type = pdata->engine_type; - info->chip.ecc.placement = pdata->ecc_placement; + chip->ecc.engine_type = pdata->engine_type; + chip->ecc.placement = pdata->ecc_placement; - switch (info->chip.ecc.engine_type) { + switch (chip->ecc.engine_type) { case NAND_ECC_ENGINE_TYPE_NONE: pdata->ecc_bits = 0; break; @@ -601,7 +601,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo * field to davinci_nand_pdata. */ - info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING; + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; break; case NAND_ECC_ENGINE_TYPE_ON_HOST: if (pdata->ecc_bits == 4) { @@ -628,12 +628,12 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) if (ret == -EBUSY) return ret; - info->chip.ecc.calculate = nand_davinci_calculate_4bit; - info->chip.ecc.correct = nand_davinci_correct_4bit; - info->chip.ecc.hwctl = nand_davinci_hwctl_4bit; - info->chip.ecc.bytes = 10; - info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; - info->chip.ecc.algo = NAND_ECC_ALGO_BCH; + chip->ecc.calculate = nand_davinci_calculate_4bit; + chip->ecc.correct = nand_davinci_correct_4bit; + chip->ecc.hwctl = nand_davinci_hwctl_4bit; + chip->ecc.bytes = 10; + chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; + chip->ecc.algo = NAND_ECC_ALGO_BCH; /* * Update ECC layout if needed ... 
for 1-bit HW ECC, the @@ -651,20 +651,20 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) } else if (chunks == 4 || chunks == 8) { mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout()); - info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first; + chip->ecc.read_page = nand_davinci_read_page_hwecc_oob_first; } else { return -EIO; } } else { /* 1bit ecc hamming */ - info->chip.ecc.calculate = nand_davinci_calculate_1bit; - info->chip.ecc.correct = nand_davinci_correct_1bit; - info->chip.ecc.hwctl = nand_davinci_hwctl_1bit; - info->chip.ecc.bytes = 3; - info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING; + chip->ecc.calculate = nand_davinci_calculate_1bit; + chip->ecc.correct = nand_davinci_correct_1bit; + chip->ecc.hwctl = nand_davinci_hwctl_1bit; + chip->ecc.bytes = 3; + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; } - info->chip.ecc.size = 512; - info->chip.ecc.strength = pdata->ecc_bits; + chip->ecc.size = 512; + chip->ecc.strength = pdata->ecc_bits; break; default: return -EINVAL; @@ -899,7 +899,7 @@ static int nand_davinci_remove(struct platform_device *pdev) int ret; spin_lock_irq(&davinci_nand_lock); - if (info->chip.ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED) + if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED) ecc4_busy = false; spin_unlock_irq(&davinci_nand_lock); diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index 26b265e4384a..5d2ddb037a9a 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -216,7 +216,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc) static void DoC_Delay(struct doc_priv *doc, unsigned short cycles) { - volatile char dummy; + volatile char __always_unused dummy; int i; for (i = 0; i < cycles; i++) { @@ -703,7 +703,7 @@ static int doc200x_calculate_ecc(struct nand_chip *this, const u_char *dat, struct doc_priv *doc = nand_get_controller_data(this); void __iomem *docptr = doc->virtadr; int i; - int emptymatch = 1; + int __always_unused emptymatch = 1; /* flush the pipeline */ if (DoC_is_2000(doc)) { diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c index b2af7f81fdf8..aab93b9e6052 100644 --- a/drivers/mtd/nand/raw/fsl_elbc_nand.c +++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c @@ -22,7 +22,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <asm/io.h> diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index e345f9d9f8e8..02d500176838 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -15,7 +15,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> #include <linux/mtd/partitions.h> -#include <linux/mtd/nand_ecc.h> #include <linux/fsl_ifc.h> #include <linux/iopoll.h> diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index d5813b9abc8e..b3cc427100a2 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -11,7 +11,6 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/mtd/mtd.h> #include <linux/of_platform.h> diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index c88421a1c078..0101c0fab50a 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -26,7 +26,6 @@ #include <linux/types.h> #include 
<linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/mtd/partitions.h> @@ -918,7 +917,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand) case NAND_ECC_ENGINE_TYPE_ON_HOST: dev_info(host->dev, "Using 1-bit HW ECC scheme\n"); nand->ecc.calculate = fsmc_read_hwecc_ecc1; - nand->ecc.correct = nand_correct_data; + nand->ecc.correct = rawnand_sw_hamming_correct; nand->ecc.hwctl = fsmc_enable_hwecc; nand->ecc.bytes = 3; nand->ecc.strength = 1; @@ -942,7 +941,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand) /* * Don't set layout for BCH4 SW ECC. This will be - * generated later in nand_bch_init() later. + * generated later during BCH initialization. */ if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) { switch (mtd->oobsize) { diff --git a/drivers/mtd/nand/raw/gpmi-nand/Makefile b/drivers/mtd/nand/raw/gpmi-nand/Makefile index 9bd81a31e02e..247cbfceaa19 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/Makefile +++ b/drivers/mtd/nand/raw/gpmi-nand/Makefile @@ -1,3 +1,2 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o -gpmi_nand-objs += gpmi-nand.o +obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand.o diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index dc8104e67506..5cdf05bcbf8f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -149,8 +149,10 @@ static int gpmi_init(struct gpmi_nand_data *this) int ret; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(this->dev); return ret; + } ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) @@ -179,9 +181,11 @@ static int gpmi_init(struct gpmi_nand_data *this) /* * Decouple the chip select from dma channel. We use dma0 for all - * the chips. + * the chips, force all NAND RDY_BUSY inputs to be sourced from + * RDY_BUSY0. */ - writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); + writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY, + r->gpmi_regs + HW_GPMI_CTRL1_SET); err_out: pm_runtime_mark_last_busy(this->dev); @@ -2252,7 +2256,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, void *buf_read = NULL; const void *buf_write = NULL; bool direct = false; - struct completion *completion; + struct completion *dma_completion, *bch_completion; unsigned long to; if (check_only) @@ -2263,8 +2267,10 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->transfers[i].direction = DMA_NONE; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(this->dev); return ret; + } /* * This driver currently supports only one NAND chip. 
Plus, dies share @@ -2347,22 +2353,24 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1); } + desc->callback = dma_irq_callback; + desc->callback_param = this; + dma_completion = &this->dma_done; + bch_completion = NULL; + + init_completion(dma_completion); + if (this->bch && buf_read) { writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, this->resources.bch_regs + HW_BCH_CTRL_SET); - completion = &this->bch_done; - } else { - desc->callback = dma_irq_callback; - desc->callback_param = this; - completion = &this->dma_done; + bch_completion = &this->bch_done; + init_completion(bch_completion); } - init_completion(completion); - dmaengine_submit(desc); dma_async_issue_pending(get_dma_chan(this)); - to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000)); + to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000)); if (!to) { dev_err(this->dev, "DMA timeout, last DMA\n"); gpmi_dump_info(this); @@ -2370,6 +2378,16 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, goto unmap; } + if (this->bch && buf_read) { + to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000)); + if (!to) { + dev_err(this->dev, "BCH timeout, last DMA\n"); + gpmi_dump_info(this); + ret = -ETIMEDOUT; + goto unmap; + } + } + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, this->resources.bch_regs + HW_BCH_CTRL_CLR); gpmi_clear_bch(this); @@ -2461,43 +2479,25 @@ err_out: } static const struct of_device_id gpmi_nand_id_table[] = { - { - .compatible = "fsl,imx23-gpmi-nand", - .data = &gpmi_devdata_imx23, - }, { - .compatible = "fsl,imx28-gpmi-nand", - .data = &gpmi_devdata_imx28, - }, { - .compatible = "fsl,imx6q-gpmi-nand", - .data = &gpmi_devdata_imx6q, - }, { - .compatible = "fsl,imx6sx-gpmi-nand", - .data = &gpmi_devdata_imx6sx, - }, { - .compatible = "fsl,imx7d-gpmi-nand", - .data = &gpmi_devdata_imx7d, - }, {} + { .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, }, + { .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, }, + { .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, }, + { .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, }, + { .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,}, + {} }; MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); static int gpmi_nand_probe(struct platform_device *pdev) { struct gpmi_nand_data *this; - const struct of_device_id *of_id; int ret; this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL); if (!this) return -ENOMEM; - of_id = of_match_device(gpmi_nand_id_table, &pdev->dev); - if (of_id) { - this->devdata = of_id->data; - } else { - dev_err(&pdev->dev, "Failed to find the right device id.\n"); - return -ENODEV; - } - + this->devdata = of_device_get_match_data(&pdev->dev); platform_set_drvdata(pdev, this); this->pdev = pdev; this->dev = &pdev->dev; diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h index f5e4f26c34da..fc31fd084dcf 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h @@ -107,6 +107,7 @@ #define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2 #define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3 +#define BM_GPMI_CTRL1_GANGED_RDYBUSY (1 << 19) #define BM_GPMI_CTRL1_BCH_MODE (1 << 18) #define BP_GPMI_CTRL1_DLL_ENABLE 17 diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c index 8e22cd6ec71f..efe0ffe4f1ab 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c +++ 
b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c @@ -71,8 +71,6 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np) if (!pdev || !platform_get_drvdata(pdev)) return ERR_PTR(-EPROBE_DEFER); - get_device(&pdev->dev); - ecc = platform_get_drvdata(pdev); clk_prepare_enable(ecc->clk); diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c new file mode 100644 index 000000000000..fdb112e8a90d --- /dev/null +++ b/drivers/mtd/nand/raw/intel-nand-controller.c @@ -0,0 +1,721 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2020 Intel Corporation. */ + +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/dmaengine.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/mtd/nand.h> + +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <asm/unaligned.h> + +#define EBU_CLC 0x000 +#define EBU_CLC_RST 0x00000000u + +#define EBU_ADDR_SEL(n) (0x020 + (n) * 4) +/* 5 bits 26:22 included for comparison in the ADDR_SELx */ +#define EBU_ADDR_MASK(x) ((x) << 4) +#define EBU_ADDR_SEL_REGEN 0x1 + +#define EBU_BUSCON(n) (0x060 + (n) * 4) +#define EBU_BUSCON_CMULT_V4 0x1 +#define EBU_BUSCON_RECOVC(n) ((n) << 2) +#define EBU_BUSCON_HOLDC(n) ((n) << 4) +#define EBU_BUSCON_WAITRDC(n) ((n) << 6) +#define EBU_BUSCON_WAITWRC(n) ((n) << 8) +#define EBU_BUSCON_BCGEN_CS 0x0 +#define EBU_BUSCON_SETUP_EN BIT(22) +#define EBU_BUSCON_ALEC 0xC000 + +#define EBU_CON 0x0B0 +#define EBU_CON_NANDM_EN BIT(0) +#define EBU_CON_NANDM_DIS 0x0 +#define EBU_CON_CSMUX_E_EN BIT(1) +#define EBU_CON_ALE_P_LOW BIT(2) +#define EBU_CON_CLE_P_LOW BIT(3) +#define EBU_CON_CS_P_LOW BIT(4) +#define EBU_CON_SE_P_LOW BIT(5) +#define EBU_CON_WP_P_LOW BIT(6) +#define EBU_CON_PRE_P_LOW BIT(7) +#define EBU_CON_IN_CS_S(n) ((n) << 8) +#define EBU_CON_OUT_CS_S(n) ((n) << 10) +#define EBU_CON_LAT_EN_CS_P ((0x3D) << 18) + +#define EBU_WAIT 0x0B4 +#define EBU_WAIT_RDBY BIT(0) +#define EBU_WAIT_WR_C BIT(3) + +#define HSNAND_CTL1 0x110 +#define HSNAND_CTL1_ADDR_SHIFT 24 + +#define HSNAND_CTL2 0x114 +#define HSNAND_CTL2_ADDR_SHIFT 8 +#define HSNAND_CTL2_CYC_N_V5 (0x2 << 16) + +#define HSNAND_INT_MSK_CTL 0x124 +#define HSNAND_INT_MSK_CTL_WR_C BIT(4) + +#define HSNAND_INT_STA 0x128 +#define HSNAND_INT_STA_WR_C BIT(4) + +#define HSNAND_CTL 0x130 +#define HSNAND_CTL_ENABLE_ECC BIT(0) +#define HSNAND_CTL_GO BIT(2) +#define HSNAND_CTL_CE_SEL_CS(n) BIT(3 + (n)) +#define HSNAND_CTL_RW_READ 0x0 +#define HSNAND_CTL_RW_WRITE BIT(10) +#define HSNAND_CTL_ECC_OFF_V8TH BIT(11) +#define HSNAND_CTL_CKFF_EN 0x0 +#define HSNAND_CTL_MSG_EN BIT(17) + +#define HSNAND_PARA0 0x13c +#define HSNAND_PARA0_PAGE_V8192 0x3 +#define HSNAND_PARA0_PIB_V256 (0x3 << 4) +#define HSNAND_PARA0_BYP_EN_NP 0x0 +#define HSNAND_PARA0_BYP_DEC_NP 0x0 +#define HSNAND_PARA0_TYPE_ONFI BIT(18) +#define HSNAND_PARA0_ADEP_EN BIT(21) + +#define HSNAND_CMSG_0 0x150 +#define HSNAND_CMSG_1 0x154 + +#define HSNAND_ALE_OFFS BIT(2) +#define HSNAND_CLE_OFFS BIT(3) +#define HSNAND_CS_OFFS BIT(4) + +#define HSNAND_ECC_OFFSET 0x008 + +#define NAND_DATA_IFACE_CHECK_ONLY -1 + +#define MAX_CS 2 + +#define HZ_PER_MHZ 1000000L +#define USEC_PER_SEC 1000000L + +struct ebu_nand_cs { + void __iomem *chipaddr; + dma_addr_t nand_pa; + u32 addr_sel; +}; + 
+struct ebu_nand_controller { + struct nand_controller controller; + struct nand_chip chip; + struct device *dev; + void __iomem *ebu; + void __iomem *hsnand; + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; + struct completion dma_access_complete; + unsigned long clk_rate; + struct clk *clk; + u32 nd_para0; + u8 cs_num; + struct ebu_nand_cs cs[MAX_CS]; +}; + +static inline struct ebu_nand_controller *nand_to_ebu(struct nand_chip *chip) +{ + return container_of(chip, struct ebu_nand_controller, chip); +} + +static int ebu_nand_waitrdy(struct nand_chip *chip, int timeout_ms) +{ + struct ebu_nand_controller *ctrl = nand_to_ebu(chip); + u32 status; + + return readl_poll_timeout(ctrl->ebu + EBU_WAIT, status, + (status & EBU_WAIT_RDBY) || + (status & EBU_WAIT_WR_C), 20, timeout_ms); +} + +static u8 ebu_nand_readb(struct nand_chip *chip) +{ + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + u8 cs_num = ebu_host->cs_num; + u8 val; + + val = readb(ebu_host->cs[cs_num].chipaddr + HSNAND_CS_OFFS); + ebu_nand_waitrdy(chip, 1000); + return val; +} + +static void ebu_nand_writeb(struct nand_chip *chip, u32 offset, u8 value) +{ + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + u8 cs_num = ebu_host->cs_num; + + writeb(value, ebu_host->cs[cs_num].chipaddr + offset); + ebu_nand_waitrdy(chip, 1000); +} + +static void ebu_read_buf(struct nand_chip *chip, u_char *buf, unsigned int len) +{ + int i; + + for (i = 0; i < len; i++) + buf[i] = ebu_nand_readb(chip); +} + +static void ebu_write_buf(struct nand_chip *chip, const u_char *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + ebu_nand_writeb(chip, HSNAND_CS_OFFS, buf[i]); +} + +static void ebu_nand_disable(struct nand_chip *chip) +{ + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + + writel(0, ebu_host->ebu + EBU_CON); +} + +static void ebu_select_chip(struct nand_chip *chip) +{ + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + void __iomem *nand_con = ebu_host->ebu + EBU_CON; + u32 cs = ebu_host->cs_num; + + writel(EBU_CON_NANDM_EN | EBU_CON_CSMUX_E_EN | EBU_CON_CS_P_LOW | + EBU_CON_SE_P_LOW | EBU_CON_WP_P_LOW | EBU_CON_PRE_P_LOW | + EBU_CON_IN_CS_S(cs) | EBU_CON_OUT_CS_S(cs) | + EBU_CON_LAT_EN_CS_P, nand_con); +} + +static int ebu_nand_set_timings(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) +{ + struct ebu_nand_controller *ctrl = nand_to_ebu(chip); + unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ; + unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate); + const struct nand_sdr_timings *timings; + u32 trecov, thold, twrwait, trdwait; + u32 reg = 0; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return PTR_ERR(timings); + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min), + period); + reg |= EBU_BUSCON_RECOVC(trecov); + + thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period); + reg |= EBU_BUSCON_HOLDC(thold); + + trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min), + period); + reg |= EBU_BUSCON_WAITRDC(trdwait); + + twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period); + reg |= EBU_BUSCON_WAITWRC(twrwait); + + reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC | + EBU_BUSCON_SETUP_EN; + + writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num)); + + return 0; +} + +static int ebu_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct 
mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = HSNAND_ECC_OFFSET;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int ebu_nand_ooblayout_free(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = chip->ecc.total + HSNAND_ECC_OFFSET;
+	oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops ebu_nand_ooblayout_ops = {
+	.ecc = ebu_nand_ooblayout_ecc,
+	.free = ebu_nand_ooblayout_free,
+};
+
+static void ebu_dma_rx_callback(void *cookie)
+{
+	struct ebu_nand_controller *ebu_host = cookie;
+
+	dmaengine_terminate_async(ebu_host->dma_rx);
+
+	complete(&ebu_host->dma_access_complete);
+}
+
+static void ebu_dma_tx_callback(void *cookie)
+{
+	struct ebu_nand_controller *ebu_host = cookie;
+
+	dmaengine_terminate_async(ebu_host->dma_tx);
+
+	complete(&ebu_host->dma_access_complete);
+}
+
+static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
+			 const u8 *buf, u32 len)
+{
+	struct dma_async_tx_descriptor *tx;
+	struct completion *dma_completion;
+	dma_async_tx_callback callback;
+	struct dma_chan *chan;
+	dma_cookie_t cookie;
+	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	dma_addr_t buf_dma;
+	int ret;
+	u32 timeout;
+
+	if (dir == DMA_DEV_TO_MEM) {
+		chan = ebu_host->dma_rx;
+		dma_completion = &ebu_host->dma_access_complete;
+		callback = ebu_dma_rx_callback;
+	} else {
+		chan = ebu_host->dma_tx;
+		dma_completion = &ebu_host->dma_access_complete;
+		callback = ebu_dma_tx_callback;
+	}
+
+	buf_dma = dma_map_single(chan->device->dev, (void *)buf, len, dir);
+	if (dma_mapping_error(chan->device->dev, buf_dma)) {
+		dev_err(ebu_host->dev, "Failed to map DMA buffer\n");
+		return -EIO;
+	}
+
+	tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
+	if (!tx) {
+		ret = -ENXIO;
+		goto err_unmap;
+	}
+
+	tx->callback = callback;
+	tx->callback_param = ebu_host;
+	cookie = tx->tx_submit(tx);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(ebu_host->dev, "dma_submit_error %d\n", cookie);
+		ret = -EIO;
+		goto err_unmap;
+	}
+
+	init_completion(dma_completion);
+	dma_async_issue_pending(chan);
+
+	/* Wait for the DMA to finish the data transfer. */
+	timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
+	if (!timeout) {
+		dev_err(ebu_host->dev, "I/O error in DMA (status %d)\n",
+			dmaengine_tx_status(chan, cookie, NULL));
+		dmaengine_terminate_sync(chan);
+		ret = -ETIMEDOUT;
+		goto err_unmap;
+	}
+
+	dma_unmap_single(chan->device->dev, buf_dma, len, dir);
+
+	return 0;
+
+err_unmap:
+	dma_unmap_single(chan->device->dev, buf_dma, len, dir);
+
+	return ret;
+}
+
+static void ebu_nand_trigger(struct ebu_nand_controller *ebu_host,
+			     int page, u32 cmd)
+{
+	unsigned int val;
+
+	val = cmd | (page & 0xFF) << HSNAND_CTL1_ADDR_SHIFT;
+	writel(val, ebu_host->hsnand + HSNAND_CTL1);
+	val = (page & 0xFFFF00) >> 8 | HSNAND_CTL2_CYC_N_V5;
+	writel(val, ebu_host->hsnand + HSNAND_CTL2);
+
+	writel(ebu_host->nd_para0, ebu_host->hsnand + HSNAND_PARA0);
+
+	/* clear first, will update later */
+	writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_0);
+	writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_1);
+
+	writel(HSNAND_INT_MSK_CTL_WR_C,
+	       ebu_host->hsnand + HSNAND_INT_MSK_CTL);
+
+	if (!cmd)
+		val = HSNAND_CTL_RW_READ;
+	else
+		val = HSNAND_CTL_RW_WRITE;
+
+	writel(HSNAND_CTL_MSG_EN | HSNAND_CTL_CKFF_EN |
+	       HSNAND_CTL_ECC_OFF_V8TH |
HSNAND_CTL_CE_SEL_CS(ebu_host->cs_num) | + HSNAND_CTL_ENABLE_ECC | HSNAND_CTL_GO | val, + ebu_host->hsnand + HSNAND_CTL); +} + +static int ebu_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + int ret, reg_data; + + ebu_nand_trigger(ebu_host, page, NAND_CMD_READ0); + + ret = ebu_dma_start(ebu_host, DMA_DEV_TO_MEM, buf, mtd->writesize); + if (ret) + return ret; + + if (oob_required) + chip->ecc.read_oob(chip, page); + + reg_data = readl(ebu_host->hsnand + HSNAND_CTL); + reg_data &= ~HSNAND_CTL_GO; + writel(reg_data, ebu_host->hsnand + HSNAND_CTL); + + return 0; +} + +static int ebu_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + void __iomem *int_sta = ebu_host->hsnand + HSNAND_INT_STA; + int reg_data, ret, val; + u32 reg; + + ebu_nand_trigger(ebu_host, page, NAND_CMD_SEQIN); + + ret = ebu_dma_start(ebu_host, DMA_MEM_TO_DEV, buf, mtd->writesize); + if (ret) + return ret; + + if (oob_required) { + reg = get_unaligned_le32(chip->oob_poi); + writel(reg, ebu_host->hsnand + HSNAND_CMSG_0); + + reg = get_unaligned_le32(chip->oob_poi + 4); + writel(reg, ebu_host->hsnand + HSNAND_CMSG_1); + } + + ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C), + 10, 1000); + if (ret) + return ret; + + reg_data = readl(ebu_host->hsnand + HSNAND_CTL); + reg_data &= ~HSNAND_CTL_GO; + writel(reg_data, ebu_host->hsnand + HSNAND_CTL); + + return 0; +} + +static const u8 ecc_strength[] = { 1, 1, 4, 8, 24, 32, 40, 60, }; + +static int ebu_nand_attach_chip(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + u32 ecc_steps, ecc_bytes, ecc_total, pagesize, pg_per_blk; + u32 ecc_strength_ds = chip->ecc.strength; + u32 ecc_size = chip->ecc.size; + u32 writesize = mtd->writesize; + u32 blocksize = mtd->erasesize; + int bch_algo, start, val; + + /* Default to an ECC size of 512 */ + if (!chip->ecc.size) + chip->ecc.size = 512; + + switch (ecc_size) { + case 512: + start = 1; + if (!ecc_strength_ds) + ecc_strength_ds = 4; + break; + case 1024: + start = 4; + if (!ecc_strength_ds) + ecc_strength_ds = 32; + break; + default: + return -EINVAL; + } + + /* BCH ECC algorithm Settings for number of bits per 512B/1024B */ + bch_algo = round_up(start + 1, 4); + for (val = start; val < bch_algo; val++) { + if (ecc_strength_ds == ecc_strength[val]) + break; + } + if (val == bch_algo) + return -EINVAL; + + if (ecc_strength_ds == 8) + ecc_bytes = 14; + else + ecc_bytes = DIV_ROUND_UP(ecc_strength_ds * fls(8 * ecc_size), 8); + + ecc_steps = writesize / ecc_size; + ecc_total = ecc_steps * ecc_bytes; + if ((ecc_total + 8) > mtd->oobsize) + return -ERANGE; + + chip->ecc.total = ecc_total; + pagesize = fls(writesize >> 11); + if (pagesize > HSNAND_PARA0_PAGE_V8192) + return -ERANGE; + + pg_per_blk = fls((blocksize / writesize) >> 6) / 8; + if (pg_per_blk > HSNAND_PARA0_PIB_V256) + return -ERANGE; + + ebu_host->nd_para0 = pagesize | pg_per_blk | HSNAND_PARA0_BYP_EN_NP | + HSNAND_PARA0_BYP_DEC_NP | HSNAND_PARA0_ADEP_EN | + HSNAND_PARA0_TYPE_ONFI | (val << 29); + + mtd_set_ooblayout(mtd, &ebu_nand_ooblayout_ops); + chip->ecc.read_page = ebu_nand_read_page_hwecc; + chip->ecc.write_page = ebu_nand_write_page_hwecc; + + 
return 0; +} + +static int ebu_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, bool check_only) +{ + const struct nand_op_instr *instr = NULL; + unsigned int op_id; + int i, timeout_ms, ret = 0; + + if (check_only) + return 0; + + ebu_select_chip(chip); + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ebu_nand_writeb(chip, HSNAND_CLE_OFFS | HSNAND_CS_OFFS, + instr->ctx.cmd.opcode); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + ebu_nand_writeb(chip, + HSNAND_ALE_OFFS | HSNAND_CS_OFFS, + instr->ctx.addr.addrs[i]); + break; + + case NAND_OP_DATA_IN_INSTR: + ebu_read_buf(chip, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + ebu_write_buf(chip, instr->ctx.data.buf.out, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + timeout_ms = instr->ctx.waitrdy.timeout_ms * 1000; + ret = ebu_nand_waitrdy(chip, timeout_ms); + break; + } + } + + return ret; +} + +static const struct nand_controller_ops ebu_nand_controller_ops = { + .attach_chip = ebu_nand_attach_chip, + .setup_interface = ebu_nand_set_timings, + .exec_op = ebu_nand_exec_op, +}; + +static void ebu_dma_cleanup(struct ebu_nand_controller *ebu_host) +{ + if (ebu_host->dma_rx) + dma_release_channel(ebu_host->dma_rx); + + if (ebu_host->dma_tx) + dma_release_channel(ebu_host->dma_tx); +} + +static int ebu_nand_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ebu_nand_controller *ebu_host; + struct nand_chip *nand; + struct mtd_info *mtd = NULL; + struct resource *res; + char *resname; + int ret; + u32 cs; + + ebu_host = devm_kzalloc(dev, sizeof(*ebu_host), GFP_KERNEL); + if (!ebu_host) + return -ENOMEM; + + ebu_host->dev = dev; + nand_controller_init(&ebu_host->controller); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ebunand"); + ebu_host->ebu = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ebu_host->ebu)) + return PTR_ERR(ebu_host->ebu); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsnand"); + ebu_host->hsnand = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ebu_host->hsnand)) + return PTR_ERR(ebu_host->hsnand); + + ret = device_property_read_u32(dev, "reg", &cs); + if (ret) { + dev_err(dev, "failed to get chip select: %d\n", ret); + return ret; + } + ebu_host->cs_num = cs; + + resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname); + ebu_host->cs[cs].chipaddr = devm_ioremap_resource(dev, res); + ebu_host->cs[cs].nand_pa = res->start; + if (IS_ERR(ebu_host->cs[cs].chipaddr)) + return PTR_ERR(ebu_host->cs[cs].chipaddr); + + ebu_host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ebu_host->clk)) + return dev_err_probe(dev, PTR_ERR(ebu_host->clk), + "failed to get clock\n"); + + ret = clk_prepare_enable(ebu_host->clk); + if (ret) { + dev_err(dev, "failed to enable clock: %d\n", ret); + return ret; + } + ebu_host->clk_rate = clk_get_rate(ebu_host->clk); + + ebu_host->dma_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(ebu_host->dma_tx)) + return dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx), + "failed to request DMA tx chan!.\n"); + + ebu_host->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(ebu_host->dma_rx)) + return dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx), + "failed to request DMA rx chan!.\n"); + + resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs); + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+	if (!res)
+		return -EINVAL;
+	ebu_host->cs[cs].addr_sel = res->start;
+	writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
+	       ebu_host->ebu + EBU_ADDR_SEL(cs));
+
+	nand_set_flash_node(&ebu_host->chip, dev->of_node);
+	mtd = nand_to_mtd(&ebu_host->chip);
+	if (!mtd->name) {
+		dev_err(ebu_host->dev, "NAND label property is mandatory\n");
+		return -EINVAL;
+	}
+
+	mtd->dev.parent = dev;
+	ebu_host->dev = dev;
+
+	platform_set_drvdata(pdev, ebu_host);
+	nand_set_controller_data(&ebu_host->chip, ebu_host);
+
+	nand = &ebu_host->chip;
+	nand->controller = &ebu_host->controller;
+	nand->controller->ops = &ebu_nand_controller_ops;
+
+	/* Scan to find existence of the device */
+	ret = nand_scan(&ebu_host->chip, 1);
+	if (ret)
+		goto err_cleanup_dma;
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret)
+		goto err_clean_nand;
+
+	return 0;
+
+err_clean_nand:
+	nand_cleanup(&ebu_host->chip);
+err_cleanup_dma:
+	ebu_dma_cleanup(ebu_host);
+	clk_disable_unprepare(ebu_host->clk);
+
+	return ret;
+}
+
+static int ebu_nand_remove(struct platform_device *pdev)
+{
+	struct ebu_nand_controller *ebu_host = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = mtd_device_unregister(nand_to_mtd(&ebu_host->chip));
+	WARN_ON(ret);
+	nand_cleanup(&ebu_host->chip);
+	ebu_nand_disable(&ebu_host->chip);
+	ebu_dma_cleanup(ebu_host);
+	clk_disable_unprepare(ebu_host->clk);
+
+	return 0;
+}
+
+static const struct of_device_id ebu_nand_match[] = {
+	{ .compatible = "intel,nand-controller" },
+	{ .compatible = "intel,lgm-ebunand" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, ebu_nand_match);
+
+static struct platform_driver ebu_nand_driver = {
+	.probe = ebu_nand_probe,
+	.remove = ebu_nand_remove,
+	.driver = {
+		.name = "intel-nand-controller",
+		.of_match_table = ebu_nand_match,
+	},
+
+};
+module_platform_driver(ebu_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
+MODULE_DESCRIPTION("Intel's LGM External Bus NAND Controller driver");
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 9e728c731795..452ecaf7775a 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -31,7 +31,6 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/mtd/nand_ecc.h>
 
 #define DRV_NAME "lpc32xx_mlc"
 
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index dc7785e30d2f..6b7269cfb7d8 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -23,7 +23,6 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/mtd/nand_ecc.h>
 #include <linux/gpio.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
@@ -803,7 +802,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
 	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
 	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
 	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
-	chip->ecc.correct = nand_correct_data;
+	chip->ecc.correct = rawnand_sw_hamming_correct;
 	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
 
 	/*
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index f5ca2002d08e..42d4881d598d 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -2679,12 +2679,6 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc
*nfc, mtd->dev.parent = dev; /* - * Default to HW ECC engine mode. If the nand-ecc-mode property is given - * in the DT node, this entry will be overwritten in nand_scan_ident(). - */ - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; - - /* * Save a reference value for timing registers before * ->setup_interface() is called. */ diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 48e6dac96be6..817bddccb775 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -510,7 +510,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf, } static void meson_nfc_dma_buffer_release(struct nand_chip *nand, - int infolen, int datalen, + int datalen, int infolen, enum dma_data_direction dir) { struct meson_nfc *nfc = nand_get_controller_data(nand); @@ -1044,9 +1044,12 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc) ret = clk_set_rate(nfc->device_clk, 24000000); if (ret) - goto err_phase_rx; + goto err_disable_rx; return 0; + +err_disable_rx: + clk_disable_unprepare(nfc->phase_rx); err_phase_rx: clk_disable_unprepare(nfc->phase_tx); err_phase_tx: diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 684c51e5e60d..fd705dd1768d 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -21,7 +21,6 @@ #include <linux/completion.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/platform_data/mtd-mxc_nand.h> #define DRIVER_NAME "mxc_nand" @@ -184,7 +183,6 @@ struct mxc_nand_host { unsigned int buf_start; const struct mxc_nand_devtype_data *devtype_data; - struct mxc_nand_platform_data pdata; }; static const char * const part_probes[] = { @@ -1611,70 +1609,16 @@ static inline int is_imx53_nfc(struct mxc_nand_host *host) return host->devtype_data == &imx53_nand_devtype_data; } -static const struct platform_device_id mxcnd_devtype[] = { - { - .name = "imx21-nand", - .driver_data = (kernel_ulong_t) &imx21_nand_devtype_data, - }, { - .name = "imx27-nand", - .driver_data = (kernel_ulong_t) &imx27_nand_devtype_data, - }, { - .name = "imx25-nand", - .driver_data = (kernel_ulong_t) &imx25_nand_devtype_data, - }, { - .name = "imx51-nand", - .driver_data = (kernel_ulong_t) &imx51_nand_devtype_data, - }, { - .name = "imx53-nand", - .driver_data = (kernel_ulong_t) &imx53_nand_devtype_data, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(platform, mxcnd_devtype); - -#ifdef CONFIG_OF static const struct of_device_id mxcnd_dt_ids[] = { - { - .compatible = "fsl,imx21-nand", - .data = &imx21_nand_devtype_data, - }, { - .compatible = "fsl,imx27-nand", - .data = &imx27_nand_devtype_data, - }, { - .compatible = "fsl,imx25-nand", - .data = &imx25_nand_devtype_data, - }, { - .compatible = "fsl,imx51-nand", - .data = &imx51_nand_devtype_data, - }, { - .compatible = "fsl,imx53-nand", - .data = &imx53_nand_devtype_data, - }, + { .compatible = "fsl,imx21-nand", .data = &imx21_nand_devtype_data, }, + { .compatible = "fsl,imx27-nand", .data = &imx27_nand_devtype_data, }, + { .compatible = "fsl,imx25-nand", .data = &imx25_nand_devtype_data, }, + { .compatible = "fsl,imx51-nand", .data = &imx51_nand_devtype_data, }, + { .compatible = "fsl,imx53-nand", .data = &imx53_nand_devtype_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxcnd_dt_ids); -static int mxcnd_probe_dt(struct mxc_nand_host *host) -{ - struct device_node *np = host->dev->of_node; - const struct of_device_id *of_id = - of_match_device(mxcnd_dt_ids, host->dev); - - if (!np) - 
return 1; - - host->devtype_data = of_id->data; - - return 0; -} -#else -static int mxcnd_probe_dt(struct mxc_nand_host *host) -{ - return 1; -} -#endif - static int mxcnd_attach_chip(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); @@ -1800,20 +1744,7 @@ static int mxcnd_probe(struct platform_device *pdev) if (IS_ERR(host->clk)) return PTR_ERR(host->clk); - err = mxcnd_probe_dt(host); - if (err > 0) { - struct mxc_nand_platform_data *pdata = - dev_get_platdata(&pdev->dev); - if (pdata) { - host->pdata = *pdata; - host->devtype_data = (struct mxc_nand_devtype_data *) - pdev->id_entry->driver_data; - } else { - err = -ENODEV; - } - } - if (err < 0) - return err; + host->devtype_data = device_get_match_data(&pdev->dev); if (!host->devtype_data->setup_interface) this->options |= NAND_KEEP_TIMINGS; @@ -1843,14 +1774,6 @@ static int mxcnd_probe(struct platform_device *pdev) this->legacy.select_chip = host->devtype_data->select_chip; - /* NAND bus width determines access functions used by upper layer */ - if (host->pdata.width == 2) - this->options |= NAND_BUSWIDTH_16; - - /* update flash based bbt */ - if (host->pdata.flash_bbt) - this->bbt_options |= NAND_BBT_USE_FLASH; - init_completion(&host->op_completion); host->irq = platform_get_irq(pdev, 0); @@ -1891,9 +1814,7 @@ static int mxcnd_probe(struct platform_device *pdev) goto escan; /* Register the partitions */ - err = mtd_device_parse_register(mtd, part_probes, NULL, - host->pdata.parts, - host->pdata.nr_parts); + err = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0); if (err) goto cleanup_nand; @@ -1930,7 +1851,6 @@ static struct platform_driver mxcnd_driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(mxcnd_dt_ids), }, - .id_table = mxcnd_devtype, .probe = mxcnd_probe, .remove = mxcnd_remove, }; diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index d66b5b0971fa..da1070993994 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -12,8 +12,8 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mtd/mtd.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/platform_device.h> #include "internals.h" diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 1f0d542d5923..c33fa1b1847f 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -35,8 +35,8 @@ #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> -#include <linux/mtd/nand_ecc.h> -#include <linux/mtd/nand_bch.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> +#include <linux/mtd/nand-ecc-sw-bch.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/io.h> @@ -5139,6 +5139,118 @@ static void nand_scan_ident_cleanup(struct nand_chip *chip) kfree(chip->parameters.onfi); } +int rawnand_sw_hamming_init(struct nand_chip *chip) +{ + struct nand_ecc_sw_hamming_conf *engine_conf; + struct nand_device *base = &chip->base; + int ret; + + base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING; + base->ecc.user_conf.strength = chip->ecc.strength; + base->ecc.user_conf.step_size = chip->ecc.size; + + ret = nand_ecc_sw_hamming_init_ctx(base); + if (ret) + return ret; + + engine_conf = base->ecc.ctx.priv; + + if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER) + engine_conf->sm_order = true; + + chip->ecc.size = base->ecc.ctx.conf.step_size; + 
chip->ecc.strength = base->ecc.ctx.conf.strength; + chip->ecc.total = base->ecc.ctx.total; + chip->ecc.steps = engine_conf->nsteps; + chip->ecc.bytes = engine_conf->code_size; + + return 0; +} +EXPORT_SYMBOL(rawnand_sw_hamming_init); + +int rawnand_sw_hamming_calculate(struct nand_chip *chip, + const unsigned char *buf, + unsigned char *code) +{ + struct nand_device *base = &chip->base; + + return nand_ecc_sw_hamming_calculate(base, buf, code); +} +EXPORT_SYMBOL(rawnand_sw_hamming_calculate); + +int rawnand_sw_hamming_correct(struct nand_chip *chip, + unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc) +{ + struct nand_device *base = &chip->base; + + return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc); +} +EXPORT_SYMBOL(rawnand_sw_hamming_correct); + +void rawnand_sw_hamming_cleanup(struct nand_chip *chip) +{ + struct nand_device *base = &chip->base; + + nand_ecc_sw_hamming_cleanup_ctx(base); +} +EXPORT_SYMBOL(rawnand_sw_hamming_cleanup); + +int rawnand_sw_bch_init(struct nand_chip *chip) +{ + struct nand_device *base = &chip->base; + struct nand_ecc_sw_bch_conf *engine_conf; + int ret; + + base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH; + base->ecc.user_conf.step_size = chip->ecc.size; + base->ecc.user_conf.strength = chip->ecc.strength; + + ret = nand_ecc_sw_bch_init_ctx(base); + if (ret) + return ret; + + engine_conf = base->ecc.ctx.priv; + + chip->ecc.size = base->ecc.ctx.conf.step_size; + chip->ecc.strength = base->ecc.ctx.conf.strength; + chip->ecc.total = base->ecc.ctx.total; + chip->ecc.steps = engine_conf->nsteps; + chip->ecc.bytes = engine_conf->code_size; + + return 0; +} +EXPORT_SYMBOL(rawnand_sw_bch_init); + +static int rawnand_sw_bch_calculate(struct nand_chip *chip, + const unsigned char *buf, + unsigned char *code) +{ + struct nand_device *base = &chip->base; + + return nand_ecc_sw_bch_calculate(base, buf, code); +} + +int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) +{ + struct nand_device *base = &chip->base; + + return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc); +} +EXPORT_SYMBOL(rawnand_sw_bch_correct); + +void rawnand_sw_bch_cleanup(struct nand_chip *chip) +{ + struct nand_device *base = &chip->base; + + nand_ecc_sw_bch_cleanup_ctx(base); +} +EXPORT_SYMBOL(rawnand_sw_bch_cleanup); + static int nand_set_ecc_on_host_ops(struct nand_chip *chip) { struct nand_ecc_ctrl *ecc = &chip->ecc; @@ -5203,14 +5315,15 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) struct mtd_info *mtd = nand_to_mtd(chip); struct nand_device *nanddev = mtd_to_nanddev(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; + int ret; if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT)) return -EINVAL; switch (ecc->algo) { case NAND_ECC_ALGO_HAMMING: - ecc->calculate = nand_calculate_ecc; - ecc->correct = nand_correct_data; + ecc->calculate = rawnand_sw_hamming_calculate; + ecc->correct = rawnand_sw_hamming_correct; ecc->read_page = nand_read_page_swecc; ecc->read_subpage = nand_read_subpage; ecc->write_page = nand_write_page_swecc; @@ -5228,14 +5341,20 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER; + ret = rawnand_sw_hamming_init(chip); + if (ret) { + WARN(1, "Hamming ECC initialization failed!\n"); + return ret; + } + return 0; case NAND_ECC_ALGO_BCH: - if (!mtd_nand_has_bch()) { + if 
(!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) { WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n"); return -EINVAL; } - ecc->calculate = nand_bch_calculate_ecc; - ecc->correct = nand_bch_correct_data; + ecc->calculate = rawnand_sw_bch_calculate; + ecc->correct = rawnand_sw_bch_correct; ecc->read_page = nand_read_page_swecc; ecc->read_subpage = nand_read_subpage; ecc->write_page = nand_write_page_swecc; @@ -5247,55 +5366,20 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) ecc->write_oob = nand_write_oob_std; /* - * Board driver should supply ecc.size and ecc.strength - * values to select how many bits are correctable. - * Otherwise, default to 4 bits for large page devices. - */ - if (!ecc->size && (mtd->oobsize >= 64)) { - ecc->size = 512; - ecc->strength = 4; - } - - /* - * if no ecc placement scheme was provided pickup the default - * large page one. - */ - if (!mtd->ooblayout) { - /* handle large page devices only */ - if (mtd->oobsize < 64) { - WARN(1, "OOB layout is required when using software BCH on small pages\n"); - return -EINVAL; - } - - mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout()); - - } - - /* * We can only maximize ECC config when the default layout is * used, otherwise we don't know how many bytes can really be * used. */ - if (mtd->ooblayout == nand_get_large_page_ooblayout() && - nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) { - int steps, bytes; - - /* Always prefer 1k blocks over 512bytes ones */ - ecc->size = 1024; - steps = mtd->writesize / ecc->size; + if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH && + mtd->ooblayout != nand_get_large_page_ooblayout()) + nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH; - /* Reserve 2 bytes for the BBM */ - bytes = (mtd->oobsize - 2) / steps; - ecc->strength = bytes * 8 / fls(8 * ecc->size); - } - - /* See nand_bch_init() for details. */ - ecc->bytes = 0; - ecc->priv = nand_bch_init(mtd); - if (!ecc->priv) { + ret = rawnand_sw_bch_init(chip); + if (ret) { WARN(1, "BCH ECC initialization failed!\n"); - return -EINVAL; + return ret; } + return 0; default: WARN(1, "Unsupported ECC algorithm!\n"); @@ -5639,7 +5723,9 @@ static int nand_scan_tail(struct nand_chip *chip) */ if (!mtd->ooblayout && !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT && - ecc->algo == NAND_ECC_ALGO_BCH)) { + ecc->algo == NAND_ECC_ALGO_BCH) && + !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT && + ecc->algo == NAND_ECC_ALGO_HAMMING)) { switch (mtd->oobsize) { case 8: case 16: @@ -5756,15 +5842,18 @@ static int nand_scan_tail(struct nand_chip *chip) * Set the number of read / write steps for one page depending on ECC * mode. 
*/ - ecc->steps = mtd->writesize / ecc->size; + if (!ecc->steps) + ecc->steps = mtd->writesize / ecc->size; if (ecc->steps * ecc->size != mtd->writesize) { WARN(1, "Invalid ECC parameters\n"); ret = -EINVAL; goto err_nand_manuf_cleanup; } - ecc->total = ecc->steps * ecc->bytes; - chip->base.ecc.ctx.total = ecc->total; + if (!ecc->total) { + ecc->total = ecc->steps * ecc->bytes; + chip->base.ecc.ctx.total = ecc->total; + } if (ecc->total > mtd->oobsize) { WARN(1, "Total number of ECC bytes exceeded oobsize\n"); @@ -5953,9 +6042,12 @@ EXPORT_SYMBOL(nand_scan_with_ids); */ void nand_cleanup(struct nand_chip *chip) { - if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT && - chip->ecc.algo == NAND_ECC_ALGO_BCH) - nand_bch_free((struct nand_bch_control *)chip->ecc.priv); + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) { + if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING) + rawnand_sw_hamming_cleanup(chip); + else if (chip->ecc.algo == NAND_ECC_ALGO_BCH) + rawnand_sw_bch_cleanup(chip); + } nanddev_cleanup(&chip->base); diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 344a24fd2ca8..dced32a126d9 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -1087,7 +1087,7 @@ static int nand_update_bbt(struct nand_chip *this, loff_t offs) } /** - * mark_bbt_regions - [GENERIC] mark the bad block table regions + * mark_bbt_region - [GENERIC] mark the bad block table regions * @this: the NAND device * @td: bad block table descriptor * diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c deleted file mode 100644 index 9d19ac14c196..000000000000 --- a/drivers/mtd/nand/raw/nand_bch.c +++ /dev/null @@ -1,219 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * This file provides ECC correction for more than 1 bit per block of data, - * using binary BCH codes. It relies on the generic BCH library lib/bch.c. 
- * - * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> - */ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/bitops.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_bch.h> -#include <linux/bch.h> - -/** - * struct nand_bch_control - private NAND BCH control structure - * @bch: BCH control structure - * @errloc: error location array - * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid - */ -struct nand_bch_control { - struct bch_control *bch; - unsigned int *errloc; - unsigned char *eccmask; -}; - -/** - * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block - * @chip: NAND chip object - * @buf: input buffer with raw data - * @code: output buffer with ECC - */ -int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf, - unsigned char *code) -{ - struct nand_bch_control *nbc = chip->ecc.priv; - unsigned int i; - - memset(code, 0, chip->ecc.bytes); - bch_encode(nbc->bch, buf, chip->ecc.size, code); - - /* apply mask so that an erased page is a valid codeword */ - for (i = 0; i < chip->ecc.bytes; i++) - code[i] ^= nbc->eccmask[i]; - - return 0; -} -EXPORT_SYMBOL(nand_bch_calculate_ecc); - -/** - * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s) - * @chip: NAND chip object - * @buf: raw data read from the chip - * @read_ecc: ECC from the chip - * @calc_ecc: the ECC calculated from raw data - * - * Detect and correct bit errors for a data byte block - */ -int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf, - unsigned char *read_ecc, unsigned char *calc_ecc) -{ - struct nand_bch_control *nbc = chip->ecc.priv; - unsigned int *errloc = nbc->errloc; - int i, count; - - count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, - NULL, errloc); - if (count > 0) { - for (i = 0; i < count; i++) { - if (errloc[i] < (chip->ecc.size*8)) - /* error is located in data, correct it */ - buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); - /* else error in ecc, no action needed */ - - pr_debug("%s: corrected bitflip %u\n", __func__, - errloc[i]); - } - } else if (count < 0) { - pr_err("ecc unrecoverable error\n"); - count = -EBADMSG; - } - return count; -} -EXPORT_SYMBOL(nand_bch_correct_data); - -/** - * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction - * @mtd: MTD block structure - * - * Returns: - * a pointer to a new NAND BCH control structure, or NULL upon failure - * - * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes - * are used to compute BCH parameters m (Galois field order) and t (error - * correction capability). @eccbytes should be equal to the number of bytes - * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8. 
- * - * Example: to configure 4 bit correction per 512 bytes, you should pass - * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8) - * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits) - */ -struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) -{ - struct nand_chip *nand = mtd_to_nand(mtd); - unsigned int m, t, eccsteps, i; - struct nand_bch_control *nbc = NULL; - unsigned char *erased_page; - unsigned int eccsize = nand->ecc.size; - unsigned int eccbytes = nand->ecc.bytes; - unsigned int eccstrength = nand->ecc.strength; - - if (!eccbytes && eccstrength) { - eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8); - nand->ecc.bytes = eccbytes; - } - - if (!eccsize || !eccbytes) { - pr_warn("ecc parameters not supplied\n"); - goto fail; - } - - m = fls(1+8*eccsize); - t = (eccbytes*8)/m; - - nbc = kzalloc(sizeof(*nbc), GFP_KERNEL); - if (!nbc) - goto fail; - - nbc->bch = bch_init(m, t, 0, false); - if (!nbc->bch) - goto fail; - - /* verify that eccbytes has the expected value */ - if (nbc->bch->ecc_bytes != eccbytes) { - pr_warn("invalid eccbytes %u, should be %u\n", - eccbytes, nbc->bch->ecc_bytes); - goto fail; - } - - eccsteps = mtd->writesize/eccsize; - - /* Check that we have an oob layout description. */ - if (!mtd->ooblayout) { - pr_warn("missing oob scheme"); - goto fail; - } - - /* sanity checks */ - if (8*(eccsize+eccbytes) >= (1 << m)) { - pr_warn("eccsize %u is too large\n", eccsize); - goto fail; - } - - /* - * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(), - * which is called by mtd_ooblayout_count_eccbytes(). - * Make sure they are properly initialized before calling - * mtd_ooblayout_count_eccbytes(). - * FIXME: we should probably rework the sequencing in nand_scan_tail() - * to avoid setting those fields twice. 
- */ - nand->ecc.steps = eccsteps; - nand->ecc.total = eccsteps * eccbytes; - nand->base.ecc.ctx.total = nand->ecc.total; - if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) { - pr_warn("invalid ecc layout\n"); - goto fail; - } - - nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL); - nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL); - if (!nbc->eccmask || !nbc->errloc) - goto fail; - /* - * compute and store the inverted ecc of an erased ecc block - */ - erased_page = kmalloc(eccsize, GFP_KERNEL); - if (!erased_page) - goto fail; - - memset(erased_page, 0xff, eccsize); - bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask); - kfree(erased_page); - - for (i = 0; i < eccbytes; i++) - nbc->eccmask[i] ^= 0xff; - - if (!eccstrength) - nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize); - - return nbc; -fail: - nand_bch_free(nbc); - return NULL; -} -EXPORT_SYMBOL(nand_bch_init); - -/** - * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources - * @nbc: NAND BCH control structure - */ -void nand_bch_free(struct nand_bch_control *nbc) -{ - if (nbc) { - bch_free(nbc->bch); - kfree(nbc->errloc); - kfree(nbc->eccmask); - kfree(nbc); - } -} -EXPORT_SYMBOL(nand_bch_free); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); -MODULE_DESCRIPTION("NAND software BCH ECC support"); diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index 2bcc03714432..eccc18b266d5 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -192,9 +192,10 @@ static void panic_nand_wait_ready(struct nand_chip *chip, unsigned long timeo) */ void nand_wait_ready(struct nand_chip *chip) { + struct mtd_info *mtd = nand_to_mtd(chip); unsigned long timeo = 400; - if (in_interrupt() || oops_in_progress) + if (mtd->oops_panic_write) return panic_nand_wait_ready(chip, timeo); /* Wait until command is processed or timeout occurs */ @@ -531,7 +532,7 @@ EXPORT_SYMBOL(nand_get_set_features_notsupp); */ static int nand_wait(struct nand_chip *chip) { - + struct mtd_info *mtd = nand_to_mtd(chip); unsigned long timeo = 400; u8 status; int ret; @@ -546,9 +547,9 @@ static int nand_wait(struct nand_chip *chip) if (ret) return ret; - if (in_interrupt() || oops_in_progress) + if (mtd->oops_panic_write) { panic_nand_wait(chip, timeo); - else { + } else { timeo = jiffies + msecs_to_jiffies(timeo); do { if (chip->legacy.dev_ready) { diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index a8048cb8d220..f2b9250c0ea8 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -23,7 +23,6 @@ #include <linux/string.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_bch.h> #include <linux/mtd/partitions.h> #include <linux/delay.h> #include <linux/list.h> @@ -2214,7 +2213,7 @@ static int ns_attach_chip(struct nand_chip *chip) if (!bch) return 0; - if (!mtd_nand_has_bch()) { + if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) { NS_ERR("BCH ECC support is disabled\n"); return -EINVAL; } diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c index 0fb4ba93c41e..338d6b1a189e 100644 --- a/drivers/mtd/nand/raw/ndfc.c +++ b/drivers/mtd/nand/raw/ndfc.c @@ -18,7 +18,6 @@ */ #include <linux/module.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/mtd/ndfc.h> #include <linux/slab.h> @@ -146,7 +145,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc, 
chip->controller = &ndfc->ndfc_control; chip->legacy.read_buf = ndfc_read_buf; chip->legacy.write_buf = ndfc_write_buf; - chip->ecc.correct = nand_correct_data; + chip->ecc.correct = rawnand_sw_hamming_correct; chip->ecc.hwctl = ndfc_enable_hwecc; chip->ecc.calculate = ndfc_calculate_ecc; chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index 512f60780a50..fbb9955f2467 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -23,7 +23,6 @@ #include <linux/of.h> #include <linux/of_device.h> -#include <linux/mtd/nand_bch.h> #include <linux/platform_data/elm.h> #include <linux/omap-gpmc.h> @@ -185,6 +184,7 @@ static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd) * @dma_mode: dma mode enable (1) or disable (0) * @u32_count: number of bytes to be transferred * @is_write: prefetch read(0) or write post(1) mode + * @info: NAND device structure containing platform data */ static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode, unsigned int u32_count, int is_write, struct omap_nand_info *info) @@ -214,7 +214,7 @@ static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode, return 0; } -/** +/* * omap_prefetch_reset - disables and stops the prefetch engine */ static int omap_prefetch_reset(int cs, struct omap_nand_info *info) @@ -939,7 +939,7 @@ static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat, /** * omap_enable_hwecc - This function enables the hardware ecc functionality - * @mtd: MTD device structure + * @chip: NAND chip object * @mode: Read/Write mode */ static void omap_enable_hwecc(struct nand_chip *chip, int mode) @@ -1009,7 +1009,7 @@ static int omap_wait(struct nand_chip *this) /** * omap_dev_ready - checks the NAND Ready GPIO line - * @mtd: MTD device structure + * @chip: NAND chip object * * Returns true if ready and false if busy. */ @@ -1022,7 +1022,7 @@ static int omap_dev_ready(struct nand_chip *chip) /** * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation - * @mtd: MTD device structure + * @chip: NAND chip object * @mode: Read/Write mode * * When using BCH with SW correction (i.e. no ELM), sector size is set @@ -1131,7 +1131,7 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, * _omap_calculate_ecc_bch - Generate ECC bytes for one sector * @mtd: MTD device structure * @dat: The pointer to data on which ecc is computed - * @ecc_code: The ecc_code buffer + * @ecc_calc: The ecc_code buffer * @i: The sector number (for a multi sector page) * * Support calculating of BCH4/8/16 ECC vectors for one sector @@ -1259,7 +1259,7 @@ static int _omap_calculate_ecc_bch(struct mtd_info *mtd, * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction * @chip: NAND chip object * @dat: The pointer to data on which ecc is computed - * @ecc_code: The ecc_code buffer + * @ecc_calc: Buffer storing the calculated ECC bytes * * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used * when SW based correction is required as ECC is required for one sector @@ -1275,7 +1275,7 @@ static int omap_calculate_ecc_bch_sw(struct nand_chip *chip, * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors * @mtd: MTD device structure * @dat: The pointer to data on which ecc is computed - * @ecc_code: The ecc_code buffer + * @ecc_calc: Buffer storing the calculated ECC bytes * * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go. 
*/ @@ -1674,7 +1674,8 @@ static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf, /** * is_elm_present - checks for presence of ELM module by scanning DT nodes - * @omap_nand_info: NAND device structure containing platform data + * @info: NAND device structure containing platform data + * @elm_node: ELM's DT node */ static bool is_elm_present(struct omap_nand_info *info, struct device_node *elm_node) @@ -2041,16 +2042,16 @@ static int omap_nand_attach_chip(struct nand_chip *chip) chip->ecc.bytes = 7; chip->ecc.strength = 4; chip->ecc.hwctl = omap_enable_hwecc_bch; - chip->ecc.correct = nand_bch_correct_data; + chip->ecc.correct = rawnand_sw_bch_correct; chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = chip->ecc.bytes + 1; /* Software BCH library is used for locating errors */ - chip->ecc.priv = nand_bch_init(mtd); - if (!chip->ecc.priv) { + err = rawnand_sw_bch_init(chip); + if (err) { dev_err(dev, "Unable to use BCH library\n"); - return -EINVAL; + return err; } break; @@ -2083,16 +2084,16 @@ static int omap_nand_attach_chip(struct nand_chip *chip) chip->ecc.bytes = 13; chip->ecc.strength = 8; chip->ecc.hwctl = omap_enable_hwecc_bch; - chip->ecc.correct = nand_bch_correct_data; + chip->ecc.correct = rawnand_sw_bch_correct; chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = chip->ecc.bytes + 1; /* Software BCH library is used for locating errors */ - chip->ecc.priv = nand_bch_init(mtd); - if (!chip->ecc.priv) { + err = rawnand_sw_bch_init(chip); + if (err) { dev_err(dev, "unable to use BCH library\n"); - return -EINVAL; + return err; } break; @@ -2195,7 +2196,6 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip = &info->nand; mtd = nand_to_mtd(nand_chip); mtd->dev.parent = &pdev->dev; - nand_chip->ecc.priv = NULL; nand_set_flash_node(nand_chip, dev->of_node); if (!mtd->name) { @@ -2271,10 +2271,9 @@ cleanup_nand: return_error: if (!IS_ERR_OR_NULL(info->dma)) dma_release_channel(info->dma); - if (nand_chip->ecc.priv) { - nand_bch_free(nand_chip->ecc.priv); - nand_chip->ecc.priv = NULL; - } + + rawnand_sw_bch_cleanup(nand_chip); + return err; } @@ -2285,10 +2284,8 @@ static int omap_nand_remove(struct platform_device *pdev) struct omap_nand_info *info = mtd_to_omap(mtd); int ret; - if (nand_chip->ecc.priv) { - nand_bch_free(nand_chip->ecc.priv); - nand_chip->ecc.priv = NULL; - } + rawnand_sw_bch_cleanup(nand_chip); + if (info->dma) dma_release_channel(info->dma); ret = mtd_device_unregister(mtd); diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 4b799521a427..550695a4c1ab 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -96,6 +96,9 @@ static u32 elm_read_reg(struct elm_info *info, int offset) * elm_config - Configure ELM module * @dev: ELM device * @bch_type: Type of BCH ecc + * @ecc_steps: ECC steps to assign to config + * @ecc_step_size: ECC step size to assign to config + * @ecc_syndrome_size: ECC syndrome size to assign to config */ int elm_config(struct device *dev, enum bch_ecc bch_type, int ecc_steps, int ecc_step_size, int ecc_syndrome_size) @@ -432,7 +435,7 @@ static int elm_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -/** +/* * elm_context_save * saves ELM configurations to preserve them across Hardware powered-down */ @@ -480,7 +483,7 @@ static 
int elm_context_save(struct elm_info *info) return 0; } -/** +/* * elm_context_restore * writes configurations saved duing power-down back into ELM registers */ diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 68c08772d7c2..789f33312c15 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -14,7 +14,6 @@ #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 777fb0de0680..667e4bfe369f 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -145,6 +145,7 @@ #define OP_PAGE_READ 0x2 #define OP_PAGE_READ_WITH_ECC 0x3 #define OP_PAGE_READ_WITH_ECC_SPARE 0x4 +#define OP_PAGE_READ_ONFI_READ 0x5 #define OP_PROGRAM_PAGE 0x6 #define OP_PAGE_PROGRAM_WITH_ECC 0x7 #define OP_PROGRAM_PAGE_SPARE 0x9 @@ -460,12 +461,14 @@ struct qcom_nand_host { * @ecc_modes - ecc mode for NAND * @is_bam - whether NAND controller is using BAM * @is_qpic - whether NAND CTRL is part of qpic IP + * @qpic_v2 - flag to indicate QPIC IP version 2 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset */ struct qcom_nandc_props { u32 ecc_modes; bool is_bam; bool is_qpic; + bool qpic_v2; u32 dev_cmd_reg_start; }; @@ -1164,7 +1167,13 @@ static int nandc_param(struct qcom_nand_host *host) * in use. we configure the controller to perform a raw read of 512 * bytes to read onfi params */ - nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE); + if (nandc->props->qpic_v2) + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ | + PAGE_ACC | LAST_PAGE); + else + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | + PAGE_ACC | LAST_PAGE); + nandc_set_reg(nandc, NAND_ADDR0, 0); nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE @@ -1180,21 +1189,28 @@ static int nandc_param(struct qcom_nand_host *host) | 1 << DEV0_CFG1_ECC_DISABLE); nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); - /* configure CMD1 and VLD for ONFI param probing */ - nandc_set_reg(nandc, NAND_DEV_CMD_VLD, - (nandc->vld & ~READ_START_VLD)); - nandc_set_reg(nandc, NAND_DEV_CMD1, - (nandc->cmd1 & ~(0xFF << READ_ADDR)) - | NAND_CMD_PARAM << READ_ADDR); + /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ + if (!nandc->props->qpic_v2) { + nandc_set_reg(nandc, NAND_DEV_CMD_VLD, + (nandc->vld & ~READ_START_VLD)); + nandc_set_reg(nandc, NAND_DEV_CMD1, + (nandc->cmd1 & ~(0xFF << READ_ADDR)) + | NAND_CMD_PARAM << READ_ADDR); + } nandc_set_reg(nandc, NAND_EXEC_CMD, 1); - nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1); - nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); + if (!nandc->props->qpic_v2) { + nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1); + nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); + } + nandc_set_read_loc(nandc, 0, 0, 512, 1); - write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); - write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); + if (!nandc->props->qpic_v2) { + write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); + write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); + } nandc->buf_count = 512; memset(nandc->data_buffer, 0xff, nandc->buf_count); @@ -1205,8 +1221,10 @@ static int nandc_param(struct qcom_nand_host *host) nandc->buf_count, 0); /* restore CMD1 and 
VLD regs */ - write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); - write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); + if (!nandc->props->qpic_v2) { + write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); + write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); + } return 0; } @@ -1570,6 +1588,8 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt) struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); int i; + nandc_read_buffer_sync(nandc, true); + for (i = 0; i < cw_cnt; i++) { u32 flash = le32_to_cpu(nandc->reg_read_buf[i]); @@ -2770,8 +2790,10 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) /* kill onenand */ if (!nandc->props->is_qpic) nandc_write(nandc, SFLASHC_BURST_CFG, 0); - nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), - NAND_DEV_CMD_VLD_VAL); + + if (!nandc->props->qpic_v2) + nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), + NAND_DEV_CMD_VLD_VAL); /* enable ADM or BAM DMA */ if (nandc->props->is_bam) { @@ -2791,8 +2813,10 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) } /* save the original values of these registers */ - nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1)); - nandc->vld = NAND_DEV_CMD_VLD_VAL; + if (!nandc->props->qpic_v2) { + nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1)); + nandc->vld = NAND_DEV_CMD_VLD_VAL; + } return 0; } @@ -3050,6 +3074,14 @@ static const struct qcom_nandc_props ipq8074_nandc_props = { .dev_cmd_reg_start = 0x7000, }; +static const struct qcom_nandc_props sdx55_nandc_props = { + .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), + .is_bam = true, + .is_qpic = true, + .qpic_v2 = true, + .dev_cmd_reg_start = 0x7000, +}; + /* * data will hold a struct pointer containing more differences once we support * more controller variants @@ -3064,9 +3096,17 @@ static const struct of_device_id qcom_nandc_of_match[] = { .data = &ipq4019_nandc_props, }, { + .compatible = "qcom,ipq6018-nand", + .data = &ipq8074_nandc_props, + }, + { .compatible = "qcom,ipq8074-nand", .data = &ipq8074_nandc_props, }, + { + .compatible = "qcom,sdx55-nand", + .data = &sdx55_nandc_props, + }, {} }; MODULE_DEVICE_TABLE(of, qcom_nandc_of_match); diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c new file mode 100644 index 000000000000..796b678cb108 --- /dev/null +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -0,0 +1,1495 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Rockchip NAND Flash controller driver. + * Copyright (C) 2020 Rockchip Inc. + * Author: Yifeng Zhao <yifeng.zhao@rock-chips.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* + * NFC Page Data Layout: + * 1024 bytes data + 4Bytes sys data + 28Bytes~124Bytes ECC data + + * 1024 bytes data + 4Bytes sys data + 28Bytes~124Bytes ECC data + + * ...... + * NAND Page Data Layout: + * 1024 * n data + m Bytes oob + * Original Bad Block Mask Location: + * First byte of oob(spare). + * nand_chip->oob_poi data layout: + * 4Bytes sys data + .... + 4Bytes sys data + ECC data. 
+ */ + +/* NAND controller register definition */ +#define NFC_READ (0) +#define NFC_WRITE (1) + +#define NFC_FMCTL (0x00) +#define FMCTL_CE_SEL_M 0xFF +#define FMCTL_CE_SEL(x) (1 << (x)) +#define FMCTL_WP BIT(8) +#define FMCTL_RDY BIT(9) + +#define NFC_FMWAIT (0x04) +#define FLCTL_RST BIT(0) +#define FLCTL_WR (1) /* 0: read, 1: write */ +#define FLCTL_XFER_ST BIT(2) +#define FLCTL_XFER_EN BIT(3) +#define FLCTL_ACORRECT BIT(10) /* Auto correct error bits. */ +#define FLCTL_XFER_READY BIT(20) +#define FLCTL_XFER_SECTOR (22) +#define FLCTL_TOG_FIX BIT(29) + +#define BCHCTL_BANK_M (7 << 5) +#define BCHCTL_BANK (5) + +#define DMA_ST BIT(0) +#define DMA_WR (1) /* 0: write, 1: read */ +#define DMA_EN BIT(2) +#define DMA_AHB_SIZE (3) /* 0: 1, 1: 2, 2: 4 */ +#define DMA_BURST_SIZE (6) /* 0: 1, 3: 4, 5: 8, 7: 16 */ +#define DMA_INC_NUM (9) /* 1 - 16 */ + +#define ECC_ERR_CNT(x, e) ((((x) >> (e).low) & (e).low_mask) |\ + (((x) >> (e).high) & (e).high_mask) << (e).low_bn) +#define INT_DMA BIT(0) +#define NFC_BANK (0x800) +#define NFC_BANK_STEP (0x100) +#define BANK_DATA (0x00) +#define BANK_ADDR (0x04) +#define BANK_CMD (0x08) +#define NFC_SRAM0 (0x1000) +#define NFC_SRAM1 (0x1400) +#define NFC_SRAM_SIZE (0x400) +#define NFC_TIMEOUT (500000) +#define NFC_MAX_OOB_PER_STEP 128 +#define NFC_MIN_OOB_PER_STEP 64 +#define MAX_DATA_SIZE 0xFFFC +#define MAX_ADDRESS_CYC 6 +#define NFC_ECC_MAX_MODES 4 +#define NFC_MAX_NSELS (8) /* Some Socs only have 1 or 2 CSs. */ +#define NFC_SYS_DATA_SIZE (4) /* 4 bytes sys data in oob pre 1024 data.*/ +#define RK_DEFAULT_CLOCK_RATE (150 * 1000 * 1000) /* 150 Mhz */ +#define ACCTIMING(csrw, rwpw, rwcs) ((csrw) << 12 | (rwpw) << 5 | (rwcs)) + +enum nfc_type { + NFC_V6, + NFC_V8, + NFC_V9, +}; + +/** + * struct rk_ecc_cnt_status: represent a ecc status data. + * @err_flag_bit: error flag bit index at register. + * @low: ECC count low bit index at register. + * @low_mask: mask bit. + * @low_bn: ECC count low bit number. + * @high: ECC count high bit index at register. + * @high_mask: mask bit + */ +struct ecc_cnt_status { + u8 err_flag_bit; + u8 low; + u8 low_mask; + u8 low_bn; + u8 high; + u8 high_mask; +}; + +/** + * @type: NFC version + * @ecc_strengths: ECC strengths + * @ecc_cfgs: ECC config values + * @flctl_off: FLCTL register offset + * @bchctl_off: BCHCTL register offset + * @dma_data_buf_off: DMA_DATA_BUF register offset + * @dma_oob_buf_off: DMA_OOB_BUF register offset + * @dma_cfg_off: DMA_CFG register offset + * @dma_st_off: DMA_ST register offset + * @bch_st_off: BCG_ST register offset + * @randmz_off: RANDMZ register offset + * @int_en_off: interrupt enable register offset + * @int_clr_off: interrupt clean register offset + * @int_st_off: interrupt status register offset + * @oob0_off: oob0 register offset + * @oob1_off: oob1 register offset + * @ecc0: represent ECC0 status data + * @ecc1: represent ECC1 status data + */ +struct nfc_cfg { + enum nfc_type type; + u8 ecc_strengths[NFC_ECC_MAX_MODES]; + u32 ecc_cfgs[NFC_ECC_MAX_MODES]; + u32 flctl_off; + u32 bchctl_off; + u32 dma_cfg_off; + u32 dma_data_buf_off; + u32 dma_oob_buf_off; + u32 dma_st_off; + u32 bch_st_off; + u32 randmz_off; + u32 int_en_off; + u32 int_clr_off; + u32 int_st_off; + u32 oob0_off; + u32 oob1_off; + struct ecc_cnt_status ecc0; + struct ecc_cnt_status ecc1; +}; + +struct rk_nfc_nand_chip { + struct list_head node; + struct nand_chip chip; + + u16 boot_blks; + u16 metadata_size; + u32 boot_ecc; + u32 timing; + + u8 nsels; + u8 sels[0]; + /* Nothing after this field. 
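/*
 * Illustrative sketch (not part of the driver): how ECC_ERR_CNT() splices an
 * error count out of a BCH status word whose count field is split in two,
 * using the NFC v6 ecc0 parameters from the config tables further down
 * (low bits 7:3, high bit 27). sk_* names are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

struct sk_ecc_cnt_status {
	uint8_t low, low_mask, low_bn, high, high_mask;
};

static unsigned int sk_ecc_err_cnt(uint32_t st, struct sk_ecc_cnt_status e)
{
	return ((st >> e.low) & e.low_mask) |
	       (((st >> e.high) & e.high_mask) << e.low_bn);
}

int main(void)
{
	struct sk_ecc_cnt_status ecc0 = {
		.low = 3, .low_mask = 0x1F, .low_bn = 5,
		.high = 27, .high_mask = 0x1,
	};
	uint32_t bch_st = (28u << 3) | (1u << 27);

	printf("%u bitflips\n", sk_ecc_err_cnt(bch_st, ecc0)); /* 28 + 32 = 60 */
	return 0;
}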
*/ +}; + +struct rk_nfc { + struct nand_controller controller; + const struct nfc_cfg *cfg; + struct device *dev; + + struct clk *nfc_clk; + struct clk *ahb_clk; + void __iomem *regs; + + u32 selected_bank; + u32 band_offset; + u32 cur_ecc; + u32 cur_timing; + + struct completion done; + struct list_head chips; + + u8 *page_buf; + u32 *oob_buf; + u32 page_buf_size; + u32 oob_buf_size; + + unsigned long assigned_cs; +}; + +static inline struct rk_nfc_nand_chip *rk_nfc_to_rknand(struct nand_chip *chip) +{ + return container_of(chip, struct rk_nfc_nand_chip, chip); +} + +static inline u8 *rk_nfc_buf_to_data_ptr(struct nand_chip *chip, const u8 *p, int i) +{ + return (u8 *)p + i * chip->ecc.size; +} + +static inline u8 *rk_nfc_buf_to_oob_ptr(struct nand_chip *chip, int i) +{ + u8 *poi; + + poi = chip->oob_poi + i * NFC_SYS_DATA_SIZE; + + return poi; +} + +static inline u8 *rk_nfc_buf_to_oob_ecc_ptr(struct nand_chip *chip, int i) +{ + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + u8 *poi; + + poi = chip->oob_poi + rknand->metadata_size + chip->ecc.bytes * i; + + return poi; +} + +static inline int rk_nfc_data_len(struct nand_chip *chip) +{ + return chip->ecc.size + chip->ecc.bytes + NFC_SYS_DATA_SIZE; +} + +static inline u8 *rk_nfc_data_ptr(struct nand_chip *chip, int i) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + + return nfc->page_buf + i * rk_nfc_data_len(chip); +} + +static inline u8 *rk_nfc_oob_ptr(struct nand_chip *chip, int i) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + + return nfc->page_buf + i * rk_nfc_data_len(chip) + chip->ecc.size; +} + +static int rk_nfc_hw_ecc_setup(struct nand_chip *chip, u32 strength) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + u32 reg, i; + + for (i = 0; i < NFC_ECC_MAX_MODES; i++) { + if (strength == nfc->cfg->ecc_strengths[i]) { + reg = nfc->cfg->ecc_cfgs[i]; + break; + } + } + + if (i >= NFC_ECC_MAX_MODES) + return -EINVAL; + + writel(reg, nfc->regs + nfc->cfg->bchctl_off); + + /* Save chip ECC setting */ + nfc->cur_ecc = strength; + + return 0; +} + +static void rk_nfc_select_chip(struct nand_chip *chip, int cs) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + u32 val; + + if (cs < 0) { + nfc->selected_bank = -1; + /* Deselect the currently selected target. */ + val = readl_relaxed(nfc->regs + NFC_FMCTL); + val &= ~FMCTL_CE_SEL_M; + writel(val, nfc->regs + NFC_FMCTL); + return; + } + + nfc->selected_bank = rknand->sels[cs]; + nfc->band_offset = NFC_BANK + nfc->selected_bank * NFC_BANK_STEP; + + val = readl_relaxed(nfc->regs + NFC_FMCTL); + val &= ~FMCTL_CE_SEL_M; + val |= FMCTL_CE_SEL(nfc->selected_bank); + + writel(val, nfc->regs + NFC_FMCTL); + + /* + * Compare current chip timing with selected chip timing and + * change if needed. + */ + if (nfc->cur_timing != rknand->timing) { + writel(rknand->timing, nfc->regs + NFC_FMWAIT); + nfc->cur_timing = rknand->timing; + } + + /* + * Compare current chip ECC setting with selected chip ECC setting and + * change if needed. 
+ */ + if (nfc->cur_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, ecc->strength); +} + +static inline int rk_nfc_wait_ioready(struct rk_nfc *nfc) +{ + int rc; + u32 val; + + rc = readl_relaxed_poll_timeout(nfc->regs + NFC_FMCTL, val, + val & FMCTL_RDY, 10, NFC_TIMEOUT); + + return rc; +} + +static void rk_nfc_read_buf(struct rk_nfc *nfc, u8 *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + buf[i] = readb_relaxed(nfc->regs + nfc->band_offset + + BANK_DATA); +} + +static void rk_nfc_write_buf(struct rk_nfc *nfc, const u8 *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + writeb(buf[i], nfc->regs + nfc->band_offset + BANK_DATA); +} + +static int rk_nfc_cmd(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + unsigned int i, j, remaining, start; + int reg_offset = nfc->band_offset; + u8 *inbuf = NULL; + const u8 *outbuf; + u32 cnt = 0; + int ret = 0; + + for (i = 0; i < subop->ninstrs; i++) { + const struct nand_op_instr *instr = &subop->instrs[i]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb(instr->ctx.cmd.opcode, + nfc->regs + reg_offset + BANK_CMD); + break; + + case NAND_OP_ADDR_INSTR: + remaining = nand_subop_get_num_addr_cyc(subop, i); + start = nand_subop_get_addr_start_off(subop, i); + + for (j = 0; j < 8 && j + start < remaining; j++) + writeb(instr->ctx.addr.addrs[j + start], + nfc->regs + reg_offset + BANK_ADDR); + break; + + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + start = nand_subop_get_data_start_off(subop, i); + cnt = nand_subop_get_data_len(subop, i); + + if (instr->type == NAND_OP_DATA_OUT_INSTR) { + outbuf = instr->ctx.data.buf.out + start; + rk_nfc_write_buf(nfc, outbuf, cnt); + } else { + inbuf = instr->ctx.data.buf.in + start; + rk_nfc_read_buf(nfc, inbuf, cnt); + } + break; + + case NAND_OP_WAITRDY_INSTR: + if (rk_nfc_wait_ioready(nfc) < 0) { + ret = -ETIMEDOUT; + dev_err(nfc->dev, "IO not ready\n"); + } + break; + } + } + + return ret; +} + +static const struct nand_op_parser rk_nfc_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + rk_nfc_cmd, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, MAX_DATA_SIZE)), + NAND_OP_PARSER_PATTERN( + rk_nfc_cmd, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, MAX_DATA_SIZE), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), +); + +static int rk_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + if (!check_only) + rk_nfc_select_chip(chip, op->cs); + + return nand_op_parser_exec_op(chip, &rk_nfc_op_parser, op, + check_only); +} + +static int rk_nfc_setup_interface(struct nand_chip *chip, int target, + const struct nand_interface_config *conf) +{ + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + const struct nand_sdr_timings *timings; + u32 rate, tc2rw, trwpw, trw2c; + u32 temp; + + if (target < 0) + return 0; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return -EOPNOTSUPP; + + if (IS_ERR(nfc->nfc_clk)) + rate = clk_get_rate(nfc->ahb_clk); + else + rate = clk_get_rate(nfc->nfc_clk); + + /* Turn clock rate into kHz. 
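/*
 * Illustrative sketch (not part of the driver): the pulse-width arithmetic
 * that follows reduces to "cycles = ceil(ns * kHz / 1e6)", taking the wider
 * of the read/write cycle times and never going below tREA. Timings are
 * given here directly in ns (the driver converts from ps first); sk_* names
 * are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define SK_ACCTIMING(csrw, rwpw, rwcs)	((csrw) << 12 | (rwpw) << 5 | (rwcs))

static uint32_t sk_nfc_timing(uint32_t twc_ns, uint32_t trc_ns,
			      uint32_t trea_ns, uint32_t rate_khz)
{
	uint32_t trwpw = twc_ns > trc_ns ? twc_ns : trc_ns;
	uint32_t temp;

	trwpw = SK_DIV_ROUND_UP(trwpw * rate_khz, 1000000);
	temp = SK_DIV_ROUND_UP(trea_ns * rate_khz, 1000000);
	if (trwpw < temp)
		trwpw = temp;

	return SK_ACCTIMING(1, trwpw, 1);	/* csrw = rwcs = 1 cycle */
}

int main(void)
{
	/* 25 ns cycle time, 20 ns tREA at 150 MHz -> 4 cycles -> 0x1081 */
	printf("ACCTIMING = 0x%x\n", sk_nfc_timing(25, 25, 20, 150000));
	return 0;
}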
*/ + rate /= 1000; + + tc2rw = 1; + trw2c = 1; + + trwpw = max(timings->tWC_min, timings->tRC_min) / 1000; + trwpw = DIV_ROUND_UP(trwpw * rate, 1000000); + + temp = timings->tREA_max / 1000; + temp = DIV_ROUND_UP(temp * rate, 1000000); + + if (trwpw < temp) + trwpw = temp; + + /* + * ACCON: access timing control register + * ------------------------------------- + * 31:18: reserved + * 17:12: csrw, clock cycles from the falling edge of CSn to the + * falling edge of RDn or WRn + * 11:11: reserved + * 10:05: rwpw, the width of RDn or WRn in processor clock cycles + * 04:00: rwcs, clock cycles from the rising edge of RDn or WRn to the + * rising edge of CSn + */ + + /* Save chip timing */ + rknand->timing = ACCTIMING(tc2rw, trwpw, trw2c); + + return 0; +} + +static void rk_nfc_xfer_start(struct rk_nfc *nfc, u8 rw, u8 n_KB, + dma_addr_t dma_data, dma_addr_t dma_oob) +{ + u32 dma_reg, fl_reg, bch_reg; + + dma_reg = DMA_ST | ((!rw) << DMA_WR) | DMA_EN | (2 << DMA_AHB_SIZE) | + (7 << DMA_BURST_SIZE) | (16 << DMA_INC_NUM); + + fl_reg = (rw << FLCTL_WR) | FLCTL_XFER_EN | FLCTL_ACORRECT | + (n_KB << FLCTL_XFER_SECTOR) | FLCTL_TOG_FIX; + + if (nfc->cfg->type == NFC_V6 || nfc->cfg->type == NFC_V8) { + bch_reg = readl_relaxed(nfc->regs + nfc->cfg->bchctl_off); + bch_reg = (bch_reg & (~BCHCTL_BANK_M)) | + (nfc->selected_bank << BCHCTL_BANK); + writel(bch_reg, nfc->regs + nfc->cfg->bchctl_off); + } + + writel(dma_reg, nfc->regs + nfc->cfg->dma_cfg_off); + writel((u32)dma_data, nfc->regs + nfc->cfg->dma_data_buf_off); + writel((u32)dma_oob, nfc->regs + nfc->cfg->dma_oob_buf_off); + writel(fl_reg, nfc->regs + nfc->cfg->flctl_off); + fl_reg |= FLCTL_XFER_ST; + writel(fl_reg, nfc->regs + nfc->cfg->flctl_off); +} + +static int rk_nfc_wait_for_xfer_done(struct rk_nfc *nfc) +{ + void __iomem *ptr; + u32 reg; + + ptr = nfc->regs + nfc->cfg->flctl_off; + + return readl_relaxed_poll_timeout(ptr, reg, + reg & FLCTL_XFER_READY, + 10, NFC_TIMEOUT); +} + +static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf, + int oob_on, int page) +{ + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int i, pages_per_blk; + + pages_per_blk = mtd->erasesize / mtd->writesize; + if ((chip->options & NAND_IS_BOOT_MEDIUM) && + (page < (pages_per_blk * rknand->boot_blks)) && + rknand->boot_ecc != ecc->strength) { + /* + * There's currently no method to notify the MTD framework that + * a different ECC strength is in use for the boot blocks. + */ + return -EIO; + } + + if (!buf) + memset(nfc->page_buf, 0xff, mtd->writesize + mtd->oobsize); + + for (i = 0; i < ecc->steps; i++) { + /* Copy data to the NFC buffer. */ + if (buf) + memcpy(rk_nfc_data_ptr(chip, i), + rk_nfc_buf_to_data_ptr(chip, buf, i), + ecc->size); + /* + * The first four bytes of OOB are reserved for the + * boot ROM. In some debugging cases, such as with a + * read, erase and write back test these 4 bytes stored + * in OOB also need to be written back. + * + * The function nand_block_bad detects bad blocks like: + * + * bad = chip->oob_poi[chip->badblockpos]; + * + * chip->badblockpos == 0 for a large page NAND Flash, + * so chip->oob_poi[0] is the bad block mask (BBM). + * + * The OOB data layout on the NFC is: + * + * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ... + * + * or + * + * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ... 
+ * + * The code here just swaps the first 4 bytes with the last + * 4 bytes without losing any data. + * + * The chip->oob_poi data layout: + * + * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 + * + * The rk_nfc_ooblayout_free() function already has reserved + * these 4 bytes with: + * + * oob_region->offset = NFC_SYS_DATA_SIZE + 2; + */ + if (!i) + memcpy(rk_nfc_oob_ptr(chip, i), + rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1), + NFC_SYS_DATA_SIZE); + else + memcpy(rk_nfc_oob_ptr(chip, i), + rk_nfc_buf_to_oob_ptr(chip, i - 1), + NFC_SYS_DATA_SIZE); + /* Copy ECC data to the NFC buffer. */ + memcpy(rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE, + rk_nfc_buf_to_oob_ecc_ptr(chip, i), + ecc->bytes); + } + + nand_prog_page_begin_op(chip, page, 0, NULL, 0); + rk_nfc_write_buf(nfc, buf, mtd->writesize + mtd->oobsize); + return nand_prog_page_end_op(chip); +} + +static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, + int oob_on, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP : + NFC_MIN_OOB_PER_STEP; + int pages_per_blk = mtd->erasesize / mtd->writesize; + int ret = 0, i, boot_rom_mode = 0; + dma_addr_t dma_data, dma_oob; + u32 reg; + u8 *oob; + + nand_prog_page_begin_op(chip, page, 0, NULL, 0); + + if (buf) + memcpy(nfc->page_buf, buf, mtd->writesize); + else + memset(nfc->page_buf, 0xFF, mtd->writesize); + + /* + * The first blocks (4, 8 or 16 depending on the device) are used + * by the boot ROM and the first 32 bits of OOB need to link to + * the next page address in the same block. We can't directly copy + * OOB data from the MTD framework, because this page address + * conflicts for example with the bad block marker (BBM), + * so we shift all OOB data including the BBM with 4 byte positions. + * As a consequence the OOB size available to the MTD framework is + * also reduced with 4 bytes. + * + * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ... + * + * If a NAND is not a boot medium or the page is not a boot block, + * the first 4 bytes are left untouched by writing 0xFF to them. + * + * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ... + * + * Configure the ECC algorithm supported by the boot ROM. + */ + if ((page < (pages_per_blk * rknand->boot_blks)) && + (chip->options & NAND_IS_BOOT_MEDIUM)) { + boot_rom_mode = 1; + if (rknand->boot_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc); + } + + for (i = 0; i < ecc->steps; i++) { + if (!i) { + reg = 0xFFFFFFFF; + } else { + oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + reg = oob[0] | oob[1] << 8 | oob[2] << 16 | + oob[3] << 24; + } + + if (!i && boot_rom_mode) + reg = (page & (pages_per_blk - 1)) * 4; + + if (nfc->cfg->type == NFC_V9) + nfc->oob_buf[i] = reg; + else + nfc->oob_buf[i * (oob_step / 4)] = reg; + } + + dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf, + mtd->writesize, DMA_TO_DEVICE); + dma_oob = dma_map_single(nfc->dev, nfc->oob_buf, + ecc->steps * oob_step, + DMA_TO_DEVICE); + + reinit_completion(&nfc->done); + writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off); + + rk_nfc_xfer_start(nfc, NFC_WRITE, ecc->steps, dma_data, + dma_oob); + ret = wait_for_completion_timeout(&nfc->done, + msecs_to_jiffies(100)); + if (!ret) + dev_warn(nfc->dev, "write: wait dma done timeout.\n"); + /* + * Whether the DMA transfer is completed or not. 
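/*
 * Illustrative sketch (not part of the driver): the value written into the
 * first 32-bit sys-data word by the loop above. Ordinary pages keep
 * 0xFFFFFFFF; pages inside the boot blocks instead get (page offset within
 * the block) * 4, the link value the boot ROM expects per the comment above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sk_first_oob_word(int page, int pages_per_blk, int boot_rom_mode)
{
	if (!boot_rom_mode)
		return 0xFFFFFFFF;

	return (page & (pages_per_blk - 1)) * 4;
}

int main(void)
{
	/* 64 pages per block: page 66 is offset 2 inside its block */
	printf("boot  page 66: 0x%08x\n", sk_first_oob_word(66, 64, 1));
	printf("plain page 66: 0x%08x\n", sk_first_oob_word(66, 64, 0));
	return 0;
}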
The driver + * needs to check the NFC`s status register to see if the data + * transfer was completed. + */ + ret = rk_nfc_wait_for_xfer_done(nfc); + + dma_unmap_single(nfc->dev, dma_data, mtd->writesize, + DMA_TO_DEVICE); + dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step, + DMA_TO_DEVICE); + + if (boot_rom_mode && rknand->boot_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, ecc->strength); + + if (ret) { + dev_err(nfc->dev, "write: wait transfer done timeout.\n"); + return -ETIMEDOUT; + } + + return nand_prog_page_end_op(chip); +} + +static int rk_nfc_write_oob(struct nand_chip *chip, int page) +{ + return rk_nfc_write_page_hwecc(chip, NULL, 1, page); +} + +static int rk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on, + int page) +{ + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int i, pages_per_blk; + + pages_per_blk = mtd->erasesize / mtd->writesize; + if ((chip->options & NAND_IS_BOOT_MEDIUM) && + (page < (pages_per_blk * rknand->boot_blks)) && + rknand->boot_ecc != ecc->strength) { + /* + * There's currently no method to notify the MTD framework that + * a different ECC strength is in use for the boot blocks. + */ + return -EIO; + } + + nand_read_page_op(chip, page, 0, NULL, 0); + rk_nfc_read_buf(nfc, nfc->page_buf, mtd->writesize + mtd->oobsize); + for (i = 0; i < ecc->steps; i++) { + /* + * The first four bytes of OOB are reserved for the + * boot ROM. In some debugging cases, such as with a read, + * erase and write back test, these 4 bytes also must be + * saved somewhere, otherwise this information will be + * lost during a write back. + */ + if (!i) + memcpy(rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1), + rk_nfc_oob_ptr(chip, i), + NFC_SYS_DATA_SIZE); + else + memcpy(rk_nfc_buf_to_oob_ptr(chip, i - 1), + rk_nfc_oob_ptr(chip, i), + NFC_SYS_DATA_SIZE); + + /* Copy ECC data from the NFC buffer. */ + memcpy(rk_nfc_buf_to_oob_ecc_ptr(chip, i), + rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE, + ecc->bytes); + + /* Copy data from the NFC buffer. */ + if (buf) + memcpy(rk_nfc_buf_to_data_ptr(chip, buf, i), + rk_nfc_data_ptr(chip, i), + ecc->size); + } + + return 0; +} + +static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on, + int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP : + NFC_MIN_OOB_PER_STEP; + int pages_per_blk = mtd->erasesize / mtd->writesize; + dma_addr_t dma_data, dma_oob; + int ret = 0, i, cnt, boot_rom_mode = 0; + int max_bitflips = 0, bch_st, ecc_fail = 0; + u8 *oob; + u32 tmp; + + nand_read_page_op(chip, page, 0, NULL, 0); + + dma_data = dma_map_single(nfc->dev, nfc->page_buf, + mtd->writesize, + DMA_FROM_DEVICE); + dma_oob = dma_map_single(nfc->dev, nfc->oob_buf, + ecc->steps * oob_step, + DMA_FROM_DEVICE); + + /* + * The first blocks (4, 8 or 16 depending on the device) + * are used by the boot ROM. + * Configure the ECC algorithm supported by the boot ROM. 
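/*
 * Illustrative sketch (not part of the driver): the 4-byte OOB shuffle done
 * by rk_nfc_write_page_raw() and undone by rk_nfc_read_page_raw() above.
 * Step 0 of the NFC buffer carries the last 4 bytes of oob_poi and every
 * later step carries the previous step's bytes, so the BBM never lands in
 * the slot the boot ROM uses and no byte is lost.
 */
#include <stdio.h>
#include <string.h>

#define SK_SYS 4

static void sk_pack_sys(const char *oob_poi, int steps, char sys[][SK_SYS])
{
	for (int i = 0; i < steps; i++)
		memcpy(sys[i], oob_poi + (i ? i - 1 : steps - 1) * SK_SYS, SK_SYS);
}

int main(void)
{
	char oob[16] = "BBMMoob1oob2PA__";	/* 4 steps x 4 bytes */
	char sys[4][SK_SYS];

	sk_pack_sys(oob, 4, sys);
	for (int i = 0; i < 4; i++)
		printf("step %d: %.4s\n", i, sys[i]);	/* PA__ BBMM oob1 oob2 */
	return 0;
}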
+ */ + if ((page < (pages_per_blk * rknand->boot_blks)) && + (chip->options & NAND_IS_BOOT_MEDIUM)) { + boot_rom_mode = 1; + if (rknand->boot_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc); + } + + reinit_completion(&nfc->done); + writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off); + rk_nfc_xfer_start(nfc, NFC_READ, ecc->steps, dma_data, + dma_oob); + ret = wait_for_completion_timeout(&nfc->done, + msecs_to_jiffies(100)); + if (!ret) + dev_warn(nfc->dev, "read: wait dma done timeout.\n"); + /* + * Whether the DMA transfer is completed or not. The driver + * needs to check the NFC`s status register to see if the data + * transfer was completed. + */ + ret = rk_nfc_wait_for_xfer_done(nfc); + + dma_unmap_single(nfc->dev, dma_data, mtd->writesize, + DMA_FROM_DEVICE); + dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step, + DMA_FROM_DEVICE); + + if (ret) { + ret = -ETIMEDOUT; + dev_err(nfc->dev, "read: wait transfer done timeout.\n"); + goto timeout_err; + } + + for (i = 1; i < ecc->steps; i++) { + oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + if (nfc->cfg->type == NFC_V9) + tmp = nfc->oob_buf[i]; + else + tmp = nfc->oob_buf[i * (oob_step / 4)]; + *oob++ = (u8)tmp; + *oob++ = (u8)(tmp >> 8); + *oob++ = (u8)(tmp >> 16); + *oob++ = (u8)(tmp >> 24); + } + + for (i = 0; i < (ecc->steps / 2); i++) { + bch_st = readl_relaxed(nfc->regs + + nfc->cfg->bch_st_off + i * 4); + if (bch_st & BIT(nfc->cfg->ecc0.err_flag_bit) || + bch_st & BIT(nfc->cfg->ecc1.err_flag_bit)) { + mtd->ecc_stats.failed++; + ecc_fail = 1; + } else { + cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc0); + mtd->ecc_stats.corrected += cnt; + max_bitflips = max_t(u32, max_bitflips, cnt); + + cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc1); + mtd->ecc_stats.corrected += cnt; + max_bitflips = max_t(u32, max_bitflips, cnt); + } + } + + if (buf) + memcpy(buf, nfc->page_buf, mtd->writesize); + +timeout_err: + if (boot_rom_mode && rknand->boot_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, ecc->strength); + + if (ret) + return ret; + + if (ecc_fail) { + dev_err(nfc->dev, "read page: %x ecc error!\n", page); + return 0; + } + + return max_bitflips; +} + +static int rk_nfc_read_oob(struct nand_chip *chip, int page) +{ + return rk_nfc_read_page_hwecc(chip, NULL, 1, page); +} + +static inline void rk_nfc_hw_init(struct rk_nfc *nfc) +{ + /* Disable flash wp. */ + writel(FMCTL_WP, nfc->regs + NFC_FMCTL); + /* Config default timing 40ns at 150 Mhz NFC clock. */ + writel(0x1081, nfc->regs + NFC_FMWAIT); + nfc->cur_timing = 0x1081; + /* Disable randomizer and DMA. 
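/*
 * Illustrative sketch (not part of the driver): the reporting policy of the
 * status loop above, which decodes two sectors per BCH status word. An
 * uncorrectable sector bumps the failure count; otherwise the corrected
 * counts are accumulated and the worst sector becomes the value handed back
 * to the MTD core. sk_* names are placeholders.
 */
struct sk_sector_status {
	int uncorrectable;
	int bitflips;
};

static int sk_account_ecc(const struct sk_sector_status *s, int nsectors,
			  int *corrected, int *failed)
{
	int max_bitflips = 0;

	for (int i = 0; i < nsectors; i++) {
		if (s[i].uncorrectable) {
			(*failed)++;
			continue;
		}
		*corrected += s[i].bitflips;
		if (s[i].bitflips > max_bitflips)
			max_bitflips = s[i].bitflips;
	}

	return max_bitflips;	/* caller reports the worst corrected sector */
}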
*/ + writel(0, nfc->regs + nfc->cfg->randmz_off); + writel(0, nfc->regs + nfc->cfg->dma_cfg_off); + writel(FLCTL_RST, nfc->regs + nfc->cfg->flctl_off); +} + +static irqreturn_t rk_nfc_irq(int irq, void *id) +{ + struct rk_nfc *nfc = id; + u32 sta, ien; + + sta = readl_relaxed(nfc->regs + nfc->cfg->int_st_off); + ien = readl_relaxed(nfc->regs + nfc->cfg->int_en_off); + + if (!(sta & ien)) + return IRQ_NONE; + + writel(sta, nfc->regs + nfc->cfg->int_clr_off); + writel(~sta & ien, nfc->regs + nfc->cfg->int_en_off); + + complete(&nfc->done); + + return IRQ_HANDLED; +} + +static int rk_nfc_enable_clks(struct device *dev, struct rk_nfc *nfc) +{ + int ret; + + if (!IS_ERR(nfc->nfc_clk)) { + ret = clk_prepare_enable(nfc->nfc_clk); + if (ret) { + dev_err(dev, "failed to enable NFC clk\n"); + return ret; + } + } + + ret = clk_prepare_enable(nfc->ahb_clk); + if (ret) { + dev_err(dev, "failed to enable ahb clk\n"); + if (!IS_ERR(nfc->nfc_clk)) + clk_disable_unprepare(nfc->nfc_clk); + return ret; + } + + return 0; +} + +static void rk_nfc_disable_clks(struct rk_nfc *nfc) +{ + if (!IS_ERR(nfc->nfc_clk)) + clk_disable_unprepare(nfc->nfc_clk); + clk_disable_unprepare(nfc->ahb_clk); +} + +static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + + if (section) + return -ERANGE; + + /* + * The beginning of the OOB area stores the reserved data for the NFC, + * the size of the reserved data is NFC_SYS_DATA_SIZE bytes. + */ + oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; + oob_region->offset = NFC_SYS_DATA_SIZE + 2; + + return 0; +} + +static int rk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + + if (section) + return -ERANGE; + + oob_region->length = mtd->oobsize - rknand->metadata_size; + oob_region->offset = rknand->metadata_size; + + return 0; +} + +static const struct mtd_ooblayout_ops rk_nfc_ooblayout_ops = { + .free = rk_nfc_ooblayout_free, + .ecc = rk_nfc_ooblayout_ecc, +}; + +static int rk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + const u8 *strengths = nfc->cfg->ecc_strengths; + u8 max_strength, nfc_max_strength; + int i; + + nfc_max_strength = nfc->cfg->ecc_strengths[0]; + /* If optional dt settings not present. */ + if (!ecc->size || !ecc->strength || + ecc->strength > nfc_max_strength) { + chip->ecc.size = 1024; + ecc->steps = mtd->writesize / ecc->size; + + /* + * HW ECC always requests the number of ECC bytes per 1024 byte + * blocks. The first 4 OOB bytes are reserved for sys data. 
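/*
 * Illustrative sketch (not part of the driver): the strength budget computed
 * in rk_nfc_ecc_init(). With 1024-byte steps, fls(8 * 1024) == 14, so every
 * unit of strength costs 14 ECC bits, and whatever OOB is left after the
 * 4 sys bytes per step bounds the usable strength. sk_* names are
 * placeholders.
 */
#include <stdio.h>

#define SK_ECC_BITS_PER_STRENGTH 14	/* fls(8 * 1024) */
#define SK_SYS_BYTES 4

static int sk_max_strength(int writesize, int oobsize, int step_size)
{
	int steps = writesize / step_size;

	return ((oobsize / steps) - SK_SYS_BYTES) * 8 / SK_ECC_BITS_PER_STRENGTH;
}

static int sk_ecc_bytes(int strength)
{
	return (strength * SK_ECC_BITS_PER_STRENGTH + 7) / 8;
}

int main(void)
{
	/* 4 KiB page, 128 B OOB: at most strength 16, i.e. 28 ECC bytes per step */
	printf("max strength %d -> %d ECC bytes/step\n",
	       sk_max_strength(4096, 128, 1024), sk_ecc_bytes(16));
	return 0;
}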
+ */ + max_strength = ((mtd->oobsize / ecc->steps) - 4) * 8 / + fls(8 * 1024); + if (max_strength > nfc_max_strength) + max_strength = nfc_max_strength; + + for (i = 0; i < 4; i++) { + if (max_strength >= strengths[i]) + break; + } + + if (i >= 4) { + dev_err(nfc->dev, "unsupported ECC strength\n"); + return -EOPNOTSUPP; + } + + ecc->strength = strengths[i]; + } + ecc->steps = mtd->writesize / ecc->size; + ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * chip->ecc.size), 8); + + return 0; +} + +static int rk_nfc_attach_chip(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct device *dev = mtd->dev.parent; + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int new_page_len, new_oob_len; + void *buf; + int ret; + + if (chip->options & NAND_BUSWIDTH_16) { + dev_err(dev, "16 bits bus width not supported"); + return -EINVAL; + } + + if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) + return 0; + + ret = rk_nfc_ecc_init(dev, mtd); + if (ret) + return ret; + + rknand->metadata_size = NFC_SYS_DATA_SIZE * ecc->steps; + + if (rknand->metadata_size < NFC_SYS_DATA_SIZE + 2) { + dev_err(dev, + "driver needs at least %d bytes of meta data\n", + NFC_SYS_DATA_SIZE + 2); + return -EIO; + } + + /* Check buffer first, avoid duplicate alloc buffer. */ + new_page_len = mtd->writesize + mtd->oobsize; + if (nfc->page_buf && new_page_len > nfc->page_buf_size) { + buf = krealloc(nfc->page_buf, new_page_len, + GFP_KERNEL | GFP_DMA); + if (!buf) + return -ENOMEM; + nfc->page_buf = buf; + nfc->page_buf_size = new_page_len; + } + + new_oob_len = ecc->steps * NFC_MAX_OOB_PER_STEP; + if (nfc->oob_buf && new_oob_len > nfc->oob_buf_size) { + buf = krealloc(nfc->oob_buf, new_oob_len, + GFP_KERNEL | GFP_DMA); + if (!buf) { + kfree(nfc->page_buf); + nfc->page_buf = NULL; + return -ENOMEM; + } + nfc->oob_buf = buf; + nfc->oob_buf_size = new_oob_len; + } + + if (!nfc->page_buf) { + nfc->page_buf = kzalloc(new_page_len, GFP_KERNEL | GFP_DMA); + if (!nfc->page_buf) + return -ENOMEM; + nfc->page_buf_size = new_page_len; + } + + if (!nfc->oob_buf) { + nfc->oob_buf = kzalloc(new_oob_len, GFP_KERNEL | GFP_DMA); + if (!nfc->oob_buf) { + kfree(nfc->page_buf); + nfc->page_buf = NULL; + return -ENOMEM; + } + nfc->oob_buf_size = new_oob_len; + } + + chip->ecc.write_page_raw = rk_nfc_write_page_raw; + chip->ecc.write_page = rk_nfc_write_page_hwecc; + chip->ecc.write_oob = rk_nfc_write_oob; + + chip->ecc.read_page_raw = rk_nfc_read_page_raw; + chip->ecc.read_page = rk_nfc_read_page_hwecc; + chip->ecc.read_oob = rk_nfc_read_oob; + + return 0; +} + +static const struct nand_controller_ops rk_nfc_controller_ops = { + .attach_chip = rk_nfc_attach_chip, + .exec_op = rk_nfc_exec_op, + .setup_interface = rk_nfc_setup_interface, +}; + +static int rk_nfc_nand_chip_init(struct device *dev, struct rk_nfc *nfc, + struct device_node *np) +{ + struct rk_nfc_nand_chip *rknand; + struct nand_chip *chip; + struct mtd_info *mtd; + int nsels; + u32 tmp; + int ret; + int i; + + if (!of_get_property(np, "reg", &nsels)) + return -ENODEV; + nsels /= sizeof(u32); + if (!nsels || nsels > NFC_MAX_NSELS) { + dev_err(dev, "invalid reg property size %d\n", nsels); + return -EINVAL; + } + + rknand = devm_kzalloc(dev, sizeof(*rknand) + nsels * sizeof(u8), + GFP_KERNEL); + if (!rknand) + return -ENOMEM; + + rknand->nsels = nsels; + for (i = 0; i < nsels; i++) { + ret = of_property_read_u32_index(np, "reg", i, &tmp); + if (ret) { + 
dev_err(dev, "reg property failure : %d\n", ret); + return ret; + } + + if (tmp >= NFC_MAX_NSELS) { + dev_err(dev, "invalid CS: %u\n", tmp); + return -EINVAL; + } + + if (test_and_set_bit(tmp, &nfc->assigned_cs)) { + dev_err(dev, "CS %u already assigned\n", tmp); + return -EINVAL; + } + + rknand->sels[i] = tmp; + } + + chip = &rknand->chip; + chip->controller = &nfc->controller; + + nand_set_flash_node(chip, np); + + nand_set_controller_data(chip, nfc); + + chip->options |= NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE; + chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; + + /* Set default mode in case dt entry is missing. */ + chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; + + mtd = nand_to_mtd(chip); + mtd->owner = THIS_MODULE; + mtd->dev.parent = dev; + + if (!mtd->name) { + dev_err(nfc->dev, "NAND label property is mandatory\n"); + return -EINVAL; + } + + mtd_set_ooblayout(mtd, &rk_nfc_ooblayout_ops); + rk_nfc_hw_init(nfc); + ret = nand_scan(chip, nsels); + if (ret) + return ret; + + if (chip->options & NAND_IS_BOOT_MEDIUM) { + ret = of_property_read_u32(np, "rockchip,boot-blks", &tmp); + rknand->boot_blks = ret ? 0 : tmp; + + ret = of_property_read_u32(np, "rockchip,boot-ecc-strength", + &tmp); + rknand->boot_ecc = ret ? chip->ecc.strength : tmp; + } + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(dev, "MTD parse partition error\n"); + nand_cleanup(chip); + return ret; + } + + list_add_tail(&rknand->node, &nfc->chips); + + return 0; +} + +static void rk_nfc_chips_cleanup(struct rk_nfc *nfc) +{ + struct rk_nfc_nand_chip *rknand, *tmp; + struct nand_chip *chip; + int ret; + + list_for_each_entry_safe(rknand, tmp, &nfc->chips, node) { + chip = &rknand->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&rknand->node); + } +} + +static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc) +{ + struct device_node *np = dev->of_node, *nand_np; + int nchips = of_get_child_count(np); + int ret; + + if (!nchips || nchips > NFC_MAX_NSELS) { + dev_err(nfc->dev, "incorrect number of NAND chips (%d)\n", + nchips); + return -EINVAL; + } + + for_each_child_of_node(np, nand_np) { + ret = rk_nfc_nand_chip_init(dev, nfc, nand_np); + if (ret) { + of_node_put(nand_np); + rk_nfc_chips_cleanup(nfc); + return ret; + } + } + + return 0; +} + +static struct nfc_cfg nfc_v6_cfg = { + .type = NFC_V6, + .ecc_strengths = {60, 40, 24, 16}, + .ecc_cfgs = { + 0x00040011, 0x00040001, 0x00000011, 0x00000001, + }, + .flctl_off = 0x08, + .bchctl_off = 0x0C, + .dma_cfg_off = 0x10, + .dma_data_buf_off = 0x14, + .dma_oob_buf_off = 0x18, + .dma_st_off = 0x1C, + .bch_st_off = 0x20, + .randmz_off = 0x150, + .int_en_off = 0x16C, + .int_clr_off = 0x170, + .int_st_off = 0x174, + .oob0_off = 0x200, + .oob1_off = 0x230, + .ecc0 = { + .err_flag_bit = 2, + .low = 3, + .low_mask = 0x1F, + .low_bn = 5, + .high = 27, + .high_mask = 0x1, + }, + .ecc1 = { + .err_flag_bit = 15, + .low = 16, + .low_mask = 0x1F, + .low_bn = 5, + .high = 29, + .high_mask = 0x1, + }, +}; + +static struct nfc_cfg nfc_v8_cfg = { + .type = NFC_V8, + .ecc_strengths = {16, 16, 16, 16}, + .ecc_cfgs = { + 0x00000001, 0x00000001, 0x00000001, 0x00000001, + }, + .flctl_off = 0x08, + .bchctl_off = 0x0C, + .dma_cfg_off = 0x10, + .dma_data_buf_off = 0x14, + .dma_oob_buf_off = 0x18, + .dma_st_off = 0x1C, + .bch_st_off = 0x20, + .randmz_off = 0x150, + .int_en_off = 0x16C, + .int_clr_off = 0x170, + .int_st_off = 0x174, + .oob0_off = 0x200, + .oob1_off = 0x230, + .ecc0 = { + 
.err_flag_bit = 2, + .low = 3, + .low_mask = 0x1F, + .low_bn = 5, + .high = 27, + .high_mask = 0x1, + }, + .ecc1 = { + .err_flag_bit = 15, + .low = 16, + .low_mask = 0x1F, + .low_bn = 5, + .high = 29, + .high_mask = 0x1, + }, +}; + +static struct nfc_cfg nfc_v9_cfg = { + .type = NFC_V9, + .ecc_strengths = {70, 60, 40, 16}, + .ecc_cfgs = { + 0x00000001, 0x06000001, 0x04000001, 0x02000001, + }, + .flctl_off = 0x10, + .bchctl_off = 0x20, + .dma_cfg_off = 0x30, + .dma_data_buf_off = 0x34, + .dma_oob_buf_off = 0x38, + .dma_st_off = 0x3C, + .bch_st_off = 0x150, + .randmz_off = 0x208, + .int_en_off = 0x120, + .int_clr_off = 0x124, + .int_st_off = 0x128, + .oob0_off = 0x200, + .oob1_off = 0x204, + .ecc0 = { + .err_flag_bit = 2, + .low = 3, + .low_mask = 0x7F, + .low_bn = 7, + .high = 0, + .high_mask = 0x0, + }, + .ecc1 = { + .err_flag_bit = 18, + .low = 19, + .low_mask = 0x7F, + .low_bn = 7, + .high = 0, + .high_mask = 0x0, + }, +}; + +static const struct of_device_id rk_nfc_id_table[] = { + { + .compatible = "rockchip,px30-nfc", + .data = &nfc_v9_cfg + }, + { + .compatible = "rockchip,rk2928-nfc", + .data = &nfc_v6_cfg + }, + { + .compatible = "rockchip,rv1108-nfc", + .data = &nfc_v8_cfg + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rk_nfc_id_table); + +static int rk_nfc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rk_nfc *nfc; + int ret, irq; + + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nand_controller_init(&nfc->controller); + INIT_LIST_HEAD(&nfc->chips); + nfc->controller.ops = &rk_nfc_controller_ops; + + nfc->cfg = of_device_get_match_data(dev); + nfc->dev = dev; + + init_completion(&nfc->done); + + nfc->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(nfc->regs)) { + ret = PTR_ERR(nfc->regs); + goto release_nfc; + } + + nfc->nfc_clk = devm_clk_get(dev, "nfc"); + if (IS_ERR(nfc->nfc_clk)) { + dev_dbg(dev, "no NFC clk\n"); + /* Some earlier models, such as rk3066, have no NFC clk. */ + } + + nfc->ahb_clk = devm_clk_get(dev, "ahb"); + if (IS_ERR(nfc->ahb_clk)) { + dev_err(dev, "no ahb clk\n"); + ret = PTR_ERR(nfc->ahb_clk); + goto release_nfc; + } + + ret = rk_nfc_enable_clks(dev, nfc); + if (ret) + goto release_nfc; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "no NFC irq resource\n"); + ret = -EINVAL; + goto clk_disable; + } + + writel(0, nfc->regs + nfc->cfg->int_en_off); + ret = devm_request_irq(dev, irq, rk_nfc_irq, 0x0, "rk-nand", nfc); + if (ret) { + dev_err(dev, "failed to request NFC irq\n"); + goto clk_disable; + } + + platform_set_drvdata(pdev, nfc); + + ret = rk_nfc_nand_chips_init(dev, nfc); + if (ret) { + dev_err(dev, "failed to init NAND chips\n"); + goto clk_disable; + } + return 0; + +clk_disable: + rk_nfc_disable_clks(nfc); +release_nfc: + return ret; +} + +static int rk_nfc_remove(struct platform_device *pdev) +{ + struct rk_nfc *nfc = platform_get_drvdata(pdev); + + kfree(nfc->page_buf); + kfree(nfc->oob_buf); + rk_nfc_chips_cleanup(nfc); + rk_nfc_disable_clks(nfc); + + return 0; +} + +static int __maybe_unused rk_nfc_suspend(struct device *dev) +{ + struct rk_nfc *nfc = dev_get_drvdata(dev); + + rk_nfc_disable_clks(nfc); + + return 0; +} + +static int __maybe_unused rk_nfc_resume(struct device *dev) +{ + struct rk_nfc *nfc = dev_get_drvdata(dev); + struct rk_nfc_nand_chip *rknand; + struct nand_chip *chip; + int ret; + u32 i; + + ret = rk_nfc_enable_clks(dev, nfc); + if (ret) + return ret; + + /* Reset NAND chip if VCC was powered off. 
*/ + list_for_each_entry(rknand, &nfc->chips, node) { + chip = &rknand->chip; + for (i = 0; i < rknand->nsels; i++) + nand_reset(chip, i); + } + + return 0; +} + +static const struct dev_pm_ops rk_nfc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rk_nfc_suspend, rk_nfc_resume) +}; + +static struct platform_driver rk_nfc_driver = { + .probe = rk_nfc_probe, + .remove = rk_nfc_remove, + .driver = { + .name = "rockchip-nfc", + .of_match_table = rk_nfc_id_table, + .pm = &rk_nfc_pm_ops, + }, +}; + +module_platform_driver(rk_nfc_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Yifeng Zhao <yifeng.zhao@rock-chips.com>"); +MODULE_DESCRIPTION("Rockchip Nand Flash Controller Driver"); +MODULE_ALIAS("platform:rockchip-nand-controller"); diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index fbd0fa48e063..f0a4535c812a 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -30,7 +30,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/platform_data/mtd-nand-s3c2410.h> @@ -134,7 +133,8 @@ enum s3c_nand_clk_state { /** * struct s3c2410_nand_info - NAND controller state. - * @mtds: An array of MTD instances on this controoler. + * @controller: Base controller structure. + * @mtds: An array of MTD instances on this controller. * @platform: The platform data for this board. * @device: The platform device we bound to. * @clk: The clock resource for this controller. @@ -146,6 +146,7 @@ enum s3c_nand_clk_state { * @clk_rate: The clock rate from @clk. * @clk_state: The current clock state. * @cpu_type: The exact type of this controller. + * @freq_transition: CPUFreq notifier block */ struct s3c2410_nand_info { /* mtd info */ diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index af98bcc9d689..5612ee628425 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -12,7 +12,6 @@ #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/mtd/sharpsl.h> #include <linux/interrupt.h> @@ -107,7 +106,7 @@ static int sharpsl_attach_chip(struct nand_chip *chip) chip->ecc.strength = 1; chip->ecc.hwctl = sharpsl_nand_enable_hwecc; chip->ecc.calculate = sharpsl_nand_calculate_ecc; - chip->ecc.correct = nand_correct_data; + chip->ecc.correct = rawnand_sw_hamming_correct; return 0; } diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 2a7ca3072f35..923a9e236fcf 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -51,6 +51,7 @@ #define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4)) #define NFC_REG_SPARE_AREA 0x00A0 #define NFC_REG_PAT_ID 0x00A4 +#define NFC_REG_MDMA_ADDR 0x00C0 #define NFC_REG_MDMA_CNT 0x00C4 #define NFC_RAM0_BASE 0x0400 #define NFC_RAM1_BASE 0x0800 @@ -182,6 +183,7 @@ struct sunxi_nand_hw_ecc { * * @node: used to store NAND chips into a list * @nand: base NAND chip structure + * @ecc: ECC controller structure * @clk_rate: clk_rate required for this NAND chip * @timing_cfg: TIMING_CFG register value for this NAND chip * @timing_ctl: TIMING_CTL register value for this NAND chip @@ -191,6 +193,7 @@ struct sunxi_nand_hw_ecc { struct sunxi_nand_chip { struct list_head node; struct nand_chip nand; + struct sunxi_nand_hw_ecc *ecc; unsigned long clk_rate; u32 timing_cfg; u32 timing_ctl; @@ -207,13 +210,13 @@ static inline struct 
sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand) * NAND Controller capabilities structure: stores NAND controller capabilities * for distinction between compatible strings. * - * @extra_mbus_conf: Contrary to A10, A10s and A13, accessing internal RAM + * @has_mdma: Use mbus dma mode, otherwise general dma * through MBUS on A23/A33 needs extra configuration. * @reg_io_data: I/O data register * @dma_maxburst: DMA maxburst */ struct sunxi_nfc_caps { - bool extra_mbus_conf; + bool has_mdma; unsigned int reg_io_data; unsigned int dma_maxburst; }; @@ -233,6 +236,7 @@ struct sunxi_nfc_caps { * controller * @complete: a completion object used to wait for NAND controller events * @dmac: the DMA channel attached to the NAND controller + * @caps: NAND Controller capabilities */ struct sunxi_nfc { struct nand_controller controller; @@ -363,24 +367,31 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf, if (!ret) return -ENOMEM; - dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK); - if (!dmad) { - ret = -EINVAL; - goto err_unmap_buf; + if (!nfc->caps->has_mdma) { + dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK); + if (!dmad) { + ret = -EINVAL; + goto err_unmap_buf; + } } writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD, nfc->regs + NFC_REG_CTL); writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM); writel(chunksize, nfc->regs + NFC_REG_CNT); - if (nfc->caps->extra_mbus_conf) - writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT); - dmat = dmaengine_submit(dmad); + if (nfc->caps->has_mdma) { + writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_DMA_TYPE_NORMAL, + nfc->regs + NFC_REG_CTL); + writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT); + writel(sg_dma_address(sg), nfc->regs + NFC_REG_MDMA_ADDR); + } else { + dmat = dmaengine_submit(dmad); - ret = dma_submit_error(dmat); - if (ret) - goto err_clr_dma_flag; + ret = dma_submit_error(dmat); + if (ret) + goto err_clr_dma_flag; + } return 0; @@ -676,15 +687,15 @@ static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf, static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand) { + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); - struct sunxi_nand_hw_ecc *data = nand->ecc.priv; u32 ecc_ctl; ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE_MSK); - ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION | - NFC_ECC_PIPELINE; + ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(sunxi_nand->ecc->mode) | + NFC_ECC_EXCEPTION | NFC_ECC_PIPELINE; if (nand->ecc.size == 512) ecc_ctl |= NFC_ECC_BLOCK_512; @@ -911,7 +922,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf unsigned int max_bitflips = 0; int ret, i, raw_mode = 0; struct scatterlist sg; - u32 status; + u32 status, wait; ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); if (ret) @@ -929,13 +940,18 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET); - dma_async_issue_pending(nfc->dmac); + wait = NFC_CMD_INT_FLAG; + + if (nfc->caps->has_mdma) + wait |= NFC_DMA_INT_FLAG; + else + dma_async_issue_pending(nfc->dmac); writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS, nfc->regs + NFC_REG_CMD); - ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0); - if (ret) + ret = 
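/*
 * Illustrative sketch (not part of the driver): how the new has_mdma flag
 * changes completion handling in the DMA read/write paths around here. With
 * the internal MBUS DMA the NFC raises its own DMA interrupt, so that flag
 * is added to the wait mask; with an external dmaengine channel the
 * descriptor is issued instead. The SK_* flag values are placeholders.
 */
#include <stdbool.h>
#include <stdint.h>

#define SK_CMD_INT_FLAG	(1u << 0)
#define SK_DMA_INT_FLAG	(1u << 1)

static uint32_t sk_wait_mask(bool has_mdma, bool *issue_dmaengine)
{
	uint32_t wait = SK_CMD_INT_FLAG;

	*issue_dmaengine = !has_mdma;
	if (has_mdma)
		wait |= SK_DMA_INT_FLAG;

	return wait;
}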
sunxi_nfc_wait_events(nfc, wait, false, 0); + if (ret && !nfc->caps->has_mdma) dmaengine_terminate_all(nfc->dmac); sunxi_nfc_randomizer_disable(nand); @@ -1276,6 +1292,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand, struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; struct scatterlist sg; + u32 wait; int ret, i; sunxi_nfc_select_chip(nand, nand->cur_cs); @@ -1304,14 +1321,19 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand, writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, nfc->regs + NFC_REG_WCMD_SET); - dma_async_issue_pending(nfc->dmac); + wait = NFC_CMD_INT_FLAG; + + if (nfc->caps->has_mdma) + wait |= NFC_DMA_INT_FLAG; + else + dma_async_issue_pending(nfc->dmac); writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS | NFC_ACCESS_DIR, nfc->regs + NFC_REG_CMD); - ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0); - if (ret) + ret = sunxi_nfc_wait_events(nfc, wait, false, 0); + if (ret && !nfc->caps->has_mdma) dmaengine_terminate_all(nfc->dmac); sunxi_nfc_randomizer_disable(nand); @@ -1597,9 +1619,9 @@ static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = { .free = sunxi_nand_ooblayout_free, }; -static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc) +static void sunxi_nand_hw_ecc_ctrl_cleanup(struct sunxi_nand_chip *sunxi_nand) { - kfree(ecc->priv); + kfree(sunxi_nand->ecc); } static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, @@ -1607,10 +1629,10 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, struct device_node *np) { static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 }; + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct mtd_info *mtd = nand_to_mtd(nand); struct nand_device *nanddev = mtd_to_nanddev(mtd); - struct sunxi_nand_hw_ecc *data; int nsectors; int ret; int i; @@ -1647,8 +1669,8 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, if (ecc->size != 512 && ecc->size != 1024) return -EINVAL; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) + sunxi_nand->ecc = kzalloc(sizeof(*sunxi_nand->ecc), GFP_KERNEL); + if (!sunxi_nand->ecc) return -ENOMEM; /* Prefer 1k ECC chunk over 512 ones */ @@ -1675,7 +1697,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, goto err; } - data->mode = i; + sunxi_nand->ecc->mode = i; /* HW ECC always request ECC bytes for 1024 bytes blocks */ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8); @@ -1693,9 +1715,8 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, ecc->read_oob = sunxi_nfc_hw_ecc_read_oob; ecc->write_oob = sunxi_nfc_hw_ecc_write_oob; mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops); - ecc->priv = data; - if (nfc->dmac) { + if (nfc->dmac || nfc->caps->has_mdma) { ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma; ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma; ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma; @@ -1714,16 +1735,18 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, return 0; err: - kfree(data); + kfree(sunxi_nand->ecc); return ret; } -static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc) +static void sunxi_nand_ecc_cleanup(struct sunxi_nand_chip *sunxi_nand) { + struct nand_ecc_ctrl *ecc = &sunxi_nand->nand.ecc; + switch (ecc->engine_type) { case NAND_ECC_ENGINE_TYPE_ON_HOST: - sunxi_nand_hw_ecc_ctrl_cleanup(ecc); + sunxi_nand_hw_ecc_ctrl_cleanup(sunxi_nand); break; case 
NAND_ECC_ENGINE_TYPE_NONE: default: @@ -2053,11 +2076,41 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) ret = mtd_device_unregister(nand_to_mtd(chip)); WARN_ON(ret); nand_cleanup(chip); - sunxi_nand_ecc_cleanup(&chip->ecc); + sunxi_nand_ecc_cleanup(sunxi_nand); list_del(&sunxi_nand->node); } } +static int sunxi_nfc_dma_init(struct sunxi_nfc *nfc, struct resource *r) +{ + int ret; + + if (nfc->caps->has_mdma) + return 0; + + nfc->dmac = dma_request_chan(nfc->dev, "rxtx"); + if (IS_ERR(nfc->dmac)) { + ret = PTR_ERR(nfc->dmac); + if (ret == -EPROBE_DEFER) + return ret; + + /* Ignore errors to fall back to PIO mode */ + dev_warn(nfc->dev, "failed to request rxtx DMA channel: %d\n", ret); + nfc->dmac = NULL; + } else { + struct dma_slave_config dmac_cfg = { }; + + dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data; + dmac_cfg.dst_addr = dmac_cfg.src_addr; + dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width; + dmac_cfg.src_maxburst = nfc->caps->dma_maxburst; + dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst; + dmaengine_slave_config(nfc->dmac, &dmac_cfg); + } + return 0; +} + static int sunxi_nfc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -2132,30 +2185,10 @@ static int sunxi_nfc_probe(struct platform_device *pdev) if (ret) goto out_ahb_reset_reassert; - nfc->dmac = dma_request_chan(dev, "rxtx"); - if (IS_ERR(nfc->dmac)) { - ret = PTR_ERR(nfc->dmac); - if (ret == -EPROBE_DEFER) - goto out_ahb_reset_reassert; - - /* Ignore errors to fall back to PIO mode */ - dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret); - nfc->dmac = NULL; - } else { - struct dma_slave_config dmac_cfg = { }; - - dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data; - dmac_cfg.dst_addr = dmac_cfg.src_addr; - dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width; - dmac_cfg.src_maxburst = nfc->caps->dma_maxburst; - dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst; - dmaengine_slave_config(nfc->dmac, &dmac_cfg); + ret = sunxi_nfc_dma_init(nfc, r); - if (nfc->caps->extra_mbus_conf) - writel(readl(nfc->regs + NFC_REG_CTL) | - NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL); - } + if (ret) + goto out_ahb_reset_reassert; platform_set_drvdata(pdev, nfc); @@ -2202,7 +2235,7 @@ static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = { }; static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = { - .extra_mbus_conf = true, + .has_mdma = true, .reg_io_data = NFC_REG_A23_IO_DATA, .dma_maxburst = 8, }; diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index aa6c7e7bbf1b..de8e919d0ebe 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -35,7 +35,6 @@ #include <linux/ioport.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/slab.h> @@ -293,11 +292,11 @@ static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf, int r0, r1; /* assume ecc.size = 512 and ecc.bytes = 6 */ - r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256, false); + r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc); if (r0 < 0) return r0; - r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256, - false); + r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3, + calc_ecc + 3); if (r1 < 0) return r1; return r0 + r1; diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c 
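/*
 * Illustrative sketch (not part of the patch): the shape of the tmio (above)
 * and txx9 (below) conversions. A 512-byte ECC step is really two 256-byte
 * software-Hamming codewords with 3 ECC bytes each; each half is corrected
 * on its own and the bitflip counts are summed. correct_256() stands in for
 * rawnand_sw_hamming_correct().
 */
typedef int (*sk_correct_256_t)(unsigned char *buf, unsigned char *read_ecc,
				unsigned char *calc_ecc);

static int sk_correct_512(unsigned char *buf, unsigned char *read_ecc,
			  unsigned char *calc_ecc, sk_correct_256_t correct_256)
{
	int r0, r1;

	r0 = correct_256(buf, read_ecc, calc_ecc);
	if (r0 < 0)
		return r0;

	r1 = correct_256(buf + 256, read_ecc + 3, calc_ecc + 3);
	if (r1 < 0)
		return r1;

	return r0 + r1;	/* total corrected bitflips for the 512-byte step */
}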
index fe8ed2441588..1a9449e53bf9 100644 --- a/drivers/mtd/nand/raw/txx9ndfmc.c +++ b/drivers/mtd/nand/raw/txx9ndfmc.c @@ -14,7 +14,6 @@ #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/io.h> #include <linux/platform_data/txx9/ndfmc.h> @@ -194,8 +193,8 @@ static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf, int stat; for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) { - stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256, - false); + stat = rawnand_sw_hamming_correct(chip, buf, read_ecc, + calc_ecc); if (stat < 0) return stat; corrected += stat; diff --git a/drivers/mtd/nand/spi/Kconfig b/drivers/mtd/nand/spi/Kconfig index da89b250df7c..3d7649a2dd72 100644 --- a/drivers/mtd/nand/spi/Kconfig +++ b/drivers/mtd/nand/spi/Kconfig @@ -2,6 +2,7 @@ menuconfig MTD_SPI_NAND tristate "SPI NAND device Support" select MTD_NAND_CORE + select MTD_NAND_ECC depends on SPI_MASTER select SPI_MEM help diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index c35221794645..8ea545bb924d 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -193,6 +193,135 @@ static int spinand_ecc_enable(struct spinand_device *spinand, enable ? CFG_ECC_ENABLE : 0); } +static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) +{ + struct nand_device *nand = spinand_to_nand(spinand); + + if (spinand->eccinfo.get_status) + return spinand->eccinfo.get_status(spinand, status); + + switch (status & STATUS_ECC_MASK) { + case STATUS_ECC_NO_BITFLIPS: + return 0; + + case STATUS_ECC_HAS_BITFLIPS: + /* + * We have no way to know exactly how many bitflips have been + * fixed, so let's return the maximum possible value so that + * wear-leveling layers move the data immediately. + */ + return nanddev_get_ecc_conf(nand)->strength; + + case STATUS_ECC_UNCOR_ERROR: + return -EBADMSG; + + default: + break; + } + + return -EINVAL; +} + +static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + return -ERANGE; +} + +static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section) + return -ERANGE; + + /* Reserve 2 bytes for the BBM. 
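/*
 * Illustrative sketch (not part of the core): the generic decode performed
 * by spinand_check_ecc_status() above when a chip has no vendor-specific
 * get_status() hook. "Some bitflips" cannot be quantified from the status
 * bits alone, so the full strength is returned to push wear-leveling layers
 * into moving the data. SK_* names are placeholders.
 */
enum sk_ecc_status { SK_NO_BITFLIPS, SK_HAS_BITFLIPS, SK_UNCOR_ERROR };

#define SK_EBADMSG 74	/* stand-in for the kernel's -EBADMSG */

static int sk_check_ecc_status(enum sk_ecc_status st, int strength)
{
	switch (st) {
	case SK_NO_BITFLIPS:
		return 0;
	case SK_HAS_BITFLIPS:
		return strength;	/* pessimistic but safe */
	case SK_UNCOR_ERROR:
		return -SK_EBADMSG;
	}

	return -1;	/* unknown status pattern */
}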
*/ + region->offset = 2; + region->length = 62; + + return 0; +} + +static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = { + .ecc = spinand_noecc_ooblayout_ecc, + .free = spinand_noecc_ooblayout_free, +}; + +static int spinand_ondie_ecc_init_ctx(struct nand_device *nand) +{ + struct spinand_device *spinand = nand_to_spinand(nand); + struct mtd_info *mtd = nanddev_to_mtd(nand); + struct spinand_ondie_ecc_conf *engine_conf; + + nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; + nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size; + nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength; + + engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); + if (!engine_conf) + return -ENOMEM; + + nand->ecc.ctx.priv = engine_conf; + + if (spinand->eccinfo.ooblayout) + mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout); + else + mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout); + + return 0; +} + +static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand) +{ + kfree(nand->ecc.ctx.priv); +} + +static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct spinand_device *spinand = nand_to_spinand(nand); + bool enable = (req->mode != MTD_OPS_RAW); + + /* Only enable or disable the engine */ + return spinand_ecc_enable(spinand, enable); +} + +static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; + struct spinand_device *spinand = nand_to_spinand(nand); + + if (req->mode == MTD_OPS_RAW) + return 0; + + /* Nothing to do when finishing a page write */ + if (req->type == NAND_PAGE_WRITE) + return 0; + + /* Finish a page write: check the status, report errors/bitflips */ + return spinand_check_ecc_status(spinand, engine_conf->status); +} + +static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = { + .init_ctx = spinand_ondie_ecc_init_ctx, + .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx, + .prepare_io_req = spinand_ondie_ecc_prepare_io_req, + .finish_io_req = spinand_ondie_ecc_finish_io_req, +}; + +static struct nand_ecc_engine spinand_ondie_ecc_engine = { + .ops = &spinand_ondie_ecc_engine_ops, +}; + +static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status) +{ + struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; + + if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE && + engine_conf) + engine_conf->status = status; +} + static int spinand_write_enable_op(struct spinand_device *spinand) { struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true); @@ -214,7 +343,6 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, const struct nand_page_io_req *req) { struct nand_device *nand = spinand_to_nand(spinand); - struct mtd_info *mtd = nanddev_to_mtd(nand); struct spi_mem_dirmap_desc *rdesc; unsigned int nbytes = 0; void *buf = NULL; @@ -254,16 +382,9 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, memcpy(req->databuf.in, spinand->databuf + req->dataoffs, req->datalen); - if (req->ooblen) { - if (req->mode == MTD_OPS_AUTO_OOB) - mtd_ooblayout_get_databytes(mtd, req->oobbuf.in, - spinand->oobbuf, - req->ooboffs, - req->ooblen); - else - memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, - req->ooblen); - } + if (req->ooblen) + memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, + req->ooblen); return 0; } @@ -272,7 +393,7 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, const 
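/*
 * Illustrative sketch (not part of the core): the call flow the new on-die
 * ECC engine hooks into. Before a page I/O the engine's prepare step runs
 * (for on-die ECC: enable or disable the chip's engine), the I/O itself
 * saves the status byte, and the finish step turns that status into a
 * bitflip count or an error. Names and types here are placeholders, not the
 * kernel's nand_ecc_engine_ops API.
 */
struct sk_io_req {
	int raw;	/* MTD_OPS_RAW-style request: bypass ECC */
	int is_write;
};

struct sk_ecc_ops {
	int (*prepare_io_req)(struct sk_io_req *req);
	int (*finish_io_req)(struct sk_io_req *req);
};

static int sk_page_io(const struct sk_ecc_ops *ops, struct sk_io_req *req,
		      int (*do_io)(struct sk_io_req *req))
{
	int ret = ops->prepare_io_req(req);

	if (ret)
		return ret;

	ret = do_io(req);	/* load/program the page, save ECC status */
	if (ret)
		return ret;

	return ops->finish_io_req(req);	/* 0, bitflips, or -EBADMSG on reads */
}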
struct nand_page_io_req *req) { struct nand_device *nand = spinand_to_nand(spinand); - struct mtd_info *mtd = nanddev_to_mtd(nand); + struct mtd_info *mtd = spinand_to_mtd(spinand); struct spi_mem_dirmap_desc *wdesc; unsigned int nbytes, column = 0; void *buf = spinand->databuf; @@ -284,9 +405,12 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, * must fill the page cache entirely even if we only want to program * the data portion of the page, otherwise we might corrupt the BBM or * user data previously programmed in OOB area. + * + * Only reset the data buffer manually, the OOB buffer is prepared by + * ECC engines ->prepare_io_req() callback. */ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); - memset(spinand->databuf, 0xff, nbytes); + memset(spinand->databuf, 0xff, nanddev_page_size(nand)); if (req->datalen) memcpy(spinand->databuf + req->dataoffs, req->databuf.out, @@ -402,42 +526,17 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock) return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock); } -static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) -{ - struct nand_device *nand = spinand_to_nand(spinand); - - if (spinand->eccinfo.get_status) - return spinand->eccinfo.get_status(spinand, status); - - switch (status & STATUS_ECC_MASK) { - case STATUS_ECC_NO_BITFLIPS: - return 0; - - case STATUS_ECC_HAS_BITFLIPS: - /* - * We have no way to know exactly how many bitflips have been - * fixed, so let's return the maximum possible value so that - * wear-leveling layers move the data immediately. - */ - return nanddev_get_ecc_conf(nand)->strength; - - case STATUS_ECC_UNCOR_ERROR: - return -EBADMSG; - - default: - break; - } - - return -EINVAL; -} - static int spinand_read_page(struct spinand_device *spinand, - const struct nand_page_io_req *req, - bool ecc_enabled) + const struct nand_page_io_req *req) { + struct nand_device *nand = spinand_to_nand(spinand); u8 status; int ret; + ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req); + if (ret) + return ret; + ret = spinand_load_page_op(spinand, req); if (ret) return ret; @@ -446,22 +545,26 @@ static int spinand_read_page(struct spinand_device *spinand, if (ret < 0) return ret; + spinand_ondie_ecc_save_status(nand, status); + ret = spinand_read_from_cache_op(spinand, req); if (ret) return ret; - if (!ecc_enabled) - return 0; - - return spinand_check_ecc_status(spinand, status); + return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); } static int spinand_write_page(struct spinand_device *spinand, const struct nand_page_io_req *req) { + struct nand_device *nand = spinand_to_nand(spinand); u8 status; int ret; + ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req); + if (ret) + return ret; + ret = spinand_write_enable_op(spinand); if (ret) return ret; @@ -476,9 +579,9 @@ static int spinand_write_page(struct spinand_device *spinand, ret = spinand_wait(spinand, &status); if (!ret && (status & STATUS_PROG_FAILED)) - ret = -EIO; + return -EIO; - return ret; + return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); } static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, @@ -488,25 +591,24 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, struct nand_device *nand = mtd_to_nanddev(mtd); unsigned int max_bitflips = 0; struct nand_io_iter iter; - bool enable_ecc = false; + bool disable_ecc = false; bool ecc_failed = false; int ret = 0; - if (ops->mode != MTD_OPS_RAW && 
spinand->eccinfo.ooblayout) - enable_ecc = true; + if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout) + disable_ecc = true; mutex_lock(&spinand->lock); nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) { - ret = spinand_select_target(spinand, iter.req.pos.target); - if (ret) - break; + if (disable_ecc) + iter.req.mode = MTD_OPS_RAW; - ret = spinand_ecc_enable(spinand, enable_ecc); + ret = spinand_select_target(spinand, iter.req.pos.target); if (ret) break; - ret = spinand_read_page(spinand, &iter.req, enable_ecc); + ret = spinand_read_page(spinand, &iter.req); if (ret < 0 && ret != -EBADMSG) break; @@ -537,20 +639,19 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to, struct spinand_device *spinand = mtd_to_spinand(mtd); struct nand_device *nand = mtd_to_nanddev(mtd); struct nand_io_iter iter; - bool enable_ecc = false; + bool disable_ecc = false; int ret = 0; - if (ops->mode != MTD_OPS_RAW && mtd->ooblayout) - enable_ecc = true; + if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout) + disable_ecc = true; mutex_lock(&spinand->lock); nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) { - ret = spinand_select_target(spinand, iter.req.pos.target); - if (ret) - break; + if (disable_ecc) + iter.req.mode = MTD_OPS_RAW; - ret = spinand_ecc_enable(spinand, enable_ecc); + ret = spinand_select_target(spinand, iter.req.pos.target); if (ret) break; @@ -580,7 +681,7 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos) }; spinand_select_target(spinand, pos->target); - spinand_read_page(spinand, &req, false); + spinand_read_page(spinand, &req); if (marker[0] != 0xff || marker[1] != 0xff) return true; @@ -965,30 +1066,6 @@ static int spinand_detect(struct spinand_device *spinand) return 0; } -static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section, - struct mtd_oob_region *region) -{ - return -ERANGE; -} - -static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section, - struct mtd_oob_region *region) -{ - if (section) - return -ERANGE; - - /* Reserve 2 bytes for the BBM. */ - region->offset = 2; - region->length = 62; - - return 0; -} - -static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = { - .ecc = spinand_noecc_ooblayout_ecc, - .free = spinand_noecc_ooblayout_free, -}; - static int spinand_init(struct spinand_device *spinand) { struct device *dev = &spinand->spimem->spi->dev; @@ -1066,10 +1143,15 @@ static int spinand_init(struct spinand_device *spinand) if (ret) goto err_manuf_cleanup; - /* - * Right now, we don't support ECC, so let the whole oob - * area is available for user. 
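
The spinand_isbad() hunk above now issues a plain raw-mode page read and inspects the factory bad-block marker directly. A tiny standalone sketch of that check (the helper name is made up for the example): the first two out-of-band bytes of the block's first page must read back as 0xff 0xff, anything else marks the block bad.

#include <stdbool.h>
#include <stdio.h>

static bool block_is_bad(const unsigned char marker[2])
{
    /* Factory marker bytes are read with ECC disabled (MTD_OPS_RAW). */
    return marker[0] != 0xff || marker[1] != 0xff;
}

int main(void)
{
    unsigned char good[2] = { 0xff, 0xff };
    unsigned char bad[2]  = { 0x00, 0xff };

    printf("good: %d, bad: %d\n", block_is_bad(good), block_is_bad(bad));
    return 0;
}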
- */ + /* SPI-NAND default ECC engine is on-die */ + nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; + nand->ecc.ondie_engine = &spinand_ondie_ecc_engine; + + spinand_ecc_enable(spinand, false); + ret = nanddev_ecc_engine_init(nand); + if (ret) + goto err_cleanup_nanddev; + mtd->_read_oob = spinand_mtd_read; mtd->_write_oob = spinand_mtd_write; mtd->_block_isbad = spinand_mtd_block_isbad; @@ -1078,14 +1160,11 @@ static int spinand_init(struct spinand_device *spinand) mtd->_erase = spinand_mtd_erase; mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; - if (spinand->eccinfo.ooblayout) - mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout); - else - mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout); - - ret = mtd_ooblayout_count_freebytes(mtd); - if (ret < 0) - goto err_cleanup_nanddev; + if (nand->ecc.engine) { + ret = mtd_ooblayout_count_freebytes(mtd); + if (ret < 0) + goto err_cleanup_ecc_engine; + } mtd->oobavail = ret; @@ -1095,6 +1174,9 @@ static int spinand_init(struct spinand_device *spinand) return 0; +err_cleanup_ecc_engine: + nanddev_ecc_engine_cleanup(nand); + err_cleanup_nanddev: nanddev_cleanup(nand); diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 8e801e4c3a00..6701aaa21a49 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -119,6 +119,53 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_INFO("MX35LF2GE4AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26), + NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35LF4GE4AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37), + NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35LF1G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14), + NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_INFO("MX35LF2G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24), + NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_INFO("MX35LF4G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35), + NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), SPINAND_INFO("MX31LF1GE4BC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c index 5d370cfcdaaa..50b7295bc922 100644 --- a/drivers/mtd/nand/spi/micron.c +++ b/drivers/mtd/nand/spi/micron.c @@ -28,7 +28,7 @@ #define MICRON_SELECT_DIE(x) ((x) << 6) -static 
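
The new Macronix SPINAND_INFO entries above encode the chip geometry via NAND_MEMORG. A rough capacity cross-check is sketched below; the field ordering used here (bits per cell, page size, OOB size, pages per eraseblock, eraseblocks per LUN, max bad eraseblocks per LUN, planes per LUN, LUNs per target, targets) is my reading of the macro and should be treated as an assumption, as should the struct and helper names.

#include <stdint.h>
#include <stdio.h>

struct memorg {
    uint32_t pagesize;
    uint32_t pages_per_eraseblock;
    uint32_t eraseblocks_per_lun;
    uint32_t luns_per_target;
    uint32_t ntargets;
};

static uint64_t memorg_capacity(const struct memorg *m)
{
    return (uint64_t)m->pagesize * m->pages_per_eraseblock *
           m->eraseblocks_per_lun * m->luns_per_target * m->ntargets;
}

int main(void)
{
    /* MX35LF2GE4AD above: NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1) */
    struct memorg mx35lf2ge4ad = { 2048, 64, 2048, 1, 1 };

    /* Prints 256 MiB, i.e. a 2 Gbit die, matching the part name. */
    printf("%llu MiB\n",
           (unsigned long long)(memorg_capacity(&mx35lf2ge4ad) >> 20));
    return 0;
}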
SPINAND_OP_VARIANTS(read_cache_variants, +static SPINAND_OP_VARIANTS(quadio_read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), @@ -36,14 +36,27 @@ static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); -static SPINAND_OP_VARIANTS(write_cache_variants, +static SPINAND_OP_VARIANTS(x4_write_cache_variants, SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), SPINAND_PROG_LOAD(true, 0, NULL, 0)); -static SPINAND_OP_VARIANTS(update_cache_variants, +static SPINAND_OP_VARIANTS(x4_update_cache_variants, SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), SPINAND_PROG_LOAD(false, 0, NULL, 0)); +/* Micron MT29F2G01AAAED Device */ +static SPINAND_OP_VARIANTS(x4_read_cache_variants, + SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); + +static SPINAND_OP_VARIANTS(x1_write_cache_variants, + SPINAND_PROG_LOAD(true, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(x1_update_cache_variants, + SPINAND_PROG_LOAD(false, 0, NULL, 0)); + static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { @@ -74,6 +87,47 @@ static const struct mtd_ooblayout_ops micron_8_ooblayout = { .free = micron_8_ooblayout_free, }; +static int micron_4_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + + if (section >= spinand->base.memorg.pagesize / + mtd->ecc_step_size) + return -ERANGE; + + region->offset = (section * 16) + 8; + region->length = 8; + + return 0; +} + +static int micron_4_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + + if (section >= spinand->base.memorg.pagesize / + mtd->ecc_step_size) + return -ERANGE; + + if (section) { + region->offset = 16 * section; + region->length = 8; + } else { + /* section 0 has two bytes reserved for the BBM */ + region->offset = 2; + region->length = 6; + } + + return 0; +} + +static const struct mtd_ooblayout_ops micron_4_ooblayout = { + .ecc = micron_4_ooblayout_ecc, + .free = micron_4_ooblayout_free, +}; + static int micron_select_target(struct spinand_device *spinand, unsigned int target) { @@ -120,9 +174,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -131,9 +185,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -142,9 +196,9 @@ static const struct 
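
The micron_4_ooblayout helpers above carve the spare area into one 16-byte OOB chunk per 512-byte ECC step: the upper 8 bytes of each chunk hold ECC, the lower bytes are free, and section 0 additionally reserves 2 bytes for the bad-block marker. The standalone sketch below just enumerates the resulting regions for a 2048-byte page with a 64-byte OOB, as on the MT29F2G01AAAED; the loop is an illustration, not driver code.

#include <stdio.h>

int main(void)
{
    unsigned int pagesize = 2048, ecc_step = 512;
    unsigned int sections = pagesize / ecc_step;
    unsigned int s;

    for (s = 0; s < sections; s++) {
        unsigned int ecc_off  = s * 16 + 8;
        unsigned int free_off = s ? s * 16 : 2;  /* skip the 2-byte BBM */
        unsigned int free_len = s ? 8 : 6;

        printf("section %u: ecc %u..%u, free %u..%u\n",
               s, ecc_off, ecc_off + 7, free_off, free_off + free_len - 1);
    }
    return 0;
}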
spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -153,9 +207,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -164,9 +218,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36), NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status), @@ -176,9 +230,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), SPINAND_HAS_CR_FEAT_BIT, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -187,9 +241,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), SPINAND_HAS_CR_FEAT_BIT, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -198,9 +252,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), SPINAND_HAS_CR_FEAT_BIT, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status), @@ -210,13 +264,23 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), SPINAND_HAS_CR_FEAT_BIT, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status), SPINAND_SELECT_TARGET(micron_select_target)), + /* M69A 2Gb 3.3V */ + SPINAND_INFO("MT29F2G01AAAED", + 
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9F), + NAND_MEMORG(1, 2048, 64, 64, 2048, 80, 2, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&x4_read_cache_variants, + &x1_write_cache_variants, + &x1_update_cache_variants), + 0, + SPINAND_ECCINFO(µn_4_ooblayout, NULL)), }; static int micron_spinand_init(struct spinand_device *spinand) diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 21fde2875674..7380b1ebaccd 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -28,7 +28,7 @@ static SPINAND_OP_VARIANTS(update_cache_x4_variants, SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), SPINAND_PROG_LOAD(false, 0, NULL, 0)); -/** +/* * Backward compatibility for 1st generation Serial NAND devices * which don't support Quad Program Load operation. */ diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c index a79e4d866b08..0ddff1a4b51f 100644 --- a/drivers/mtd/parsers/cmdlinepart.c +++ b/drivers/mtd/parsers/cmdlinepart.c @@ -226,7 +226,7 @@ static int mtdpart_setup_real(char *s) struct cmdline_mtd_partition *this_mtd; struct mtd_partition *parts; int mtd_id_len, num_parts; - char *p, *mtd_id, *semicol; + char *p, *mtd_id, *semicol, *open_parenth; /* * Replace the first ';' by a NULL char so strrchr can work @@ -236,6 +236,14 @@ static int mtdpart_setup_real(char *s) if (semicol) *semicol = '\0'; + /* + * make sure that part-names with ":" will not be handled as + * part of the mtd-id with an ":" + */ + open_parenth = strchr(s, '('); + if (open_parenth) + *open_parenth = '\0'; + mtd_id = s; /* @@ -245,6 +253,10 @@ static int mtdpart_setup_real(char *s) */ p = strrchr(s, ':'); + /* Restore the '(' now. */ + if (open_parenth) + *open_parenth = '('; + /* Restore the ';' now. 
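
The cmdlinepart change above protects the mtd-id parsing against partition names that themselves contain ':' by temporarily NUL-terminating the string at the first '(' before running strrchr(), then restoring the character. A small userspace sketch of the same trick; the command-line string and helper name are invented for the example.

#include <stdio.h>
#include <string.h>

static void split_mtd_id(char *s)
{
    char *open_parenth, *p;

    open_parenth = strchr(s, '(');
    if (open_parenth)
        *open_parenth = '\0';

    p = strrchr(s, ':');        /* last ':' before the first '(' */

    if (open_parenth)
        *open_parenth = '(';    /* restore the original string */

    if (p)
        printf("mtd-id length: %d, partitions: %s\n", (int)(p - s), p + 1);
}

int main(void)
{
    char cmdline[] = "flash:1M(kernel:a),-(rootfs)";

    split_mtd_id(cmdline);      /* mtd-id is "flash", not "flash:1M(kernel" */
    return 0;
}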
*/ if (semicol) *semicol = ';'; diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index b9f272408c4d..4d1ae25507ab 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -13,7 +13,7 @@ #include <linux/sysfs.h> #include <linux/bitops.h> #include <linux/slab.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include "nand/raw/sm_common.h" #include "sm_ftl.h" @@ -216,20 +216,19 @@ static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset, static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob) { + bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC); uint8_t ecc[3]; - __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); - if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0) + ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order); + if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc1, SM_SMALL_PAGE, + sm_order) < 0) return -EIO; buffer += SM_SMALL_PAGE; - __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); - if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0) + ecc_sw_hamming_calculate(buffer, SM_SMALL_PAGE, ecc, sm_order); + if (ecc_sw_hamming_correct(buffer, ecc, oob->ecc2, SM_SMALL_PAGE, + sm_order) < 0) return -EIO; return 0; } @@ -369,6 +368,7 @@ static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf, int zone, int block, int lba, unsigned long invalid_bitmap) { + bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC); struct sm_oob oob; int boffset; int retry = 0; @@ -395,13 +395,13 @@ restart: } if (ftl->smallpagenand) { - __nand_calculate_ecc(buf + boffset, SM_SMALL_PAGE, - oob.ecc1, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); + ecc_sw_hamming_calculate(buf + boffset, + SM_SMALL_PAGE, oob.ecc1, + sm_order); - __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE, - SM_SMALL_PAGE, oob.ecc2, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); + ecc_sw_hamming_calculate(buf + boffset + SM_SMALL_PAGE, + SM_SMALL_PAGE, oob.ecc2, + sm_order); } if (!sm_write_sector(ftl, zone, block, boffset, buf + boffset, &oob)) diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index ffc4b380f2b1..24cd25de2b8b 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -24,6 +24,50 @@ config MTD_SPI_NOR_USE_4K_SECTORS Please note that some tools/drivers/filesystems may not work with 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum). +choice + prompt "Software write protection at boot" + default MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE + +config MTD_SPI_NOR_SWP_DISABLE + bool "Disable SWP on any flashes (legacy behavior)" + help + This option disables the software write protection on any SPI + flashes at boot-up. + + Depending on the flash chip this either clears the block protection + bits or does a "Global Unprotect" command. + + Don't use this if you intent to use the software write protection + of your SPI flash. This is only to keep backwards compatibility. + +config MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE + bool "Disable SWP on flashes w/ volatile protection bits" + help + Some SPI flashes have volatile block protection bits, ie. after a + power-up or a reset the flash is software write protected by + default. 
+ + This option disables the software write protection for these kind + of flashes while keeping it enabled for any other SPI flashes + which have non-volatile write protection bits. + + If the software write protection will be disabled depending on + the flash either the block protection bits are cleared or a + "Global Unprotect" command is issued. + + If you are unsure, select this option. + +config MTD_SPI_NOR_SWP_KEEP + bool "Keep software write protection as is" + help + If you select this option the software write protection of any + SPI flashes will not be changed. If your flash is software write + protected or will be automatically software write protected after + power-up you have to manually unlock it before you are able to + write to it. + +endchoice + source "drivers/mtd/spi-nor/controllers/Kconfig" endif # MTD_SPI_NOR diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c index 3f5f21a473a6..1fea5cab492c 100644 --- a/drivers/mtd/spi-nor/atmel.c +++ b/drivers/mtd/spi-nor/atmel.c @@ -8,39 +8,192 @@ #include "core.h" +#define ATMEL_SR_GLOBAL_PROTECT_MASK GENMASK(5, 2) + +/* + * The Atmel AT25FS010/AT25FS040 parts have some weird configuration for the + * block protection bits. We don't support them. But legacy behavior in linux + * is to unlock the whole flash array on startup. Therefore, we have to support + * exactly this operation. + */ +static int atmel_at25fs_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return -EOPNOTSUPP; +} + +static int atmel_at25fs_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + int ret; + + /* We only support unlocking the whole flash array */ + if (ofs || len != nor->params->size) + return -EINVAL; + + /* Write 0x00 to the status register to disable write protection */ + ret = spi_nor_write_sr_and_check(nor, 0); + if (ret) + dev_dbg(nor->dev, "unable to clear BP bits, WP# asserted?\n"); + + return ret; +} + +static int atmel_at25fs_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return -EOPNOTSUPP; +} + +static const struct spi_nor_locking_ops atmel_at25fs_locking_ops = { + .lock = atmel_at25fs_lock, + .unlock = atmel_at25fs_unlock, + .is_locked = atmel_at25fs_is_locked, +}; + +static void atmel_at25fs_default_init(struct spi_nor *nor) +{ + nor->params->locking_ops = &atmel_at25fs_locking_ops; +} + +static const struct spi_nor_fixups atmel_at25fs_fixups = { + .default_init = atmel_at25fs_default_init, +}; + +/** + * atmel_set_global_protection - Do a Global Protect or Unprotect command + * @nor: pointer to 'struct spi_nor' + * @ofs: offset in bytes + * @len: len in bytes + * @is_protect: if true do a Global Protect otherwise it is a Global Unprotect + * + * Return: 0 on success, -error otherwise. + */ +static int atmel_set_global_protection(struct spi_nor *nor, loff_t ofs, + uint64_t len, bool is_protect) +{ + int ret; + u8 sr; + + /* We only support locking the whole flash array */ + if (ofs || len != nor->params->size) + return -EINVAL; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + sr = nor->bouncebuf[0]; + + /* SRWD bit needs to be cleared, otherwise the protection doesn't change */ + if (sr & SR_SRWD) { + sr &= ~SR_SRWD; + ret = spi_nor_write_sr_and_check(nor, sr); + if (ret) { + dev_dbg(nor->dev, "unable to clear SRWD bit, WP# asserted?\n"); + return ret; + } + } + + if (is_protect) { + sr |= ATMEL_SR_GLOBAL_PROTECT_MASK; + /* + * Set the SRWD bit again as soon as we are protecting + * anything. This will ensure that the WP# pin is working + * correctly. 
By doing this we also behave the same as + * spi_nor_sr_lock(), which sets SRWD if any block protection + * is active. + */ + sr |= SR_SRWD; + } else { + sr &= ~ATMEL_SR_GLOBAL_PROTECT_MASK; + } + + nor->bouncebuf[0] = sr; + + /* + * We cannot use the spi_nor_write_sr_and_check() because this command + * isn't really setting any bits, instead it is an pseudo command for + * "Global Unprotect" or "Global Protect" + */ + return spi_nor_write_sr(nor, nor->bouncebuf, 1); +} + +static int atmel_global_protect(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return atmel_set_global_protection(nor, ofs, len, true); +} + +static int atmel_global_unprotect(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + return atmel_set_global_protection(nor, ofs, len, false); +} + +static int atmel_is_global_protected(struct spi_nor *nor, loff_t ofs, uint64_t len) +{ + int ret; + + if (ofs >= nor->params->size || (ofs + len) > nor->params->size) + return -EINVAL; + + ret = spi_nor_read_sr(nor, nor->bouncebuf); + if (ret) + return ret; + + return ((nor->bouncebuf[0] & ATMEL_SR_GLOBAL_PROTECT_MASK) == ATMEL_SR_GLOBAL_PROTECT_MASK); +} + +static const struct spi_nor_locking_ops atmel_global_protection_ops = { + .lock = atmel_global_protect, + .unlock = atmel_global_unprotect, + .is_locked = atmel_is_global_protected, +}; + +static void atmel_global_protection_default_init(struct spi_nor *nor) +{ + nor->params->locking_ops = &atmel_global_protection_ops; +} + +static const struct spi_nor_fixups atmel_global_protection_fixups = { + .default_init = atmel_global_protection_default_init, +}; + static const struct flash_info atmel_parts[] = { /* Atmel -- some are (confusingly) marketed as "DataFlash" */ - { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) }, - { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, + { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK) + .fixups = &atmel_at25fs_fixups }, + { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) + .fixups = &atmel_at25fs_fixups }, - { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, - { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, - { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, - { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, + { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, + SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + .fixups = &atmel_global_protection_fixups }, + { "at25df321", INFO(0x1f4700, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + .fixups = &atmel_global_protection_fixups }, + { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + .fixups = &atmel_global_protection_fixups }, + { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + .fixups = &atmel_global_protection_fixups }, { "at25sl321", INFO(0x1f4216, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, - { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, - { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, - { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, + { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + .fixups = &atmel_global_protection_fixups }, + { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + .fixups = 
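
The Global Protect/Unprotect helper above only manipulates status register bits: SRWD is cleared first (in a separate register write) so the update is not blocked, then the block-protection field in bits 5:2 is set or cleared, with SRWD re-asserted when protecting. The sketch below just computes the value that ends up in the status register after the sequence; the SRWD bit position (bit 7) is the conventional one and is an assumption here, and the helper name is made up.

#include <stdbool.h>
#include <stdio.h>

#define SR_GLOBAL_PROTECT_MASK  0x3c    /* GENMASK(5, 2), as in the patch */
#define SR_SRWD                 0x80    /* assumed: SR write-disable, bit 7 */

static unsigned char global_protection_sr(unsigned char sr, bool protect)
{
    sr &= ~SR_SRWD;                     /* cleared before the BP update */

    if (protect)
        sr |= SR_GLOBAL_PROTECT_MASK | SR_SRWD;
    else
        sr &= ~SR_GLOBAL_PROTECT_MASK;

    return sr;
}

int main(void)
{
    printf("protect:   0x%02x\n", global_protection_sr(0x00, true));
    printf("unprotect: 0x%02x\n", global_protection_sr(0xbc, false));
    return 0;
}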
&atmel_global_protection_fixups }, + { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) + .fixups = &atmel_global_protection_fixups }, { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, }; -static void atmel_default_init(struct spi_nor *nor) -{ - nor->flags |= SNOR_F_HAS_LOCK; -} - -static const struct spi_nor_fixups atmel_fixups = { - .default_init = atmel_default_init, -}; - const struct spi_nor_manufacturer spi_nor_atmel = { .name = "atmel", .parts = atmel_parts, .nparts = ARRAY_SIZE(atmel_parts), - .fixups = &atmel_fixups, }; diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c index 95c502173cbd..7c26f8f565cb 100644 --- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c +++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c @@ -320,7 +320,7 @@ static const struct spi_nor_controller_ops hisi_controller_ops = { .write = hisi_spi_nor_write, }; -/** +/* * Get spi flash device information and register it as a mtd device. */ static int hisi_spi_nor_register(struct device_node *np, diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index f0ae7a01703a..20df44b753da 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -40,6 +40,81 @@ #define SPI_NOR_MAX_ADDR_WIDTH 4 +#define SPI_NOR_SRST_SLEEP_MIN 200 +#define SPI_NOR_SRST_SLEEP_MAX 400 + +/** + * spi_nor_get_cmd_ext() - Get the command opcode extension based on the + * extension type. + * @nor: pointer to a 'struct spi_nor' + * @op: pointer to the 'struct spi_mem_op' whose properties + * need to be initialized. + * + * Right now, only "repeat" and "invert" are supported. + * + * Return: The opcode extension. + */ +static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor, + const struct spi_mem_op *op) +{ + switch (nor->cmd_ext_type) { + case SPI_NOR_EXT_INVERT: + return ~op->cmd.opcode; + + case SPI_NOR_EXT_REPEAT: + return op->cmd.opcode; + + default: + dev_err(nor->dev, "Unknown command extension type\n"); + return 0; + } +} + +/** + * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op. + * @nor: pointer to a 'struct spi_nor' + * @op: pointer to the 'struct spi_mem_op' whose properties + * need to be initialized. + * @proto: the protocol from which the properties need to be set. + */ +void spi_nor_spimem_setup_op(const struct spi_nor *nor, + struct spi_mem_op *op, + const enum spi_nor_protocol proto) +{ + u8 ext; + + op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto); + + if (op->addr.nbytes) + op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto); + + if (op->dummy.nbytes) + op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto); + + if (op->data.nbytes) + op->data.buswidth = spi_nor_get_protocol_data_nbits(proto); + + if (spi_nor_protocol_is_dtr(proto)) { + /* + * SPIMEM supports mixed DTR modes, but right now we can only + * have all phases either DTR or STR. IOW, SPIMEM can have + * something like 4S-4D-4D, but SPI NOR can't. So, set all 4 + * phases to either DTR or STR. + */ + op->cmd.dtr = true; + op->addr.dtr = true; + op->dummy.dtr = true; + op->data.dtr = true; + + /* 2 bytes per clock cycle in DTR mode. 
*/ + op->dummy.nbytes *= 2; + + ext = spi_nor_get_cmd_ext(nor, op); + op->cmd.opcode = (op->cmd.opcode << 8) | ext; + op->cmd.nbytes = 2; + } +} + /** * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data * transfer @@ -82,6 +157,32 @@ static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op) return spi_mem_exec_op(nor->spimem, op); } +static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode, + u8 *buf, size_t len) +{ + if (spi_nor_protocol_is_dtr(nor->reg_proto)) + return -EOPNOTSUPP; + + return nor->controller_ops->read_reg(nor, opcode, buf, len); +} + +static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode, + const u8 *buf, size_t len) +{ + if (spi_nor_protocol_is_dtr(nor->reg_proto)) + return -EOPNOTSUPP; + + return nor->controller_ops->write_reg(nor, opcode, buf, len); +} + +static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs) +{ + if (spi_nor_protocol_is_dtr(nor->write_proto)) + return -EOPNOTSUPP; + + return nor->controller_ops->erase(nor, offs); +} + /** * spi_nor_spimem_read_data() - read data from flash's memory region via * spi-mem @@ -96,22 +197,20 @@ static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1), - SPI_MEM_OP_ADDR(nor->addr_width, from, 1), - SPI_MEM_OP_DUMMY(nor->read_dummy, 1), - SPI_MEM_OP_DATA_IN(len, buf, 1)); + SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_width, from, 0), + SPI_MEM_OP_DUMMY(nor->read_dummy, 0), + SPI_MEM_OP_DATA_IN(len, buf, 0)); bool usebouncebuf; ssize_t nbytes; int error; - /* get transfer protocols. */ - op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto); - op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto); - op.dummy.buswidth = op.addr.buswidth; - op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto); + spi_nor_spimem_setup_op(nor, &op, nor->read_proto); /* convert the dummy cycles to the number of bytes */ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8; + if (spi_nor_protocol_is_dtr(nor->read_proto)) + op.dummy.nbytes *= 2; usebouncebuf = spi_nor_spimem_bounce(nor, &op); @@ -162,20 +261,18 @@ static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to, size_t len, const u8 *buf) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1), - SPI_MEM_OP_ADDR(nor->addr_width, to, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_width, to, 0), SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(len, buf, 1)); + SPI_MEM_OP_DATA_OUT(len, buf, 0)); ssize_t nbytes; int error; - op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto); - op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto); - op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); - if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) op.addr.nbytes = 0; + spi_nor_spimem_setup_op(nor, &op, nor->write_proto); + if (spi_nor_spimem_bounce(nor, &op)) memcpy(nor->bouncebuf, buf, op.data.nbytes); @@ -222,15 +319,17 @@ int spi_nor_write_enable(struct spi_nor *nor) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = 
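
Two conversions drive the octal DTR setup above: the command is widened to two bytes, with the extension byte derived by repeating or inverting the opcode, and dummy clock cycles are converted to bytes and then doubled because DTR transfers two bytes per cycle. A standalone sketch of both (enum and function names are invented for the example):

#include <stdint.h>
#include <stdio.h>

enum ext_type { EXT_REPEAT, EXT_INVERT };

/* Build the 2-byte command sent in 8D-8D-8D mode. */
static uint16_t dtr_opcode(uint8_t opcode, enum ext_type type)
{
    uint8_t ext = (type == EXT_INVERT) ? (uint8_t)~opcode : opcode;

    return ((uint16_t)opcode << 8) | ext;
}

/* Convert dummy clock cycles to bytes; DTR moves 2 bytes per cycle. */
static unsigned int dummy_bytes(unsigned int cycles, unsigned int buswidth,
                                int dtr)
{
    unsigned int nbytes = cycles * buswidth / 8;

    return dtr ? nbytes * 2 : nbytes;
}

int main(void)
{
    /* e.g. Fast Read 0x0b: repeat -> 0x0b0b, invert -> 0x0bf4 */
    printf("repeat: 0x%04x, invert: 0x%04x\n",
           dtr_opcode(0x0b, EXT_REPEAT), dtr_opcode(0x0b, EXT_INVERT));

    /* 20 dummy cycles on an 8-bit bus: 20 bytes STR, 40 bytes DTR */
    printf("dummy bytes: %u STR, %u DTR\n",
           dummy_bytes(20, 8, 0), dummy_bytes(20, 8, 1));
    return 0;
}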
nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, - NULL, 0); + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN, + NULL, 0); } if (ret) @@ -251,15 +350,17 @@ int spi_nor_write_disable(struct spi_nor *nor) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI, - NULL, 0); + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI, + NULL, 0); } if (ret) @@ -272,25 +373,37 @@ int spi_nor_write_disable(struct spi_nor *nor) * spi_nor_read_sr() - Read the Status Register. * @nor: pointer to 'struct spi_nor'. * @sr: pointer to a DMA-able buffer where the value of the - * Status Register will be written. + * Status Register will be written. Should be at least 2 bytes. * * Return: 0 on success, -errno otherwise. */ -static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr) +int spi_nor_read_sr(struct spi_nor *nor, u8 *sr) { int ret; if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, sr, 1)); + SPI_MEM_OP_DATA_IN(1, sr, 0)); + + if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { + op.addr.nbytes = nor->params->rdsr_addr_nbytes; + op.dummy.nbytes = nor->params->rdsr_dummy; + /* + * We don't want to read only one byte in DTR mode. So, + * read 2 and then discard the second byte. + */ + op.data.nbytes = 2; + } + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR, - sr, 1); + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr, + 1); } if (ret) @@ -303,7 +416,8 @@ static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr) * spi_nor_read_fsr() - Read the Flag Status Register. * @nor: pointer to 'struct spi_nor' * @fsr: pointer to a DMA-able buffer where the value of the - * Flag Status Register will be written. + * Flag Status Register will be written. Should be at least 2 + * bytes. * * Return: 0 on success, -errno otherwise. */ @@ -313,15 +427,27 @@ static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, fsr, 1)); + SPI_MEM_OP_DATA_IN(1, fsr, 0)); + + if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { + op.addr.nbytes = nor->params->rdsr_addr_nbytes; + op.dummy.nbytes = nor->params->rdsr_dummy; + /* + * We don't want to read only one byte in DTR mode. So, + * read 2 and then discard the second byte. 
+ */ + op.data.nbytes = 2; + } + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR, - fsr, 1); + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr, + 1); } if (ret) @@ -345,14 +471,17 @@ static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, cr, 1)); + SPI_MEM_OP_DATA_IN(1, cr, 0)); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1); + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr, + 1); } if (ret) @@ -378,17 +507,19 @@ int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B, - 1), + 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, - enable ? SPINOR_OP_EN4B : - SPINOR_OP_EX4B, - NULL, 0); + ret = spi_nor_controller_ops_write_reg(nor, + enable ? SPINOR_OP_EN4B : + SPINOR_OP_EX4B, + NULL, 0); } if (ret) @@ -414,15 +545,17 @@ static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1)); + SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0)); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR, - nor->bouncebuf, 1); + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR, + nor->bouncebuf, 1); } if (ret) @@ -446,15 +579,17 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1)); + SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0)); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR, - nor->bouncebuf, 1); + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR, + nor->bouncebuf, 1); } if (ret) @@ -477,15 +612,17 @@ int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, sr, 1)); + SPI_MEM_OP_DATA_IN(1, sr, 0)); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR, - sr, 1); + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr, + 1); } if (ret) @@ -522,15 +659,17 @@ static void spi_nor_clear_sr(struct spi_nor *nor) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); + 
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR, - NULL, 0); + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR, + NULL, 0); } if (ret) @@ -586,15 +725,17 @@ static void spi_nor_clear_fsr(struct spi_nor *nor) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR, - NULL, 0); + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR, + NULL, 0); } if (ret) @@ -720,7 +861,7 @@ int spi_nor_wait_till_ready(struct spi_nor *nor) * * Return: 0 on success, -errno otherwise. */ -static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len) +int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len) { int ret; @@ -730,15 +871,17 @@ static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(len, sr, 1)); + SPI_MEM_OP_DATA_OUT(len, sr, 0)); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR, - sr, len); + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr, + len); } if (ret) { @@ -906,7 +1049,7 @@ static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr) * * Return: 0 on success, -errno otherwise. 
*/ -static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1) +int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1) { if (nor->flags & SNOR_F_HAS_16BIT_SR) return spi_nor_write_16bit_sr_and_check(nor, sr1); @@ -932,15 +1075,17 @@ static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(1, sr2, 1)); + SPI_MEM_OP_DATA_OUT(1, sr2, 0)); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2, - sr2, 1); + ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2, + sr2, 1); } if (ret) { @@ -966,15 +1111,17 @@ static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(1, sr2, 1)); + SPI_MEM_OP_DATA_IN(1, sr2, 0)); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2, - sr2, 1); + ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2, + 1); } if (ret) @@ -997,15 +1144,18 @@ static int spi_nor_erase_chip(struct spi_nor *nor) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); + spi_nor_spimem_setup_op(nor, &op, nor->write_proto); + ret = spi_mem_exec_op(nor->spimem, &op); } else { - ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE, - NULL, 0); + ret = spi_nor_controller_ops_write_reg(nor, + SPINOR_OP_CHIP_ERASE, + NULL, 0); } if (ret) @@ -1139,14 +1289,16 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) if (nor->spimem) { struct spi_mem_op op = - SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1), - SPI_MEM_OP_ADDR(nor->addr_width, addr, 1), + SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_width, addr, 0), SPI_MEM_OP_NO_DUMMY, SPI_MEM_OP_NO_DATA); + spi_nor_spimem_setup_op(nor, &op, nor->write_proto); + return spi_mem_exec_op(nor->spimem, &op); } else if (nor->controller_ops->erase) { - return nor->controller_ops->erase(nor, addr); + return spi_nor_controller_ops_erase(nor, addr); } /* @@ -1158,8 +1310,8 @@ static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) addr >>= 8; } - return nor->controller_ops->write_reg(nor, nor->erase_opcode, - nor->bouncebuf, nor->addr_width); + return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode, + nor->bouncebuf, nor->addr_width); } /** @@ -1447,7 +1599,7 @@ destroy_erase_cmd_list: /* * Erase an address range on the nor chip. The address range may extend - * one or more erase sectors. Return an error is there is a problem erasing. + * one or more erase sectors. Return an error if there is a problem erasing. 
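
For controllers that only expose write_reg(), spi_nor_erase_sector() above packs the sector address into the bounce buffer before sending the erase opcode. The full loop body is not visible in the hunk, so the sketch below reconstructs what the "addr >>= 8" suggests: the address is laid out most-significant byte first across addr_width bytes. Treat it as an illustration, not a verbatim copy.

#include <stdint.h>
#include <stdio.h>

static void pack_erase_addr(uint32_t addr, int addr_width, uint8_t *buf)
{
    int i;

    for (i = addr_width - 1; i >= 0; i--) {
        buf[i] = addr & 0xff;   /* MSB ends up in buf[0] */
        addr >>= 8;
    }
}

int main(void)
{
    uint8_t buf[4];
    int i;

    pack_erase_addr(0x123456, 3, buf);  /* 3-byte addressing */
    for (i = 0; i < 3; i++)
        printf("%02x ", buf[i]);        /* prints: 12 34 56 */
    printf("\n");
    return 0;
}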
*/ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) { @@ -2204,7 +2356,7 @@ static int spi_nor_check(struct spi_nor *nor) return 0; } -static void +void spi_nor_set_read_settings(struct spi_nor_read_command *read, u8 num_mode_clocks, u8 num_wait_states, @@ -2253,6 +2405,7 @@ int spi_nor_hwcaps_read2cmd(u32 hwcaps) { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 }, { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 }, { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR }, + { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR }, }; return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd, @@ -2269,6 +2422,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 }, { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 }, { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 }, + { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR }, }; return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd, @@ -2281,7 +2435,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) *@nor: pointer to a 'struct spi_nor' *@op: pointer to op template to be checked * - * Returns 0 if operation is supported, -ENOTSUPP otherwise. + * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. */ static int spi_nor_spimem_check_op(struct spi_nor *nor, struct spi_mem_op *op) @@ -2295,12 +2449,12 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor, op->addr.nbytes = 4; if (!spi_mem_supports_op(nor->spimem, op)) { if (nor->mtd.size > SZ_16M) - return -ENOTSUPP; + return -EOPNOTSUPP; /* If flash size <= 16MB, 3 address bytes are sufficient */ op->addr.nbytes = 3; if (!spi_mem_supports_op(nor->spimem, op)) - return -ENOTSUPP; + return -EOPNOTSUPP; } return 0; @@ -2312,22 +2466,22 @@ static int spi_nor_spimem_check_op(struct spi_nor *nor, *@nor: pointer to a 'struct spi_nor' *@read: pointer to op template to be checked * - * Returns 0 if operation is supported, -ENOTSUPP otherwise. + * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. */ static int spi_nor_spimem_check_readop(struct spi_nor *nor, const struct spi_nor_read_command *read) { - struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1), - SPI_MEM_OP_ADDR(3, 0, 1), - SPI_MEM_OP_DUMMY(0, 1), - SPI_MEM_OP_DATA_IN(0, NULL, 1)); + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0), + SPI_MEM_OP_ADDR(3, 0, 0), + SPI_MEM_OP_DUMMY(1, 0), + SPI_MEM_OP_DATA_IN(1, NULL, 0)); - op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto); - op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto); - op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto); - op.dummy.buswidth = op.addr.buswidth; - op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) * - op.dummy.buswidth / 8; + spi_nor_spimem_setup_op(nor, &op, read->proto); + + /* convert the dummy cycles to the number of bytes */ + op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8; + if (spi_nor_protocol_is_dtr(nor->read_proto)) + op.dummy.nbytes *= 2; return spi_nor_spimem_check_op(nor, &op); } @@ -2338,19 +2492,17 @@ static int spi_nor_spimem_check_readop(struct spi_nor *nor, *@nor: pointer to a 'struct spi_nor' *@pp: pointer to op template to be checked * - * Returns 0 if operation is supported, -ENOTSUPP otherwise. + * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. 
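
The spi_nor_spimem_check_op() hunk above probes the controller with a 4-byte address template first and only retries with 3 address bytes when the flash fits in 16 MiB, otherwise it reports -EOPNOTSUPP. A standalone sketch of that fallback, with a stubbed predicate standing in for spi_mem_supports_op(); all names here are invented for the example.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_16M (16u * 1024 * 1024)

/* Stand-in for spi_mem_supports_op(): pretend only 3-byte ops work. */
static bool ctrl_supports_op(int addr_nbytes)
{
    return addr_nbytes == 3;
}

static int check_op(uint64_t flash_size, int *addr_nbytes)
{
    *addr_nbytes = 4;
    if (ctrl_supports_op(*addr_nbytes))
        return 0;

    if (flash_size > SZ_16M)
        return -EOPNOTSUPP;

    /* If the flash fits in 16 MiB, 3 address bytes are sufficient. */
    *addr_nbytes = 3;
    if (ctrl_supports_op(*addr_nbytes))
        return 0;

    return -EOPNOTSUPP;
}

int main(void)
{
    int nbytes, ret;

    ret = check_op(8u * 1024 * 1024, &nbytes);
    printf("8 MiB flash: ret=%d, addr bytes=%d\n", ret, nbytes);

    ret = check_op(32u * 1024 * 1024, &nbytes);
    printf("32 MiB flash: ret=%d\n", ret);
    return 0;
}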
*/ static int spi_nor_spimem_check_pp(struct spi_nor *nor, const struct spi_nor_pp_command *pp) { - struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1), - SPI_MEM_OP_ADDR(3, 0, 1), + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0), + SPI_MEM_OP_ADDR(3, 0, 0), SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(0, NULL, 1)); + SPI_MEM_OP_DATA_OUT(1, NULL, 0)); - op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto); - op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto); - op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto); + spi_nor_spimem_setup_op(nor, &op, pp->proto); return spi_nor_spimem_check_op(nor, &op); } @@ -2368,12 +2520,16 @@ spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps) struct spi_nor_flash_parameter *params = nor->params; unsigned int cap; - /* DTR modes are not supported yet, mask them all. */ - *hwcaps &= ~SNOR_HWCAPS_DTR; - /* X-X-X modes are not supported yet, mask them all. */ *hwcaps &= ~SNOR_HWCAPS_X_X_X; + /* + * If the reset line is broken, we do not want to enter a stateful + * mode. + */ + if (nor->flags & SNOR_F_BROKEN_RESET) + *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR); + for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) { int rdidx, ppidx; @@ -2537,7 +2693,7 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map, } /* - * Otherwise, the current erase size is still a valid canditate. + * Otherwise, the current erase size is still a valid candidate. * Select the biggest valid candidate. */ if (!erase && tested_erase->size) @@ -2628,7 +2784,7 @@ static int spi_nor_default_setup(struct spi_nor *nor, * controller directly implements the spi_nor interface. * Yet another reason to switch to spi-mem. */ - ignored_mask = SNOR_HWCAPS_X_X_X; + ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR; if (shared_mask & ignored_mask) { dev_dbg(nor->dev, "SPI n-n-n protocols are not supported.\n"); @@ -2729,6 +2885,7 @@ static void spi_nor_info_init_params(struct spi_nor *nor) nor->flags |= SNOR_F_HAS_16BIT_SR; /* Set SPI NOR sizes. */ + params->writesize = 1; params->size = (u64)info->sector_size * info->n_sectors; params->page_size = info->page_size; @@ -2773,11 +2930,28 @@ static void spi_nor_info_init_params(struct spi_nor *nor) SNOR_PROTO_1_1_8); } + if (info->flags & SPI_NOR_OCTAL_DTR_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_8_8_8_DTR], + 0, 20, SPINOR_OP_READ_FAST, + SNOR_PROTO_8_8_8_DTR); + } + /* Page Program settings. */ params->hwcaps.mask |= SNOR_HWCAPS_PP; spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], SPINOR_OP_PP, SNOR_PROTO_1_1_1); + if (info->flags & SPI_NOR_OCTAL_DTR_PP) { + params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR; + /* + * Since xSPI Page Program opcode is backward compatible with + * Legacy SPI, use Legacy SPI opcode there as well. + */ + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_8_8_8_DTR], + SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR); + } + /* * Sector Erase settings. Sort Erase Types in ascending order, with the * smallest erase size starting at BIT(0). 
@@ -2885,7 +3059,8 @@ static int spi_nor_init_params(struct spi_nor *nor) spi_nor_manufacturer_init_params(nor); - if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) && + if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) && !(nor->info->flags & SPI_NOR_SKIP_SFDP)) spi_nor_sfdp_init_params(nor); @@ -2896,6 +3071,38 @@ static int spi_nor_init_params(struct spi_nor *nor) return 0; } +/** spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed + * @nor: pointer to a 'struct spi_nor' + * @enable: whether to enable or disable Octal DTR + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable) +{ + int ret; + + if (!nor->params->octal_dtr_enable) + return 0; + + if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR && + nor->write_proto == SNOR_PROTO_8_8_8_DTR)) + return 0; + + if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE)) + return 0; + + ret = nor->params->octal_dtr_enable(nor, enable); + if (ret) + return ret; + + if (enable) + nor->reg_proto = SNOR_PROTO_8_8_8_DTR; + else + nor->reg_proto = SNOR_PROTO_1_1_1; + + return 0; +} + /** * spi_nor_quad_enable() - enable Quad I/O if needed. * @nor: pointer to a 'struct spi_nor' @@ -2915,39 +3122,65 @@ static int spi_nor_quad_enable(struct spi_nor *nor) } /** - * spi_nor_unlock_all() - Unlocks the entire flash memory array. + * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array. * @nor: pointer to a 'struct spi_nor'. * * Some SPI NOR flashes are write protected by default after a power-on reset * cycle, in order to avoid inadvertent writes during power-up. Backward * compatibility imposes to unlock the entire flash memory array at power-up * by default. + * + * Unprotecting the entire flash array will fail for boards which are hardware + * write-protected. Thus any errors are ignored. */ -static int spi_nor_unlock_all(struct spi_nor *nor) +static void spi_nor_try_unlock_all(struct spi_nor *nor) { - if (nor->flags & SNOR_F_HAS_LOCK) - return spi_nor_unlock(&nor->mtd, 0, nor->params->size); + int ret; - return 0; + if (!(nor->flags & SNOR_F_HAS_LOCK)) + return; + + dev_dbg(nor->dev, "Unprotecting entire flash array\n"); + + ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size); + if (ret) + dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n"); } static int spi_nor_init(struct spi_nor *nor) { int err; - err = spi_nor_quad_enable(nor); + err = spi_nor_octal_dtr_enable(nor, true); if (err) { - dev_dbg(nor->dev, "quad mode not supported\n"); + dev_dbg(nor->dev, "octal mode not supported\n"); return err; } - err = spi_nor_unlock_all(nor); + err = spi_nor_quad_enable(nor); if (err) { - dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n"); + dev_dbg(nor->dev, "quad mode not supported\n"); return err; } - if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) { + /* + * Some SPI NOR flashes are write protected by default after a power-on + * reset cycle, in order to avoid inadvertent writes during power-up. + * Backward compatibility imposes to unlock the entire flash memory + * array at power-up by default. Depending on the kernel configuration + * (1) do nothing, (2) always unlock the entire flash array or (3) + * unlock the entire flash array only when the software write + * protection bits are volatile. The latter is indicated by + * SNOR_F_SWP_IS_VOLATILE. 
+ */ + if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) || + (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) && + nor->flags & SNOR_F_SWP_IS_VOLATILE)) + spi_nor_try_unlock_all(nor); + + if (nor->addr_width == 4 && + nor->read_proto != SNOR_PROTO_8_8_8_DTR && + !(nor->flags & SNOR_F_4B_OPCODES)) { /* * If the RESET# pin isn't hooked up properly, or the system * otherwise doesn't perform a reset command in the boot @@ -2963,6 +3196,59 @@ static int spi_nor_init(struct spi_nor *nor) return 0; } +static void spi_nor_soft_reset(struct spi_nor *nor) +{ + struct spi_mem_op op; + int ret; + + op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DATA); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) { + dev_warn(nor->dev, "Software reset failed: %d\n", ret); + return; + } + + op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_NO_DATA); + + spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) { + dev_warn(nor->dev, "Software reset failed: %d\n", ret); + return; + } + + /* + * Software Reset is not instant, and the delay varies from flash to + * flash. Looking at a few flashes, most range somewhere below 100 + * microseconds. So, sleep for a range of 200-400 us. + */ + usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX); +} + +/* mtd suspend handler */ +static int spi_nor_suspend(struct mtd_info *mtd) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + int ret; + + /* Disable octal DTR mode if we enabled it. */ + ret = spi_nor_octal_dtr_enable(nor, false); + if (ret) + dev_err(nor->dev, "suspend() failed\n"); + + return ret; +} + /* mtd resume handler */ static void spi_nor_resume(struct mtd_info *mtd) { @@ -2982,6 +3268,9 @@ void spi_nor_restore(struct spi_nor *nor) if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && nor->flags & SNOR_F_BROKEN_RESET) nor->params->set_4byte_addr_mode(nor, false); + + if (nor->flags & SNOR_F_SOFT_RESET) + spi_nor_soft_reset(nor); } EXPORT_SYMBOL_GPL(spi_nor_restore); @@ -3006,6 +3295,20 @@ static int spi_nor_set_addr_width(struct spi_nor *nor) { if (nor->addr_width) { /* already configured from SFDP */ + } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) { + /* + * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So + * in this protocol an odd address width cannot be used because + * then the address phase would only span a cycle and a half. + * Half a cycle would be left over. We would then have to start + * the dummy phase in the middle of a cycle and so too the data + * phase, and we will end the transaction with half a cycle left + * over. + * + * Force all 8D-8D-8D flashes to use an address width of 4 to + * avoid this situation. 
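
The IS_ENABLED() test above ties the new Kconfig choice to the per-flash SNOR_F_SWP_IS_VOLATILE flag: the legacy option always unlocks, the default option unlocks only flashes whose protection bits are volatile, and the "keep" option never touches them. A minimal sketch of that decision table (enum and helper are invented for the example; the kernel selects the policy at build time rather than at run time):

#include <stdbool.h>
#include <stdio.h>

enum swp_policy { SWP_DISABLE, SWP_DISABLE_ON_VOLATILE, SWP_KEEP };

static bool should_try_unlock_all(enum swp_policy policy, bool swp_is_volatile)
{
    switch (policy) {
    case SWP_DISABLE:
        return true;            /* legacy behaviour: always unlock */
    case SWP_DISABLE_ON_VOLATILE:
        return swp_is_volatile; /* only chips that power up protected */
    case SWP_KEEP:
    default:
        return false;           /* leave the protection bits untouched */
    }
}

int main(void)
{
    printf("volatile BP, default policy:     %d\n",
           should_try_unlock_all(SWP_DISABLE_ON_VOLATILE, true));
    printf("non-volatile BP, default policy: %d\n",
           should_try_unlock_all(SWP_DISABLE_ON_VOLATILE, false));
    return 0;
}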
+ */ + nor->addr_width = 4; } else if (nor->info->addr_width) { nor->addr_width = nor->info->addr_width; } else { @@ -3146,11 +3449,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, mtd->name = dev_name(dev); mtd->priv = nor; mtd->type = MTD_NORFLASH; - mtd->writesize = 1; + mtd->writesize = nor->params->writesize; mtd->flags = MTD_CAP_NORFLASH; mtd->size = nor->params->size; mtd->_erase = spi_nor_erase; mtd->_read = spi_nor_read; + mtd->_suspend = spi_nor_suspend; mtd->_resume = spi_nor_resume; if (nor->params->locking_ops) { @@ -3171,6 +3475,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; if (info->flags & USE_CLSR) nor->flags |= SNOR_F_USE_CLSR; + if (info->flags & SPI_NOR_SWP_IS_VOLATILE) + nor->flags |= SNOR_F_SWP_IS_VOLATILE; if (info->flags & SPI_NOR_4BIT_BP) { nor->flags |= SNOR_F_HAS_4BIT_BP; @@ -3201,6 +3507,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, if (info->flags & SPI_NOR_4B_OPCODES) nor->flags |= SNOR_F_4B_OPCODES; + if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE) + nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE; + ret = spi_nor_set_addr_width(nor); if (ret) return ret; @@ -3236,23 +3545,28 @@ EXPORT_SYMBOL_GPL(spi_nor_scan); static int spi_nor_create_read_dirmap(struct spi_nor *nor) { struct spi_mem_dirmap_info info = { - .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1), - SPI_MEM_OP_ADDR(nor->addr_width, 0, 1), - SPI_MEM_OP_DUMMY(nor->read_dummy, 1), - SPI_MEM_OP_DATA_IN(0, NULL, 1)), + .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_width, 0, 0), + SPI_MEM_OP_DUMMY(nor->read_dummy, 0), + SPI_MEM_OP_DATA_IN(0, NULL, 0)), .offset = 0, .length = nor->mtd.size, }; struct spi_mem_op *op = &info.op_tmpl; - /* get transfer protocols. */ - op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto); - op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto); - op->dummy.buswidth = op->addr.buswidth; - op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto); + spi_nor_spimem_setup_op(nor, op, nor->read_proto); /* convert the dummy cycles to the number of bytes */ op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8; + if (spi_nor_protocol_is_dtr(nor->read_proto)) + op->dummy.nbytes *= 2; + + /* + * Since spi_nor_spimem_setup_op() only sets buswidth when the number + * of data bytes is non-zero, the data buswidth won't be set here. So, + * do it explicitly. + */ + op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto); nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, &info); @@ -3262,24 +3576,27 @@ static int spi_nor_create_read_dirmap(struct spi_nor *nor) static int spi_nor_create_write_dirmap(struct spi_nor *nor) { struct spi_mem_dirmap_info info = { - .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1), - SPI_MEM_OP_ADDR(nor->addr_width, 0, 1), + .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0), + SPI_MEM_OP_ADDR(nor->addr_width, 0, 0), SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(0, NULL, 1)), + SPI_MEM_OP_DATA_OUT(0, NULL, 0)), .offset = 0, .length = nor->mtd.size, }; struct spi_mem_op *op = &info.op_tmpl; - /* get transfer protocols. 
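
The comment above spells out why 8D-8D-8D flashes are forced to a 4-byte address width: with data clocked on both edges, one byte occupies half a clock cycle, so an n-byte address phase lasts n half-cycles and an odd n would end mid-cycle. The throwaway sketch below just prints that arithmetic for 3- and 4-byte widths.

#include <stdio.h>

int main(void)
{
    int addr_width;

    for (addr_width = 3; addr_width <= 4; addr_width++)
        printf("%d address bytes = %d half-cycles%s\n",
               addr_width, addr_width,
               (addr_width & 1) ? " -> ends mid-cycle" : "");
    return 0;
}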
*/ - op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto); - op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto); - op->dummy.buswidth = op->addr.buswidth; - op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); - if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) op->addr.nbytes = 0; + spi_nor_spimem_setup_op(nor, op, nor->write_proto); + + /* + * Since spi_nor_spimem_setup_op() only sets buswidth when the number + * of data bytes is non-zero, the data buswidth won't be set here. So, + * do it explicitly. + */ + op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); + nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, &info); return PTR_ERR_OR_ZERO(nor->dirmap.wdesc); diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 6f2f6b27173f..d631ee299de3 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -26,6 +26,9 @@ enum spi_nor_option_flags { SNOR_F_HAS_SR_TB_BIT6 = BIT(11), SNOR_F_HAS_4BIT_BP = BIT(12), SNOR_F_HAS_SR_BP3_BIT6 = BIT(13), + SNOR_F_IO_MODE_EN_VOLATILE = BIT(14), + SNOR_F_SOFT_RESET = BIT(15), + SNOR_F_SWP_IS_VOLATILE = BIT(16), }; struct spi_nor_read_command { @@ -62,6 +65,7 @@ enum spi_nor_read_command_index { SNOR_CMD_READ_1_8_8, SNOR_CMD_READ_8_8_8, SNOR_CMD_READ_1_8_8_DTR, + SNOR_CMD_READ_8_8_8_DTR, SNOR_CMD_READ_MAX }; @@ -78,6 +82,7 @@ enum spi_nor_pp_command_index { SNOR_CMD_PP_1_1_8, SNOR_CMD_PP_1_8_8, SNOR_CMD_PP_8_8_8, + SNOR_CMD_PP_8_8_8_DTR, SNOR_CMD_PP_MAX }; @@ -189,7 +194,12 @@ struct spi_nor_locking_ops { * Serial Flash Discoverable Parameters (SFDP) tables. * * @size: the flash memory density in bytes. + * @writesize Minimal writable flash unit size. Defaults to 1. Set to + * ECC unit size for ECC-ed flashes. * @page_size: the page size of the SPI NOR flash memory. + * @rdsr_dummy: dummy cycles needed for Read Status Register command. + * @rdsr_addr_nbytes: dummy address bytes needed for Read Status Register + * command. * @hwcaps: describes the read and page program hardware * capabilities. * @reads: read capabilities ordered by priority: the higher index @@ -198,6 +208,7 @@ struct spi_nor_locking_ops { * higher index in the array, the higher priority. * @erase_map: the erase map parsed from the SFDP Sector Map Parameter * Table. + * @octal_dtr_enable: enables SPI NOR octal DTR mode. * @quad_enable: enables SPI NOR quad mode. * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode. * @convert_addr: converts an absolute address into something the flash @@ -211,7 +222,10 @@ struct spi_nor_locking_ops { */ struct spi_nor_flash_parameter { u64 size; + u32 writesize; u32 page_size; + u8 rdsr_dummy; + u8 rdsr_addr_nbytes; struct spi_nor_hwcaps hwcaps; struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; @@ -219,6 +233,7 @@ struct spi_nor_flash_parameter { struct spi_nor_erase_map erase_map; + int (*octal_dtr_enable)(struct spi_nor *nor, bool enable); int (*quad_enable)(struct spi_nor *nor); int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable); u32 (*convert_addr)(struct spi_nor *nor, u32 addr); @@ -311,6 +326,18 @@ struct flash_info { * BP3 is bit 6 of status register. * Must be used with SPI_NOR_4BIT_BP. */ +#define SPI_NOR_OCTAL_DTR_READ BIT(19) /* Flash supports octal DTR Read. */ +#define SPI_NOR_OCTAL_DTR_PP BIT(20) /* Flash supports Octal DTR Page Program */ +#define SPI_NOR_IO_MODE_EN_VOLATILE BIT(21) /* + * Flash enables the best + * available I/O mode via a + * volatile bit. 
+ */ +#define SPI_NOR_SWP_IS_VOLATILE BIT(22) /* + * Flash has volatile software write + * protection bits. Usually these will + * power-up in a write-protected state. + */ /* Part specific fixup hooks. */ const struct spi_nor_fixups *fixups; @@ -399,6 +426,9 @@ extern const struct spi_nor_manufacturer spi_nor_winbond; extern const struct spi_nor_manufacturer spi_nor_xilinx; extern const struct spi_nor_manufacturer spi_nor_xmc; +void spi_nor_spimem_setup_op(const struct spi_nor *nor, + struct spi_mem_op *op, + const enum spi_nor_protocol proto); int spi_nor_write_enable(struct spi_nor *nor); int spi_nor_write_disable(struct spi_nor *nor); int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable); @@ -409,6 +439,9 @@ void spi_nor_unlock_and_unprep(struct spi_nor *nor); int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor); int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor); int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor); +int spi_nor_read_sr(struct spi_nor *nor, u8 *sr); +int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len); +int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1); int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr); ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, @@ -418,6 +451,11 @@ ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, int spi_nor_hwcaps_read2cmd(u32 hwcaps); u8 spi_nor_convert_3to4_read(u8 opcode); +void spi_nor_set_read_settings(struct spi_nor_read_command *read, + u8 num_mode_clocks, + u8 num_wait_states, + u8 opcode, + enum spi_nor_protocol proto); void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, enum spi_nor_protocol proto); diff --git a/drivers/mtd/spi-nor/esmt.c b/drivers/mtd/spi-nor/esmt.c index c93170008118..cfc9218c1053 100644 --- a/drivers/mtd/spi-nor/esmt.c +++ b/drivers/mtd/spi-nor/esmt.c @@ -11,7 +11,7 @@ static const struct flash_info esmt_parts[] = { /* ESMT */ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, - SECT_4K | SPI_NOR_HAS_LOCK) }, + SECT_4K | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) }, { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, diff --git a/drivers/mtd/spi-nor/intel.c b/drivers/mtd/spi-nor/intel.c index d8196f101368..8ece9cceb3cf 100644 --- a/drivers/mtd/spi-nor/intel.c +++ b/drivers/mtd/spi-nor/intel.c @@ -10,23 +10,16 @@ static const struct flash_info intel_parts[] = { /* Intel/Numonyx -- xxxs33b */ - { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, - { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, - { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, -}; - -static void intel_default_init(struct spi_nor *nor) -{ - nor->flags |= SNOR_F_HAS_LOCK; -} - -static const struct spi_nor_fixups intel_fixups = { - .default_init = intel_default_init, + { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, + SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, + { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, + SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, + { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, + SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, }; const struct spi_nor_manufacturer spi_nor_intel = { .name = "intel", .parts = intel_parts, .nparts = ARRAY_SIZE(intel_parts), - .fixups = &intel_fixups, }; diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index ef3695080710..c224e59820a1 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -8,10 +8,123 @@ #include "core.h" +#define SPINOR_OP_MT_DTR_RD 
0xfd /* Fast Read opcode in DTR mode */ +#define SPINOR_OP_MT_RD_ANY_REG 0x85 /* Read volatile register */ +#define SPINOR_OP_MT_WR_ANY_REG 0x81 /* Write volatile register */ +#define SPINOR_REG_MT_CFR0V 0x00 /* For setting octal DTR mode */ +#define SPINOR_REG_MT_CFR1V 0x01 /* For setting dummy cycles */ +#define SPINOR_MT_OCT_DTR 0xe7 /* Enable Octal DTR. */ +#define SPINOR_MT_EXSPI 0xff /* Enable Extended SPI (default) */ + +static int spi_nor_micron_octal_dtr_enable(struct spi_nor *nor, bool enable) +{ + struct spi_mem_op op; + u8 *buf = nor->bouncebuf; + int ret; + + if (enable) { + /* Use 20 dummy cycles for memory array reads. */ + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + *buf = 20; + op = (struct spi_mem_op) + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1), + SPI_MEM_OP_ADDR(3, SPINOR_REG_MT_CFR1V, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(1, buf, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) + return ret; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + return ret; + } + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (enable) + *buf = SPINOR_MT_OCT_DTR; + else + *buf = SPINOR_MT_EXSPI; + + op = (struct spi_mem_op) + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_MT_WR_ANY_REG, 1), + SPI_MEM_OP_ADDR(enable ? 3 : 4, + SPINOR_REG_MT_CFR0V, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(1, buf, 1)); + + if (!enable) + spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) + return ret; + + /* Read flash ID to make sure the switch was successful. */ + op = (struct spi_mem_op) + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), + SPI_MEM_OP_NO_ADDR, + SPI_MEM_OP_DUMMY(enable ? 8 : 0, 1), + SPI_MEM_OP_DATA_IN(round_up(nor->info->id_len, 2), + buf, 1)); + + if (enable) + spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) + return ret; + + if (memcmp(buf, nor->info->id, nor->info->id_len)) + return -EINVAL; + + return 0; +} + +static void mt35xu512aba_default_init(struct spi_nor *nor) +{ + nor->params->octal_dtr_enable = spi_nor_micron_octal_dtr_enable; +} + +static void mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor) +{ + /* Set the Fast Read settings. */ + nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR; + spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR], + 0, 20, SPINOR_OP_MT_DTR_RD, + SNOR_PROTO_8_8_8_DTR); + + nor->cmd_ext_type = SPI_NOR_EXT_REPEAT; + nor->params->rdsr_dummy = 8; + nor->params->rdsr_addr_nbytes = 0; + + /* + * The BFPT quad enable field is set to a reserved value so the quad + * enable function is ignored by spi_nor_parse_bfpt(). Make sure we + * disable it. 
+ */ + nor->params->quad_enable = NULL; +} + +static struct spi_nor_fixups mt35xu512aba_fixups = { + .default_init = mt35xu512aba_default_init, + .post_sfdp = mt35xu512aba_post_sfdp_fixup, +}; + static const struct flash_info micron_parts[] = { { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512, SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ | - SPI_NOR_4B_OPCODES) }, + SPI_NOR_4B_OPCODES | SPI_NOR_OCTAL_DTR_READ | + SPI_NOR_OCTAL_DTR_PP | + SPI_NOR_IO_MODE_EN_VOLATILE) + .fixups = &mt35xu512aba_fixups}, { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) }, diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index e2a43d39eb5f..6ee7719e5903 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -4,6 +4,7 @@ * Copyright (C) 2014, Freescale Semiconductor, Inc. */ +#include <linux/bitfield.h> #include <linux/slab.h> #include <linux/sort.h> #include <linux/mtd/spi-nor.h> @@ -19,6 +20,11 @@ #define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */ #define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */ #define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */ +#define SFDP_PROFILE1_ID 0xff05 /* xSPI Profile 1.0 table. */ +#define SFDP_SCCR_MAP_ID 0xff87 /* + * Status, Control and Configuration + * Register Map. + */ #define SFDP_SIGNATURE 0x50444653U @@ -59,7 +65,7 @@ struct sfdp_bfpt_read { struct sfdp_bfpt_erase { /* - * The half-word at offset <shift> in DWORD <dwoard> encodes the + * The half-word at offset <shift> in DWORD <dword> encodes the * op code and erase sector size to be used by Sector Erase commands. */ u32 dword; @@ -602,10 +608,32 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, break; } + /* Soft Reset support. */ + if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST) + nor->flags |= SNOR_F_SOFT_RESET; + /* Stop here if not JESD216 rev C or later. */ if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B) return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params); + /* 8D-8D-8D command extension. */ + switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) { + case BFPT_DWORD18_CMD_EXT_REP: + nor->cmd_ext_type = SPI_NOR_EXT_REPEAT; + break; + + case BFPT_DWORD18_CMD_EXT_INV: + nor->cmd_ext_type = SPI_NOR_EXT_INVERT; + break; + + case BFPT_DWORD18_CMD_EXT_RES: + dev_dbg(nor->dev, "Reserved command extension used\n"); + break; + + case BFPT_DWORD18_CMD_EXT_16B: + dev_dbg(nor->dev, "16-bit opcodes not supported\n"); + return -EOPNOTSUPP; + } return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params); } @@ -1047,9 +1075,16 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, } /* 4BAIT is the only SFDP table that indicates page program support. */ - if (pp_hwcaps & SNOR_HWCAPS_PP) + if (pp_hwcaps & SNOR_HWCAPS_PP) { spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP], SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1); + /* + * Since xSPI Page Program opcode is backward compatible with + * Legacy SPI, use Legacy SPI opcode there as well. 
+ */ + spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_8_8_8_DTR], + SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR); + } if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4) spi_nor_set_pp_settings(¶ms_pp[SNOR_CMD_PP_1_1_4], SPINOR_OP_PP_1_1_4_4B, @@ -1083,6 +1118,131 @@ out: return ret; } +#define PROFILE1_DWORD1_RDSR_ADDR_BYTES BIT(29) +#define PROFILE1_DWORD1_RDSR_DUMMY BIT(28) +#define PROFILE1_DWORD1_RD_FAST_CMD GENMASK(15, 8) +#define PROFILE1_DWORD4_DUMMY_200MHZ GENMASK(11, 7) +#define PROFILE1_DWORD5_DUMMY_166MHZ GENMASK(31, 27) +#define PROFILE1_DWORD5_DUMMY_133MHZ GENMASK(21, 17) +#define PROFILE1_DWORD5_DUMMY_100MHZ GENMASK(11, 7) + +/** + * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table + * @nor: pointer to a 'struct spi_nor' + * @profile1_header: pointer to the 'struct sfdp_parameter_header' describing + * the Profile 1.0 Table length and version. + * @params: pointer to the 'struct spi_nor_flash_parameter' to be. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_parse_profile1(struct spi_nor *nor, + const struct sfdp_parameter_header *profile1_header, + struct spi_nor_flash_parameter *params) +{ + u32 *dwords, addr; + size_t len; + int ret; + u8 dummy, opcode; + + len = profile1_header->length * sizeof(*dwords); + dwords = kmalloc(len, GFP_KERNEL); + if (!dwords) + return -ENOMEM; + + addr = SFDP_PARAM_HEADER_PTP(profile1_header); + ret = spi_nor_read_sfdp(nor, addr, len, dwords); + if (ret) + goto out; + + le32_to_cpu_array(dwords, profile1_header->length); + + /* Get 8D-8D-8D fast read opcode and dummy cycles. */ + opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0]); + + /* Set the Read Status Register dummy cycles and dummy address bytes. */ + if (dwords[0] & PROFILE1_DWORD1_RDSR_DUMMY) + params->rdsr_dummy = 8; + else + params->rdsr_dummy = 4; + + if (dwords[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES) + params->rdsr_addr_nbytes = 4; + else + params->rdsr_addr_nbytes = 0; + + /* + * We don't know what speed the controller is running at. Find the + * dummy cycles for the fastest frequency the flash can run at to be + * sure we are never short of dummy cycles. A value of 0 means the + * frequency is not supported. + * + * Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let + * flashes set the correct value if needed in their fixup hooks. + */ + dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[3]); + if (!dummy) + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, dwords[4]); + if (!dummy) + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, dwords[4]); + if (!dummy) + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, dwords[4]); + if (!dummy) + dev_dbg(nor->dev, + "Can't find dummy cycles from Profile 1.0 table\n"); + + /* Round up to an even value to avoid tripping controllers up. */ + dummy = round_up(dummy, 2); + + /* Update the fast read settings. */ + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_8_8_8_DTR], + 0, dummy, opcode, + SNOR_PROTO_8_8_8_DTR); + +out: + kfree(dwords); + return ret; +} + +#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE BIT(31) + +/** + * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register + * Map. + * @nor: pointer to a 'struct spi_nor' + * @sccr_header: pointer to the 'struct sfdp_parameter_header' describing + * the SCCR Map table length and version. + * @params: pointer to the 'struct spi_nor_flash_parameter' to be. + * + * Return: 0 on success, -errno otherwise. 
+ */ +static int spi_nor_parse_sccr(struct spi_nor *nor, + const struct sfdp_parameter_header *sccr_header, + struct spi_nor_flash_parameter *params) +{ + u32 *dwords, addr; + size_t len; + int ret; + + len = sccr_header->length * sizeof(*dwords); + dwords = kmalloc(len, GFP_KERNEL); + if (!dwords) + return -ENOMEM; + + addr = SFDP_PARAM_HEADER_PTP(sccr_header); + ret = spi_nor_read_sfdp(nor, addr, len, dwords); + if (ret) + goto out; + + le32_to_cpu_array(dwords, sccr_header->length); + + if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[22])) + nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE; + +out: + kfree(dwords); + return ret; +} + /** * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters. * @nor: pointer to a 'struct spi_nor' @@ -1184,6 +1344,14 @@ int spi_nor_parse_sfdp(struct spi_nor *nor, err = spi_nor_parse_4bait(nor, param_header, params); break; + case SFDP_PROFILE1_ID: + err = spi_nor_parse_profile1(nor, param_header, params); + break; + + case SFDP_SCCR_MAP_ID: + err = spi_nor_parse_sccr(nor, param_header, params); + break; + default: break; } diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h index 7f9846b3a1ad..89152ae1cf3e 100644 --- a/drivers/mtd/spi-nor/sfdp.h +++ b/drivers/mtd/spi-nor/sfdp.h @@ -90,6 +90,14 @@ struct sfdp_bfpt { #define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20) #define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */ +#define BFPT_DWORD16_SWRST_EN_RST BIT(12) + +#define BFPT_DWORD18_CMD_EXT_MASK GENMASK(30, 29) +#define BFPT_DWORD18_CMD_EXT_REP (0x0UL << 29) /* Repeat */ +#define BFPT_DWORD18_CMD_EXT_INV (0x1UL << 29) /* Invert */ +#define BFPT_DWORD18_CMD_EXT_RES (0x2UL << 29) /* Reserved */ +#define BFPT_DWORD18_CMD_EXT_16B (0x3UL << 29) /* 16-bit opcode */ + struct sfdp_parameter_header { u8 id_lsb; u8 minor; diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 8429b4af999a..b0c5521c1e27 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -8,6 +8,173 @@ #include "core.h" +#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */ +#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */ +#define SPINOR_REG_CYPRESS_CFR2V 0x00800003 +#define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb +#define SPINOR_REG_CYPRESS_CFR3V 0x00800004 +#define SPINOR_REG_CYPRESS_CFR3V_PGSZ BIT(4) /* Page size. */ +#define SPINOR_REG_CYPRESS_CFR5V 0x00800006 +#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN 0x3 +#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS 0 +#define SPINOR_OP_CYPRESS_RD_FAST 0xee + +/** + * spi_nor_cypress_octal_dtr_enable() - Enable octal DTR on Cypress flashes. + * @nor: pointer to a 'struct spi_nor' + * @enable: whether to enable or disable Octal DTR + * + * This also sets the memory access latency cycles to 24 to allow the flash to + * run at up to 200MHz. + * + * Return: 0 on success, -errno otherwise. + */ +static int spi_nor_cypress_octal_dtr_enable(struct spi_nor *nor, bool enable) +{ + struct spi_mem_op op; + u8 *buf = nor->bouncebuf; + int ret; + + if (enable) { + /* Use 24 dummy cycles for memory array reads. 
*/ + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + *buf = SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24; + op = (struct spi_mem_op) + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1), + SPI_MEM_OP_ADDR(3, SPINOR_REG_CYPRESS_CFR2V, + 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(1, buf, 1)); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) + return ret; + + ret = spi_nor_wait_till_ready(nor); + if (ret) + return ret; + + nor->read_dummy = 24; + } + + /* Set/unset the octal and DTR enable bits. */ + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + if (enable) + *buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN; + else + *buf = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS; + + op = (struct spi_mem_op) + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WR_ANY_REG, 1), + SPI_MEM_OP_ADDR(enable ? 3 : 4, + SPINOR_REG_CYPRESS_CFR5V, + 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_OUT(1, buf, 1)); + + if (!enable) + spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) + return ret; + + /* Read flash ID to make sure the switch was successful. */ + op = (struct spi_mem_op) + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), + SPI_MEM_OP_ADDR(enable ? 4 : 0, 0, 1), + SPI_MEM_OP_DUMMY(enable ? 3 : 0, 1), + SPI_MEM_OP_DATA_IN(round_up(nor->info->id_len, 2), + buf, 1)); + + if (enable) + spi_nor_spimem_setup_op(nor, &op, SNOR_PROTO_8_8_8_DTR); + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) + return ret; + + if (memcmp(buf, nor->info->id, nor->info->id_len)) + return -EINVAL; + + return 0; +} + +static void s28hs512t_default_init(struct spi_nor *nor) +{ + nor->params->octal_dtr_enable = spi_nor_cypress_octal_dtr_enable; + nor->params->writesize = 16; +} + +static void s28hs512t_post_sfdp_fixup(struct spi_nor *nor) +{ + /* + * On older versions of the flash the xSPI Profile 1.0 table has the + * 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE. + */ + if (nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0) + nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode = + SPINOR_OP_CYPRESS_RD_FAST; + + /* This flash is also missing the 4-byte Page Program opcode bit. */ + spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP], + SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1); + /* + * Since xSPI Page Program opcode is backward compatible with + * Legacy SPI, use Legacy SPI opcode there as well. + */ + spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP_8_8_8_DTR], + SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR); + + /* + * The xSPI Profile 1.0 table advertises the number of additional + * address bytes needed for Read Status Register command as 0 but the + * actual value for that is 4. + */ + nor->params->rdsr_addr_nbytes = 4; +} + +static int s28hs512t_post_bfpt_fixup(struct spi_nor *nor, + const struct sfdp_parameter_header *bfpt_header, + const struct sfdp_bfpt *bfpt, + struct spi_nor_flash_parameter *params) +{ + /* + * The BFPT table advertises a 512B page size but the page size is + * actually configurable (with the default being 256B). Read from + * CFR3V[4] and set the correct size. 
+ */ + struct spi_mem_op op = + SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RD_ANY_REG, 1), + SPI_MEM_OP_ADDR(3, SPINOR_REG_CYPRESS_CFR3V, 1), + SPI_MEM_OP_NO_DUMMY, + SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1)); + int ret; + + ret = spi_mem_exec_op(nor->spimem, &op); + if (ret) + return ret; + + if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ) + params->page_size = 512; + else + params->page_size = 256; + + return 0; +} + +static struct spi_nor_fixups s28hs512t_fixups = { + .default_init = s28hs512t_default_init, + .post_sfdp = s28hs512t_post_sfdp_fixup, + .post_bfpt = s28hs512t_post_bfpt_fixup, +}; + static int s25fs_s_post_bfpt_fixups(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, @@ -104,6 +271,11 @@ static const struct flash_info spansion_parts[] = { SPI_NOR_4B_OPCODES) }, { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1, SPI_NOR_NO_ERASE) }, + { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256, + SECT_4K | SPI_NOR_OCTAL_DTR_READ | + SPI_NOR_OCTAL_DTR_PP) + .fixups = &s28hs512t_fixups, + }, }; static void spansion_post_sfdp_fixups(struct spi_nor *nor) diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c index e0af6d25d573..00e48da0744a 100644 --- a/drivers/mtd/spi-nor/sst.c +++ b/drivers/mtd/spi-nor/sst.c @@ -11,26 +11,28 @@ static const struct flash_info sst_parts[] = { /* SST -- large erase sizes are "overlays", "sectors" are 4K */ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, - SECT_4K | SST_WRITE) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, - SECT_4K | SST_WRITE) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, - SECT_4K | SST_WRITE) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, - SECT_4K | SST_WRITE) }, - { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, + { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_4BIT_BP | SPI_NOR_HAS_LOCK | + SPI_NOR_SWP_IS_VOLATILE) }, { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, - SECT_4K | SST_WRITE) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, - SECT_4K | SST_WRITE) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, - SECT_4K | SST_WRITE) }, - { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) }, - { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, + { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K | SPI_NOR_HAS_LOCK) }, + { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_HAS_LOCK) }, { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, - SECT_4K | SST_WRITE) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, - SECT_4K | SST_WRITE) }, + SECT_4K | SST_WRITE | SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) }, { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, @@ -127,11 +129,6 @@ out: return ret; } -static void sst_default_init(struct spi_nor *nor) -{ - nor->flags |= SNOR_F_HAS_LOCK; -} - static void sst_post_sfdp_fixups(struct spi_nor *nor) { if (nor->info->flags & SST_WRITE) @@ -139,7 +136,6 @@ static void 
sst_post_sfdp_fixups(struct spi_nor *nor) } static const struct spi_nor_fixups sst_fixups = { - .default_init = sst_default_init, .post_sfdp = sst_post_sfdp_fixups, }; diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c index 13bca9ea0cae..c4f271314f52 100644 --- a/drivers/mtd/tests/mtd_nandecctest.c +++ b/drivers/mtd/tests/mtd_nandecctest.c @@ -8,7 +8,7 @@ #include <linux/string.h> #include <linux/bitops.h> #include <linux/slab.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include "mtd_test.h" @@ -119,13 +119,13 @@ static void no_bit_error(void *error_data, void *error_ecc, static int no_bit_error_verify(void *error_data, void *error_ecc, void *correct_data, const size_t size) { + bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC); unsigned char calc_ecc[3]; int ret; - __nand_calculate_ecc(error_data, size, calc_ecc, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); - ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); + ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order); + ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size, + sm_order); if (ret == 0 && !memcmp(correct_data, error_data, size)) return 0; @@ -149,13 +149,13 @@ static void single_bit_error_in_ecc(void *error_data, void *error_ecc, static int single_bit_error_correct(void *error_data, void *error_ecc, void *correct_data, const size_t size) { + bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC); unsigned char calc_ecc[3]; int ret; - __nand_calculate_ecc(error_data, size, calc_ecc, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); - ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); + ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order); + ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size, + sm_order); if (ret == 1 && !memcmp(correct_data, error_data, size)) return 0; @@ -186,13 +186,13 @@ static void double_bit_error_in_ecc(void *error_data, void *error_ecc, static int double_bit_error_detect(void *error_data, void *error_ecc, void *correct_data, const size_t size) { + bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC); unsigned char calc_ecc[3]; int ret; - __nand_calculate_ecc(error_data, size, calc_ecc, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); - ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); + ecc_sw_hamming_calculate(error_data, size, calc_ecc, sm_order); + ret = ecc_sw_hamming_correct(error_data, error_ecc, calc_ecc, size, + sm_order); return (ret == -EBADMSG) ? 
0 : -EINVAL; } @@ -248,6 +248,7 @@ static void dump_data_ecc(void *error_data, void *error_ecc, void *correct_data, static int nand_ecc_test_run(const size_t size) { + bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC); int i; int err = 0; void *error_data; @@ -266,9 +267,7 @@ static int nand_ecc_test_run(const size_t size) } prandom_bytes(correct_data, size); - __nand_calculate_ecc(correct_data, size, correct_ecc, - IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)); - + ecc_sw_hamming_calculate(correct_data, size, correct_ecc, sm_order); for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) { nand_ecc_test[i].prepare(error_data, error_ecc, correct_data, correct_ecc, size); diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index e85b04e9716b..40fa994ad6a8 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -50,6 +50,7 @@ * struct mtd_dev_param - MTD device parameter description data structure. * @name: MTD character device node path, MTD device name, or MTD device number * string + * @ubi_num: UBI number * @vid_hdr_offs: VID header offset * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs */ diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 0edecfdbd01f..892494c8cb7c 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -1290,7 +1290,7 @@ static int is_error_sane(int err) * @ubi: UBI device description object * @from: physical eraseblock number from where to copy * @to: physical eraseblock number where to copy - * @vid_hdr: VID header of the @from physical eraseblock + * @vidb: data structure from where the VID header is derived * * This function copies logical eraseblock from physical eraseblock @from to * physical eraseblock @to. The @vid_hdr buffer may be changed by this @@ -1463,6 +1463,7 @@ out_unlock_leb: /** * print_rsvd_warning - warn about not having enough reserved PEBs. * @ubi: UBI device description object + * @ai: UBI attach info object * * This is a helper function for 'ubi_eba_init()' which is called when UBI * cannot reserve enough PEBs for bad block handling. This function makes a diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c index cc547b37cace..1b980d15d9fb 100644 --- a/drivers/mtd/ubi/gluebi.c +++ b/drivers/mtd/ubi/gluebi.c @@ -439,7 +439,7 @@ static int gluebi_resized(struct ubi_volume_info *vi) * gluebi_notify - UBI notification handler. * @nb: registered notifier block * @l: notification type - * @ptr: pointer to the &struct ubi_notification object + * @ns_ptr: pointer to the &struct ubi_notification object */ static int gluebi_notify(struct notifier_block *nb, unsigned long l, void *ns_ptr) diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 9718f5aaaf69..0fce99ff29b5 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -450,7 +450,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_read); * ubi_leb_read_sg - read data into a scatter gather list. * @desc: volume descriptor * @lnum: logical eraseblock number to read from - * @buf: buffer where to store the read data + * @sgl: UBI scatter gather list to store the read data * @offset: offset within the logical eraseblock to read from * @len: how many bytes to read * @check: whether UBI has to check the read data's CRC or not. 
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 7847de75a74c..8455f1d47f3c 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -575,6 +575,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, * @vol_id: the volume ID that last used this PEB * @lnum: the last used logical eraseblock number for the PEB * @torture: if the physical eraseblock has to be tortured + * @nested: denotes whether the work_sem is already held in read mode * * This function returns zero in case of success and a %-ENOMEM in case of * failure. @@ -1063,8 +1064,6 @@ out_unlock: * __erase_worker - physical eraseblock erase worker function. * @ubi: UBI device description object * @wl_wrk: the work object - * @shutdown: non-zero if the worker has to free memory and exit - * because the WL sub-system is shutting down * * This function erases a physical eraseblock and perform torture testing if * needed. It also takes care about marking the physical eraseblock bad if diff --git a/include/linux/mtd/nand-ecc-sw-bch.h b/include/linux/mtd/nand-ecc-sw-bch.h new file mode 100644 index 000000000000..22c92073b3dd --- /dev/null +++ b/include/linux/mtd/nand-ecc-sw-bch.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> + * + * This file is the header for the NAND BCH ECC implementation. + */ + +#ifndef __MTD_NAND_ECC_SW_BCH_H__ +#define __MTD_NAND_ECC_SW_BCH_H__ + +#include <linux/mtd/nand.h> +#include <linux/bch.h> + +/** + * struct nand_ecc_sw_bch_conf - private software BCH ECC engine structure + * @req_ctx: Save request context and tweak the original request to fit the + * engine needs + * @code_size: Number of bytes needed to store a code (one code per step) + * @nsteps: Number of steps + * @calc_buf: Buffer to use when calculating ECC bytes + * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip + * @bch: BCH control structure + * @errloc: error location array + * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid + */ +struct nand_ecc_sw_bch_conf { + struct nand_ecc_req_tweak_ctx req_ctx; + unsigned int code_size; + unsigned int nsteps; + u8 *calc_buf; + u8 *code_buf; + struct bch_control *bch; + unsigned int *errloc; + unsigned char *eccmask; +}; + +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH) + +int nand_ecc_sw_bch_calculate(struct nand_device *nand, + const unsigned char *buf, unsigned char *code); +int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc); +int nand_ecc_sw_bch_init_ctx(struct nand_device *nand); +void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand); +struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void); + +#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */ + +static inline int nand_ecc_sw_bch_calculate(struct nand_device *nand, + const unsigned char *buf, + unsigned char *code) +{ + return -ENOTSUPP; +} + +static inline int nand_ecc_sw_bch_correct(struct nand_device *nand, + unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc) +{ + return -ENOTSUPP; +} + +static inline int nand_ecc_sw_bch_init_ctx(struct nand_device *nand) +{ + return -ENOTSUPP; +} + +static inline void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand) {} + +#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */ + +#endif /* __MTD_NAND_ECC_SW_BCH_H__ */ diff --git a/include/linux/mtd/nand-ecc-sw-hamming.h b/include/linux/mtd/nand-ecc-sw-hamming.h new file mode 100644 index 000000000000..9f9073d86ff3 --- /dev/null 
+++ b/include/linux/mtd/nand-ecc-sw-hamming.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com> + * David Woodhouse <dwmw2@infradead.org> + * Thomas Gleixner <tglx@linutronix.de> + * + * This file is the header for the NAND Hamming ECC implementation. + */ + +#ifndef __MTD_NAND_ECC_SW_HAMMING_H__ +#define __MTD_NAND_ECC_SW_HAMMING_H__ + +#include <linux/mtd/nand.h> + +/** + * struct nand_ecc_sw_hamming_conf - private software Hamming ECC engine structure + * @req_ctx: Save request context and tweak the original request to fit the + * engine needs + * @code_size: Number of bytes needed to store a code (one code per step) + * @nsteps: Number of steps + * @calc_buf: Buffer to use when calculating ECC bytes + * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip + * @sm_order: Smart Media special ordering + */ +struct nand_ecc_sw_hamming_conf { + struct nand_ecc_req_tweak_ctx req_ctx; + unsigned int code_size; + unsigned int nsteps; + u8 *calc_buf; + u8 *code_buf; + unsigned int sm_order; +}; + +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING) + +int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand); +void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand); +int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size, + unsigned char *code, bool sm_order); +int nand_ecc_sw_hamming_calculate(struct nand_device *nand, + const unsigned char *buf, + unsigned char *code); +int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc, + unsigned char *calc_ecc, unsigned int step_size, + bool sm_order); +int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc); + +#else /* !CONFIG_MTD_NAND_ECC_SW_HAMMING */ + +static inline int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand) +{ + return -ENOTSUPP; +} + +static inline void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand) {} + +static inline int ecc_sw_hamming_calculate(const unsigned char *buf, + unsigned int step_size, + unsigned char *code, bool sm_order) +{ + return -ENOTSUPP; +} + +static inline int nand_ecc_sw_hamming_calculate(struct nand_device *nand, + const unsigned char *buf, + unsigned char *code) +{ + return -ENOTSUPP; +} + +static inline int ecc_sw_hamming_correct(unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc, + unsigned int step_size, bool sm_order) +{ + return -ENOTSUPP; +} + +static inline int nand_ecc_sw_hamming_correct(struct nand_device *nand, + unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc) +{ + return -ENOTSUPP; +} + +#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */ + +#endif /* __MTD_NAND_ECC_SW_HAMMING_H__ */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 697ea2474a7c..414f8a4d2853 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -277,6 +277,58 @@ int nand_ecc_prepare_io_req(struct nand_device *nand, int nand_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req); bool nand_ecc_is_strong_enough(struct nand_device *nand); +struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand); +struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand); + +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING) +struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void); +#else +static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void) +{ + return NULL; +} 
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */ + +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH) +struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void); +#else +static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void) +{ + return NULL; +} +#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */ + +/** + * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests + * @orig_req: Pointer to the original IO request + * @nand: Related NAND device, to have access to its memory organization + * @page_buffer_size: Real size of the page buffer to use (can be set by the + * user before the tweaking mechanism initialization) + * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the + * user before the tweaking mechanism initialization) + * @spare_databuf: Data bounce buffer + * @spare_oobbuf: OOB bounce buffer + * @bounce_data: Flag indicating a data bounce buffer is used + * @bounce_oob: Flag indicating an OOB bounce buffer is used + */ +struct nand_ecc_req_tweak_ctx { + struct nand_page_io_req orig_req; + struct nand_device *nand; + unsigned int page_buffer_size; + unsigned int oob_buffer_size; + void *spare_databuf; + void *spare_oobbuf; + bool bounce_data; + bool bounce_oob; +}; + +int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_device *nand); +void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx); +void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_page_io_req *req); +void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_page_io_req *req); /** * struct nand_ecc - Information relative to the ECC @@ -884,6 +936,10 @@ bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos); int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos); int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); +/* ECC related functions */ +int nanddev_ecc_engine_init(struct nand_device *nand); +void nanddev_ecc_engine_cleanup(struct nand_device *nand); + /* BBT related functions */ enum nand_bbt_block_status { NAND_BBT_BLOCK_STATUS_UNKNOWN, diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h deleted file mode 100644 index d5956cc48ba9..000000000000 --- a/include/linux/mtd/nand_bch.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> - * - * This file is the header for the NAND BCH ECC implementation. 
- */ - -#ifndef __MTD_NAND_BCH_H__ -#define __MTD_NAND_BCH_H__ - -struct mtd_info; -struct nand_chip; -struct nand_bch_control; - -#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH) - -static inline int mtd_nand_has_bch(void) { return 1; } - -/* - * Calculate BCH ecc code - */ -int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat, - u_char *ecc_code); - -/* - * Detect and correct bit errors - */ -int nand_bch_correct_data(struct nand_chip *chip, u_char *dat, - u_char *read_ecc, u_char *calc_ecc); -/* - * Initialize BCH encoder/decoder - */ -struct nand_bch_control *nand_bch_init(struct mtd_info *mtd); -/* - * Release BCH encoder/decoder resources - */ -void nand_bch_free(struct nand_bch_control *nbc); - -#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */ - -static inline int mtd_nand_has_bch(void) { return 0; } - -static inline int -nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat, - u_char *ecc_code) -{ - return -1; -} - -static inline int -nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf, - unsigned char *read_ecc, unsigned char *calc_ecc) -{ - return -ENOTSUPP; -} - -static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) -{ - return NULL; -} - -static inline void nand_bch_free(struct nand_bch_control *nbc) {} - -#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */ - -#endif /* __MTD_NAND_BCH_H__ */ diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h deleted file mode 100644 index d423916b94f0..000000000000 --- a/include/linux/mtd/nand_ecc.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com> - * David Woodhouse <dwmw2@infradead.org> - * Thomas Gleixner <tglx@linutronix.de> - * - * This file is the header for the ECC algorithm. - */ - -#ifndef __MTD_NAND_ECC_H__ -#define __MTD_NAND_ECC_H__ - -struct nand_chip; - -/* - * Calculate 3 byte ECC code for eccsize byte block - */ -void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, - u_char *ecc_code, bool sm_order); - -/* - * Calculate 3 byte ECC code for 256/512 byte block - */ -int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat, - u_char *ecc_code); - -/* - * Detect and correct a 1 bit error for eccsize byte block - */ -int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, - unsigned int eccsize, bool sm_order); - -/* - * Detect and correct a 1 bit error for 256/512 byte block - */ -int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc, - u_char *calc_ecc); - -#endif /* __MTD_NAND_ECC_H__ */ diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index aac07940de09..6b3240e44310 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -302,7 +302,6 @@ static const struct nand_ecc_caps __name = { \ * @prepad: padding information for syndrome based ECC generators * @postpad: padding information for syndrome based ECC generators * @options: ECC specific options (see NAND_ECC_XXX flags defined above) - * @priv: pointer to private ECC control data * @calc_buf: buffer for calculated ECC, size is oobsize. * @code_buf: buffer for ECC read from flash, size is oobsize. * @hwctl: function to control hardware ECC generator. 
Must only @@ -355,7 +354,6 @@ struct nand_ecc_ctrl { int prepad; int postpad; unsigned int options; - void *priv; u8 *calc_buf; u8 *code_buf; void (*hwctl)(struct nand_chip *chip, int mode); @@ -1286,7 +1284,8 @@ static inline bool nand_is_slc(struct nand_chip *chip) } /** - * Check if the opcode's address should be sent only on the lower 8 bits + * nand_opcode_8bits - Check if the opcode's address should be sent only on the + * lower 8 bits * @command: opcode to check */ static inline int nand_opcode_8bits(unsigned int command) @@ -1303,6 +1302,20 @@ static inline int nand_opcode_8bits(unsigned int command) return 0; } +int rawnand_sw_hamming_init(struct nand_chip *chip); +int rawnand_sw_hamming_calculate(struct nand_chip *chip, + const unsigned char *buf, + unsigned char *code); +int rawnand_sw_hamming_correct(struct nand_chip *chip, + unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc); +void rawnand_sw_hamming_cleanup(struct nand_chip *chip); +int rawnand_sw_bch_init(struct nand_chip *chip); +int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc); +void rawnand_sw_bch_cleanup(struct nand_chip *chip); + int nand_check_erased_ecc_chunk(void *data, int datalen, void *ecc, int ecclen, void *extraoob, int extraooblen, diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h index d2c3cf29e0d1..231bd1c3f408 100644 --- a/include/linux/mtd/sharpsl.h +++ b/include/linux/mtd/sharpsl.h @@ -9,7 +9,6 @@ #define _MTD_SHARPSL_H #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> struct sharpsl_nand_platform_data { diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 60bac2c0ec45..d13958de6d8a 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -51,6 +51,8 @@ #define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ #define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ #define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ +#define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */ +#define SPINOR_OP_SRST 0x99 /* Software Reset */ /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ #define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ @@ -182,6 +184,7 @@ enum spi_nor_protocol { SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2), SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4), SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8), + SNOR_PROTO_8_8_8_DTR = SNOR_PROTO_DTR(8, 8, 8), }; static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto) @@ -228,7 +231,7 @@ struct spi_nor_hwcaps { * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly * (Slow) Read. */ -#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0) +#define SNOR_HWCAPS_READ_MASK GENMASK(15, 0) #define SNOR_HWCAPS_READ BIT(0) #define SNOR_HWCAPS_READ_FAST BIT(1) #define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) @@ -245,11 +248,12 @@ struct spi_nor_hwcaps { #define SNOR_HWCAPS_READ_4_4_4 BIT(9) #define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) -#define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11) +#define SNOR_HWCAPS_READ_OCTAL GENMASK(15, 11) #define SNOR_HWCAPS_READ_1_1_8 BIT(11) #define SNOR_HWCAPS_READ_1_8_8 BIT(12) #define SNOR_HWCAPS_READ_8_8_8 BIT(13) #define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) +#define SNOR_HWCAPS_READ_8_8_8_DTR BIT(15) /* * Page Program capabilities. @@ -260,18 +264,19 @@ struct spi_nor_hwcaps { * JEDEC/SFDP standard to define them. 
Also at this moment no SPI flash memory * implements such commands. */ -#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16) -#define SNOR_HWCAPS_PP BIT(16) +#define SNOR_HWCAPS_PP_MASK GENMASK(23, 16) +#define SNOR_HWCAPS_PP BIT(16) -#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) -#define SNOR_HWCAPS_PP_1_1_4 BIT(17) -#define SNOR_HWCAPS_PP_1_4_4 BIT(18) -#define SNOR_HWCAPS_PP_4_4_4 BIT(19) +#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) +#define SNOR_HWCAPS_PP_1_1_4 BIT(17) +#define SNOR_HWCAPS_PP_1_4_4 BIT(18) +#define SNOR_HWCAPS_PP_4_4_4 BIT(19) -#define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) -#define SNOR_HWCAPS_PP_1_1_8 BIT(20) -#define SNOR_HWCAPS_PP_1_8_8 BIT(21) -#define SNOR_HWCAPS_PP_8_8_8 BIT(22) +#define SNOR_HWCAPS_PP_OCTAL GENMASK(23, 20) +#define SNOR_HWCAPS_PP_1_1_8 BIT(20) +#define SNOR_HWCAPS_PP_1_8_8 BIT(21) +#define SNOR_HWCAPS_PP_8_8_8 BIT(22) +#define SNOR_HWCAPS_PP_8_8_8_DTR BIT(23) #define SNOR_HWCAPS_X_X_X (SNOR_HWCAPS_READ_2_2_2 | \ SNOR_HWCAPS_READ_4_4_4 | \ @@ -279,10 +284,14 @@ struct spi_nor_hwcaps { SNOR_HWCAPS_PP_4_4_4 | \ SNOR_HWCAPS_PP_8_8_8) +#define SNOR_HWCAPS_X_X_X_DTR (SNOR_HWCAPS_READ_8_8_8_DTR | \ + SNOR_HWCAPS_PP_8_8_8_DTR) + #define SNOR_HWCAPS_DTR (SNOR_HWCAPS_READ_1_1_1_DTR | \ SNOR_HWCAPS_READ_1_2_2_DTR | \ SNOR_HWCAPS_READ_1_4_4_DTR | \ - SNOR_HWCAPS_READ_1_8_8_DTR) + SNOR_HWCAPS_READ_1_8_8_DTR | \ + SNOR_HWCAPS_READ_8_8_8_DTR) #define SNOR_HWCAPS_ALL (SNOR_HWCAPS_READ_MASK | \ SNOR_HWCAPS_PP_MASK) @@ -318,6 +327,22 @@ struct spi_nor_controller_ops { int (*erase)(struct spi_nor *nor, loff_t offs); }; +/** + * enum spi_nor_cmd_ext - describes the command opcode extension in DTR mode + * @SPI_NOR_EXT_NONE: no extension. This is the default, and is used in Legacy + * SPI mode + * @SPI_NOR_EXT_REPEAT: the extension is same as the opcode + * @SPI_NOR_EXT_INVERT: the extension is the bitwise inverse of the opcode + * @SPI_NOR_EXT_HEX: the extension is any hex value. The command and opcode + * combine to form a 16-bit opcode. + */ +enum spi_nor_cmd_ext { + SPI_NOR_EXT_NONE = 0, + SPI_NOR_EXT_REPEAT, + SPI_NOR_EXT_INVERT, + SPI_NOR_EXT_HEX, +}; + /* * Forward declarations that are used internally by the core and manufacturer * drivers. @@ -345,6 +370,7 @@ struct spi_nor_flash_parameter; * @program_opcode: the program opcode * @sst_write_second: used by the SST write operation * @flags: flag options for the current SPI NOR (SNOR_F_*) + * @cmd_ext_type: the command opcode extension type for DTR mode. * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations @@ -376,6 +402,7 @@ struct spi_nor { enum spi_nor_protocol reg_proto; bool sst_write_second; u32 flags; + enum spi_nor_cmd_ext cmd_ext_type; const struct spi_nor_controller_ops *controller_ops; @@ -406,7 +433,7 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) * @name: the chip type name * @hwcaps: the hardware capabilities supported by the controller driver * - * The drivers can use this fuction to scan the SPI NOR. + * The drivers can use this function to scan the SPI NOR. * In the scanning, it will try to get all the necessary information to * fill the mtd_info{} and the spi_nor{}. 
* diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 7b78c4ba9b3e..6bb92f26833e 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -287,6 +287,15 @@ struct spinand_ecc_info { #define SPINAND_HAS_CR_FEAT_BIT BIT(1) /** + * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure + * @status: status of the last wait operation that will be used in case + * ->get_status() is not populated by the spinand device. + */ +struct spinand_ondie_ecc_conf { + u8 status; +}; + +/** * struct spinand_info - Structure used to describe SPI NAND chips * @model: model name * @devid: device ID diff --git a/include/linux/platform_data/mtd-mxc_nand.h b/include/linux/platform_data/mtd-mxc_nand.h deleted file mode 100644 index d1230030c6db..000000000000 --- a/include/linux/platform_data/mtd-mxc_nand.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. - * Copyright 2008 Sascha Hauer, kernel@pengutronix.de - */ - -#ifndef __ASM_ARCH_NAND_H -#define __ASM_ARCH_NAND_H - -#include <linux/mtd/partitions.h> - -struct mxc_nand_platform_data { - unsigned int width; /* data bus width in bytes */ - unsigned int hw_ecc:1; /* 0 if suppress hardware ECC */ - unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */ - struct mtd_partition *parts; /* partition table */ - int nr_parts; /* size of parts */ -}; -#endif /* __ASM_ARCH_NAND_H */
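
The mt35xu512aba and s28hs512t changes in the spi-nor hunks above follow one pattern for wiring a flash into the new octal DTR infrastructure: a default_init fixup installs the vendor's octal_dtr_enable routine, and a post_sfdp fixup advertises the 8D-8D-8D read settings that SFDP either omits or gets wrong. The sketch below condenses that pattern for a hypothetical part; it assumes the spi-nor "core.h" internals shown in the diff, and the my_flash_* names, the 0xee opcode and the 20 dummy cycles are placeholders rather than values from any datasheet.

/*
 * Hypothetical sketch, not part of the patches above: how a new flash entry
 * would hook into the octal DTR infrastructure. Assumes drivers/mtd/spi-nor
 * "core.h"; my_flash_*, the 0xee opcode and the 20 dummy cycles are
 * placeholders.
 */
static int my_flash_octal_dtr_enable(struct spi_nor *nor, bool enable)
{
	/*
	 * Vendor-specific volatile register writes go here, mirroring the
	 * Micron and Cypress helpers: write the configuration register that
	 * selects octal DTR, then read back the JEDEC ID to confirm the
	 * switch took effect.
	 */
	return 0;
}

static void my_flash_default_init(struct spi_nor *nor)
{
	nor->params->octal_dtr_enable = my_flash_octal_dtr_enable;
}

static void my_flash_post_sfdp_fixup(struct spi_nor *nor)
{
	/* Advertise 8D-8D-8D fast read: 0 mode clocks, 20 dummy cycles. */
	nor->params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
	spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
				  0, 20, 0xee, SNOR_PROTO_8_8_8_DTR);

	/*
	 * In DTR mode the opcode is sent together with a command extension
	 * (here a repeat of the opcode), and Read Status Register needs
	 * extra dummy cycles.
	 */
	nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
	nor->params->rdsr_dummy = 8;
	nor->params->rdsr_addr_nbytes = 0;
}

static struct spi_nor_fixups my_flash_fixups = {
	.default_init = my_flash_default_init,
	.post_sfdp = my_flash_post_sfdp_fixup,
};

An entry in the manufacturer's flash_info[] table would then reference my_flash_fixups through its .fixups member, exactly as the mt35xu512aba and s28hs512t entries above do.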