Diffstat (limited to 'include/linux/mtd')
-rw-r--r--   include/linux/mtd/cfi.h        |  16
-rw-r--r--   include/linux/mtd/cfi_endian.h |  76
-rw-r--r--   include/linux/mtd/map.h        |   3
-rw-r--r--   include/linux/mtd/mtd.h        | 334
-rw-r--r--   include/linux/mtd/nand.h       |   1
-rw-r--r--   include/linux/mtd/physmap.h    |   1
6 files changed, 296 insertions, 135 deletions
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d24925492972..d5d2ec6494bb 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -354,10 +354,10 @@ static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cf
 		onecmd = cmd;
 		break;
 	case 2:
-		onecmd = cpu_to_cfi16(cmd);
+		onecmd = cpu_to_cfi16(map, cmd);
 		break;
 	case 4:
-		onecmd = cpu_to_cfi32(cmd);
+		onecmd = cpu_to_cfi32(map, cmd);
 		break;
 	}
 
@@ -437,10 +437,10 @@ static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
 	case 1:
 		break;
 	case 2:
-		res = cfi16_to_cpu(res);
+		res = cfi16_to_cpu(map, res);
 		break;
 	case 4:
-		res = cfi32_to_cpu(res);
+		res = cfi32_to_cpu(map, res);
 		break;
 	default: BUG();
 	}
@@ -480,12 +480,12 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
 	if (map_bankwidth_is_1(map)) {
 		return val.x[0];
 	} else if (map_bankwidth_is_2(map)) {
-		return cfi16_to_cpu(val.x[0]);
+		return cfi16_to_cpu(map, val.x[0]);
 	} else {
 		/* No point in a 64-bit byteswap since that would just be
 		   swapping the responses from different chips, and we are
 		   only interested in one chip (a representative sample) */
-		return cfi32_to_cpu(val.x[0]);
+		return cfi32_to_cpu(map, val.x[0]);
 	}
 }
 
@@ -496,12 +496,12 @@ static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
 	if (map_bankwidth_is_1(map)) {
 		return val.x[0] & 0xff;
 	} else if (map_bankwidth_is_2(map)) {
-		return cfi16_to_cpu(val.x[0]);
+		return cfi16_to_cpu(map, val.x[0]);
 	} else {
 		/* No point in a 64-bit byteswap since that would just be
 		   swapping the responses from different chips, and we are
 		   only interested in one chip (a representative sample) */
-		return cfi32_to_cpu(val.x[0]);
+		return cfi32_to_cpu(map, val.x[0]);
 	}
 }
 
diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h
index 51cc3f5917a8..b97a625071f8 100644
--- a/include/linux/mtd/cfi_endian.h
+++ b/include/linux/mtd/cfi_endian.h
@@ -19,53 +19,35 @@
 #include <asm/byteorder.h>
 
-#ifndef CONFIG_MTD_CFI_ADV_OPTIONS
-
-#define CFI_HOST_ENDIAN
-
-#else
-
-#ifdef CONFIG_MTD_CFI_NOSWAP
-#define CFI_HOST_ENDIAN
-#endif
-
-#ifdef CONFIG_MTD_CFI_LE_BYTE_SWAP
-#define CFI_LITTLE_ENDIAN
-#endif
-
-#ifdef CONFIG_MTD_CFI_BE_BYTE_SWAP
-#define CFI_BIG_ENDIAN
-#endif
-
-#endif
-
-#if defined(CFI_LITTLE_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) cpu_to_le16(x)
-#define cpu_to_cfi32(x) cpu_to_le32(x)
-#define cpu_to_cfi64(x) cpu_to_le64(x)
-#define cfi16_to_cpu(x) le16_to_cpu(x)
-#define cfi32_to_cpu(x) le32_to_cpu(x)
-#define cfi64_to_cpu(x) le64_to_cpu(x)
-#elif defined (CFI_BIG_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) cpu_to_be16(x)
-#define cpu_to_cfi32(x) cpu_to_be32(x)
-#define cpu_to_cfi64(x) cpu_to_be64(x)
-#define cfi16_to_cpu(x) be16_to_cpu(x)
-#define cfi32_to_cpu(x) be32_to_cpu(x)
-#define cfi64_to_cpu(x) be64_to_cpu(x)
-#elif defined (CFI_HOST_ENDIAN)
-#define cpu_to_cfi8(x) (x)
-#define cfi8_to_cpu(x) (x)
-#define cpu_to_cfi16(x) (x)
-#define cpu_to_cfi32(x) (x)
-#define cpu_to_cfi64(x) (x)
-#define cfi16_to_cpu(x) (x)
-#define cfi32_to_cpu(x) (x)
-#define cfi64_to_cpu(x) (x)
+#define CFI_HOST_ENDIAN 1
+#define CFI_LITTLE_ENDIAN 2
+#define CFI_BIG_ENDIAN 3
+
+#if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP)
+#define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN
+#elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN
+#elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN
 #else
 #error No CFI endianness defined
 #endif
+
+#define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN)
+#define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN)
+#define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN)
+#define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN)
+
+#define cpu_to_cfi8(map, x) (x)
+#define cfi8_to_cpu(map, x) (x)
+#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x))
+#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x))
+#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x))
+#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x))
+#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x))
+#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x))
+
+#define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x))
+#define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x))
+#define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x))
+#define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x))
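A usage sketch, not part of the patch: the conversion helpers now take the map, so the byte order is picked at run time from map->swap rather than fixed at build time by Kconfig. The helper name below is hypothetical and only shows how the extra argument is consumed; with map->swap left at 0, cfi_default() falls back to CFI_DEFAULT_ENDIAN, so existing users keep the Kconfig-selected behaviour.

#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

/* Hypothetical helper: convert one 16-bit CFI query response according
 * to this mapping's byte-swapping requirement.  cfi16_to_cpu(map, x)
 * expands to be16_to_cpu(x), le16_to_cpu(x) or a no-op depending on
 * whether cfi_be(), cfi_le() or cfi_host() holds for map->swap. */
static inline u16 example_cfi16_to_cpu(struct map_info *map, map_word val)
{
	return cfi16_to_cpu(map, val.x[0]);
}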
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index a9e6ba46865e..94e924e2ecd5 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -26,7 +26,7 @@
 #include <linux/list.h>
 #include <linux/string.h>
 #include <linux/bug.h>
-
+#include <linux/kernel.h>
 #include <asm/unaligned.h>
 #include <asm/system.h>
 
@@ -214,6 +214,7 @@ struct map_info {
 	void __iomem *virt;
 	void *cached;
 
+	int swap; /* this mapping's byte-swapping requirement */
 	int bankwidth; /* in octets. This isn't necessarily the width
 		       of actual bus cycles -- it's the repeat interval
 		       in bytes, before you are talking to the first chip again.
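As a hedged sketch of how the new field would be populated (the mapping below is hypothetical, not taken from the patch): a board whose flash is wired byte-swapped can now request big-endian CFI conversion for that one mapping instead of rebuilding with CONFIG_MTD_CFI_BE_BYTE_SWAP. Leaving .swap at zero keeps the compile-time default, so existing map drivers need no change.

#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

/* Hypothetical mapping: per-mapping byte swapping for one CFI NOR chip. */
static struct map_info example_swapped_map = {
	.name      = "example-be-nor",
	.bankwidth = 2,
	.swap      = CFI_BIG_ENDIAN,	/* overrides CFI_DEFAULT_ENDIAN here only */
};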
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9f5b312af783..1a81fde8f333 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -171,87 +171,60 @@ struct mtd_info {
 	struct mtd_erase_region_info *eraseregions;
 
 	/*
-	 * Erase is an asynchronous operation.  Device drivers are supposed
-	 * to call instr->callback() whenever the operation completes, even
-	 * if it completes with a failure.
-	 * Callers are supposed to pass a callback function and wait for it
-	 * to be called before writing to the block.
+	 * Do not call via these pointers, use corresponding mtd_*()
+	 * wrappers instead.
 	 */
 	int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
-
-	/* This stuff for eXecute-In-Place */
-	/* phys is optional and may be set to NULL */
 	int (*point) (struct mtd_info *mtd, loff_t from, size_t len,
-			size_t *retlen, void **virt, resource_size_t *phys);
-
-	/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+		      size_t *retlen, void **virt, resource_size_t *phys);
 	void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
-
-	/* Allow NOMMU mmap() to directly map the device (if not NULL)
-	 * - return the address to which the offset maps
-	 * - return -ENOSYS to indicate refusal to do the mapping
-	 */
 	unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
 					    unsigned long len,
 					    unsigned long offset,
 					    unsigned long flags);
-
-	/* Backing device capabilities for this device
-	 * - provides mmap capabilities
-	 */
-	struct backing_dev_info *backing_dev_info;
-
-
-	int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-	int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
-	/* In blackbox flight recorder like scenarios we want to make successful
-	   writes in interrupt context. panic_write() is only intended to be
-	   called when its known the kernel is about to panic and we need the
-	   write to succeed. Since the kernel is not going to be running for much
-	   longer, this function can break locks and delay to ensure the write
-	   succeeds (but not sleep). */
-
-	int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
+	int (*read) (struct mtd_info *mtd, loff_t from, size_t len,
+		     size_t *retlen, u_char *buf);
+	int (*write) (struct mtd_info *mtd, loff_t to, size_t len,
+		      size_t *retlen, const u_char *buf);
+	int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, const u_char *buf);
 	int (*read_oob) (struct mtd_info *mtd, loff_t from,
 			 struct mtd_oob_ops *ops);
 	int (*write_oob) (struct mtd_info *mtd, loff_t to,
-			 struct mtd_oob_ops *ops);
-
-	/*
-	 * Methods to access the protection register area, present in some
-	 * flash devices. The user data is one time programmable but the
-	 * factory data is read only.
-	 */
-	int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
-	int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-	int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
-	int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-	int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
-	int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);
-
-	/* kvec-based read/write methods.
-	   NB: The 'count' parameter is the number of _vectors_, each of
-	   which contains an (ofs, len) tuple.
-	*/
-	int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);
-
-	/* Sync */
+			  struct mtd_oob_ops *ops);
+	int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+				   size_t len);
+	int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+				   size_t len, size_t *retlen, u_char *buf);
+	int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
+				   size_t len);
+	int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+				   size_t len, size_t *retlen, u_char *buf);
+	int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len,
+				    size_t *retlen, u_char *buf);
+	int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+				   size_t len);
+	int (*writev) (struct mtd_info *mtd, const struct kvec *vecs,
+		       unsigned long count, loff_t to, size_t *retlen);
 	void (*sync) (struct mtd_info *mtd);
-
-	/* Chip-supported device locking */
 	int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
 	int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
 	int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
-
-	/* Power Management functions */
+	int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
+	int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
 	int (*suspend) (struct mtd_info *mtd);
 	void (*resume) (struct mtd_info *mtd);
+	/*
+	 * If the driver is something smart, like UBI, it may need to maintain
+	 * its own reference counting. The below functions are only for driver.
+	 */
+	int (*get_device) (struct mtd_info *mtd);
+	void (*put_device) (struct mtd_info *mtd);
 
-	/* Bad block management functions */
-	int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
-	int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
+	/* Backing device capabilities for this device
+	 * - provides mmap capabilities
+	 */
+	struct backing_dev_info *backing_dev_info;
 
 	struct notifier_block reboot_notifier;  /* default mode before reboot */
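A minimal caller-side sketch of the migration the new comment asks for (the surrounding function is hypothetical): instead of dereferencing the struct mtd_info hooks directly, users call the mtd_*() wrappers that the following hunk introduces.

#include <linux/mtd/mtd.h>

static int example_read_block(struct mtd_info *mtd, loff_t from,
			      size_t len, u_char *buf)
{
	size_t retlen;

	/* Old style, now discouraged: call the driver hook directly. */
	/* return mtd->read(mtd, from, len, &retlen, buf); */

	/* New style: go through the wrapper. */
	return mtd_read(mtd, from, len, &retlen, buf);
}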
@@ -265,18 +238,218 @@ struct mtd_info {
 	struct module *owner;
 	struct device dev;
 	int usecount;
-
-	/* If the driver is something smart, like UBI, it may need to maintain
-	 * its own reference counting. The below functions are only for driver.
-	 * The driver may register its callbacks. These callbacks are not
-	 * supposed to be called by MTD users */
-	int (*get_device) (struct mtd_info *mtd);
-	void (*put_device) (struct mtd_info *mtd);
 };
 
-static inline struct mtd_info *dev_to_mtd(struct device *dev)
+/*
+ * Erase is an asynchronous operation.  Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	return mtd->erase(mtd, instr);
+}
+
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
+			    size_t *retlen, void **virt, resource_size_t *phys)
+{
+	*retlen = 0;
+	if (!mtd->point)
+		return -EOPNOTSUPP;
+	return mtd->point(mtd, from, len, retlen, virt, phys);
+}
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 {
-	return dev ? dev_get_drvdata(dev) : NULL;
+	return mtd->unpoint(mtd, from, len);
+}
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
+						  unsigned long len,
+						  unsigned long offset,
+						  unsigned long flags)
+{
+	if (!mtd->get_unmapped_area)
+		return -EOPNOTSUPP;
+	return mtd->get_unmapped_area(mtd, len, offset, flags);
+}
+
+static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+			   size_t *retlen, u_char *buf)
+{
+	return mtd->read(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, const u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->write)
+		return -EROFS;
+	return mtd->write(mtd, to, len, retlen, buf);
+}
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when its
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+				  size_t *retlen, const u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->panic_write)
+		return -EOPNOTSUPP;
+	return mtd->panic_write(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
+			       struct mtd_oob_ops *ops)
+{
+	ops->retlen = ops->oobretlen = 0;
+	if (!mtd->read_oob)
+		return -EOPNOTSUPP;
+	return mtd->read_oob(mtd, from, ops);
+}
+
+static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+				struct mtd_oob_ops *ops)
+{
+	ops->retlen = ops->oobretlen = 0;
+	if (!mtd->write_oob)
+		return -EOPNOTSUPP;
+	return mtd->write_oob(mtd, to, ops);
+}
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one time programmable but the factory data is read
+ * only.
+ */
+static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
+					 struct otp_info *buf, size_t len)
+{
+	if (!mtd->get_fact_prot_info)
+		return -EOPNOTSUPP;
+	return mtd->get_fact_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+					 size_t len, size_t *retlen,
+					 u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->read_fact_prot_reg)
+		return -EOPNOTSUPP;
+	return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
+					 struct otp_info *buf,
+					 size_t len)
+{
+	if (!mtd->get_user_prot_info)
+		return -EOPNOTSUPP;
+	return mtd->get_user_prot_info(mtd, buf, len);
+}
+
+static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+					 size_t len, size_t *retlen,
+					 u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->read_user_prot_reg)
+		return -EOPNOTSUPP;
+	return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+
+static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
+					  size_t len, size_t *retlen,
+					  u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->write_user_prot_reg)
+		return -EOPNOTSUPP;
+	return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
+}
+
+static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+					 size_t len)
+{
+	if (!mtd->lock_user_prot_reg)
+		return -EOPNOTSUPP;
+	return mtd->lock_user_prot_reg(mtd, from, len);
+}
+
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+	       unsigned long count, loff_t to, size_t *retlen);
+
+static inline void mtd_sync(struct mtd_info *mtd)
+{
+	if (mtd->sync)
+		mtd->sync(mtd);
+}
+
+/* Chip-supported device locking */
+static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->lock)
+		return -EOPNOTSUPP;
+	return mtd->lock(mtd, ofs, len);
+}
+
+static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->unlock)
+		return -EOPNOTSUPP;
+	return mtd->unlock(mtd, ofs, len);
+}
+
+static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->is_locked)
+		return -EOPNOTSUPP;
+	return mtd->is_locked(mtd, ofs, len);
+}
+
+static inline int mtd_suspend(struct mtd_info *mtd)
+{
+	if (!mtd->suspend)
+		return -EOPNOTSUPP;
+	return mtd->suspend(mtd);
+}
+
+static inline void mtd_resume(struct mtd_info *mtd)
+{
+	if (mtd->resume)
+		mtd->resume(mtd);
+}
+
+static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	if (!mtd->block_isbad)
+		return -EOPNOTSUPP;
+	return mtd->block_isbad(mtd, ofs);
+}
+
+static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	if (!mtd->block_markbad)
+		return -EOPNOTSUPP;
+	return mtd->block_markbad(mtd, ofs);
 }
 
 static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
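A short sketch of the calling convention these wrappers establish (the function below is hypothetical): optional hooks are checked for NULL inside the wrapper, which returns -EOPNOTSUPP (or -EROFS for plain writes) instead of crashing, so callers can probe for a capability and fall back gracefully.

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

/* Hypothetical caller: try to write-protect the first erase block and
 * tolerate drivers that provide no ->lock hook, in which case mtd_lock()
 * reports -EOPNOTSUPP rather than oopsing on a NULL pointer. */
static int example_protect_first_block(struct mtd_info *mtd)
{
	int ret = mtd_lock(mtd, 0, mtd->erasesize);

	if (ret == -EOPNOTSUPP)
		return 0;	/* locking not supported, nothing to do */
	return ret;
}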
@@ -309,6 +482,16 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
 	return do_div(sz, mtd->writesize);
 }
 
+static inline int mtd_has_oob(const struct mtd_info *mtd)
+{
+	return mtd->read_oob && mtd->write_oob;
+}
+
+static inline int mtd_can_have_bb(const struct mtd_info *mtd)
+{
+	return !!mtd->block_isbad;
+}
+
 /* Kernel-side ioctl definitions */
 
 struct mtd_partition;
@@ -338,13 +521,6 @@ struct mtd_notifier {
 
 extern void register_mtd_user (struct mtd_notifier *new);
 extern int unregister_mtd_user (struct mtd_notifier *old);
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
-		       unsigned long count, loff_t to, size_t *retlen);
-
-int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
-		      unsigned long count, loff_t from, size_t *retlen);
-
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
 
 void mtd_erase_callback(struct erase_info *instr);
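A brief sketch of how the two capability helpers added above might be used together (the function is hypothetical): NAND-like devices expose OOB access and can develop bad blocks, while NOR-like devices report neither, letting callers skip bad-block handling entirely.

#include <linux/types.h>
#include <linux/mtd/mtd.h>

/* Hypothetical capability check built on mtd_has_oob()/mtd_can_have_bb(). */
static bool example_is_nand_like(struct mtd_info *mtd)
{
	return mtd_has_oob(mtd) && mtd_can_have_bb(mtd);
}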
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 904131bab501..63b5a8b6dfbd 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -555,6 +555,7 @@ struct nand_chip {
 #define NAND_MFR_HYNIX		0xad
 #define NAND_MFR_MICRON		0x2c
 #define NAND_MFR_AMD		0x01
+#define NAND_MFR_MACRONIX	0xc2
 
 /**
  * struct nand_flash_dev - NAND Flash Device ID Structure
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index 04e018160e2b..d2887e76b7f6 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -30,6 +30,7 @@ struct physmap_flash_data {
 	unsigned int		pfow_base;
 	char			*probe_type;
 	struct mtd_partition	*parts;
+	const char		**part_probe_types;
 };
 
 #endif /* __LINUX_MTD_PHYSMAP__ */
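Finally, a hedged sketch of how board code might use the new physmap field; the parser names and platform data below are hypothetical, and the field is presumably a NULL-terminated list of partition-parser names for the physmap driver to try before falling back to the static .parts table.

#include <linux/mtd/physmap.h>

/* Hypothetical board support: ask physmap to try the command-line and
 * RedBoot partition parsers for this flash. */
static const char *example_part_probes[] = { "cmdlinepart", "RedBoot", NULL };

static struct physmap_flash_data example_flash_data = {
	.width            = 2,
	.part_probe_types = example_part_probes,
};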