Diffstat (limited to 'include/linux'): 54 files changed, 1088 insertions, 333 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index fba8051fb297..6fce2fc2d124 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -131,22 +131,6 @@ extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity); */ void acpi_unregister_gsi (u32 gsi); -struct acpi_prt_entry { - struct list_head node; - struct acpi_pci_id id; - u8 pin; - struct { - acpi_handle handle; - u32 index; - } link; - u32 irq; -}; - -struct acpi_prt_list { - int count; - struct list_head entries; -}; - struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); @@ -270,6 +254,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n, #ifdef CONFIG_PM_SLEEP void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); +void __init acpi_s4_no_nvs(void); #endif /* CONFIG_PM_SLEEP */ #else /* CONFIG_ACPI */ diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 0f50d4cc4360..45f6297821bd 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -59,9 +59,7 @@ enum async_tx_flags { }; #ifdef CONFIG_DMA_ENGINE -void async_tx_issue_pending_all(void); -enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); -void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx); +#define async_tx_issue_pending_all dma_issue_pending_all #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL #include <asm/async_tx.h> #else @@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void) do { } while (0); } -static inline enum dma_status -dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) -{ - return DMA_SUCCESS; -} - -static inline void -async_tx_run_dependencies(struct dma_async_tx_descriptor *tx, - struct dma_chan *host_chan) -{ - do { } while (0); -} - static inline struct dma_chan * async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, enum dma_transaction_type tx_type, struct page **dst, int dst_count, diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 2a2213eefd85..2f1f95737acb 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -3,7 +3,7 @@ #define ATMEL_MCI_MAX_NR_SLOTS 2 -struct dma_slave; +#include <linux/dw_dmac.h> /** * struct mci_slot_pdata - board-specific per-slot configuration @@ -28,11 +28,11 @@ struct mci_slot_pdata { /** * struct mci_platform_data - board-specific MMC/SDcard configuration - * @dma_slave: DMA slave interface to use in data transfers, or NULL. + * @dma_slave: DMA slave interface to use in data transfers. * @slot: Per-slot configuration data. */ struct mci_platform_data { - struct dma_slave *dma_slave; + struct dw_dma_slave dma_slave; struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; }; diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index d7afa9dd6635..f3b5d4e3a2ac 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -23,16 +23,16 @@ #define AT_PLATFORM 15 /* string identifying CPU for optimizations */ #define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ #define AT_CLKTCK 17 /* frequency at which times() increments */ - +/* AT_* values 18 through 22 are reserved */ #define AT_SECURE 23 /* secure mode boolean */ - #define AT_BASE_PLATFORM 24 /* string identifying real platform, may * differ from AT_PLATFORM. 
*/ +#define AT_RANDOM 25 /* address of 16 random bytes */ #define AT_EXECFN 31 /* filename of program */ #ifdef __KERNEL__ -#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */ +#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ #endif diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 1ee9488ca2e4..79ca2da81c87 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -31,6 +31,10 @@ struct backlight_device; struct fb_info; struct backlight_ops { + unsigned int options; + +#define BL_CORE_SUSPENDRESUME (1 << 0) + /* Notify the backlight driver some property has changed */ int (*update_status)(struct backlight_device *); /* Return the current backlight brightness (accounting for power, @@ -51,7 +55,19 @@ struct backlight_properties { modes; 4: full off), see FB_BLANK_XXX */ int power; /* FB Blanking active? (values as for power) */ + /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ int fb_blank; + /* Flags used to signal drivers of state changes */ + /* Upper 4 bits are reserved for driver internal use */ + unsigned int state; + +#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ +#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ +#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */ +#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */ +#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */ +#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */ + }; struct backlight_device { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7035cec583b6..044467ef7b11 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -690,6 +690,8 @@ struct rq_map_data { struct page **pages; int page_order; int nr_entries; + unsigned long offset; + int null_mapped; }; struct req_iterator { diff --git a/include/linux/can/core.h b/include/linux/can/core.h index f50785ad4781..25085cbadcfc 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h @@ -19,7 +19,7 @@ #include <linux/skbuff.h> #include <linux/netdevice.h> -#define CAN_VERSION "20081130" +#define CAN_VERSION "20090105" /* increment this number each time you change some user-space interface */ #define CAN_ABI_VERSION "8" diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 08b78c09b09a..e267e62827bb 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -52,9 +52,9 @@ struct cgroup_subsys_state { * hierarchy structure */ struct cgroup *cgroup; - /* State maintained by the cgroup system to allow - * subsystems to be "busy". Should be accessed via css_get() - * and css_put() */ + /* State maintained by the cgroup system to allow subsystems + * to be "busy". Should be accessed via css_get(), + * css_tryget() and and css_put(). 
*/ atomic_t refcnt; @@ -64,11 +64,14 @@ struct cgroup_subsys_state { /* bits in struct cgroup_subsys_state flags field */ enum { CSS_ROOT, /* This CSS is the root of the subsystem */ + CSS_REMOVED, /* This CSS is dead */ }; /* - * Call css_get() to hold a reference on the cgroup; - * + * Call css_get() to hold a reference on the css; it can be used + * for a reference obtained via: + * - an existing ref-counted reference to the css + * - task->cgroups for a locked task */ static inline void css_get(struct cgroup_subsys_state *css) @@ -77,9 +80,32 @@ static inline void css_get(struct cgroup_subsys_state *css) if (!test_bit(CSS_ROOT, &css->flags)) atomic_inc(&css->refcnt); } + +static inline bool css_is_removed(struct cgroup_subsys_state *css) +{ + return test_bit(CSS_REMOVED, &css->flags); +} + +/* + * Call css_tryget() to take a reference on a css if your existing + * (known-valid) reference isn't already ref-counted. Returns false if + * the css has been destroyed. + */ + +static inline bool css_tryget(struct cgroup_subsys_state *css) +{ + if (test_bit(CSS_ROOT, &css->flags)) + return true; + while (!atomic_inc_not_zero(&css->refcnt)) { + if (test_bit(CSS_REMOVED, &css->flags)) + return false; + } + return true; +} + /* * css_put() should be called to release a reference taken by - * css_get() + * css_get() or css_tryget() */ extern void __css_put(struct cgroup_subsys_state *css); @@ -116,7 +142,7 @@ struct cgroup { struct list_head children; /* my children */ struct cgroup *parent; /* my parent */ - struct dentry *dentry; /* cgroup fs entry */ + struct dentry *dentry; /* cgroup fs entry, RCU protected */ /* Private pointers for each registered subsystem */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; @@ -145,6 +171,9 @@ struct cgroup { int pids_use_count; /* Length of the current tasks_pids array */ int pids_length; + + /* For RCU-protected deletion */ + struct rcu_head rcu_head; }; /* A css_set is a structure holding pointers to a set of @@ -337,9 +366,23 @@ struct cgroup_subsys { #define MAX_CGROUP_TYPE_NAMELEN 32 const char *name; - /* Protected by RCU */ - struct cgroupfs_root *root; + /* + * Protects sibling/children links of cgroups in this + * hierarchy, plus protects which hierarchy (or none) the + * subsystem is a part of (i.e. root/sibling). To avoid + * potential deadlocks, the following operations should not be + * undertaken while holding any hierarchy_mutex: + * + * - allocating memory + * - initiating hotplug events + */ + struct mutex hierarchy_mutex; + /* + * Link to parent, and list entry in parent's children. + * Protected by this->hierarchy_mutex and cgroup_lock() + */ + struct cgroupfs_root *root; struct list_head sibling; }; diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 51ea2bdea0f9..90c6074a36ca 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -20,8 +20,9 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? 
*/ extern int cpuset_init_early(void); extern int cpuset_init(void); extern void cpuset_init_smp(void); -extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask); -extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask); +extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); +extern void cpuset_cpus_allowed_locked(struct task_struct *p, + struct cpumask *mask); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); @@ -86,12 +87,13 @@ static inline int cpuset_init_early(void) { return 0; } static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} -static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask) +static inline void cpuset_cpus_allowed(struct task_struct *p, + struct cpumask *mask) { *mask = cpu_possible_map; } static inline void cpuset_cpus_allowed_locked(struct task_struct *p, - cpumask_t *mask) + struct cpumask *mask) { *mask = cpu_possible_map; } diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index adb0b084eb5a..64dea2ab326c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -29,32 +29,6 @@ #include <linux/dma-mapping.h> /** - * enum dma_state - resource PNP/power management state - * @DMA_RESOURCE_SUSPEND: DMA device going into low power state - * @DMA_RESOURCE_RESUME: DMA device returning to full power - * @DMA_RESOURCE_AVAILABLE: DMA device available to the system - * @DMA_RESOURCE_REMOVED: DMA device removed from the system - */ -enum dma_state { - DMA_RESOURCE_SUSPEND, - DMA_RESOURCE_RESUME, - DMA_RESOURCE_AVAILABLE, - DMA_RESOURCE_REMOVED, -}; - -/** - * enum dma_state_client - state of the channel in the client - * @DMA_ACK: client would like to use, or was using this channel - * @DMA_DUP: client has already seen this channel, or is not using this channel - * @DMA_NAK: client does not want to see any more channels - */ -enum dma_state_client { - DMA_ACK, - DMA_DUP, - DMA_NAK, -}; - -/** * typedef dma_cookie_t - an opaque DMA cookie * * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code @@ -89,23 +63,13 @@ enum dma_transaction_type { DMA_MEMSET, DMA_MEMCPY_CRC32C, DMA_INTERRUPT, + DMA_PRIVATE, DMA_SLAVE, }; /* last transaction type for creation of the capabilities mask */ #define DMA_TX_TYPE_END (DMA_SLAVE + 1) -/** - * enum dma_slave_width - DMA slave register access width. - * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses - * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses - * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses - */ -enum dma_slave_width { - DMA_SLAVE_WIDTH_8BIT, - DMA_SLAVE_WIDTH_16BIT, - DMA_SLAVE_WIDTH_32BIT, -}; /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, @@ -132,32 +96,6 @@ enum dma_ctrl_flags { typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; /** - * struct dma_slave - Information about a DMA slave - * @dev: device acting as DMA slave - * @dma_dev: required DMA master device. If non-NULL, the client can not be - * bound to other masters than this. 
- * @tx_reg: physical address of data register used for - * memory-to-peripheral transfers - * @rx_reg: physical address of data register used for - * peripheral-to-memory transfers - * @reg_width: peripheral register width - * - * If dma_dev is non-NULL, the client can not be bound to other DMA - * masters than the one corresponding to this device. The DMA master - * driver may use this to determine if there is controller-specific - * data wrapped around this struct. Drivers of platform code that sets - * the dma_dev field must therefore make sure to use an appropriate - * controller-specific dma slave structure wrapping this struct. - */ -struct dma_slave { - struct device *dev; - struct device *dma_dev; - dma_addr_t tx_reg; - dma_addr_t rx_reg; - enum dma_slave_width reg_width; -}; - -/** * struct dma_chan_percpu - the per-CPU part of struct dma_chan * @refcount: local_t used for open-coded "bigref" counting * @memcpy_count: transaction counter @@ -165,7 +103,6 @@ struct dma_slave { */ struct dma_chan_percpu { - local_t refcount; /* stats */ unsigned long memcpy_count; unsigned long bytes_transferred; @@ -176,13 +113,14 @@ struct dma_chan_percpu { * @device: ptr to the dma device who supplies this channel, always !%NULL * @cookie: last cookie value returned to client * @chan_id: channel ID for sysfs - * @class_dev: class device for sysfs + * @dev: class device for sysfs * @refcount: kref, used in "bigref" slow-mode * @slow_ref: indicates that the DMA channel is free * @rcu: the DMA channel's RCU head * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client-count: how many clients are using this channel + * @table_count: number of appearances in the mem-to-mem allocation table */ struct dma_chan { struct dma_device *device; @@ -190,73 +128,47 @@ struct dma_chan { /* sysfs */ int chan_id; - struct device dev; - - struct kref refcount; - int slow_ref; - struct rcu_head rcu; + struct dma_chan_dev *dev; struct list_head device_node; struct dma_chan_percpu *local; int client_count; + int table_count; }; -#define to_dma_chan(p) container_of(p, struct dma_chan, dev) - -void dma_chan_cleanup(struct kref *kref); - -static inline void dma_chan_get(struct dma_chan *chan) -{ - if (unlikely(chan->slow_ref)) - kref_get(&chan->refcount); - else { - local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); - put_cpu(); - } -} +/** + * struct dma_chan_dev - relate sysfs device node to backing channel device + * @chan - driver channel device + * @device - sysfs device + * @dev_id - parent dma_device dev_id + * @idr_ref - reference count to gate release of dma_device dev_id + */ +struct dma_chan_dev { + struct dma_chan *chan; + struct device device; + int dev_id; + atomic_t *idr_ref; +}; -static inline void dma_chan_put(struct dma_chan *chan) +static inline const char *dma_chan_name(struct dma_chan *chan) { - if (unlikely(chan->slow_ref)) - kref_put(&chan->refcount, dma_chan_cleanup); - else { - local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount)); - put_cpu(); - } + return dev_name(&chan->dev->device); } -/* - * typedef dma_event_callback - function pointer to a DMA event callback - * For each channel added to the system this routine is called for each client. - * If the client would like to use the channel it returns '1' to signal (ack) - * the dmaengine core to take out a reference on the channel and its - * corresponding device. A client must not 'ack' an available channel more - * than once. 
When a channel is removed all clients are notified. If a client - * is using the channel it must 'ack' the removal. A client must not 'ack' a - * removed channel more than once. - * @client - 'this' pointer for the client context - * @chan - channel to be acted upon - * @state - available or removed - */ -struct dma_client; -typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, - struct dma_chan *chan, enum dma_state state); +void dma_chan_cleanup(struct kref *kref); /** - * struct dma_client - info on the entity making use of DMA services - * @event_callback: func ptr to call when something happens - * @cap_mask: only return channels that satisfy the requested capabilities - * a value of zero corresponds to any capability - * @slave: data for preparing slave transfer. Must be non-NULL iff the - * DMA_SLAVE capability is requested. - * @global_node: list_head for global dma_client_list + * typedef dma_filter_fn - callback filter for dma_request_channel + * @chan: channel to be reviewed + * @filter_param: opaque parameter passed through dma_request_channel + * + * When this optional parameter is specified in a call to dma_request_channel a + * suitable channel is passed to this routine for further dispositioning before + * being returned. Where 'suitable' indicates a non-busy channel that + * satisfies the given capability mask. It returns 'true' to indicate that the + * channel is suitable. */ -struct dma_client { - dma_event_callback event_callback; - dma_cap_mask_t cap_mask; - struct dma_slave *slave; - struct list_head global_node; -}; +typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); typedef void (*dma_async_tx_callback)(void *dma_async_param); /** @@ -323,14 +235,10 @@ struct dma_device { dma_cap_mask_t cap_mask; int max_xor; - struct kref refcount; - struct completion done; - int dev_id; struct device *dev; - int (*device_alloc_chan_resources)(struct dma_chan *chan, - struct dma_client *client); + int (*device_alloc_chan_resources)(struct dma_chan *chan); void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( @@ -362,9 +270,8 @@ struct dma_device { /* --- public DMA engine API --- */ -void dma_async_client_register(struct dma_client *client); -void dma_async_client_unregister(struct dma_client *client); -void dma_async_client_chan_request(struct dma_client *client); +void dmaengine_get(void); +void dmaengine_put(void); dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, @@ -406,6 +313,12 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) set_bit(tx_type, dstp->bits); } +#define dma_cap_zero(mask) __dma_cap_zero(&(mask)) +static inline void __dma_cap_zero(dma_cap_mask_t *dstp) +{ + bitmap_zero(dstp->bits, DMA_TX_TYPE_END); +} + #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) static inline int __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) @@ -475,11 +388,25 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, } enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); +#ifdef CONFIG_DMA_ENGINE +enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); +#else +static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) +{ + return DMA_SUCCESS; +} +#endif /* --- DMA device --- */ int dma_async_device_register(struct 
dma_device *device); void dma_async_device_unregister(struct dma_device *device); +void dma_run_dependencies(struct dma_async_tx_descriptor *tx); +struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); +void dma_issue_pending_all(void); +#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) +struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); +void dma_release_channel(struct dma_chan *chan); /* --- Helper iov-locking functions --- */ diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 04d217b442bf..d797dde247f7 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -22,14 +22,34 @@ struct dw_dma_platform_data { }; /** + * enum dw_dma_slave_width - DMA slave register access width. + * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses + * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses + * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses + */ +enum dw_dma_slave_width { + DW_DMA_SLAVE_WIDTH_8BIT, + DW_DMA_SLAVE_WIDTH_16BIT, + DW_DMA_SLAVE_WIDTH_32BIT, +}; + +/** * struct dw_dma_slave - Controller-specific information about a slave - * @slave: Generic information about the slave - * @ctl_lo: Platform-specific initializer for the CTL_LO register + * + * @dma_dev: required DMA master device + * @tx_reg: physical address of data register used for + * memory-to-peripheral transfers + * @rx_reg: physical address of data register used for + * peripheral-to-memory transfers + * @reg_width: peripheral register width * @cfg_hi: Platform-specific initializer for the CFG_HI register * @cfg_lo: Platform-specific initializer for the CFG_LO register */ struct dw_dma_slave { - struct dma_slave slave; + struct device *dma_dev; + dma_addr_t tx_reg; + dma_addr_t rx_reg; + enum dw_dma_slave_width reg_width; u32 cfg_hi; u32 cfg_lo; }; @@ -54,9 +74,4 @@ struct dw_dma_slave { #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ -static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave) -{ - return container_of(slave, struct dw_dma_slave, slave); -} - #endif /* DW_DMAC_H */ diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h index 78c775a83f7c..121720d74e15 100644 --- a/include/linux/ext2_fs.h +++ b/include/linux/ext2_fs.h @@ -194,6 +194,30 @@ struct ext2_group_desc #define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ #define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ +/* Flags that should be inherited by new inodes from their parent. */ +#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\ + EXT2_SYNC_FL | EXT2_IMMUTABLE_FL | EXT2_APPEND_FL |\ + EXT2_NODUMP_FL | EXT2_NOATIME_FL | EXT2_COMPRBLK_FL|\ + EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\ + EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL) + +/* Flags that are appropriate for regular files (all but dir-specific ones). */ +#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL)) + +/* Flags that are appropriate for non-directories/regular files. */ +#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL) + +/* Mask out flags that are inappropriate for the given type of inode. 
*/ +static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags) +{ + if (S_ISDIR(mode)) + return flags; + else if (S_ISREG(mode)) + return flags & EXT2_REG_FLMASK; + else + return flags & EXT2_OTHER_FLMASK; +} + /* * ioctl commands */ diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h index dc541f3653d1..1cdb66367c98 100644 --- a/include/linux/ext2_fs_sb.h +++ b/include/linux/ext2_fs_sb.h @@ -101,7 +101,7 @@ struct ext2_sb_info { struct percpu_counter s_freeblocks_counter; struct percpu_counter s_freeinodes_counter; struct percpu_counter s_dirs_counter; - struct blockgroup_lock s_blockgroup_lock; + struct blockgroup_lock *s_blockgroup_lock; /* root of the per fs reservation window tree */ spinlock_t s_rsv_window_lock; struct rb_root s_rsv_window_root; @@ -111,7 +111,7 @@ struct ext2_sb_info { static inline spinlock_t * sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group) { - return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); + return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group); } #endif /* _LINUX_EXT2_FS_SB */ diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index d14f02918483..dd495b8c3091 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -178,6 +178,30 @@ struct ext3_group_desc #define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ +/* Flags that should be inherited by new inodes from their parent. */ +#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\ + EXT3_SYNC_FL | EXT3_IMMUTABLE_FL | EXT3_APPEND_FL |\ + EXT3_NODUMP_FL | EXT3_NOATIME_FL | EXT3_COMPRBLK_FL|\ + EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\ + EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL) + +/* Flags that are appropriate for regular files (all but dir-specific ones). */ +#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL)) + +/* Flags that are appropriate for non-directories/regular files. */ +#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL) + +/* Mask out flags that are inappropriate for the given type of inode. */ +static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags) +{ + if (S_ISDIR(mode)) + return flags; + else if (S_ISREG(mode)) + return flags & EXT3_REG_FLMASK; + else + return flags & EXT3_OTHER_FLMASK; +} + /* * Inode dynamic state flags */ @@ -354,6 +378,13 @@ struct ext3_inode { #define EXT3_ORPHAN_FS 0x0004 /* Orphans being recovered */ /* + * Misc. 
filesystem flags + */ +#define EXT2_FLAGS_SIGNED_HASH 0x0001 /* Signed dirhash in use */ +#define EXT2_FLAGS_UNSIGNED_HASH 0x0002 /* Unsigned dirhash in use */ +#define EXT2_FLAGS_TEST_FILESYS 0x0004 /* to test development code */ + +/* * Mount flags */ #define EXT3_MOUNT_CHECK 0x00001 /* Do mount-time checks */ @@ -489,7 +520,23 @@ struct ext3_super_block { __u16 s_reserved_word_pad; __le32 s_default_mount_opts; __le32 s_first_meta_bg; /* First metablock block group */ - __u32 s_reserved[190]; /* Padding to the end of the block */ + __le32 s_mkfs_time; /* When the filesystem was created */ + __le32 s_jnl_blocks[17]; /* Backup of the journal inode */ + /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */ +/*150*/ __le32 s_blocks_count_hi; /* Blocks count */ + __le32 s_r_blocks_count_hi; /* Reserved blocks count */ + __le32 s_free_blocks_count_hi; /* Free blocks count */ + __le16 s_min_extra_isize; /* All inodes have at least # bytes */ + __le16 s_want_extra_isize; /* New inodes should reserve # bytes */ + __le32 s_flags; /* Miscellaneous flags */ + __le16 s_raid_stride; /* RAID stride */ + __le16 s_mmp_interval; /* # seconds to wait in MMP checking */ + __le64 s_mmp_block; /* Block for multi-mount protection */ + __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ + __u8 s_log_groups_per_flex; /* FLEX_BG group size */ + __u8 s_reserved_char_pad2; + __le16 s_reserved_pad; + __u32 s_reserved[162]; /* Padding to the end of the block */ }; #ifdef __KERNEL__ @@ -694,6 +741,9 @@ static inline __le16 ext3_rec_len_to_disk(unsigned len) #define DX_HASH_LEGACY 0 #define DX_HASH_HALF_MD4 1 #define DX_HASH_TEA 2 +#define DX_HASH_LEGACY_UNSIGNED 3 +#define DX_HASH_HALF_MD4_UNSIGNED 4 +#define DX_HASH_TEA_UNSIGNED 5 #ifdef __KERNEL__ diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h index e024e38248ff..f07f34de2f0e 100644 --- a/include/linux/ext3_fs_sb.h +++ b/include/linux/ext3_fs_sb.h @@ -57,10 +57,11 @@ struct ext3_sb_info { u32 s_next_generation; u32 s_hash_seed[4]; int s_def_hash_version; + int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */ struct percpu_counter s_freeblocks_counter; struct percpu_counter s_freeinodes_counter; struct percpu_counter s_dirs_counter; - struct blockgroup_lock s_blockgroup_lock; + struct blockgroup_lock *s_blockgroup_lock; /* root of the per fs reservation window tree */ spinlock_t s_rsv_window_lock; @@ -86,7 +87,7 @@ struct ext3_sb_info { static inline spinlock_t * sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group) { - return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); + return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group); } #endif /* _LINUX_EXT3_FS_SB */ diff --git a/include/linux/fs.h b/include/linux/fs.h index e38a64d71eff..0b87b29f4797 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -565,6 +565,7 @@ struct address_space { struct block_device { dev_t bd_dev; /* not a kdev_t - it's a search key */ struct inode * bd_inode; /* will die */ + struct super_block * bd_super; int bd_openers; struct mutex bd_mutex; /* open/close mutex */ struct semaphore bd_mount_sem; @@ -1389,6 +1390,7 @@ struct super_operations { ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); #endif + int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); }; /* diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index a5cb0c3f6dcf..f8ff918c208f 100644 --- a/include/linux/if_vlan.h +++ 
b/include/linux/if_vlan.h @@ -115,6 +115,11 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, u16 vlan_tci, int polling); extern int vlan_hwaccel_do_receive(struct sk_buff *skb); +extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, + unsigned int vlan_tci, struct sk_buff *skb); +extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, + unsigned int vlan_tci, + struct napi_gro_fraginfo *info); #else static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) @@ -140,6 +145,20 @@ static inline int vlan_hwaccel_do_receive(struct sk_buff *skb) { return 0; } + +static inline int vlan_gro_receive(struct napi_struct *napi, + struct vlan_group *grp, + unsigned int vlan_tci, struct sk_buff *skb) +{ + return NET_RX_DROP; +} + +static inline int vlan_gro_frags(struct napi_struct *napi, + struct vlan_group *grp, unsigned int vlan_tci, + struct napi_gro_fraginfo *info) +{ + return NET_RX_DROP; +} #endif /** diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index f98a656b17e5..76dad4808847 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -86,4 +86,6 @@ static inline int task_nice_ioclass(struct task_struct *task) */ extern int ioprio_best(unsigned short aprio, unsigned short bprio); +extern int set_task_ioprio(struct task_struct *task, int ioprio); + #endif diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 346e2b80be7d..6384b19efe64 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -543,6 +543,11 @@ struct transaction_s unsigned long t_expires; /* + * When this transaction started, in nanoseconds [no locking] + */ + ktime_t t_start_time; + + /* * How many handles used this transaction? [t_handle_lock] */ int t_handle_count; @@ -798,9 +803,19 @@ struct journal_s struct buffer_head **j_wbuf; int j_wbufsize; + /* + * this is the pid of the last person to run a synchronous operation + * through the journal. + */ pid_t j_last_sync_writer; /* + * the average amount of time in nanoseconds it takes to commit a + * transaction to the disk. [j_state_lock] + */ + u64 j_average_commit_time; + + /* * An opaque pointer to fs-private information. ext3 puts its * superblock pointer here */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 34456476e761..b45109c61fba 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -638,6 +638,11 @@ struct transaction_s unsigned long t_expires; /* + * When this transaction started, in nanoseconds [no locking] + */ + ktime_t t_start_time; + + /* * How many handles used this transaction? [t_handle_lock] */ int t_handle_count; @@ -682,6 +687,8 @@ jbd2_time_diff(unsigned long start, unsigned long end) return end + (MAX_JIFFY_OFFSET - start); } +#define JBD2_NR_BATCH 64 + /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. @@ -826,6 +833,14 @@ struct journal_s struct mutex j_checkpoint_mutex; /* + * List of buffer heads used by the checkpoint routine. This + * was moved from jbd2_log_do_checkpoint() to reduce stack + * usage. Access to this array is controlled by the + * j_checkpoint_mutex. [j_checkpoint_mutex] + */ + struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; + + /* * Journal head: identifies the first unused block in the journal. 
* [j_state_lock] */ @@ -939,8 +954,26 @@ struct journal_s struct buffer_head **j_wbuf; int j_wbufsize; + /* + * this is the pid of hte last person to run a synchronous operation + * through the journal + */ pid_t j_last_sync_writer; + /* + * the average amount of time in nanoseconds it takes to commit a + * transaction to disk. [j_state_lock] + */ + u64 j_average_commit_time; + + /* + * minimum and maximum times that we should wait for + * additional filesystem operations to get batched into a + * synchronous handle in microseconds + */ + u32 j_min_batch_time; + u32 j_max_batch_time; + /* This function is called when a transaction is closed */ void (*j_commit_callback)(journal_t *, transaction_t *); @@ -1102,7 +1135,6 @@ extern int jbd2_journal_set_features (journal_t *, unsigned long, unsigned long, unsigned long); extern void jbd2_journal_clear_features (journal_t *, unsigned long, unsigned long, unsigned long); -extern int jbd2_journal_create (journal_t *); extern int jbd2_journal_load (journal_t *journal); extern int jbd2_journal_destroy (journal_t *); extern int jbd2_journal_recover (journal_t *journal); @@ -1177,8 +1209,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid); int jbd2_log_do_checkpoint(journal_t *journal); void __jbd2_log_wait_for_space(journal_t *journal); -extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); -extern int jbd2_cleanup_journal_tail(journal_t *); +extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); +extern int jbd2_cleanup_journal_tail(journal_t *); /* Debugging code only: */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 6b8e2027165e..343df9ef2412 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -476,6 +476,12 @@ static inline char *pack_hex_byte(char *buf, u8 byte) __val = __val < __min ? __min: __val; \ __val > __max ? __max: __val; }) + +/* + * swap - swap value of @a and @b + */ +#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; }) + /** * container_of - cast a member of a structure out to the containing structure * @ptr: the pointer to the member. 
diff --git a/include/linux/libata.h b/include/linux/libata.h index 3449de597eff..4f7c8fb4d3fe 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1518,6 +1518,7 @@ extern void sata_pmp_error_handler(struct ata_port *ap); extern const struct ata_port_operations ata_sff_port_ops; extern const struct ata_port_operations ata_bmdma_port_ops; +extern const struct ata_port_operations ata_bmdma32_port_ops; /* PIO only, sg_tablesize and dma_boundary limits can be removed */ #define ATA_PIO_SHT(drv_name) \ @@ -1545,6 +1546,8 @@ extern void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); extern unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw); +extern unsigned int ata_sff_data_xfer32(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw); extern u8 ata_sff_irq_on(struct ata_port *ap); diff --git a/include/linux/magic.h b/include/linux/magic.h index f7f3fdddbef0..439f6f3cb0c4 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h @@ -13,6 +13,7 @@ #define EFS_SUPER_MAGIC 0x414A53 #define EXT2_SUPER_MAGIC 0xEF53 #define EXT3_SUPER_MAGIC 0xEF53 +#define XENFS_SUPER_MAGIC 0xabba1974 #define EXT4_SUPER_MAGIC 0xEF53 #define HPFS_SUPER_MAGIC 0xf995e849 #define ISOFS_SUPER_MAGIC 0x9660 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1fbe14d39521..326f45c86530 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -19,22 +19,45 @@ #ifndef _LINUX_MEMCONTROL_H #define _LINUX_MEMCONTROL_H - +#include <linux/cgroup.h> struct mem_cgroup; struct page_cgroup; struct page; struct mm_struct; #ifdef CONFIG_CGROUP_MEM_RES_CTLR +/* + * All "charge" functions with gfp_mask should use GFP_KERNEL or + * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't + * alloc memory but reclaims memory from all available zones. So, "where I want + * memory from" bits of gfp_mask has no meaning. So any bits of that field is + * available but adding a rule is better. charge functions' gfp_mask should + * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous + * codes. + * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.) 
+ */ -extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, +extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); +/* for swap handling */ +extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm, + struct page *page, gfp_t mask, struct mem_cgroup **ptr); +extern void mem_cgroup_commit_charge_swapin(struct page *page, + struct mem_cgroup *ptr); +extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr); + extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); -extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru); +extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_del_lru(struct page *page); +extern void mem_cgroup_move_lists(struct page *page, + enum lru_list from, enum lru_list to); extern void mem_cgroup_uncharge_page(struct page *page); extern void mem_cgroup_uncharge_cache_page(struct page *page); -extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask); +extern int mem_cgroup_shrink_usage(struct page *page, + struct mm_struct *mm, gfp_t gfp_mask); extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, @@ -47,12 +70,20 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); -#define mm_match_cgroup(mm, cgroup) \ - ((cgroup) == mem_cgroup_from_task((mm)->owner)) +static inline +int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) +{ + struct mem_cgroup *mem; + rcu_read_lock(); + mem = mem_cgroup_from_task((mm)->owner); + rcu_read_unlock(); + return cgroup == mem; +} extern int -mem_cgroup_prepare_migration(struct page *page, struct page *newpage); -extern void mem_cgroup_end_migration(struct page *page); +mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); +extern void mem_cgroup_end_migration(struct mem_cgroup *mem, + struct page *oldpage, struct page *newpage); /* * For memory reclaim. 
@@ -65,13 +96,32 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority); extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority); +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); +unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, + struct zone *zone, + enum lru_list lru); +struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, + struct zone *zone); +struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat_from_page(struct page *page); -extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, - int priority, enum lru_list lru); +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +extern int do_swap_account; +#endif +static inline bool mem_cgroup_disabled(void) +{ + if (mem_cgroup_subsys.disabled) + return true; + return false; +} + +extern bool mem_cgroup_oom_called(struct task_struct *task); #else /* CONFIG_CGROUP_MEM_RES_CTLR */ -static inline int mem_cgroup_charge(struct page *page, +struct mem_cgroup; + +static inline int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { return 0; @@ -83,6 +133,21 @@ static inline int mem_cgroup_cache_charge(struct page *page, return 0; } +static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm, + struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr) +{ + return 0; +} + +static inline void mem_cgroup_commit_charge_swapin(struct page *page, + struct mem_cgroup *ptr) +{ +} + +static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr) +{ +} + static inline void mem_cgroup_uncharge_page(struct page *page) { } @@ -91,12 +156,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page) { } -static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) +static inline int mem_cgroup_shrink_usage(struct page *page, + struct mm_struct *mm, gfp_t gfp_mask) { return 0; } -static inline void mem_cgroup_move_lists(struct page *page, bool active) +static inline void mem_cgroup_add_lru_list(struct page *page, int lru) +{ +} + +static inline void mem_cgroup_del_lru_list(struct page *page, int lru) +{ + return ; +} + +static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru) +{ + return ; +} + +static inline void mem_cgroup_del_lru(struct page *page) +{ + return ; +} + +static inline void +mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) { } @@ -112,12 +198,14 @@ static inline int task_in_mem_cgroup(struct task_struct *task, } static inline int -mem_cgroup_prepare_migration(struct page *page, struct page *newpage) +mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) { return 0; } -static inline void mem_cgroup_end_migration(struct page *page) +static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, + struct page *oldpage, + struct page *newpage) { } @@ -146,12 +234,42 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, { } -static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, - struct zone *zone, int priority, - enum lru_list lru) +static inline bool mem_cgroup_disabled(void) +{ + return true; +} + +static inline bool mem_cgroup_oom_called(struct task_struct *task) +{ + return false; +} + +static inline int +mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) +{ + return 1; +} + +static inline unsigned long +mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, + enum lru_list lru) { return 0; } + + +static 
inline struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) +{ + return NULL; +} + +static inline struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat_from_page(struct page *page) +{ + return NULL; +} + #endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index c948350c378e..7fbb97267556 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -28,6 +28,7 @@ add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l) { list_add(&page->lru, &zone->lru[l].list); __inc_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_add_lru_list(page, l); } static inline void @@ -35,6 +36,7 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l) { list_del(&page->lru); __dec_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_del_lru_list(page, l); } static inline void @@ -54,6 +56,7 @@ del_page_from_lru(struct zone *zone, struct page *page) l += page_is_file_cache(page); } __dec_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_del_lru_list(page, l); } /** @@ -78,23 +81,4 @@ static inline enum lru_list page_lru(struct page *page) return lru; } -/** - * inactive_anon_is_low - check if anonymous pages need to be deactivated - * @zone: zone to check - * - * Returns true if the zone does not have enough inactive anon pages, - * meaning some active anon pages need to be deactivated. - */ -static inline int inactive_anon_is_low(struct zone *zone) -{ - unsigned long active, inactive; - - active = zone_page_state(zone, NR_ACTIVE_ANON); - inactive = zone_page_state(zone, NR_INACTIVE_ANON); - - if (inactive * zone->inactive_ratio < active) - return 1; - - return 0; -} #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 35a7b5e19465..09c14e213b63 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -263,6 +263,19 @@ enum zone_type { #error ZONES_SHIFT -- too many zones configured adjust calculation #endif +struct zone_reclaim_stat { + /* + * The pageout code in vmscan.c keeps track of how many of the + * mem/swap backed and file backed pages are refeferenced. + * The higher the rotated/scanned ratio, the more valuable + * that cache is. + * + * The anon LRU stats live in [0], file LRU stats in [1] + */ + unsigned long recent_rotated[2]; + unsigned long recent_scanned[2]; +}; + struct zone { /* Fields commonly accessed by the page allocator */ unsigned long pages_min, pages_low, pages_high; @@ -315,16 +328,7 @@ struct zone { unsigned long nr_scan; } lru[NR_LRU_LISTS]; - /* - * The pageout code in vmscan.c keeps track of how many of the - * mem/swap backed and file backed pages are refeferenced. - * The higher the rotated/scanned ratio, the more valuable - * that cache is. 
- * - * The anon LRU stats live in [0], file LRU stats in [1] - */ - unsigned long recent_rotated[2]; - unsigned long recent_scanned[2]; + struct zone_reclaim_stat reclaim_stat; unsigned long pages_scanned; /* since last reclaim */ unsigned long flags; /* zone flags, see below */ diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 00e2b575021f..88d3d8fbf9f2 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -520,6 +520,7 @@ struct cfi_fixup { #define CFI_MFR_AMD 0x0001 #define CFI_MFR_ATMEL 0x001F +#define CFI_MFR_SAMSUNG 0x00EC #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h index 0be442f881dd..0555f7a0b9ed 100644 --- a/include/linux/mtd/ftl.h +++ b/include/linux/mtd/ftl.h @@ -32,25 +32,25 @@ #define _LINUX_FTL_H typedef struct erase_unit_header_t { - u_int8_t LinkTargetTuple[5]; - u_int8_t DataOrgTuple[10]; - u_int8_t NumTransferUnits; - u_int32_t EraseCount; - u_int16_t LogicalEUN; - u_int8_t BlockSize; - u_int8_t EraseUnitSize; - u_int16_t FirstPhysicalEUN; - u_int16_t NumEraseUnits; - u_int32_t FormattedSize; - u_int32_t FirstVMAddress; - u_int16_t NumVMPages; - u_int8_t Flags; - u_int8_t Code; - u_int32_t SerialNumber; - u_int32_t AltEUHOffset; - u_int32_t BAMOffset; - u_int8_t Reserved[12]; - u_int8_t EndTuple[2]; + uint8_t LinkTargetTuple[5]; + uint8_t DataOrgTuple[10]; + uint8_t NumTransferUnits; + uint32_t EraseCount; + uint16_t LogicalEUN; + uint8_t BlockSize; + uint8_t EraseUnitSize; + uint16_t FirstPhysicalEUN; + uint16_t NumEraseUnits; + uint32_t FormattedSize; + uint32_t FirstVMAddress; + uint16_t NumVMPages; + uint8_t Flags; + uint8_t Code; + uint32_t SerialNumber; + uint32_t AltEUHOffset; + uint32_t BAMOffset; + uint8_t Reserved[12]; + uint8_t EndTuple[2]; } erase_unit_header_t; /* Flags in erase_unit_header_t */ diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index aa30244492c6..b981b8772217 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -223,6 +223,7 @@ struct map_info { must leave it enabled. */ void (*set_vpp)(struct map_info *, int); + unsigned long pfow_base; unsigned long map_priv_1; unsigned long map_priv_2; void *fldrv_priv; diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 64433eb411d7..3aa5d77c2cdb 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -15,6 +15,8 @@ #include <linux/mtd/compatmac.h> #include <mtd/mtd-abi.h> +#include <asm/div64.h> + #define MTD_CHAR_MAJOR 90 #define MTD_BLOCK_MAJOR 31 #define MAX_MTD_DEVICES 32 @@ -25,20 +27,20 @@ #define MTD_ERASE_DONE 0x08 #define MTD_ERASE_FAILED 0x10 -#define MTD_FAIL_ADDR_UNKNOWN 0xffffffff +#define MTD_FAIL_ADDR_UNKNOWN -1LL /* If the erase fails, fail_addr might indicate exactly which block failed. If fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not specific to any particular block. 
*/ struct erase_info { struct mtd_info *mtd; - u_int32_t addr; - u_int32_t len; - u_int32_t fail_addr; + uint64_t addr; + uint64_t len; + uint64_t fail_addr; u_long time; u_long retries; - u_int dev; - u_int cell; + unsigned dev; + unsigned cell; void (*callback) (struct erase_info *self); u_long priv; u_char state; @@ -46,9 +48,9 @@ struct erase_info { }; struct mtd_erase_region_info { - u_int32_t offset; /* At which this region starts, from the beginning of the MTD */ - u_int32_t erasesize; /* For this region */ - u_int32_t numblocks; /* Number of blocks of erasesize in this region */ + uint64_t offset; /* At which this region starts, from the beginning of the MTD */ + uint32_t erasesize; /* For this region */ + uint32_t numblocks; /* Number of blocks of erasesize in this region */ unsigned long *lockmap; /* If keeping bitmap of locks */ }; @@ -100,14 +102,14 @@ struct mtd_oob_ops { struct mtd_info { u_char type; - u_int32_t flags; - u_int32_t size; // Total size of the MTD + uint32_t flags; + uint64_t size; // Total size of the MTD /* "Major" erase size for the device. Naïve users may take this * to be the only erase size available, or may use the more detailed * information below if they desire */ - u_int32_t erasesize; + uint32_t erasesize; /* Minimal writable flash unit size. In case of NOR flash it is 1 (even * though individual bits can be cleared), in case of NAND flash it is * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR @@ -115,10 +117,20 @@ struct mtd_info { * Any driver registering a struct mtd_info must ensure a writesize of * 1 or larger. */ - u_int32_t writesize; + uint32_t writesize; + + uint32_t oobsize; // Amount of OOB data per block (e.g. 16) + uint32_t oobavail; // Available OOB bytes per block - u_int32_t oobsize; // Amount of OOB data per block (e.g. 16) - u_int32_t oobavail; // Available OOB bytes per block + /* + * If erasesize is a power of 2 then the shift is stored in + * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize. + */ + unsigned int erasesize_shift; + unsigned int writesize_shift; + /* Masks based on erasesize_shift and writesize_shift */ + unsigned int erasesize_mask; + unsigned int writesize_mask; // Kernel-only stuff starts here. 
const char *name; @@ -190,8 +202,8 @@ struct mtd_info { void (*sync) (struct mtd_info *mtd); /* Chip-supported device locking */ - int (*lock) (struct mtd_info *mtd, loff_t ofs, size_t len); - int (*unlock) (struct mtd_info *mtd, loff_t ofs, size_t len); + int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); /* Power Management functions */ int (*suspend) (struct mtd_info *mtd); @@ -221,6 +233,35 @@ struct mtd_info { void (*put_device) (struct mtd_info *mtd); }; +static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->erasesize_shift) + return sz >> mtd->erasesize_shift; + do_div(sz, mtd->erasesize); + return sz; +} + +static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->erasesize_shift) + return sz & mtd->erasesize_mask; + return do_div(sz, mtd->erasesize); +} + +static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->writesize_shift) + return sz >> mtd->writesize_shift; + do_div(sz, mtd->writesize); + return sz; +} + +static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->writesize_shift) + return sz & mtd->writesize_mask; + return do_div(sz, mtd->writesize); +} /* Kernel-side ioctl definitions */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 733d3f3b4eb8..db5b63da2a7e 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -335,17 +335,12 @@ struct nand_buffers { * @erase_cmd: [INTERN] erase command write function, selectable due to AND support * @scan_bbt: [REPLACEABLE] function to scan bad block table * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) - * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress * @state: [INTERN] the current state of the NAND device * @oob_poi: poison value buffer * @page_shift: [INTERN] number of address bits in a page (column address bits) * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry * @chip_shift: [INTERN] number of address bits in one chip - * @datbuf: [INTERN] internal buffer for one page + oob - * @oobbuf: [INTERN] oob buffer for one eraseblock - * @oobdirty: [INTERN] indicates that oob_buf must be reinitialized - * @data_poi: [INTERN] pointer to a data buffer * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about * special functionality. 
See the defines for further explanation * @badblockpos: [INTERN] position of the bad block marker in the oob area @@ -399,7 +394,7 @@ struct nand_chip { int bbt_erase_shift; int chip_shift; int numchips; - unsigned long chipsize; + uint64_t chipsize; int pagemask; int pagebuf; int subpagesize; diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index c92b4d439609..a45dd831b3f8 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -36,9 +36,9 @@ struct mtd_partition { char *name; /* identifier string */ - u_int32_t size; /* partition size */ - u_int32_t offset; /* offset within the master MTD space */ - u_int32_t mask_flags; /* master MTD flags to mask out for this partition */ + uint64_t size; /* partition size */ + uint64_t offset; /* offset within the master MTD space */ + uint32_t mask_flags; /* master MTD flags to mask out for this partition */ struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/ struct mtd_info **mtdp; /* pointer to store the MTD object */ }; diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h new file mode 100644 index 000000000000..b730d4f84655 --- /dev/null +++ b/include/linux/mtd/pfow.h @@ -0,0 +1,159 @@ +/* Primary function overlay window definitions + * and service functions used by LPDDR chips + */ +#ifndef __LINUX_MTD_PFOW_H +#define __LINUX_MTD_PFOW_H + +#include <linux/mtd/qinfo.h> + +/* PFOW registers addressing */ +/* Address of symbol "P" */ +#define PFOW_QUERY_STRING_P 0x0000 +/* Address of symbol "F" */ +#define PFOW_QUERY_STRING_F 0x0002 +/* Address of symbol "O" */ +#define PFOW_QUERY_STRING_O 0x0004 +/* Address of symbol "W" */ +#define PFOW_QUERY_STRING_W 0x0006 +/* Identification info for LPDDR chip */ +#define PFOW_MANUFACTURER_ID 0x0020 +#define PFOW_DEVICE_ID 0x0022 +/* Address in PFOW where prog buffer can can be found */ +#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 +/* Size of program buffer in words */ +#define PFOW_PROGRAM_BUFFER_SIZE 0x0042 +/* Address command code register */ +#define PFOW_COMMAND_CODE 0x0080 +/* command data register */ +#define PFOW_COMMAND_DATA 0x0084 +/* command address register lower address bits */ +#define PFOW_COMMAND_ADDRESS_L 0x0088 +/* command address register upper address bits */ +#define PFOW_COMMAND_ADDRESS_H 0x008a +/* number of bytes to be proggrammed lower address bits */ +#define PFOW_DATA_COUNT_L 0x0090 +/* number of bytes to be proggrammed higher address bits */ +#define PFOW_DATA_COUNT_H 0x0092 +/* command execution register, the only possible value is 0x01 */ +#define PFOW_COMMAND_EXECUTE 0x00c0 +/* 0x01 should be written at this address to clear buffer */ +#define PFOW_CLEAR_PROGRAM_BUFFER 0x00c4 +/* device program/erase suspend register */ +#define PFOW_PROGRAM_ERASE_SUSPEND 0x00c8 +/* device status register */ +#define PFOW_DSR 0x00cc + +/* LPDDR memory device command codes */ +/* They are possible values of PFOW command code register */ +#define LPDDR_WORD_PROGRAM 0x0041 +#define LPDDR_BUFF_PROGRAM 0x00E9 +#define LPDDR_BLOCK_ERASE 0x0020 +#define LPDDR_LOCK_BLOCK 0x0061 +#define LPDDR_UNLOCK_BLOCK 0x0062 +#define LPDDR_READ_BLOCK_LOCK_STATUS 0x0065 +#define LPDDR_INFO_QUERY 0x0098 +#define LPDDR_READ_OTP 0x0097 +#define LPDDR_PROG_OTP 0x00C0 +#define LPDDR_RESUME 0x00D0 + +/* Defines possible value of PFOW command execution register */ +#define LPDDR_START_EXECUTION 0x0001 + +/* Defines possible value of PFOW program/erase suspend register */ +#define LPDDR_SUSPEND 0x0001 + +/* Possible values 
of PFOW device status register */ +/* access R - read; RC read & clearable */ +#define DSR_DPS (1<<1) /* RC; device protect status + * 0 - not protected 1 - locked */ +#define DSR_PSS (1<<2) /* R; program suspend status; + * 0-prog in progress/completed, + * 1- prog suspended */ +#define DSR_VPPS (1<<3) /* RC; 0-Vpp OK, * 1-Vpp low */ +#define DSR_PROGRAM_STATUS (1<<4) /* RC; 0-successful, 1-error */ +#define DSR_ERASE_STATUS (1<<5) /* RC; erase or blank check status; + * 0-success erase/blank check, + * 1 blank check error */ +#define DSR_ESS (1<<6) /* R; erase suspend status; + * 0-erase in progress/complete, + * 1 erase suspended */ +#define DSR_READY_STATUS (1<<7) /* R; Device status + * 0-busy, + * 1-ready */ +#define DSR_RPS (0x3<<8) /* RC; region program status + * 00 - Success, + * 01-re-program attempt in region with + * object mode data, + * 10-object mode program w attempt in + * region with control mode data + * 11-attempt to program invalid half + * with 0x41 command */ +#define DSR_AOS (1<<12) /* RC; 1- AO related failure */ +#define DSR_AVAILABLE (1<<15) /* R; Device availbility + * 1 - Device available + * 0 - not available */ + +/* The superset of all possible error bits in DSR */ +#define DSR_ERR 0x133A + +static inline void send_pfow_command(struct map_info *map, + unsigned long cmd_code, unsigned long adr, + unsigned long len, map_word *datum) +{ + int bits_per_chip = map_bankwidth(map) * 8; + int chipnum; + struct lpddr_private *lpddr = map->fldrv_priv; + chipnum = adr >> lpddr->chipshift; + + map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); + map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), + map->pfow_base + PFOW_COMMAND_ADDRESS_L); + map_write(map, CMD(adr>>bits_per_chip), + map->pfow_base + PFOW_COMMAND_ADDRESS_H); + if (len) { + map_write(map, CMD(len & ((1<<bits_per_chip) - 1)), + map->pfow_base + PFOW_DATA_COUNT_L); + map_write(map, CMD(len>>bits_per_chip), + map->pfow_base + PFOW_DATA_COUNT_H); + } + if (datum) + map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA); + + /* Command execution start */ + map_write(map, CMD(LPDDR_START_EXECUTION), + map->pfow_base + PFOW_COMMAND_EXECUTE); +} + +static inline void print_drs_error(unsigned dsr) +{ + int prog_status = (dsr & DSR_RPS) >> 8; + + if (!(dsr & DSR_AVAILABLE)) + printk(KERN_NOTICE"DSR.15: (0) Device not Available\n"); + if (prog_status & 0x03) + printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid " + "half with 41h command\n"); + else if (prog_status & 0x02) + printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt " + "in region with Control Mode data\n"); + else if (prog_status & 0x01) + printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region " + "with Object Mode data\n"); + if (!(dsr & DSR_READY_STATUS)) + printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n"); + if (dsr & DSR_ESS) + printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n"); + if (dsr & DSR_ERASE_STATUS) + printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n"); + if (dsr & DSR_PROGRAM_STATUS) + printk(KERN_NOTICE"DSR.4: (1) Program Error\n"); + if (dsr & DSR_VPPS) + printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation " + "aborted\n"); + if (dsr & DSR_PSS) + printk(KERN_NOTICE"DSR.2: (1) Program suspended\n"); + if (dsr & DSR_DPS) + printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt " + "on locked block\n"); +} +#endif /* __LINUX_MTD_PFOW_H */ diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h index c8e63a5ee72e..76f7cabf07d3 100644 --- a/include/linux/mtd/physmap.h +++ 
b/include/linux/mtd/physmap.h @@ -24,6 +24,7 @@ struct physmap_flash_data { unsigned int width; void (*set_vpp)(struct map_info *, int); unsigned int nr_parts; + unsigned int pfow_base; struct mtd_partition *parts; }; diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h new file mode 100644 index 000000000000..7b3d487d8b3f --- /dev/null +++ b/include/linux/mtd/qinfo.h @@ -0,0 +1,91 @@ +#ifndef __LINUX_MTD_QINFO_H +#define __LINUX_MTD_QINFO_H + +#include <linux/mtd/map.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/flashchip.h> +#include <linux/mtd/partitions.h> + +/* lpddr_private describes lpddr flash chip in memory map + * @ManufactId - Chip Manufacturer ID + * @DevId - Chip Device ID + * @qinfo - pointer to qinfo records describing the chip + * @numchips - number of chips including virtual RWW partitions + * @chipshift - Chip/partition size 2^chipshift + * @chips - per-chip data structure + */ +struct lpddr_private { + uint16_t ManufactId; + uint16_t DevId; + struct qinfo_chip *qinfo; + int numchips; + unsigned long chipshift; + struct flchip chips[0]; +}; + +/* qinfo_query_info structure contains request information for + * each qinfo record + * @major - major number of qinfo record + * @minor - minor number of qinfo record + * @id_str - descriptive string to access the record + * @desc - detailed description for the qinfo record + */ +struct qinfo_query_info { + uint8_t major; + uint8_t minor; + char *id_str; + char *desc; +}; + +/* + * qinfo_chip structure contains necessary qinfo records data + * @DevSizeShift - Device size 2^n bytes + * @BufSizeShift - Program buffer size 2^n bytes + * @TotalBlocksNum - Total number of blocks + * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes + * @HWPartsNum - Number of hardware partitions + * @SuspEraseSupp - Suspend erase supported + * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec + * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec + * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec + */ +struct qinfo_chip { + /* General device info */ + uint16_t DevSizeShift; + uint16_t BufSizeShift; + /* Erase block information */ + uint16_t TotalBlocksNum; + uint16_t UniformBlockSizeShift; + /* Partition information */ + uint16_t HWPartsNum; + /* Optional features */ + uint16_t SuspEraseSupp; + /* Operation typical time */ + uint16_t SingleWordProgTime; + uint16_t ProgBufferTime; + uint16_t BlockEraseTime; +}; + +/* defines for fixup usage */ +#define LPDDR_MFR_ANY 0xffff +#define LPDDR_ID_ANY 0xffff +#define NUMONYX_MFGR_ID 0x0089 +#define R18_DEVICE_ID_1G 0x893c + +static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map) +{ + map_word val = { {0} }; + val.x[0] = cmd; + return val; +} + +#define CMD(x) lpddr_build_cmd(x, map) +#define CMDVAL(cmd) cmd.x[0] + +struct mtd_info *lpddr_cmdset(struct map_info *); + +#endif + diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h new file mode 100644 index 000000000000..25f4d2a845c1 --- /dev/null +++ b/include/linux/mtd/sharpsl.h @@ -0,0 +1,20 @@ +/* + * SharpSL NAND support + * + * Copyright (C) 2008 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
+ */ + +#include <linux/mtd/nand.h> +#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/partitions.h> + +struct sharpsl_nand_platform_data { + struct nand_bbt_descr *badblock_pattern; + struct nand_ecclayout *ecc_layout; + struct mtd_partition *partitions; + unsigned int nr_partitions; +}; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c28bbba3c23d..f24556813375 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1125,9 +1125,6 @@ struct softnet_data struct sk_buff *completion_queue; struct napi_struct backlog; -#ifdef CONFIG_NET_DMA - struct dma_chan *net_dma; -#endif }; DECLARE_PER_CPU(struct softnet_data,softnet_data); @@ -1373,8 +1370,14 @@ extern int netif_rx_ni(struct sk_buff *skb); #define HAVE_NETIF_RECEIVE_SKB 1 extern int netif_receive_skb(struct sk_buff *skb); extern void napi_gro_flush(struct napi_struct *napi); +extern int dev_gro_receive(struct napi_struct *napi, + struct sk_buff *skb); extern int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); +extern void napi_reuse_skb(struct napi_struct *napi, + struct sk_buff *skb); +extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi, + struct napi_gro_fraginfo *info); extern int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info); extern void netif_nit_deliver(struct sk_buff *skb); diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h new file mode 100644 index 000000000000..9acb21572eaf --- /dev/null +++ b/include/linux/nwpserial.h @@ -0,0 +1,18 @@ +/* + * Serial Port driver for a NWP uart device + * + * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ +#ifndef _NWPSERIAL_H +#define _NWPSERIAL_H + +int nwpserial_register_port(struct uart_port *port); +void nwpserial_unregister_port(int line); + +#endif /* _NWPSERIAL_H */ diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 1ce9fe572e51..1d9518bc4c58 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start, unsigned long oprofile_get_cpu_buffer_size(void); void oprofile_cpu_buffer_inc_smpl_lost(void); +/* cpu buffer functions */ + +struct op_sample; + +struct op_entry { + struct ring_buffer_event *event; + struct op_sample *sample; + unsigned long irq_flags; + unsigned long size; + unsigned long *data; +}; + +void oprofile_write_reserve(struct op_entry *entry, + struct pt_regs * const regs, + unsigned long pc, int code, int size); +int oprofile_add_data(struct op_entry *entry, unsigned long val); +int oprofile_write_commit(struct op_entry *entry); + #endif /* OPROFILE_H */ diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 1e6d34bfa094..602cc1fdee90 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -26,10 +26,6 @@ enum { PCG_LOCK, /* page cgroup is locked */ PCG_CACHE, /* charged as cache */ PCG_USED, /* this object is in use. 
*/ - /* flags for LRU placement */ - PCG_ACTIVE, /* page is active in this cgroup */ - PCG_FILE, /* page is file system backed */ - PCG_UNEVICTABLE, /* page is unevictableable */ }; #define TESTPCGFLAG(uname, lname) \ @@ -50,19 +46,6 @@ TESTPCGFLAG(Cache, CACHE) TESTPCGFLAG(Used, USED) CLEARPCGFLAG(Used, USED) -/* LRU management flags (from global-lru definition) */ -TESTPCGFLAG(File, FILE) -SETPCGFLAG(File, FILE) -CLEARPCGFLAG(File, FILE) - -TESTPCGFLAG(Active, ACTIVE) -SETPCGFLAG(Active, ACTIVE) -CLEARPCGFLAG(Active, ACTIVE) - -TESTPCGFLAG(Unevictable, UNEVICTABLE) -SETPCGFLAG(Unevictable, UNEVICTABLE) -CLEARPCGFLAG(Unevictable, UNEVICTABLE) - static inline int page_cgroup_nid(struct page_cgroup *pc) { return page_to_nid(pc->page); @@ -105,4 +88,39 @@ static inline void page_cgroup_init(void) } #endif + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +#include <linux/swap.h> +extern struct mem_cgroup * +swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem); +extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); +#else +#include <linux/swap.h> + +static inline +struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) +{ + return NULL; +} + +static inline +struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) +{ + return NULL; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif #endif diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index f7cc204fab07..20998746518e 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -223,7 +223,6 @@ struct hotplug_params { #ifdef CONFIG_ACPI #include <acpi/acpi.h> #include <acpi/acpi_bus.h> -#include <acpi/actypes.h> extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, struct hotplug_params *hpp); int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); diff --git a/include/linux/pid.h b/include/linux/pid.h index bb206c56d1f0..49f1c2f66e95 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -123,6 +123,24 @@ extern struct pid *alloc_pid(struct pid_namespace *ns); extern void free_pid(struct pid *pid); /* + * ns_of_pid() returns the pid namespace in which the specified pid was + * allocated. + * + * NOTE: + * ns_of_pid() is expected to be called for a process (task) that has + * an attached 'struct pid' (see attach_pid(), detach_pid()) i.e @pid + * is expected to be non-NULL. If @pid is NULL, caller should handle + * the resulting NULL pid-ns. + */ +static inline struct pid_namespace *ns_of_pid(struct pid *pid) +{ + struct pid_namespace *ns = NULL; + if (pid) + ns = pid->numbers[pid->level].ns; + return ns; +} + +/* * the helpers to get the pid's id seen from different namespaces * * pid_nr() : global id, i.e. 
the id seen from the init namespace; diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index d82fe825d62f..38d10326246a 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -79,11 +79,7 @@ static inline void zap_pid_ns_processes(struct pid_namespace *ns) } #endif /* CONFIG_PID_NS */ -static inline struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) -{ - return tsk->nsproxy->pid_ns; -} - +extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); void pidhash_init(void); void pidmap_init(void); diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 8fc909ef6787..9743e4dbc918 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -137,6 +137,9 @@ struct mddev_s struct gendisk *gendisk; struct kobject kobj; + int hold_active; +#define UNTIL_IOCTL 1 +#define UNTIL_STOP 2 /* Superblock information */ int major_version, @@ -215,6 +218,9 @@ struct mddev_s #define MD_RECOVERY_FROZEN 9 unsigned long recovery; + int recovery_disabled; /* if we detect that recovery + * will always fail, set this + * so we don't loop trying */ int in_sync; /* know to not need resync */ struct mutex reconfig_mutex; @@ -244,6 +250,9 @@ struct mddev_s struct sysfs_dirent *sysfs_state; /* handle for 'array_state' * file in sysfs. */ + struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */ + + struct work_struct del_work; /* used for delayed sysfs removal */ spinlock_t write_lock; wait_queue_head_t sb_wait; /* for waiting on superblock updates */ @@ -334,17 +343,14 @@ static inline char * mdname (mddev_t * mddev) * iterates through some rdev ringlist. It's safe to remove the * current 'rdev'. Dont touch 'tmp' though. */ -#define rdev_for_each_list(rdev, tmp, list) \ - \ - for ((tmp) = (list).next; \ - (rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \ - (tmp) = (tmp)->next, (tmp)->prev != &(list) \ - ; ) +#define rdev_for_each_list(rdev, tmp, head) \ + list_for_each_entry_safe(rdev, tmp, head, same_set) + /* * iterates through the 'same array disks' ringlist */ #define rdev_for_each(rdev, tmp, mddev) \ - rdev_for_each_list(rdev, tmp, (mddev)->disks) + list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) #define rdev_for_each_rcu(rdev, mddev) \ list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 8b4de4a41ff1..9491026afe66 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -194,6 +194,8 @@ static inline __u64 md_event(mdp_super_t *sb) { return (ev<<32)| sb->events_lo; } +#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1) + /* * The version-1 superblock : * All numeric fields are little-endian. 
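The md_k.h hunks above rebuild rdev_for_each_list() and rdev_for_each() on top of list_for_each_entry_safe(), so the 'tmp' cursor is now an mdk_rdev_t pointer rather than a raw list node, the head argument is a plain struct list_head pointer, and the current rdev may still be unlinked while iterating. A minimal sketch of a caller under those assumptions (the function name and the Faulty check are illustrative, not taken from this patch):

/* Walk a caller-supplied list of rdevs linked through ->same_set and unlink
 * the faulty ones; dropping the current entry is safe because the macro
 * expands to list_for_each_entry_safe(), which keeps 'tmp' one step ahead. */
static void drop_faulty_rdevs(struct list_head *candidates)
{
	mdk_rdev_t *rdev, *tmp;	/* 'tmp' is an rdev now, not a struct list_head */

	rdev_for_each_list(rdev, tmp, candidates) {
		if (test_bit(Faulty, &rdev->flags)) {
			list_del_init(&rdev->same_set);
			/* ...hand the rdev back to whoever owns it... */
		}
	}
}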
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h index 1b2dda035f8e..fd42aa87c391 100644 --- a/include/linux/raid/raid0.h +++ b/include/linux/raid/raid0.h @@ -5,9 +5,9 @@ struct strip_zone { - sector_t zone_offset; /* Zone offset in md_dev */ - sector_t dev_offset; /* Zone offset in real dev */ - sector_t size; /* Zone size */ + sector_t zone_start; /* Zone offset in md_dev (in sectors) */ + sector_t dev_start; /* Zone offset in real dev (in sectors) */ + sector_t sectors; /* Zone size in sectors */ int nb_dev; /* # of devices attached to the zone */ mdk_rdev_t **dev; /* Devices attached to the zone */ }; @@ -19,8 +19,8 @@ struct raid0_private_data mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ int nr_strip_zones; - sector_t hash_spacing; - int preshift; /* shift this before divide by hash_spacing */ + sector_t spacing; + int sector_shift; /* shift this before divide by spacing */ }; typedef struct raid0_private_data raid0_conf_t; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index afdc4558bb94..801bf77ff4e2 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -104,10 +104,10 @@ struct regulator; /** * struct regulator_bulk_data - Data used for bulk regulator operations. * - * @supply The name of the supply. Initialised by the user before - * using the bulk regulator APIs. - * @consumer The regulator consumer for the supply. This will be managed - * by the bulk API. + * @supply: The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @consumer: The regulator consumer for the supply. This will be managed + * by the bulk API. * * The regulator APIs provide a series of regulator_bulk_() API calls as * a convenience to consumers which require multiple supplies. This diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index e37d80561985..2dae05705f13 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -24,7 +24,33 @@ struct regulator_init_data; /** * struct regulator_ops - regulator operations. * - * This struct describes regulator operations. + * This struct describes regulator operations which can be implemented by + * regulator chip drivers. + * + * @enable: Enable the regulator. + * @disable: Disable the regulator. + * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. + * + * @set_voltage: Set the voltage for the regulator within the range specified. + * The driver should select the voltage closest to min_uV. + * @get_voltage: Return the currently configured voltage for the regulator. + * + * @set_current_limit: Configure a limit for a current-limited regulator. + * @get_current_limit: Get the limit for a current-limited regulator. + * + * @set_mode: Set the operating mode for the regulator. + * @get_mode: Get the current operating mode for the regulator. + * @get_optimum_mode: Get the most efficient operating mode for the regulator + * when running with the specified parameters. + * + * @set_suspend_voltage: Set the voltage for the regulator when the system + * is suspended. + * @set_suspend_enable: Mark the regulator as enabled when the system is + * suspended. + * @set_suspend_disable: Mark the regulator as disabled when the system is + * suspended. + * @set_suspend_mode: Set the operating mode for the regulator when the + * system is suspended. 
*/ struct regulator_ops { @@ -75,6 +101,15 @@ enum regulator_type { /** * struct regulator_desc - Regulator descriptor * + * Each regulator registered with the core is described with a structure of + * this type. + * + * @name: Identifying name for the regulator. + * @id: Numerical identifier for the regulator. + * @ops: Regulator operations table. + * @irq: Interrupt number for the regulator. + * @type: Indicates if the regulator is a voltage or current regulator. + * @owner: Module providing the regulator, used for refcounting. */ struct regulator_desc { const char *name; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index c6d69331a81e..3794773b23d2 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -44,6 +44,10 @@ struct regulator; * struct regulator_state - regulator state during low power system states * * This describes a regulator's state during a system wide low power state. + * + * @uV: Operating voltage during suspend. + * @mode: Operating mode during suspend. + * @enabled: Enabled during suspend. */ struct regulator_state { int uV; /* suspend voltage */ @@ -55,6 +59,30 @@ struct regulator_state { * struct regulation_constraints - regulator operating constraints. * * This struct describes regulator and board/machine specific constraints. + * + * @name: Descriptive name for the constraints, used for display purposes. + * + * @min_uV: Smallest voltage consumers may set. + * @max_uV: Largest voltage consumers may set. + * + * @min_uA: Smallest current consumers may set. + * @max_uA: Largest current consumers may set. + * + * @valid_modes_mask: Mask of modes which may be configured by consumers. + * @valid_ops_mask: Operations which may be performed by consumers. + * + * @always_on: Set if the regulator should never be disabled. + * @boot_on: Set if the regulator is enabled when the system is initially + * started. + * @apply_uV: Apply the voltage constraint when initialising. + * + * @input_uV: Input voltage for regulator when supplied by another regulator. + * + * @state_disk: State for regulator when system is suspended in disk mode. + * @state_mem: State for regulator when system is suspended in mem mode. + * @state_standby: State for regulator when system is suspended in standby + * mode. + * @initial_state: Suspend state to set by default. */ struct regulation_constraints { @@ -93,6 +121,9 @@ struct regulation_constraints { * struct regulator_consumer_supply - supply -> device mapping * * This maps a supply name to a device. + * + * @dev: Device structure for the consumer. + * @supply: Name for the supply. */ struct regulator_consumer_supply { struct device *dev; /* consumer */ @@ -103,6 +134,16 @@ struct regulator_consumer_supply { * struct regulator_init_data - regulator platform initialisation data. * * Initialisation constraints, our supply and consumer supplies. + * + * @supply_regulator_dev: Parent regulator (if any). + * + * @constraints: Constraints. These must be specified for the regulator to + * be usable. + * @num_consumer_supplies: Number of consumer device supplies. + * @consumer_supplies: Consumer device supply configuration. + * + * @regulator_init: Callback invoked when the regulator has been registered. + * @driver_data: Data passed to regulator_init.
*/ struct regulator_init_data { struct device *supply_regulator_dev; /* or NULL for LINE */ diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 271c1c2c9f6f..dede0a2cfc45 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -43,6 +43,10 @@ struct res_counter { * the routines below consider this to be IRQ-safe */ spinlock_t lock; + /* + * Parent counter, used for hierarchial resource accounting + */ + struct res_counter *parent; }; /** @@ -87,7 +91,7 @@ enum { * helpers for accounting */ -void res_counter_init(struct res_counter *counter); +void res_counter_init(struct res_counter *counter, struct res_counter *parent); /* * charge - try to consume more resource. @@ -103,7 +107,7 @@ void res_counter_init(struct res_counter *counter); int __must_check res_counter_charge_locked(struct res_counter *counter, unsigned long val); int __must_check res_counter_charge(struct res_counter *counter, - unsigned long val); + unsigned long val, struct res_counter **limit_fail_at); /* * uncharge - tell that some portion of the resource is released diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index b4199841f1fc..90bbbf0b1161 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -161,6 +161,9 @@ #define PORT_S3C6400 84 +/* NWPSERIAL */ +#define PORT_NWPSERIAL 85 + #ifdef __KERNEL__ #include <linux/compiler.h> diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h new file mode 100644 index 000000000000..7572d4e1fe76 --- /dev/null +++ b/include/linux/spi/tdo24m.h @@ -0,0 +1,13 @@ +#ifndef __TDO24M_H__ +#define __TDO24M_H__ + +enum tdo24m_model { + TDO24M, + TDO35S, +}; + +struct tdo24m_platform_data { + enum tdo24m_model model; +}; + +#endif /* __TDO24M_H__ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 2ce8207686e2..2b409c44db83 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -232,6 +232,11 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); extern void hibernation_set_ops(struct platform_hibernation_ops *ops); extern int hibernate(void); +extern int hibernate_nvs_register(unsigned long start, unsigned long size); +extern int hibernate_nvs_alloc(void); +extern void hibernate_nvs_free(void); +extern void hibernate_nvs_save(void); +extern void hibernate_nvs_restore(void); #else /* CONFIG_HIBERNATION */ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} @@ -239,6 +244,14 @@ static inline void swsusp_unset_page_free(struct page *p) {} static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} static inline int hibernate(void) { return -ENOSYS; } +static inline int hibernate_nvs_register(unsigned long a, unsigned long b) +{ + return 0; +} +static inline int hibernate_nvs_alloc(void) { return 0; } +static inline void hibernate_nvs_free(void) {} +static inline void hibernate_nvs_save(void) {} +static inline void hibernate_nvs_restore(void) {} #endif /* CONFIG_HIBERNATION */ #ifdef CONFIG_PM_SLEEP diff --git a/include/linux/swap.h b/include/linux/swap.h index 91dee50fe260..d30215578877 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -214,7 +214,8 @@ static inline void lru_cache_add_active_file(struct page *page) extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask); + gfp_t gfp_mask, bool noswap, + unsigned int 
swappiness); extern int __isolate_lru_page(struct page *page, int mode, int file); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; @@ -333,6 +334,22 @@ static inline void disable_swap_token(void) put_swap_token(swap_token_mm); } +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent); +#else +static inline void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) +{ +} +#endif +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +extern void mem_cgroup_uncharge_swap(swp_entry_t ent); +#else +static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) +{ +} +#endif + #else /* CONFIG_SWAP */ #define nr_swap_pages 0L @@ -409,6 +426,12 @@ static inline swp_entry_t get_swap_page(void) #define has_swap_token(x) 0 #define disable_swap_token() do { } while(0) +static inline int mem_cgroup_cache_charge_swapin(struct page *page, + struct mm_struct *mm, gfp_t mask, bool locked) +{ + return 0; +} + #endif /* CONFIG_SWAP */ #endif /* __KERNEL__*/ #endif /* _LINUX_SWAP_H */
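The res_counter changes shown earlier in this diff (a parent pointer in struct res_counter, the extra parent argument to res_counter_init(), and the limit_fail_at out-parameter on res_counter_charge()) are what let charges propagate up a hierarchy and report which level refused them. A minimal sketch of a caller using the new signatures; the counter names, the PAGE_SIZE charge and the printk are illustrative assumptions, not code from this patch:

#include <linux/res_counter.h>
#include <linux/mm.h>	/* PAGE_SIZE */

static struct res_counter child_counter;

/* Link the child counter under its parent; the second argument to
 * res_counter_init() is the new hierarchy hook (NULL for a root counter). */
static void child_counter_setup(struct res_counter *parent)
{
	res_counter_init(&child_counter, parent);
}

/* Try to charge one page; on failure 'fail' points at whichever counter in
 * the chain (the child itself or an ancestor) hit its limit. */
static int charge_one_page(void)
{
	struct res_counter *fail;
	int ret;

	ret = res_counter_charge(&child_counter, PAGE_SIZE, &fail);
	if (ret)
		printk(KERN_INFO "res_counter: charge refused at %p\n", fail);
	return ret;
}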