Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/dcache.h        | 20
-rw-r--r-- | include/linux/fscache-cache.h |  4
-rw-r--r-- | include/linux/fscache.h       | 42
-rw-r--r-- | include/linux/inetdevice.h    | 34
-rw-r--r-- | include/linux/ipv6.h          |  1
-rw-r--r-- | include/linux/lockref.h       | 71
-rw-r--r-- | include/linux/mlx5/device.h   | 22
-rw-r--r-- | include/linux/mlx5/driver.h   |  7
-rw-r--r-- | include/linux/mm_types.h      |  1
-rw-r--r-- | include/linux/nsproxy.h       |  6
-rw-r--r-- | include/linux/regmap.h        |  1
-rw-r--r-- | include/linux/sched.h         |  1
-rw-r--r-- | include/linux/wait.h          | 57
13 files changed, 205 insertions, 62 deletions
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d468..efdc94434c30 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -9,6 +9,7 @@
 #include <linux/seqlock.h>
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
+#include <linux/lockref.h>

 struct nameidata;
 struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
 # endif
 #endif

+#define d_lock	d_lockref.lock
+
 struct dentry {
 	/* RCU lookup touched fields */
 	unsigned int d_flags;		/* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
 	unsigned char d_iname[DNAME_INLINE_LEN];	/* small names */

 	/* Ref lookup also touches following */
-	unsigned int d_count;		/* protected by d_lock */
-	spinlock_t d_lock;		/* per dentry lock */
+	struct lockref d_lockref;	/* per-dentry lock and refcount */
 	const struct dentry_operations *d_op;
 	struct super_block *d_sb;	/* The root of the dentry tree */
 	unsigned long d_time;		/* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
 	assert_spin_locked(&dentry->d_lock);
 	if (!read_seqcount_retry(&dentry->d_seq, seq)) {
 		ret = 1;
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 	}

 	return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)

 static inline unsigned d_count(const struct dentry *dentry)
 {
-	return dentry->d_count;
+	return dentry->d_lockref.count;
 }

 /* validate "insecure" dentry pointer */
@@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *);
  * helper function for dentry_operations.d_dname() members
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);

 extern char *__d_path(const struct path *, const struct path *, char *, int);
 extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);

 static inline struct dentry *dget_dlock(struct dentry *dentry)
 {
 	if (dentry)
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 	return dentry;
 }

 static inline struct dentry *dget(struct dentry *dentry)
 {
-	if (dentry) {
-		spin_lock(&dentry->d_lock);
-		dget_dlock(dentry);
-		spin_unlock(&dentry->d_lock);
-	}
+	if (dentry)
+		lockref_get(&dentry->d_lockref);
 	return dentry;
 }
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index a9ff9a36b86d..7823e9ef995e 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -251,6 +251,10 @@ struct fscache_cache_ops {
 	/* unpin an object in the cache */
 	void (*unpin_object)(struct fscache_object *object);

+	/* check the consistency between the backing cache and the FS-Cache
+	 * cookie */
+	bool (*check_consistency)(struct fscache_operation *op);
+
 	/* store the updated auxiliary data on an object */
 	void (*update_object)(struct fscache_object *object);

diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 7a086235da4b..19b46458e4e8 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -183,6 +183,7 @@ extern struct fscache_cookie *__fscache_acquire_cookie(
 	const struct fscache_cookie_def *,
 	void *);
 extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
+extern int __fscache_check_consistency(struct fscache_cookie *);
 extern void __fscache_update_cookie(struct fscache_cookie *);
 extern int __fscache_attr_changed(struct fscache_cookie *);
 extern void __fscache_invalidate(struct fscache_cookie *);
@@ -208,6 +209,8 @@ extern bool __fscache_maybe_release_page(struct fscache_cookie *,
 					 struct page *, gfp_t);
 extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
 					      struct inode *);
+extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
+				       struct list_head *pages);

 /**
  * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -326,6 +329,25 @@ void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
 }

 /**
+ * fscache_check_consistency - Request a consistency check of a cache object
+ * @cookie: The cookie representing the cache object
+ *
+ * Request a consistency check from fscache, which passes the request to the
+ * backing cache.
+ *
+ * Returns 0 if consistent and -ESTALE if inconsistent.  May also
+ * return -ENOMEM and -ERESTARTSYS.
+ */
+static inline
+int fscache_check_consistency(struct fscache_cookie *cookie)
+{
+	if (fscache_cookie_valid(cookie))
+		return __fscache_check_consistency(cookie);
+	else
+		return 0;
+}
+
+/**
  * fscache_update_cookie - Request that a cache object be updated
  * @cookie: The cookie representing the cache object
  *
@@ -570,6 +592,26 @@ int fscache_alloc_page(struct fscache_cookie *cookie,
 }

 /**
+ * fscache_readpages_cancel - Cancel read/alloc on pages
+ * @cookie: The cookie representing the inode's cache object.
+ * @pages: The netfs pages that we canceled write on in readpages()
+ *
+ * Uncache/unreserve the pages reserved earlier in readpages() via
+ * fscache_readpages_or_alloc() and similar.  In most successful cases in
+ * readpages() this doesn't do anything.  In cases when the underlying netfs's
+ * readahead failed we need to clean up the pagelist (unmark and uncache).
+ *
+ * This function may sleep as it may have to clean up disk state.
+ */
+static inline
+void fscache_readpages_cancel(struct fscache_cookie *cookie,
+			      struct list_head *pages)
+{
+	if (fscache_cookie_valid(cookie))
+		__fscache_readpages_cancel(cookie, pages);
+}
+
+/**
  * fscache_write_page - Request storage of a page in the cache
  * @cookie: The cookie representing the cache object
  * @page: The netfs page to store
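The new fscache_check_consistency() helper above is meant to be called by a network filesystem that already holds a cookie and wants to revalidate the backing cache object. The sketch below is illustrative only and not part of this diff; the myfs_inode structure, its cache_cookie field and myfs_validate_cache() are invented names:

#include <linux/fs.h>
#include <linux/fscache.h>

/* Hypothetical per-inode state kept by a netfs that uses FS-Cache. */
struct myfs_inode {
	struct inode		vfs_inode;
	struct fscache_cookie	*cache_cookie;
};

/* Revalidate the cache object backing an inode; invalidate it if the
 * backing cache reports that it no longer matches the cookie. */
static int myfs_validate_cache(struct myfs_inode *mi)
{
	int ret = fscache_check_consistency(mi->cache_cookie);

	if (ret == -ESTALE)
		fscache_invalidate(mi->cache_cookie);
	return ret;
}

Treating -ESTALE as "invalidate and refill" lets the netfs keep working even when the backing cache has fallen out of step with the cookie's auxiliary data.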
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23f3474..79640e015a86 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@

 #include <linux/bitmap.h>
 #include <linux/if.h>
+#include <linux/ip.h>
 #include <linux/netdevice.h>
 #include <linux/rcupdate.h>
 #include <linux/timer.h>
 #include <linux/sysctl.h>
 #include <linux/rtnetlink.h>

-enum
-{
-	IPV4_DEVCONF_FORWARDING=1,
-	IPV4_DEVCONF_MC_FORWARDING,
-	IPV4_DEVCONF_PROXY_ARP,
-	IPV4_DEVCONF_ACCEPT_REDIRECTS,
-	IPV4_DEVCONF_SECURE_REDIRECTS,
-	IPV4_DEVCONF_SEND_REDIRECTS,
-	IPV4_DEVCONF_SHARED_MEDIA,
-	IPV4_DEVCONF_RP_FILTER,
-	IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
-	IPV4_DEVCONF_BOOTP_RELAY,
-	IPV4_DEVCONF_LOG_MARTIANS,
-	IPV4_DEVCONF_TAG,
-	IPV4_DEVCONF_ARPFILTER,
-	IPV4_DEVCONF_MEDIUM_ID,
-	IPV4_DEVCONF_NOXFRM,
-	IPV4_DEVCONF_NOPOLICY,
-	IPV4_DEVCONF_FORCE_IGMP_VERSION,
-	IPV4_DEVCONF_ARP_ANNOUNCE,
-	IPV4_DEVCONF_ARP_IGNORE,
-	IPV4_DEVCONF_PROMOTE_SECONDARIES,
-	IPV4_DEVCONF_ARP_ACCEPT,
-	IPV4_DEVCONF_ARP_NOTIFY,
-	IPV4_DEVCONF_ACCEPT_LOCAL,
-	IPV4_DEVCONF_SRC_VMARK,
-	IPV4_DEVCONF_PROXY_ARP_PVLAN,
-	IPV4_DEVCONF_ROUTE_LOCALNET,
-	__IPV4_DEVCONF_MAX
-};
-
-#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
-
 struct ipv4_devconf {
 	void *sysctl;
 	int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95bc766c..b8b7dc755752 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -101,6 +101,7 @@ struct inet6_skb_parm {
 #define IP6SKB_FORWARDED	2
 #define IP6SKB_REROUTED		4
 #define IP6SKB_ROUTERALERT	8
+#define IP6SKB_FRAGMENTED	16
 };

 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000000..01233e01627a
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them.  In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+	spinlock_t lock;
+	unsigned int count;
+};
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockref: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+static inline void lockref_get(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+}
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count is 0
+ */
+static inline int lockref_get_not_zero(struct lockref *lockref)
+{
+	int retval = 0;
+
+	spin_lock(&lockref->lock);
+	if (lockref->count) {
+		lockref->count++;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+static inline int lockref_put_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (lockref->count <= 1)
+		return 0;
+	lockref->count--;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+
+#endif /* __LINUX_LOCKREF_H */
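The dcache.h changes earlier in this diff are the first user of <linux/lockref.h>. As a rough sketch of the intended pattern for any other object that wants a combined lock and refcount (struct foo, foo_get() and foo_put() are made-up names; the put path mirrors what dput() does with d_lockref):

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object whose fields are guarded by ref.lock. */
struct foo {
	struct lockref	ref;
	int		payload;
};

static void foo_get(struct foo *f)
{
	/* Valid only while the caller already holds a reference. */
	lockref_get(&f->ref);
}

static void foo_put(struct foo *f)
{
	/* Fast path: the count was > 1 and has been decremented. */
	if (lockref_put_or_lock(&f->ref))
		return;

	/* Slow path: the count was <= 1 and ref.lock is now held. */
	f->ref.count--;
	spin_unlock(&f->ref.lock);
	kfree(f);
}

lockref_put_or_lock() returning 0 leaves the lock held with the count undecremented, so the slow path owns the final teardown.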
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e9e852..68029b30c3dc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
 	__be16	max_desc_sz_rq;
 	u8	rsvd21[2];
 	__be16	max_desc_sz_sq_dc;
-	u8	rsvd22[4];
-	__be16	max_qp_mcg;
-	u8	rsvd23;
+	__be32	max_qp_mcg;
+	u8	rsvd22[3];
 	u8	log_max_mcg;
-	u8	rsvd24;
+	u8	rsvd23;
 	u8	log_max_pd;
-	u8	rsvd25;
+	u8	rsvd24;
 	u8	log_max_xrcd;
-	u8	rsvd26[42];
+	u8	rsvd25[42];
 	__be16	log_uar_page_sz;
-	u8	rsvd27[28];
+	u8	rsvd26[28];
 	u8	log_msx_atomic_size_qp;
-	u8	rsvd28[2];
+	u8	rsvd27[2];
 	u8	log_msx_atomic_size_dc;
-	u8	rsvd29[76];
+	u8	rsvd28[76];
 };


@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
 struct mlx5_eqe_page_req {
 	u8		rsvd0[2];
 	__be16		func_id;
-	u8		rsvd1[2];
-	__be16		num_pages;
-	__be32		rsvd2[5];
+	__be32		num_pages;
+	__be32		rsvd1[5];
 };

 union ev_data {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2aa258b0ced1..8888381fc150 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@ struct mlx5_caps {
 	u32	reserved_lkey;
 	u8	local_ca_ack_delay;
 	u8	log_max_mcg;
-	u16	max_qp_mcg;
+	u32	max_qp_mcg;
 	int	min_page_sz;
 };

@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s16 npages);
+				 s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
 const char *mlx5_command_str(int command);
 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa16c01..faf4b7c1ad12 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@ struct mm_struct {
 				       unsigned long pgoff, unsigned long flags);
 #endif
 	unsigned long mmap_base;		/* base of mmap area */
+	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
 	unsigned long task_size;		/* size of task vm space */
 	unsigned long highest_vm_end;		/* highest vma end address */
 	pgd_t * pgd;
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 10e5947491c7..b4ec59d159ac 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -14,6 +14,10 @@ struct fs_struct;
  * A structure to contain pointers to all per-process
  * namespaces - fs (mount), uts, network, sysvipc, etc.
  *
+ * The pid namespace is an exception -- it's accessed using
+ * task_active_pid_ns.  The pid namespace here is the
+ * namespace that children will use.
+ *
  * 'count' is the number of tasks holding a reference.
  * The count for each namespace, then, will be the number
  * of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
 	struct uts_namespace *uts_ns;
 	struct ipc_namespace *ipc_ns;
 	struct mnt_namespace *mnt_ns;
-	struct pid_namespace *pid_ns;
+	struct pid_namespace *pid_ns_for_children;
 	struct net	     *net_ns;
 };
 extern struct nsproxy init_nsproxy;
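The added comment spells out the asymmetry behind the pid_ns_for_children rename: a task's own pid namespace is reached through task_active_pid_ns(), while nsproxy only records the namespace that future children will be created in; the two can differ after unshare(CLONE_NEWPID) or setns() on a pid namespace, which only take effect at the next fork. A small illustrative sketch (the helper name is invented, and the caller is assumed to follow the usual rules for dereferencing another task's nsproxy):

#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

/* Hypothetical helper: true if @tsk would place new children into a pid
 * namespace other than the one @tsk itself lives in. */
static bool children_enter_other_pidns(struct task_struct *tsk)
{
	return tsk->nsproxy->pid_ns_for_children != task_active_pid_ns(tsk);
}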
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 580a5320cc96..6d91fcb4c5cb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -16,6 +16,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/err.h>
+#include <linux/bug.h>

 struct module;
 struct device;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e9995eb5985c..078066daffd4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -314,7 +314,6 @@ struct nsproxy;
 struct user_namespace;

 #ifdef CONFIG_MMU
-extern unsigned long mmap_legacy_base(void);
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7f..a67fc1635592 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do {									\
 	__ret;								\
 })

+#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
+						     lock, ret)		\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		spin_unlock_irq(&lock);					\
+		ret = schedule_timeout(ret);				\
+		spin_lock_irq(&lock);					\
+		if (!ret)						\
+			break;						\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ *	The condition is checked under the lock.  This is expected
+ *	to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ *	  and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.  The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock.  The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
+						  timeout)		\
+({									\
+	int __ret = timeout;						\
+									\
+	if (!(condition))						\
+		__wait_event_interruptible_lock_irq_timeout(		\
+					wq, condition, lock, __ret);	\
+	__ret;								\
+})
+
 /*
  * These are the old interfaces to sleep waiting for an event.
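To make the calling convention of the new wait_event_interruptible_lock_irq_timeout() concrete, here is an illustrative sketch of a driver-style user. The mydev structure and both helpers are invented; the point is only that dev->lock is held (with interrupts disabled) on entry, dropped while the macro sleeps, and held again on return:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical device context. */
struct mydev {
	spinlock_t		lock;	/* guards 'done' */
	bool			done;
	wait_queue_head_t	wq;
};

/* Wait up to one second for the completion flag, with dev->lock held
 * across the condition check.  Returns 0 on success, -ETIMEDOUT if the
 * timeout elapsed and -ERESTARTSYS if a signal arrived. */
static int mydev_wait_done(struct mydev *dev)
{
	int ret;

	spin_lock_irq(&dev->lock);
	ret = wait_event_interruptible_lock_irq_timeout(dev->wq, dev->done,
							dev->lock, HZ);
	spin_unlock_irq(&dev->lock);

	if (ret == 0)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;
}

/* Wake-up side: update the flag under the same lock, then wake the waiter. */
static void mydev_complete(struct mydev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->done = true;
	spin_unlock_irqrestore(&dev->lock, flags);
	wake_up(&dev->wq);
}

The waker changes the flag under the same lock before calling wake_up(), matching the "wake_up() has to be called after changing any variable" rule in the kernel-doc above.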